http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
deleted file mode 100644
index db9d374..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ /dev/null
@@ -1,351 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Strings;
-import com.google.common.net.HostAndPort;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.net.DNS;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
-import java.nio.file.Paths;
-import java.util.Collection;
-import java.util.HashSet;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys
-    .DFS_DATANODE_DNS_INTERFACE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys
-    .DFS_DATANODE_DNS_NAMESERVER_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT;
-
-/**
- * HDDS-specific stateless utility functions.
- */
-public final class HddsUtils {
-
-
-  private static final Logger LOG = LoggerFactory.getLogger(HddsUtils.class);
-
-  /**
-   * The service ID of the solitary Ozone SCM service.
-   */
-  public static final String OZONE_SCM_SERVICE_ID = "OzoneScmService";
-  public static final String OZONE_SCM_SERVICE_INSTANCE_ID =
-      "OzoneScmServiceInstance";
-
-  private static final int NO_PORT = -1;
-
-  private HddsUtils() {
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to the SCM.
-   *
-   * @param conf Configuration
-   * @return Target InetSocketAddress for the SCM client endpoint.
-   */
-  public static InetSocketAddress getScmAddressForClients(Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-
-    if (!host.isPresent()) {
-      throw new IllegalArgumentException(
-          ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY + " must be defined. See"
-              + " https://wiki.apache.org/hadoop/Ozone#Configuration for "
-              + "details"
-              + " on configuring Ozone.");
-    }
-
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(host.get() + ":" + port
-        .or(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT));
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to the SCM for block service. If
-   * {@link ScmConfigKeys#OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY} is not defined
-   * then {@link ScmConfigKeys#OZONE_SCM_CLIENT_ADDRESS_KEY} is used.
-   *
-   * @param conf Configuration
-   * @return Target InetSocketAddress for the SCM block client endpoint.
-   * @throws IllegalArgumentException if configuration is not defined.
-   */
-  public static InetSocketAddress getScmAddressForBlockClients(
-      Configuration conf) {
-    Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY);
-
-    if (!host.isPresent()) {
-      host = getHostNameFromConfigKeys(conf,
-          ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-      if (!host.isPresent()) {
-        throw new IllegalArgumentException(
-            ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY
-                + " must be defined. See"
-                + " https://wiki.apache.org/hadoop/Ozone#Configuration";
-                + " for details on configuring Ozone.");
-      }
-    }
-
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(host.get() + ":" + port
-        .or(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT));
-  }
-
-  /**
-   * Retrieve the hostname, trying the supplied config keys in order.
-   * Each config value may be absent, or if present in the format
-   * host:port (the :port part is optional).
-   *
-   * @param conf  - Conf
-   * @param keys a list of configuration key names.
-   *
-   * @return first hostname component found from the given keys, or absent.
-   * @throws IllegalArgumentException if any values are not in the 'host'
-   *             or host:port format.
-   */
-  public static Optional<String> getHostNameFromConfigKeys(Configuration conf,
-      String... keys) {
-    for (final String key : keys) {
-      final String value = conf.getTrimmed(key);
-      final Optional<String> hostName = getHostName(value);
-      if (hostName.isPresent()) {
-        return hostName;
-      }
-    }
-    return Optional.absent();
-  }
-
-  /**
-   * Gets the hostname, or indicates that it is absent.
-   * @param value host or host:port
-   * @return hostname
-   */
-  public static Optional<String> getHostName(String value) {
-    if ((value == null) || value.isEmpty()) {
-      return Optional.absent();
-    }
-    return Optional.of(HostAndPort.fromString(value).getHostText());
-  }
-
-  /**
-   * Gets the port if there is one, or absent otherwise.
-   * @param value  String in host:port format.
-   * @return Port
-   */
-  public static Optional<Integer> getHostPort(String value) {
-    if ((value == null) || value.isEmpty()) {
-      return Optional.absent();
-    }
-    int port = HostAndPort.fromString(value).getPortOrDefault(NO_PORT);
-    if (port == NO_PORT) {
-      return Optional.absent();
-    } else {
-      return Optional.of(port);
-    }
-  }
-
-  /**
-   * Retrieve the port number, trying the supplied config keys in order.
-   * Each config value may be absent, or if present in the format
-   * host:port (the :port part is optional).
-   *
-   * @param conf Conf
-   * @param keys a list of configuration key names.
-   *
-   * @return first port number component found from the given keys, or absent.
-   * @throws IllegalArgumentException if any values are not in the 'host'
-   *             or host:port format.
-   */
-  public static Optional<Integer> getPortNumberFromConfigKeys(
-      Configuration conf, String... keys) {
-    for (final String key : keys) {
-      final String value = conf.getTrimmed(key);
-      final Optional<Integer> hostPort = getHostPort(value);
-      if (hostPort.isPresent()) {
-        return hostPort;
-      }
-    }
-    return Optional.absent();
-  }
-
-  /**
-   * Retrieve the socket addresses of all storage container managers.
-   *
-   * @param conf Configuration
-   * @return A collection of SCM addresses
-   * @throws IllegalArgumentException If the configuration is invalid
-   */
-  public static Collection<InetSocketAddress> getSCMAddresses(
-      Configuration conf) throws IllegalArgumentException {
-    Collection<InetSocketAddress> addresses =
-        new HashSet<InetSocketAddress>();
-    Collection<String> names =
-        conf.getTrimmedStringCollection(ScmConfigKeys.OZONE_SCM_NAMES);
-    if (names == null || names.isEmpty()) {
-      throw new IllegalArgumentException(ScmConfigKeys.OZONE_SCM_NAMES
-          + " need to be a set of valid DNS names or IP addresses."
-          + " Null or empty address list found.");
-    }
-
-    final com.google.common.base.Optional<Integer>
-        defaultPort =  com.google.common.base.Optional.of(ScmConfigKeys
-        .OZONE_SCM_DEFAULT_PORT);
-    for (String address : names) {
-      com.google.common.base.Optional<String> hostname =
-          getHostName(address);
-      if (!hostname.isPresent()) {
-        throw new IllegalArgumentException("Invalid hostname for SCM: "
-            + hostname);
-      }
-      com.google.common.base.Optional<Integer> port =
-          getHostPort(address);
-      InetSocketAddress addr = NetUtils.createSocketAddr(hostname.get(),
-          port.or(defaultPort.get()));
-      addresses.add(addr);
-    }
-    return addresses;
-  }
-
-  public static boolean isHddsEnabled(Configuration conf) {
-    String securityEnabled =
-        conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
-            "simple");
-    boolean securityAuthorizationEnabled = conf.getBoolean(
-        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false);
-
-    if (securityEnabled.equals("kerberos") || securityAuthorizationEnabled) {
-      LOG.error("Ozone is not supported in a security enabled cluster. ");
-      return false;
-    } else {
-      return conf.getBoolean(OZONE_ENABLED, OZONE_ENABLED_DEFAULT);
-    }
-  }
-
-
-  /**
-   * Get the path for datanode id file.
-   *
-   * @param conf - Configuration
-   * @return the path of datanode id as string
-   */
-  public static String getDatanodeIdFilePath(Configuration conf) {
-    String dataNodeIDPath = conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID);
-    if (dataNodeIDPath == null) {
-      String metaPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
-      if (Strings.isNullOrEmpty(metaPath)) {
-        // This means the metadata directory was not found; in theory this
-        // should not happen, because startup should have failed earlier.
-        throw new IllegalArgumentException("Unable to locate meta data " +
-            "directory when getting datanode id path");
-      }
-      dataNodeIDPath = Paths.get(metaPath,
-          ScmConfigKeys.OZONE_SCM_DATANODE_ID_PATH_DEFAULT).toString();
-    }
-    return dataNodeIDPath;
-  }
-
-  /**
-   * Returns the hostname for this datanode. If the hostname is not
-   * explicitly configured in the given config, then it is determined
-   * via the DNS class.
-   *
-   * @param conf Configuration
-   *
-   * @return the hostname (NB: may not be a FQDN)
-   * @throws UnknownHostException if the dfs.datanode.dns.interface
-   *    option is used and the hostname can not be determined
-   */
-  public static String getHostName(Configuration conf)
-      throws UnknownHostException {
-    String name = conf.get(DFS_DATANODE_HOST_NAME_KEY);
-    if (name == null) {
-      String dnsInterface = conf.get(
-          CommonConfigurationKeys.HADOOP_SECURITY_DNS_INTERFACE_KEY);
-      String nameServer = conf.get(
-          CommonConfigurationKeys.HADOOP_SECURITY_DNS_NAMESERVER_KEY);
-      boolean fallbackToHosts = false;
-
-      if (dnsInterface == null) {
-        // Try the legacy configuration keys.
-        dnsInterface = conf.get(DFS_DATANODE_DNS_INTERFACE_KEY);
-        nameServer = conf.get(DFS_DATANODE_DNS_NAMESERVER_KEY);
-      } else {
-        // If HADOOP_SECURITY_DNS_* is set then also attempt hosts file
-        // resolution if DNS fails. We will not use hosts file resolution
-        // by default to avoid breaking existing clusters.
-        fallbackToHosts = true;
-      }
-
-      name = DNS.getDefaultHost(dnsInterface, nameServer, fallbackToHosts);
-    }
-    return name;
-  }
-
-  /**
-   * Checks whether the container command is read-only.
-   * @param proto ContainerCommand Request proto
-   * @return True if it is read-only, false otherwise.
-   */
-  public static boolean isReadOnly(
-      ContainerProtos.ContainerCommandRequestProto proto) {
-    switch (proto.getCmdType()) {
-    case ReadContainer:
-    case ReadChunk:
-    case ListBlock:
-    case GetBlock:
-    case GetSmallFile:
-    case ListContainer:
-    case ListChunk:
-    case GetCommittedBlockLength:
-      return true;
-    case CloseContainer:
-    case WriteChunk:
-    case UpdateContainer:
-    case CompactChunk:
-    case CreateContainer:
-    case DeleteChunk:
-    case DeleteContainer:
-    case DeleteBlock:
-    case PutBlock:
-    case PutSmallFile:
-    default:
-      return false;
-    }
-  }
-
-}
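
For context, a minimal sketch of how callers resolved the SCM client endpoint
through this class before its removal (assuming an ozone-site.xml on the
classpath that sets ozone.scm.client.address, the value of
ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY):

import java.net.InetSocketAddress;

import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class ScmAddressExample {
  public static void main(String[] args) {
    // Picks up ozone-default.xml / ozone-site.xml as default resources.
    OzoneConfiguration conf = new OzoneConfiguration();
    // Falls back to OZONE_SCM_CLIENT_PORT_DEFAULT when no port is given;
    // throws IllegalArgumentException if the host key is undefined.
    InetSocketAddress scm = HddsUtils.getScmAddressForClients(conf);
    System.out.println("SCM client endpoint: " + scm);
  }
}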

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
deleted file mode 100644
index e0c8150..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-import com.google.common.annotations.VisibleForTesting;
-import picocli.CommandLine;
-import picocli.CommandLine.ExecutionException;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.RunLast;
-
-/**
- * This is a generic parent class for all the ozone-related CLI tools.
- */
-public class GenericCli implements Callable<Void>, GenericParentCommand {
-
-  @Option(names = {"--verbose"},
-      description = "More verbose output. Show the stack trace of the errors.")
-  private boolean verbose;
-
-  @Option(names = {"-D", "--set"})
-  private Map<String, String> configurationOverrides = new HashMap<>();
-
-  private final CommandLine cmd;
-
-  public GenericCli() {
-    cmd = new CommandLine(this);
-  }
-
-  public void run(String[] argv) {
-    try {
-      execute(argv);
-    } catch (ExecutionException ex) {
-      printError(ex.getCause());
-      System.exit(-1);
-    }
-  }
-
-  @VisibleForTesting
-  public void execute(String[] argv) {
-    cmd.parseWithHandler(new RunLast(), argv);
-  }
-
-  private void printError(Throwable error) {
-    if (verbose) {
-      error.printStackTrace(System.err);
-    } else {
-      System.err.println(error.getMessage().split("\n")[0]);
-    }
-    if(error instanceof MissingSubcommandException){
-      System.err.println(((MissingSubcommandException) error).getUsage());
-    }
-  }
-
-  @Override
-  public Void call() throws Exception {
-    throw new MissingSubcommandException(cmd.getUsageMessage());
-  }
-
-  public OzoneConfiguration createOzoneConfiguration() {
-    OzoneConfiguration ozoneConf = new OzoneConfiguration();
-    if (configurationOverrides != null) {
-      for (Entry<String, String> entry : configurationOverrides.entrySet()) {
-        ozoneConf
-            .set(entry.getKey(), entry.getValue());
-      }
-    }
-    return ozoneConf;
-  }
-
-  @VisibleForTesting
-  public picocli.CommandLine getCmd() {
-    return cmd;
-  }
-
-  @Override
-  public boolean isVerbose() {
-    return verbose;
-  }
-}
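
As a usage sketch, a hypothetical tool built on GenericCli (the class and
command names here are illustrative): run() parses arguments via picocli,
createOzoneConfiguration() applies any -D/--set overrides, and failures are
reported through printError().

import org.apache.hadoop.hdds.cli.GenericCli;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

import picocli.CommandLine.Command;

@Command(name = "demo-tool",
    description = "Hypothetical CLI built on GenericCli")
public class DemoTool extends GenericCli {

  @Override
  public Void call() throws Exception {
    // The configuration reflects any -D key=value overrides from the CLI.
    OzoneConfiguration conf = createOzoneConfiguration();
    System.out.println("ozone.enabled = " + conf.get("ozone.enabled"));
    return null;
  }

  public static void main(String[] args) {
    new DemoTool().run(args);
  }
}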

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
deleted file mode 100644
index a1d2171..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-/**
- * Interface to access the higher level parameters.
- */
-public interface GenericParentCommand {
-
-  boolean isVerbose();
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java
deleted file mode 100644
index 7110839..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-import org.apache.hadoop.utils.HddsVersionInfo;
-
-import picocli.CommandLine.IVersionProvider;
-
-/**
- * Version provider for the CLI interface.
- */
-public class HddsVersionProvider implements IVersionProvider {
-  @Override
-  public String[] getVersion() throws Exception {
-    String[] result = new String[] {
-        HddsVersionInfo.getBuildVersion()
-    };
-    return result;
-  }
-}
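
Picocli invokes this provider when version help is requested; a small sketch
(hypothetical command name; mixinStandardHelpOptions supplies --version, and
parseWithHandler mirrors the call GenericCli itself makes):

import org.apache.hadoop.hdds.cli.HddsVersionProvider;

import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.RunLast;

@Command(name = "version-demo",
    versionProvider = HddsVersionProvider.class,
    mixinStandardHelpOptions = true)
public class VersionDemo implements Runnable {

  @Override
  public void run() {
  }

  public static void main(String[] args) {
    // "--version" makes picocli call HddsVersionProvider.getVersion()
    // and print the HDDS build version string.
    new CommandLine(new VersionDemo())
        .parseWithHandler(new RunLast(), new String[] {"--version"});
  }
}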

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java
deleted file mode 100644
index 9f0c494..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-/**
- * Exception to throw if a subcommand is required but not selected.
- */
-public class MissingSubcommandException extends RuntimeException {
-
-  private String usage;
-
-  public MissingSubcommandException(String usage) {
-    super("Incomplete command");
-    this.usage = usage;
-  }
-
-  public String getUsage() {
-    return usage;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java
deleted file mode 100644
index 8dcc1d1..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Generic helper classes to instantiate picocli-based CLI tools.
- */
-package org.apache.hadoop.hdds.cli;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
deleted file mode 100644
index 8149740..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.client;
-
-import org.apache.commons.lang3.builder.ToStringBuilder;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.util.Objects;
-
-/**
- * BlockID of ozone (containerID + localID).
- */
-public class BlockID {
-  private long containerID;
-  private long localID;
-
-  public BlockID(long containerID, long localID) {
-    this.containerID = containerID;
-    this.localID = localID;
-  }
-
-  public long getContainerID() {
-    return containerID;
-  }
-
-  public long getLocalID() {
-    return localID;
-  }
-
-  @Override
-  public String toString() {
-    return new ToStringBuilder(this).
-        append("containerID", containerID).
-        append("localID", localID).
-        toString();
-  }
-
-  public HddsProtos.BlockID getProtobuf() {
-    return HddsProtos.BlockID.newBuilder().
-        setContainerID(containerID).setLocalID(localID).build();
-  }
-
-  public static BlockID getFromProtobuf(HddsProtos.BlockID blockID) {
-    return new BlockID(blockID.getContainerID(),
-        blockID.getLocalID());
-  }
-
-  public ContainerProtos.DatanodeBlockID getDatanodeBlockIDProtobuf() {
-    return ContainerProtos.DatanodeBlockID.newBuilder().
-        setContainerID(containerID).setLocalID(localID).build();
-  }
-
-  public static BlockID getFromProtobuf(
-      ContainerProtos.DatanodeBlockID blockID) {
-    return new BlockID(blockID.getContainerID(),
-        blockID.getLocalID());
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    BlockID blockID = (BlockID) o;
-    return containerID == blockID.containerID && localID == blockID.localID;
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(containerID, localID);
-  }
-}
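
A round-trip sketch through the conversions defined above:

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

public class BlockIdRoundTrip {
  public static void main(String[] args) {
    BlockID original = new BlockID(42L, 7L);
    // To the shared protobuf representation...
    HddsProtos.BlockID proto = original.getProtobuf();
    // ...and back; equals() compares containerID and localID.
    BlockID copy = BlockID.getFromProtobuf(proto);
    System.out.println(original.equals(copy)); // true
  }
}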

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
deleted file mode 100644
index 59708a9..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.client;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-
-
-/**
- * Represents an OzoneQuota object that can be applied to
- * a storage volume.
- */
-public class OzoneQuota {
-
-  public static final String OZONE_QUOTA_BYTES = "BYTES";
-  public static final String OZONE_QUOTA_MB = "MB";
-  public static final String OZONE_QUOTA_GB = "GB";
-  public static final String OZONE_QUOTA_TB = "TB";
-
-  private Units unit;
-  private long size;
-
-  /** Quota Units.*/
-  public enum Units {UNDEFINED, BYTES, KB, MB, GB, TB}
-
-  /**
-   * Returns size.
-   *
-   * @return long
-   */
-  public long getSize() {
-    return size;
-  }
-
-  /**
-   * Returns Units.
-   *
-   * @return Unit in MB, GB or TB
-   */
-  public Units getUnit() {
-    return unit;
-  }
-
-  /**
-   * Constructs a default Quota object.
-   */
-  public OzoneQuota() {
-    this.size = 0;
-    this.unit = Units.UNDEFINED;
-  }
-
-  /**
-   * Constructor for Ozone Quota.
-   *
-   * @param size Long Size
-   * @param unit MB, GB  or TB
-   */
-  public OzoneQuota(long size, Units unit) {
-    this.size = size;
-    this.unit = unit;
-  }
-
-  /**
-   * Formats a quota as a string.
-   *
-   * @param quota the quota to format
-   * @return string representation of quota
-   */
-  public static String formatQuota(OzoneQuota quota) {
-    return String.valueOf(quota.size) + quota.unit;
-  }
-
-  /**
-   * Parses a user-provided string and returns the
-   * Quota Object.
-   *
-   * @param quotaString Quota String
-   *
-   * @return OzoneQuota object
-   *
-   * @throws IllegalArgumentException
-   */
-  public static OzoneQuota parseQuota(String quotaString)
-      throws IllegalArgumentException {
-
-    if ((quotaString == null) || (quotaString.isEmpty())) {
-      throw new IllegalArgumentException(
-          "Quota string cannot be null or empty.");
-    }
-
-    String uppercase = quotaString.toUpperCase().replaceAll("\\s+", "");
-    String size = "";
-    int nSize;
-    Units currUnit = Units.MB;
-    Boolean found = false;
-    if (uppercase.endsWith(OZONE_QUOTA_MB)) {
-      size = uppercase
-          .substring(0, uppercase.length() - OZONE_QUOTA_MB.length());
-      currUnit = Units.MB;
-      found = true;
-    }
-
-    if (uppercase.endsWith(OZONE_QUOTA_GB)) {
-      size = uppercase
-          .substring(0, uppercase.length() - OZONE_QUOTA_GB.length());
-      currUnit = Units.GB;
-      found = true;
-    }
-
-    if (uppercase.endsWith(OZONE_QUOTA_TB)) {
-      size = uppercase
-          .substring(0, uppercase.length() - OZONE_QUOTA_TB.length());
-      currUnit = Units.TB;
-      found = true;
-    }
-
-    if (uppercase.endsWith(OZONE_QUOTA_BYTES)) {
-      size = uppercase
-          .substring(0, uppercase.length() - OZONE_QUOTA_BYTES.length());
-      currUnit = Units.BYTES;
-      found = true;
-    }
-
-    if (!found) {
-      throw new IllegalArgumentException(
-          "Quota unit not recognized. Supported values are BYTES, MB, GB and " 
+
-              "TB.");
-    }
-
-    nSize = Integer.parseInt(size);
-    if (nSize < 0) {
-      throw new IllegalArgumentException("Quota cannot be negative.");
-    }
-
-    return new OzoneQuota(nSize, currUnit);
-  }
-
-
-  /**
-   * Returns the size in bytes, or -1 if there is no quota.
-   */
-  public long sizeInBytes() {
-    switch (this.unit) {
-    case BYTES:
-      return this.getSize();
-    case MB:
-      return this.getSize() * OzoneConsts.MB;
-    case GB:
-      return this.getSize() * OzoneConsts.GB;
-    case TB:
-      return this.getSize() * OzoneConsts.TB;
-    case UNDEFINED:
-    default:
-      return -1;
-    }
-  }
-
-  /**
-   * Returns OzoneQuota corresponding to size in bytes.
-   *
-   * @param sizeInBytes size in bytes to be converted
-   *
-   * @return OzoneQuota object
-   */
-  public static OzoneQuota getOzoneQuota(long sizeInBytes) {
-    long size;
-    Units unit;
-    if (sizeInBytes % OzoneConsts.TB == 0) {
-      size = sizeInBytes / OzoneConsts.TB;
-      unit = Units.TB;
-    } else if (sizeInBytes % OzoneConsts.GB == 0) {
-      size = sizeInBytes / OzoneConsts.GB;
-      unit = Units.GB;
-    } else if (sizeInBytes % OzoneConsts.MB == 0) {
-      size = sizeInBytes / OzoneConsts.MB;
-      unit = Units.MB;
-    } else {
-      size = sizeInBytes;
-      unit = Units.BYTES;
-    }
-    return new OzoneQuota((int)size, unit);
-  }
-
-  @Override
-  public String toString() {
-    return size + " " + unit;
-  }
-}
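
A short sketch of the parse/format cycle above; parseQuota() upper-cases the
input and strips whitespace, so "10 GB" and "10gb" parse identically:

import org.apache.hadoop.hdds.client.OzoneQuota;

public class QuotaDemo {
  public static void main(String[] args) {
    OzoneQuota quota = OzoneQuota.parseQuota("10 GB");
    System.out.println(quota.getSize());     // 10
    System.out.println(quota.getUnit());     // GB
    System.out.println(quota.sizeInBytes()); // 10 * OzoneConsts.GB
    // getOzoneQuota() picks the largest unit that divides the size evenly.
    System.out.println(OzoneQuota.getOzoneQuota(quota.sizeInBytes()));
  }
}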

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
deleted file mode 100644
index 0215964..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.client;
-
-/**
- * The replication factor to be used while writing a key into ozone.
- */
-public enum ReplicationFactor {
-  ONE(1),
-  THREE(3);
-
-  /**
-   * Integer representation of replication.
-   */
-  private int value;
-
-  /**
-   * Initializes ReplicationFactor with value.
-   * @param value replication value
-   */
-  ReplicationFactor(int value) {
-    this.value = value;
-  }
-
-  /**
-   * Returns enum value corresponding to the int value.
-   * @param value replication value
-   * @return ReplicationFactor
-   */
-  public static ReplicationFactor valueOf(int value) {
-    if(value == 1) {
-      return ONE;
-    }
-    if (value == 3) {
-      return THREE;
-    }
-    throw new IllegalArgumentException("Unsupported value: " + value);
-  }
-
-  /**
-   * Returns integer representation of ReplicationFactor.
-   * @return replication value
-   */
-  public int getValue() {
-    return value;
-  }
-}
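
For illustration, the int-to-enum mapping above in use; values other than 1
and 3 throw IllegalArgumentException:

import org.apache.hadoop.hdds.client.ReplicationFactor;

public class FactorDemo {
  public static void main(String[] args) {
    // The int overload defined above, not the built-in valueOf(String).
    ReplicationFactor factor = ReplicationFactor.valueOf(3);
    System.out.println(factor);            // THREE
    System.out.println(factor.getValue()); // 3
  }
}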

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java
deleted file mode 100644
index 259a1a2..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.client;
-
-/**
- * The replication type to be used while writing a key into ozone.
- */
-public enum ReplicationType {
-    RATIS,
-    STAND_ALONE,
-    CHAINED
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java
deleted file mode 100644
index e81f134..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.client;
-
-/**
- * Base property types for HDDS containers and replications.
- */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
deleted file mode 100644
index 677b752..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
-
-import com.google.gson.Gson;
-import java.io.IOException;
-import java.io.Writer;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.core.HttpHeaders;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpServer2;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TAGS_SYSTEM_KEY;
-
-/**
- * A servlet to print out the running configuration data.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
-@InterfaceStability.Unstable
-public class HddsConfServlet extends HttpServlet {
-
-  private static final long serialVersionUID = 1L;
-
-  protected static final String FORMAT_JSON = "json";
-  protected static final String FORMAT_XML = "xml";
-  private static final String COMMAND = "cmd";
-  private static final OzoneConfiguration OZONE_CONFIG =
-      new OzoneConfiguration();
-  private static final transient Logger LOG =
-      LoggerFactory.getLogger(HddsConfServlet.class);
-
-
-  /**
-   * Return the Configuration of the daemon hosting this servlet.
-   * This is populated when the HttpServer starts.
-   */
-  private Configuration getConfFromContext() {
-    Configuration conf = (Configuration) getServletContext().getAttribute(
-        HttpServer2.CONF_CONTEXT_ATTRIBUTE);
-    assert conf != null;
-    return conf;
-  }
-
-  @Override
-  public void doGet(HttpServletRequest request, HttpServletResponse response)
-      throws ServletException, IOException {
-
-    if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
-        request, response)) {
-      return;
-    }
-
-    String format = parseAcceptHeader(request);
-    if (FORMAT_XML.equals(format)) {
-      response.setContentType("text/xml; charset=utf-8");
-    } else if (FORMAT_JSON.equals(format)) {
-      response.setContentType("application/json; charset=utf-8");
-    }
-
-    String name = request.getParameter("name");
-    Writer out = response.getWriter();
-    String cmd = request.getParameter(COMMAND);
-
-    processCommand(cmd, format, request, response, out, name);
-    out.close();
-  }
-
-  private void processCommand(String cmd, String format,
-      HttpServletRequest request, HttpServletResponse response, Writer out,
-      String name)
-      throws IOException {
-    try {
-      if (cmd == null) {
-        if (FORMAT_XML.equals(format)) {
-          response.setContentType("text/xml; charset=utf-8");
-        } else if (FORMAT_JSON.equals(format)) {
-          response.setContentType("application/json; charset=utf-8");
-        }
-
-        writeResponse(getConfFromContext(), out, format, name);
-      } else {
-        processConfigTagRequest(request, out);
-      }
-    } catch (BadFormatException bfe) {
-      response.sendError(HttpServletResponse.SC_BAD_REQUEST, bfe.getMessage());
-    } catch (IllegalArgumentException iae) {
-      response.sendError(HttpServletResponse.SC_NOT_FOUND, iae.getMessage());
-    }
-  }
-
-  @VisibleForTesting
-  static String parseAcceptHeader(HttpServletRequest request) {
-    String format = request.getHeader(HttpHeaders.ACCEPT);
-    return format != null && format.contains(FORMAT_JSON) ?
-        FORMAT_JSON : FORMAT_XML;
-  }
-
-  /**
-   * Guts of the servlet - extracted for easy testing.
-   */
-  static void writeResponse(Configuration conf,
-      Writer out, String format, String propertyName)
-      throws IOException, IllegalArgumentException, BadFormatException {
-    if (FORMAT_JSON.equals(format)) {
-      Configuration.dumpConfiguration(conf, propertyName, out);
-    } else if (FORMAT_XML.equals(format)) {
-      conf.writeXml(propertyName, out);
-    } else {
-      throw new BadFormatException("Bad format: " + format);
-    }
-  }
-
-  public static class BadFormatException extends Exception {
-
-    private static final long serialVersionUID = 1L;
-
-    public BadFormatException(String msg) {
-      super(msg);
-    }
-  }
-
-  private void processConfigTagRequest(HttpServletRequest request,
-      Writer out) throws IOException {
-    String cmd = request.getParameter(COMMAND);
-    Gson gson = new Gson();
-    Configuration config = getOzoneConfig();
-
-    switch (cmd) {
-    case "getOzoneTags":
-      out.write(gson.toJson(config.get(OZONE_TAGS_SYSTEM_KEY)
-          .split(",")));
-      break;
-    case "getPropertyByTag":
-      String tags = request.getParameter("tags");
-      Map<String, Properties> propMap = new HashMap<>();
-
-      for (String tag : tags.split(",")) {
-        if (config.isPropertyTag(tag)) {
-          Properties properties = config.getAllPropertiesByTag(tag);
-          propMap.put(tag, properties);
-        } else {
-          LOG.debug("Not a valid tag" + tag);
-        }
-      }
-      out.write(gson.toJsonTree(propMap).toString());
-      break;
-    default:
-      throw new IllegalArgumentException(cmd + " is not a valid command.");
-    }
-
-  }
-
-  private static Configuration getOzoneConfig() {
-    return OZONE_CONFIG;
-  }
-}
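
A client-side sketch of querying this servlet, assuming it is mounted at /conf
on the daemon's HTTP server (the mount point and the localhost:9876 endpoint
are assumptions, not defined in this file); the Accept header drives
parseAcceptHeader():

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class ConfFetch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:9876/conf?cmd=getOzoneTags");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // An Accept header containing "json" selects the JSON response format.
    conn.setRequestProperty("Accept", "application/json");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}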

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
deleted file mode 100644
index 36d953c..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.conf;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Unmarshaller;
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Configuration for ozone.
- */
-@InterfaceAudience.Private
-public class OzoneConfiguration extends Configuration {
-  static {
-    activate();
-  }
-
-  public OzoneConfiguration() {
-    OzoneConfiguration.activate();
-  }
-
-  public OzoneConfiguration(Configuration conf) {
-    super(conf);
-  }
-
-  public List<Property> readPropertyFromXml(URL url) throws JAXBException {
-    JAXBContext context = JAXBContext.newInstance(XMLConfiguration.class);
-    Unmarshaller um = context.createUnmarshaller();
-
-    XMLConfiguration config = (XMLConfiguration) um.unmarshal(url);
-    return config.getProperties();
-  }
-
-  /**
-   * Class to marshal/unmarshal configuration from XML files.
-   */
-  @XmlAccessorType(XmlAccessType.FIELD)
-  @XmlRootElement(name = "configuration")
-  public static class XMLConfiguration {
-
-    @XmlElement(name = "property", type = Property.class)
-    private List<Property> properties = new ArrayList<>();
-
-    public XMLConfiguration() {
-    }
-
-    public XMLConfiguration(List<Property> properties) {
-      this.properties = properties;
-    }
-
-    public List<Property> getProperties() {
-      return properties;
-    }
-
-    public void setProperties(List<Property> properties) {
-      this.properties = properties;
-    }
-  }
-
-  /**
-   * Class to marshal/unmarshal configuration properties from XML files.
-   */
-  @XmlAccessorType(XmlAccessType.FIELD)
-  @XmlRootElement(name = "property")
-  public static class Property implements Comparable<Property> {
-
-    private String name;
-    private String value;
-    private String tag;
-    private String description;
-
-    public String getName() {
-      return name;
-    }
-
-    public void setName(String name) {
-      this.name = name;
-    }
-
-    public String getValue() {
-      return value;
-    }
-
-    public void setValue(String value) {
-      this.value = value;
-    }
-
-    public String getTag() {
-      return tag;
-    }
-
-    public void setTag(String tag) {
-      this.tag = tag;
-    }
-
-    public String getDescription() {
-      return description;
-    }
-
-    public void setDescription(String description) {
-      this.description = description;
-    }
-
-    @Override
-    public int compareTo(Property o) {
-      if (this == o) {
-        return 0;
-      }
-      return this.getName().compareTo(o.getName());
-    }
-
-    @Override
-    public String toString() {
-      return this.getName() + " " + this.getValue() + " " + this.getTag();
-    }
-
-    @Override
-    public int hashCode(){
-      return this.getName().hashCode();
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      return (obj instanceof Property) && (((Property) obj).getName())
-          .equals(this.getName());
-    }
-  }
-
-  public static void activate() {
-    // adds the default resources
-    Configuration.addDefaultResource("hdfs-default.xml");
-    Configuration.addDefaultResource("hdfs-site.xml");
-    Configuration.addDefaultResource("ozone-default.xml");
-    Configuration.addDefaultResource("ozone-site.xml");
-  }
-}
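
A minimal sketch of the resource loading above: constructing OzoneConfiguration
triggers activate(), which registers the Ozone and HDFS XML files as default
resources, so ordinary get() calls see Ozone keys:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class ConfDemo {
  public static void main(String[] args) {
    // ozone-default.xml / ozone-site.xml (and hdfs-*.xml) are now
    // default resources for every Configuration instance.
    OzoneConfiguration conf = new OzoneConfiguration();
    System.out.println(conf.get("ozone.enabled", "false"));
  }
}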

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
deleted file mode 100644
index 948057e..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java
deleted file mode 100644
index f8894e6..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds;
-
-/**
- * Generic HDDS specific configurator and helper classes.
- */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
deleted file mode 100644
index bae22a2..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ /dev/null
@@ -1,401 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.protocol;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-/**
- * DatanodeDetails class contains details about a DataNode, such as:
- * - UUID of the DataNode.
- * - IP and Hostname details.
- * - Port details to which the DataNode will be listening.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class DatanodeDetails implements Comparable<DatanodeDetails> {
-
-  /**
-   * DataNode's unique identifier in the cluster.
-   */
-  private final UUID uuid;
-
-  private String ipAddress;
-  private String hostName;
-  private List<Port> ports;
-
-
-  /**
-   * Constructs DatanodeDetails instance. DatanodeDetails.Builder is used
-   * for instantiating DatanodeDetails.
-   * @param uuid DataNode's UUID
-   * @param ipAddress IP Address of this DataNode
-   * @param hostName DataNode's hostname
-   * @param ports Ports used by the DataNode
-   */
-  private DatanodeDetails(String uuid, String ipAddress, String hostName,
-      List<Port> ports) {
-    this.uuid = UUID.fromString(uuid);
-    this.ipAddress = ipAddress;
-    this.hostName = hostName;
-    this.ports = ports;
-  }
-
-  protected DatanodeDetails(DatanodeDetails datanodeDetails) {
-    this.uuid = datanodeDetails.uuid;
-    this.ipAddress = datanodeDetails.ipAddress;
-    this.hostName = datanodeDetails.hostName;
-    this.ports = datanodeDetails.ports;
-  }
-
-  /**
-   * Returns the DataNode UUID.
-   *
-   * @return UUID of DataNode
-   */
-  public UUID getUuid() {
-    return uuid;
-  }
-
-  /**
-   * Returns the string representation of DataNode UUID.
-   *
-   * @return UUID of DataNode
-   */
-  public String getUuidString() {
-    return uuid.toString();
-  }
-
-  /**
-   * Sets the IP address of Datanode.
-   *
-   * @param ip IP Address
-   */
-  public void setIpAddress(String ip) {
-    this.ipAddress = ip;
-  }
-
-  /**
-   * Returns IP address of DataNode.
-   *
-   * @return IP address
-   */
-  public String getIpAddress() {
-    return ipAddress;
-  }
-
-  /**
-   * Sets the Datanode hostname.
-   *
-   * @param host hostname
-   */
-  public void setHostName(String host) {
-    this.hostName = host;
-  }
-
-  /**
-   * Returns Hostname of DataNode.
-   *
-   * @return Hostname
-   */
-  public String getHostName() {
-    return hostName;
-  }
-
-  /**
-   * Sets a DataNode Port.
-   *
-   * @param port DataNode port
-   */
-  public void setPort(Port port) {
-    // If the port is already in the list remove it first and add the
-    // new/updated port value.
-    ports.remove(port);
-    ports.add(port);
-  }
-
-  /**
-   * Returns all the Ports used by DataNode.
-   *
-   * @return DataNode Ports
-   */
-  public List<Port> getPorts() {
-    return ports;
-  }
-
-  /**
-   * Given a port name, returns the matching Port; null if no such port exists.
-   *
-   * @param name Name of the port
-   *
-   * @return Port
-   */
-  public Port getPort(Port.Name name) {
-    for (Port port : ports) {
-      if (port.getName().equals(name)) {
-        return port;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Returns a DatanodeDetails from the protocol buffers.
-   *
-   * @param datanodeDetailsProto - protoBuf Message
-   * @return DatanodeDetails
-   */
-  public static DatanodeDetails getFromProtoBuf(
-      HddsProtos.DatanodeDetailsProto datanodeDetailsProto) {
-    DatanodeDetails.Builder builder = newBuilder();
-    builder.setUuid(datanodeDetailsProto.getUuid());
-    if (datanodeDetailsProto.hasIpAddress()) {
-      builder.setIpAddress(datanodeDetailsProto.getIpAddress());
-    }
-    if (datanodeDetailsProto.hasHostName()) {
-      builder.setHostName(datanodeDetailsProto.getHostName());
-    }
-    for (HddsProtos.Port port : datanodeDetailsProto.getPortsList()) {
-      builder.addPort(newPort(
-          Port.Name.valueOf(port.getName().toUpperCase()), port.getValue()));
-    }
-    return builder.build();
-  }
-
-  /**
-   * Returns a DatanodeDetails protobuf message from a datanode ID.
-   * @return HddsProtos.DatanodeDetailsProto
-   */
-  public HddsProtos.DatanodeDetailsProto getProtoBufMessage() {
-    HddsProtos.DatanodeDetailsProto.Builder builder =
-        HddsProtos.DatanodeDetailsProto.newBuilder()
-            .setUuid(getUuidString());
-    if (ipAddress != null) {
-      builder.setIpAddress(ipAddress);
-    }
-    if (hostName != null) {
-      builder.setHostName(hostName);
-    }
-    for (Port port : ports) {
-      builder.addPorts(HddsProtos.Port.newBuilder()
-          .setName(port.getName().toString())
-          .setValue(port.getValue())
-          .build());
-    }
-    return builder.build();
-  }
-
-  @Override
-  public String toString() {
-    return uuid.toString() + "{" +
-        "ip: " +
-        ipAddress +
-        ", host: " +
-        hostName +
-        "}";
-  }
-
-  @Override
-  public int compareTo(DatanodeDetails that) {
-    return this.getUuid().compareTo(that.getUuid());
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    return obj instanceof DatanodeDetails &&
-        uuid.equals(((DatanodeDetails) obj).uuid);
-  }
-
-  @Override
-  public int hashCode() {
-    return uuid.hashCode();
-  }
-
-  /**
-   * Returns DatanodeDetails.Builder instance.
-   *
-   * @return DatanodeDetails.Builder
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Builder class for building DatanodeDetails.
-   */
-  public static final class Builder {
-    private String id;
-    private String ipAddress;
-    private String hostName;
-    private List<Port> ports;
-
-    /**
-     * Default private constructor. To create Builder instance use
-     * DatanodeDetails#newBuilder.
-     */
-    private Builder() {
-      ports = new ArrayList<>();
-    }
-
-    /**
-     * Sets the DatanodeUuid.
-     *
-     * @param uuid DatanodeUuid
-     * @return DatanodeDetails.Builder
-     */
-    public Builder setUuid(String uuid) {
-      this.id = uuid;
-      return this;
-    }
-
-    /**
-     * Sets the IP address of DataNode.
-     *
-     * @param ip address
-     * @return DatanodeDetails.Builder
-     */
-    public Builder setIpAddress(String ip) {
-      this.ipAddress = ip;
-      return this;
-    }
-
-    /**
-     * Sets the hostname of DataNode.
-     *
-     * @param host hostname
-     * @return DatanodeDetails.Builder
-     */
-    public Builder setHostName(String host) {
-      this.hostName = host;
-      return this;
-    }
-
-    /**
-     * Adds a DataNode Port.
-     *
-     * @param port DataNode port
-     *
-     * @return DatanodeDetails.Builder
-     */
-    public Builder addPort(Port port) {
-      this.ports.add(port);
-      return this;
-    }
-
-    /**
-     * Builds and returns DatanodeDetails instance.
-     *
-     * @return DatanodeDetails
-     */
-    public DatanodeDetails build() {
-      Preconditions.checkNotNull(id);
-      return new DatanodeDetails(id, ipAddress, hostName, ports);
-    }
-
-  }
-
-  /**
-   * Constructs a new Port with name and value.
-   *
-   * @param name Name of the port
-   * @param value Port number
-   *
-   * @return {@code Port} instance
-   */
-  public static Port newPort(Port.Name name, Integer value) {
-    return new Port(name, value);
-  }
-
-  /**
-   * Container to hold DataNode Port details.
-   */
-  public static final class Port {
-
-    /**
-     * Ports that are supported in DataNode.
-     */
-    public enum Name {
-      STANDALONE, RATIS, REST
-    }
-
-    private Name name;
-    private Integer value;
-
-    /**
-     * Private constructor for constructing Port object. Use
-     * DatanodeDetails#newPort to create a new Port object.
-     *
-     * @param name Name of the port
-     * @param value Port number
-     */
-    private Port(Name name, Integer value) {
-      this.name = name;
-      this.value = value;
-    }
-
-    /**
-     * Returns the name of the port.
-     *
-     * @return Port name
-     */
-    public Name getName() {
-      return name;
-    }
-
-    /**
-     * Returns the port number.
-     *
-     * @return Port number
-     */
-    public Integer getValue() {
-      return value;
-    }
-
-    @Override
-    public int hashCode() {
-      return name.hashCode();
-    }
-
-    /**
-     * Ports are considered equal if they have the same name.
-     *
-     * @param anObject
-     *          The object to compare this {@code Port} against
-     * @return {@code true} if the given object represents a {@code Port}
-     *         and has the same name, {@code false} otherwise
-     */
-    @Override
-    public boolean equals(Object anObject) {
-      if (this == anObject) {
-        return true;
-      }
-      if (anObject instanceof Port) {
-        return name.equals(((Port) anObject).name);
-      }
-      return false;
-    }
-  }
-
-}
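
For context on the class removed above, here is a minimal usage sketch of
its Builder and protobuf round trip. All values (UUID, addresses, the port
number) are illustrative placeholders, not part of this patch:

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

    import java.util.UUID;

    public class DatanodeDetailsExample {
      public static void main(String[] args) {
        // Build a DatanodeDetails; all values here are hypothetical.
        DatanodeDetails dn = DatanodeDetails.newBuilder()
            .setUuid(UUID.randomUUID().toString())
            .setIpAddress("10.0.0.5")
            .setHostName("datanode-1.example.com")
            .addPort(DatanodeDetails.newPort(
                DatanodeDetails.Port.Name.RATIS, 9858))
            .build();

        // Round-trip through the protobuf form; equality is UUID-based,
        // so the restored instance compares equal to the original.
        HddsProtos.DatanodeDetailsProto proto = dn.getProtoBufMessage();
        DatanodeDetails restored = DatanodeDetails.getFromProtoBuf(proto);
        System.out.println(dn.equals(restored)); // true

        // Ports are keyed by name; setPort would replace this entry.
        System.out.println(
            dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue());
      }
    }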

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
deleted file mode 100644
index 7dae0fc..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains HDDS protocol related classes.
- */
-package org.apache.hadoop.hdds.protocol;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
deleted file mode 100644
index 63f5916..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.ratis.shaded.proto.RaftProtos.ReplicationLevel;
-import org.apache.ratis.util.TimeDuration;
-
-import java.util.concurrent.TimeUnit;
-
-/**
- * This class contains constants for configuration keys used in SCM.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public final class ScmConfigKeys {
-
-  public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY =
-      "scm.container.client.idle.threshold";
-  public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT =
-      "10s";
-
-  public static final String SCM_CONTAINER_CLIENT_MAX_SIZE_KEY =
-      "scm.container.client.max.size";
-  public static final int SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT =
-      256;
-
-  public static final String SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS =
-      "scm.container.client.max.outstanding.requests";
-  public static final int SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS_DEFAULT
-      = 100;
-
-  public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
-      = "dfs.container.ratis.enabled";
-  public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
-      = false;
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
-      = "dfs.container.ratis.rpc.type";
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
-      = "GRPC";
-  public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY
-      = "dfs.container.ratis.num.write.chunk.threads";
-  public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT
-      = 60;
-  public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
-      = "dfs.container.ratis.replication.level";
-  public static final ReplicationLevel
-      DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
-      "dfs.container.ratis.segment.size";
-  public static final int DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
-      1 * 1024 * 1024 * 1024;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY =
-      "dfs.container.ratis.segment.preallocated.size";
-  public static final int
-      DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = 128 * 1024 * 1024;
-  public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
-      "dfs.ratis.client.request.timeout.duration";
-  public static final TimeDuration
-      DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT =
-      TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS);
-  public static final String DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY =
-      "dfs.ratis.client.request.max.retries";
-  public static final int DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT = 180;
-  public static final String DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY =
-      "dfs.ratis.client.request.retry.interval";
-  public static final TimeDuration
-      DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT =
-      TimeDuration.valueOf(100, TimeUnit.MILLISECONDS);
-  public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY =
-      "dfs.ratis.server.retry-cache.timeout.duration";
-  public static final TimeDuration
-      DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
-      TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS);
-  public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
-      "dfs.ratis.server.request.timeout.duration";
-  public static final TimeDuration
-      DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT =
-      TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS);
-  public static final String
-      DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
-      "dfs.ratis.leader.election.minimum.timeout.duration";
-  public static final TimeDuration
-      DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
-      TimeDuration.valueOf(1, TimeUnit.SECONDS);
-
-  public static final String DFS_RATIS_SERVER_FAILURE_DURATION_KEY =
-      "dfs.ratis.server.failure.duration";
-  public static final TimeDuration
-      DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT =
-      TimeDuration.valueOf(120, TimeUnit.SECONDS);
-
-  // TODO : this is copied from OzoneConsts, may need to move to a better place
-  public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size";
-  // 16 MB by default
-  public static final int OZONE_SCM_CHUNK_SIZE_DEFAULT = 16 * 1024 * 1024;
-  public static final int OZONE_SCM_CHUNK_MAX_SIZE = 32 * 1024 * 1024;
-
-  public static final String OZONE_SCM_CLIENT_PORT_KEY =
-      "ozone.scm.client.port";
-  public static final int OZONE_SCM_CLIENT_PORT_DEFAULT = 9860;
-
-  public static final String OZONE_SCM_DATANODE_PORT_KEY =
-      "ozone.scm.datanode.port";
-  public static final int OZONE_SCM_DATANODE_PORT_DEFAULT = 9861;
-
-  // OZONE_OM_PORT_DEFAULT = 9862
-  public static final String OZONE_SCM_BLOCK_CLIENT_PORT_KEY =
-      "ozone.scm.block.client.port";
-  public static final int OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT = 9863;
-
-  // Container service client
-  public static final String OZONE_SCM_CLIENT_ADDRESS_KEY =
-      "ozone.scm.client.address";
-  public static final String OZONE_SCM_CLIENT_BIND_HOST_KEY =
-      "ozone.scm.client.bind.host";
-  public static final String OZONE_SCM_CLIENT_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-
-  // Block service client
-  public static final String OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY =
-      "ozone.scm.block.client.address";
-  public static final String OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY =
-      "ozone.scm.block.client.bind.host";
-  public static final String OZONE_SCM_BLOCK_CLIENT_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-
-  public static final String OZONE_SCM_DATANODE_ADDRESS_KEY =
-      "ozone.scm.datanode.address";
-  public static final String OZONE_SCM_DATANODE_BIND_HOST_KEY =
-      "ozone.scm.datanode.bind.host";
-  public static final String OZONE_SCM_DATANODE_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-
-  public static final String OZONE_SCM_HTTP_ENABLED_KEY =
-      "ozone.scm.http.enabled";
-  public static final String OZONE_SCM_HTTP_BIND_HOST_KEY =
-      "ozone.scm.http-bind-host";
-  public static final String OZONE_SCM_HTTPS_BIND_HOST_KEY =
-      "ozone.scm.https-bind-host";
-  public static final String OZONE_SCM_HTTP_ADDRESS_KEY =
-      "ozone.scm.http-address";
-  public static final String OZONE_SCM_HTTPS_ADDRESS_KEY =
-      "ozone.scm.https-address";
-  public static final String OZONE_SCM_KEYTAB_FILE =
-      "ozone.scm.keytab.file";
-  public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
-  public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
-  public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
-
-  public static final String HDDS_REST_HTTP_ADDRESS_KEY =
-      "hdds.rest.http-address";
-  public static final String HDDS_REST_HTTP_ADDRESS_DEFAULT = "0.0.0.0:9880";
-  public static final String HDDS_DATANODE_DIR_KEY = "hdds.datanode.dir";
-  public static final String HDDS_REST_CSRF_ENABLED_KEY =
-      "hdds.rest.rest-csrf.enabled";
-  public static final boolean HDDS_REST_CSRF_ENABLED_DEFAULT = false;
-  public static final String HDDS_REST_NETTY_HIGH_WATERMARK =
-      "hdds.rest.netty.high.watermark";
-  public static final int HDDS_REST_NETTY_HIGH_WATERMARK_DEFAULT = 65536;
-  public static final int HDDS_REST_NETTY_LOW_WATERMARK_DEFAULT = 32768;
-  public static final String HDDS_REST_NETTY_LOW_WATERMARK =
-      "hdds.rest.netty.low.watermark";
-
-  public static final String OZONE_SCM_HANDLER_COUNT_KEY =
-      "ozone.scm.handler.count.key";
-  public static final int OZONE_SCM_HANDLER_COUNT_DEFAULT = 10;
-
-  public static final String OZONE_SCM_DEADNODE_INTERVAL =
-      "ozone.scm.dead.node.interval";
-  public static final String OZONE_SCM_DEADNODE_INTERVAL_DEFAULT =
-      "10m";
-
-  public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL =
-      "ozone.scm.heartbeat.thread.interval";
-  public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT =
-      "3s";
-
-  public static final String OZONE_SCM_STALENODE_INTERVAL =
-      "ozone.scm.stale.node.interval";
-  public static final String OZONE_SCM_STALENODE_INTERVAL_DEFAULT =
-      "90s";
-
-  public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT =
-      "ozone.scm.heartbeat.rpc-timeout";
-  public static final long OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT =
-      1000;
-
-  /**
-   * Defines how frequently we log a missed heartbeat to a specific SCM.
-   * By default we write one warning message for every 10 consecutive
-   * heartbeats missed to a specific SCM, to avoid flooding the log with
-   * missed-heartbeat statements.
-   */
-  public static final String OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT =
-      "ozone.scm.heartbeat.log.warn.interval.count";
-  public static final int OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT =
-      10;
-
-  // ozone.scm.names key is a set of DNS | DNS:PORT | IP Address | IP:PORT.
-  // Written as a comma separated string. e.g. scm1, scm2:8020, 7.7.7.7:7777
-  //
-  // If this key is not specified datanodes will not be able to find
-  // SCM. The SCM membership can be dynamic, so this key should contain
-  // all possible SCM names. Once the SCM leader is discovered datanodes will
-  // get the right list of SCMs to heartbeat to from the leader.
-  // While it is good for the datanodes to know the names of all SCM nodes,
-  // it is sufficient to actually know the name of one working SCM. That SCM
-  // will be able to return the information about other SCMs that are part of
-  // the SCM replicated Log.
-  //
-  // In case of a membership change, any one of the SCM machines will be
-  // able to send back a new list to the datanodes.
-  public static final String OZONE_SCM_NAMES = "ozone.scm.names";
-
-  public static final int OZONE_SCM_DEFAULT_PORT =
-      OZONE_SCM_DATANODE_PORT_DEFAULT;
-  // File name and path where the datanode ID is to be written.
-  // If this value is not set, container startup will fail.
-  public static final String OZONE_SCM_DATANODE_ID = "ozone.scm.datanode.id";
-
-  public static final String OZONE_SCM_DATANODE_ID_PATH_DEFAULT = "datanode.id";
-
-  public static final String OZONE_SCM_DB_CACHE_SIZE_MB =
-      "ozone.scm.db.cache.size.mb";
-  public static final int OZONE_SCM_DB_CACHE_SIZE_DEFAULT = 128;
-
-  public static final String OZONE_SCM_CONTAINER_SIZE =
-      "ozone.scm.container.size";
-  public static final String OZONE_SCM_CONTAINER_SIZE_DEFAULT = "5GB";
-
-  public static final String OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY =
-      "ozone.scm.container.placement.impl";
-
-  public static final String OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE =
-      "ozone.scm.container.provision_batch_size";
-  public static final int OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE_DEFAULT = 20;
-
-  public static final String
-      OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY =
-      "ozone.scm.keyvalue.container.deletion-choosing.policy";
-
-  public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT =
-      "ozone.scm.container.creation.lease.timeout";
-
-  public static final String
-      OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
-
-  public static final String OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT =
-      "ozone.scm.pipeline.creation.lease.timeout";
-
-  public static final String
-      OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
-
-  public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY =
-      "ozone.scm.block.deletion.max.retry";
-  public static final int OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT = 4096;
-
-  public static final String HDDS_SCM_WATCHER_TIMEOUT =
-      "hdds.scm.watcher.timeout";
-
-  public static final String HDDS_SCM_WATCHER_TIMEOUT_DEFAULT =
-      "10m";
-
-  /**
-   * Never constructed.
-   */
-  private ScmConfigKeys() {
-
-  }
-}
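
As context for the removed key constants, a minimal sketch of how callers
typically resolve them against a Hadoop Configuration. The key names and
defaults come from the file above; the sample SCM list mirrors the
ozone.scm.names comment, and everything else is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.scm.ScmConfigKeys;

    public class ScmConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Typed lookups fall back to the *_DEFAULT constants when a key
        // is absent from the loaded configuration files.
        int clientPort = conf.getInt(
            ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY,
            ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT);
        boolean ratisEnabled = conf.getBoolean(
            ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
            ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);

        // ozone.scm.names is a comma-separated mix of DNS names,
        // host:port pairs, and IP addresses.
        conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "scm1,scm2:8020,7.7.7.7:7777");
        String[] scmNames =
            conf.getTrimmedStrings(ScmConfigKeys.OZONE_SCM_NAMES);

        System.out.println(clientPort + " " + ratisEnabled + " "
            + String.join(",", scmNames));
      }
    }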

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java
deleted file mode 100644
index 6236feb..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-/**
- * ScmInfo wraps the result returned from SCM#getScmInfo which
- * contains clusterId and the SCM Id.
- */
-public final class ScmInfo {
-  private String clusterId;
-  private String scmId;
-
-  /**
-   * Builder for ScmInfo.
-   */
-  public static class Builder {
-    private String clusterId;
-    private String scmId;
-
-    /**
-     * Sets the cluster id.
-     * @param cid clusterId to be set
-     * @return Builder for ScmInfo
-     */
-    public Builder setClusterId(String cid) {
-      this.clusterId = cid;
-      return this;
-    }
-
-    /**
-     * Sets the scmId.
-     * @param id scmId
-     * @return Builder for ScmInfo
-     */
-    public Builder setScmId(String id) {
-      this.scmId = id;
-      return this;
-    }
-
-    public ScmInfo build() {
-      return new ScmInfo(clusterId, scmId);
-    }
-  }
-
-  private ScmInfo(String clusterId, String scmId) {
-    this.clusterId = clusterId;
-    this.scmId = scmId;
-  }
-
-  /**
-   * Gets the clusterId from the Version file.
-   * @return ClusterId
-   */
-  public String getClusterId() {
-    return clusterId;
-  }
-
-  /**
-   * Gets the SCM Id from the Version file.
-   * @return SCM Id
-   */
-  public String getScmId() {
-    return scmId;
-  }
-}
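
A short sketch of the removed ScmInfo builder. The IDs below are
placeholders; in practice both values are read from the SCM Version file:

    import org.apache.hadoop.hdds.scm.ScmInfo;

    public class ScmInfoExample {
      public static void main(String[] args) {
        // Placeholder IDs, for illustration only.
        ScmInfo info = new ScmInfo.Builder()
            .setClusterId("CID-example")
            .setScmId("SCM-example")
            .build();
        System.out.println(info.getClusterId() + " / " + info.getScmId());
      }
    }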

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
deleted file mode 100644
index 571d148..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * A Client for the storageContainer protocol.
- */
-public abstract class XceiverClientSpi implements Closeable {
-
-  private final AtomicInteger referenceCount;
-  private boolean isEvicted;
-
-  XceiverClientSpi() {
-    this.referenceCount = new AtomicInteger(0);
-    this.isEvicted = false;
-  }
-
-  void incrementReference() {
-    this.referenceCount.incrementAndGet();
-  }
-
-  void decrementReference() {
-    this.referenceCount.decrementAndGet();
-    cleanup();
-  }
-
-  void setEvicted() {
-    isEvicted = true;
-    cleanup();
-  }
-
-  // Close the xceiverClient only if:
-  // 1) there is no refcount on the client, and
-  // 2) it has been evicted from the cache.
-  private void cleanup() {
-    if (referenceCount.get() == 0 && isEvicted) {
-      close();
-    }
-  }
-
-  @VisibleForTesting
-  public int getRefcount() {
-    return referenceCount.get();
-  }
-
-  /**
-   * Connects to the leader in the pipeline.
-   */
-  public abstract void connect() throws Exception;
-
-  @Override
-  public abstract void close();
-
-  /**
-   * Returns the pipeline of machines that host the container used by this
-   * client.
-   *
-   * @return pipeline of machines that host the container
-   */
-  public abstract Pipeline getPipeline();
-
-  /**
-   * Sends a given command to the server and gets the reply back.
-   * @param request Request
-   * @return Response to the command
-   * @throws IOException
-   */
-  public ContainerCommandResponseProto sendCommand(
-      ContainerCommandRequestProto request) throws IOException {
-    try {
-      return sendCommandAsync(request).get();
-    } catch (ExecutionException | InterruptedException e) {
-      throw new IOException("Failed to execute command " + request, e);
-    }
-  }
-
-  /**
-   * Sends a given command to the server and gets a waitable future back.
-   *
-   * @param request Request
-   * @return Response to the command
-   * @throws IOException
-   */
-  public abstract CompletableFuture<ContainerCommandResponseProto>
-      sendCommandAsync(ContainerCommandRequestProto request)
-      throws IOException, ExecutionException, InterruptedException;
-
-  /**
-   * Create a pipeline.
-   */
-  public abstract void createPipeline() throws IOException;
-
-  /**
-   * Destroy a pipeline.
-   * @throws IOException
-   */
-  public abstract void destroyPipeline() throws IOException;
-
-  /**
-   * Returns pipeline Type.
-   *
-   * @return - {Stand_Alone, Ratis or Chained}
-   */
-  public abstract HddsProtos.ReplicationType getPipelineType();
-}
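
To illustrate the contract of the removed abstract class, a skeletal,
non-functional subclass sketch follows. StubXceiverClient is a hypothetical
name, not part of this patch; it lives in the same package because the
XceiverClientSpi constructor is package-private. The reference-counting
hooks (incrementReference, decrementReference, setEvicted) are likewise
package-private and driven by the client cache, not by user code:

    package org.apache.hadoop.hdds.scm;

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
        .ContainerCommandRequestProto;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
        .ContainerCommandResponseProto;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

    import java.io.IOException;
    import java.util.concurrent.CompletableFuture;

    public class StubXceiverClient extends XceiverClientSpi {
      private final Pipeline pipeline;

      public StubXceiverClient(Pipeline pipeline) {
        this.pipeline = pipeline;
      }

      @Override
      public void connect() {
        // A real client would open a connection to the pipeline leader.
      }

      @Override
      public void close() {
        // Invoked by cleanup() once the refcount is zero and the client
        // has been evicted from the cache.
      }

      @Override
      public Pipeline getPipeline() {
        return pipeline;
      }

      @Override
      public CompletableFuture<ContainerCommandResponseProto> sendCommandAsync(
          ContainerCommandRequestProto request) {
        // Stub: complete immediately with an empty response. The inherited
        // sendCommand() blocks on this future to provide the sync variant.
        return CompletableFuture.completedFuture(
            ContainerCommandResponseProto.getDefaultInstance());
      }

      @Override
      public void createPipeline() throws IOException {
        // No-op in this sketch.
      }

      @Override
      public void destroyPipeline() throws IOException {
        // No-op in this sketch.
      }

      @Override
      public HddsProtos.ReplicationType getPipelineType() {
        return HddsProtos.ReplicationType.STAND_ALONE;
      }
    }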

