http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
deleted file mode 100644
index 8acca8a..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.protocolPB;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.KeySpaceManagerService;
-
-/**
- * Protocol used to communicate with KSM.
- */
-@ProtocolInfo(protocolName =
-    "org.apache.hadoop.ozone.protocol.KeySpaceManagerProtocol",
-    protocolVersion = 1)
-@InterfaceAudience.Private
-public interface KeySpaceManagerProtocolPB
-    extends KeySpaceManagerService.BlockingInterface {
-}

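[Editor's sketch] KeySpaceManagerProtocolPB is only a marker interface binding the generated protobuf service to Hadoop RPC. A minimal, illustrative sketch of how a client-side proxy for such an interface is typically wired up (the helper class and method below are hypothetical, not part of this commit):

import java.net.InetSocketAddress;
import javax.net.SocketFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;

public final class KsmProxySketch {
  // Builds an RPC proxy for the KSM protocol; error handling elided.
  public static KeySpaceManagerProtocolPB createProxy(
      InetSocketAddress ksmAddress, Configuration conf) throws Exception {
    // Route calls for this protocol through the protobuf RPC engine.
    RPC.setProtocolEngine(conf, KeySpaceManagerProtocolPB.class,
        ProtobufRpcEngine.class);
    long version = RPC.getProtocolVersion(KeySpaceManagerProtocolPB.class);
    SocketFactory factory = NetUtils.getDefaultSocketFactory(conf);
    return RPC.getProxy(KeySpaceManagerProtocolPB.class, version, ksmAddress,
        UserGroupInformation.getCurrentUser(), conf, factory);
  }
}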
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/package-info.java
deleted file mode 100644
index 67f9f7b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm.protocolPB;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/package-info.java
deleted file mode 100644
index 764ff3c..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-/**
- This package contains ozone client side libraries.
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/protocolPB/KSMPBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/protocolPB/KSMPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/protocolPB/KSMPBHelper.java
deleted file mode 100644
index fdc3ce7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/protocolPB/KSMPBHelper.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo.OzoneAclType;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo.OzoneAclRights;
-
-/**
- * Utilities for converting protobuf classes.
- */
-public final class KSMPBHelper {
-
-  private KSMPBHelper() {
-    /** Hidden constructor */
-  }
-
-  /**
-   * Converts OzoneAcl into protobuf's OzoneAclInfo.
-   * @return OzoneAclInfo
-   */
-  public static OzoneAclInfo convertOzoneAcl(OzoneAcl acl) {
-    OzoneAclInfo.OzoneAclType aclType;
-    switch(acl.getType()) {
-    case USER:
-      aclType = OzoneAclType.USER;
-      break;
-    case GROUP:
-      aclType = OzoneAclType.GROUP;
-      break;
-    case WORLD:
-      aclType = OzoneAclType.WORLD;
-      break;
-    default:
-      throw new IllegalArgumentException("ACL type is not recognized");
-    }
-    OzoneAclInfo.OzoneAclRights aclRights;
-    switch(acl.getRights()) {
-    case READ:
-      aclRights = OzoneAclRights.READ;
-      break;
-    case WRITE:
-      aclRights = OzoneAclRights.WRITE;
-      break;
-    case READ_WRITE:
-      aclRights = OzoneAclRights.READ_WRITE;
-      break;
-    default:
-      throw new IllegalArgumentException("ACL right is not recognized");
-    }
-
-    return OzoneAclInfo.newBuilder().setType(aclType)
-        .setName(acl.getName())
-        .setRights(aclRights)
-        .build();
-  }
-
-  /**
-   * Converts protobuf's OzoneAclInfo into OzoneAcl.
-   * @return OzoneAcl
-   */
-  public static OzoneAcl convertOzoneAcl(OzoneAclInfo aclInfo) {
-    OzoneAcl.OzoneACLType aclType;
-    switch(aclInfo.getType()) {
-    case USER:
-      aclType = OzoneAcl.OzoneACLType.USER;
-      break;
-    case GROUP:
-      aclType = OzoneAcl.OzoneACLType.GROUP;
-      break;
-    case WORLD:
-      aclType = OzoneAcl.OzoneACLType.WORLD;
-      break;
-    default:
-      throw new IllegalArgumentException("ACL type is not recognized");
-    }
-    OzoneAcl.OzoneACLRights aclRights;
-    switch(aclInfo.getRights()) {
-    case READ:
-      aclRights = OzoneAcl.OzoneACLRights.READ;
-      break;
-    case WRITE:
-      aclRights = OzoneAcl.OzoneACLRights.WRITE;
-      break;
-    case READ_WRITE:
-      aclRights = OzoneAcl.OzoneACLRights.READ_WRITE;
-      break;
-    default:
-      throw new IllegalArgumentException("ACL right is not recognized");
-    }
-
-    return new OzoneAcl(aclType, aclInfo.getName(), aclRights);
-  }
-}

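[Editor's sketch] A minimal round trip through the converters above. The OzoneAcl constructor shape is taken from the convertOzoneAcl(OzoneAclInfo) path in the deleted code; the ACL name and class below are hypothetical, and KSMPBHelper is assumed to be on the classpath:

import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.protocol.proto
    .KeySpaceManagerProtocolProtos.OzoneAclInfo;

public final class KsmAclRoundTrip {
  public static void main(String[] args) {
    // Hypothetical user ACL with read/write rights.
    OzoneAcl acl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, "user1",
        OzoneAcl.OzoneACLRights.READ_WRITE);
    OzoneAclInfo proto = KSMPBHelper.convertOzoneAcl(acl);  // to protobuf
    OzoneAcl back = KSMPBHelper.convertOzoneAcl(proto);     // and back
    System.out.println(acl.getName().equals(back.getName())); // true
  }
}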
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
deleted file mode 100644
index 860386d..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.protocolPB;
-
-/**
- * This package contains classes for the Protocol Buffers binding of Ozone
- * protocols.
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/ContainerID.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/ContainerID.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/ContainerID.java
deleted file mode 100644
index a51d3b7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/ContainerID.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.ozone.scm.container.ContainerStates;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.math3.util.MathUtils;
-
-/**
- * Container ID is an integer value between 1 and MAX_CONTAINER_ID.
- * <p>
- * We are creating a specific type for this to avoid mixing this with
- * normal integers in code.
- */
-public class ContainerID implements Comparable {
-
-  private final long id;
-
-  /**
-   * Constructs ContainerID.
-   *
-   * @param id int
-   */
-  public ContainerID(long id) {
-    Preconditions.checkState(id > 0,
-        "Container ID should be a positive int");
-    this.id = id;
-  }
-
-  /**
-   * Factory method for creation of ContainerID.
-   * @param containerID  long
-   * @return ContainerID.
-   */
-  public static ContainerID valueof(long containerID) {
-    Preconditions.checkState(containerID > 0);
-    return new ContainerID(containerID);
-  }
-
-  /**
-   * Returns int representation of ID.
-   *
-   * @return int
-   */
-  public long getId() {
-    return id;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    ContainerID that = (ContainerID) o;
-
-    return id == that.id;
-  }
-
-  @Override
-  public int hashCode() {
-    return MathUtils.hash(id);
-  }
-
-  @Override
-  public int compareTo(Object o) {
-    Preconditions.checkNotNull(o);
-    if (o instanceof ContainerID) {
-      return Long.compare(((ContainerID) o).getId(), this.getId());
-    }
-    throw new IllegalArgumentException("Object O, should be an instance " +
-        "of ContainerID");
-  }
-
-  @Override
-  public String toString() {
-    return "id=" + id;
-  }
-}

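[Editor's sketch] Note that compareTo above runs Long.compare(o.getId(), this.getId()), comparing the argument's id to this id, so the natural ordering it produces is descending. A small sketch of the value semantics, assuming the class exactly as shown:

public final class ContainerIdDemo {
  public static void main(String[] args) {
    ContainerID first = ContainerID.valueof(1L);
    ContainerID second = ContainerID.valueof(2L);
    // equals/hashCode are value based, so IDs can key maps and sets.
    System.out.println(first.equals(ContainerID.valueof(1L))); // true
    // The argument-first comparison means the smaller id sorts last.
    System.out.println(first.compareTo(second)); // 1, not -1
  }
}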
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/package-info.java
deleted file mode 100644
index 61f5609..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-/**
- * Container States.
- */
-package org.apache.hadoop.ozone.scm.container.ContainerStates;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
deleted file mode 100644
index fbe9637..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.scm;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * This class contains constants for configuration keys used in SCM.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public final class ScmConfigKeys {
-
-  public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY =
-      "scm.container.client.idle.threshold";
-  public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT =
-      "10s";
-
-  public static final String SCM_CONTAINER_CLIENT_MAX_SIZE_KEY =
-      "scm.container.client.max.size";
-  public static final int SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT =
-      256;
-
-  public static final String SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS =
-      "scm.container.client.max.outstanding.requests";
-  public static final int SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS_DEFAULT
-      = 100;
-
-  public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
-      = "dfs.container.ratis.enabled";
-  public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
-      = false;
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
-      = "dfs.container.ratis.rpc.type";
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
-      = "GRPC";
-  public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY
-      = "dfs.container.ratis.num.write.chunk.threads";
-  public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT
-      = 60;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
-      "dfs.container.ratis.segment.size";
-  public static final int DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
-      1 * 1024 * 1024 * 1024;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY =
-      "dfs.container.ratis.segment.preallocated.size";
-  public static final int
-      DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = 128 * 1024 * 1024;
-
-  // TODO : this is copied from OzoneConsts, may need to move to a better place
-  public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size";
-  // 16 MB by default
-  public static final int OZONE_SCM_CHUNK_SIZE_DEFAULT = 16 * 1024 * 1024;
-  public static final int OZONE_SCM_CHUNK_MAX_SIZE = 32 * 1024 * 1024;
-
-  public static final String OZONE_SCM_CLIENT_PORT_KEY =
-      "ozone.scm.client.port";
-  public static final int OZONE_SCM_CLIENT_PORT_DEFAULT = 9860;
-
-  public static final String OZONE_SCM_DATANODE_PORT_KEY =
-      "ozone.scm.datanode.port";
-  public static final int OZONE_SCM_DATANODE_PORT_DEFAULT = 9861;
-
-  // OZONE_KSM_PORT_DEFAULT = 9862
-  public static final String OZONE_SCM_BLOCK_CLIENT_PORT_KEY =
-      "ozone.scm.block.client.port";
-  public static final int OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT = 9863;
-
-  // Container service client
-  public static final String OZONE_SCM_CLIENT_ADDRESS_KEY =
-      "ozone.scm.client.address";
-  public static final String OZONE_SCM_CLIENT_BIND_HOST_KEY =
-      "ozone.scm.client.bind.host";
-  public static final String OZONE_SCM_CLIENT_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-
-  // Block service client
-  public static final String OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY =
-      "ozone.scm.block.client.address";
-  public static final String OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY =
-      "ozone.scm.block.client.bind.host";
-  public static final String OZONE_SCM_BLOCK_CLIENT_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-
-  public static final String OZONE_SCM_DATANODE_ADDRESS_KEY =
-      "ozone.scm.datanode.address";
-  public static final String OZONE_SCM_DATANODE_BIND_HOST_KEY =
-      "ozone.scm.datanode.bind.host";
-  public static final String OZONE_SCM_DATANODE_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-
-  public static final String OZONE_SCM_HTTP_ENABLED_KEY =
-      "ozone.scm.http.enabled";
-  public static final String OZONE_SCM_HTTP_BIND_HOST_KEY =
-      "ozone.scm.http-bind-host";
-  public static final String OZONE_SCM_HTTPS_BIND_HOST_KEY =
-      "ozone.scm.https-bind-host";
-  public static final String OZONE_SCM_HTTP_ADDRESS_KEY =
-      "ozone.scm.http-address";
-  public static final String OZONE_SCM_HTTPS_ADDRESS_KEY =
-      "ozone.scm.https-address";
-  public static final String OZONE_SCM_KEYTAB_FILE =
-      "ozone.scm.keytab.file";
-  public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
-  public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
-  public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
-
-
-  public static final String OZONE_SCM_HANDLER_COUNT_KEY =
-      "ozone.scm.handler.count.key";
-  public static final int OZONE_SCM_HANDLER_COUNT_DEFAULT = 10;
-
-  public static final String OZONE_SCM_HEARTBEAT_INTERVAL =
-      "ozone.scm.heartbeat.interval";
-  public static final String OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT =
-      "30s";
-
-  public static final String OZONE_SCM_DEADNODE_INTERVAL =
-      "ozone.scm.dead.node.interval";
-  public static final String OZONE_SCM_DEADNODE_INTERVAL_DEFAULT =
-      "10m";
-
-  public static final String OZONE_SCM_MAX_HB_COUNT_TO_PROCESS =
-      "ozone.scm.max.hb.count.to.process";
-  public static final int OZONE_SCM_MAX_HB_COUNT_TO_PROCESS_DEFAULT = 5000;
-
-  public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL =
-      "ozone.scm.heartbeat.thread.interval";
-  public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT =
-      "3s";
-
-  public static final String OZONE_SCM_STALENODE_INTERVAL =
-      "ozone.scm.stale.node.interval";
-  public static final String OZONE_SCM_STALENODE_INTERVAL_DEFAULT =
-      "90s";
-
-  public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT =
-      "ozone.scm.heartbeat.rpc-timeout";
-  public static final long OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT =
-      1000;
-
-  /**
-   * Defines how frequently we log a missed heartbeat to a specific SCM.
-   * By default we write a warning message for every 10 sequential
-   * heartbeats that we miss to a specific SCM. This is to avoid
-   * overrunning the log with repeated missed-heartbeat statements.
-   */
-  public static final String OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT =
-      "ozone.scm.heartbeat.log.warn.interval.count";
-  public static final int OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT =
-      10;
-
-  // ozone.scm.names key is a set of DNS | DNS:PORT | IP Address | IP:PORT.
-  // Written as a comma separated string. e.g. scm1, scm2:8020, 7.7.7.7:7777
-  //
-  // If this key is not specified datanodes will not be able to find
-  // SCM. The SCM membership can be dynamic, so this key should contain
-  // all possible SCM names. Once the SCM leader is discovered datanodes will
-  // get the right list of SCMs to heartbeat to from the leader.
-  // While it is good for the datanodes to know the names of all SCM nodes,
-  // it is sufficient to actually know the name of one working SCM. That SCM
-  // will be able to return the information about other SCMs that are part of
-  // the SCM replicated Log.
-  //
-  // In case of a membership change, any one of the SCM machines will be
-  // able to send back a new list to the datanodes.
-  public static final String OZONE_SCM_NAMES = "ozone.scm.names";
-
-  public static final int OZONE_SCM_DEFAULT_PORT =
-      OZONE_SCM_DATANODE_PORT_DEFAULT;
-  // File name and path that the datanode ID is to be written to.
-  // If this value is not set then container startup will fail.
-  public static final String OZONE_SCM_DATANODE_ID = "ozone.scm.datanode.id";
-
-  public static final String OZONE_SCM_DATANODE_ID_PATH_DEFAULT = "datanode.id";
-
-  public static final String OZONE_SCM_DB_CACHE_SIZE_MB =
-      "ozone.scm.db.cache.size.mb";
-  public static final int OZONE_SCM_DB_CACHE_SIZE_DEFAULT = 128;
-
-  public static final String OZONE_SCM_CONTAINER_SIZE_GB =
-      "ozone.scm.container.size.gb";
-  public static final int OZONE_SCM_CONTAINER_SIZE_DEFAULT = 5;
-
-  public static final String OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY =
-      "ozone.scm.container.placement.impl";
-
-  public static final String OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE =
-      "ozone.scm.container.provision_batch_size";
-  public static final int OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE_DEFAULT = 20;
-
-  public static final String OZONE_SCM_CONTAINER_DELETION_CHOOSING_POLICY =
-      "ozone.scm.container.deletion-choosing.policy";
-
-  public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT =
-      "ozone.scm.container.creation.lease.timeout";
-
-  public static final String
-      OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
-
-  /**
-   * Don't start processing a pool if we have not had a minimum number of
-   * seconds from the last processing.
-   */
-  public static final String OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL =
-      "ozone.scm.container.report.processing.interval";
-  public static final String
-      OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT = "60s";
-
-  /**
-   * This determines the total number of pools to be processed in parallel.
-   */
-  public static final String OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS =
-      "ozone.scm.max.nodepool.processing.threads";
-  public static final int OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT = 1;
-  /**
-   * These 2 settings control the number of threads in the executor pool and
-   * timeouts for the container reports from all nodes.
-   */
-  public static final String OZONE_SCM_MAX_CONTAINER_REPORT_THREADS =
-      "ozone.scm.max.container.report.threads";
-  public static final int OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT = 100;
-  public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT =
-      "ozone.scm.container.reports.wait.timeout";
-  public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT =
-      "5m";
-
-  public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY =
-      "ozone.scm.block.deletion.max.retry";
-  public static final int OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT = 4096;
-
-  // Once a container's usage crosses this threshold, it is eligible for
-  // closing.
-  public static final String OZONE_SCM_CONTAINER_CLOSE_THRESHOLD =
-      "ozone.scm.container.close.threshold";
-  public static final float OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f;
-
-  /**
-   * Never constructed.
-   */
-  private ScmConfigKeys() {
-
-  }
-}

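[Editor's sketch] These keys are read through the standard Configuration API; duration-valued defaults such as "10s" parse via getTimeDuration, exactly as XceiverClientManager does further down. A minimal sketch (the demo class is hypothetical):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public final class ScmConfigDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Suffixed duration default ("10s") converted to milliseconds.
    long staleMs = conf.getTimeDuration(
        ScmConfigKeys.SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY,
        ScmConfigKeys.SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT,
        TimeUnit.MILLISECONDS);
    // Plain integer key with a compiled-in default (16 MB).
    int chunkSize = conf.getInt(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY,
        ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT);
    System.out.println(staleMs + " ms, " + chunkSize + " bytes");
  }
}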
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmInfo.java
deleted file mode 100644
index e442fe2..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmInfo.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.scm;
-
-/**
- * ScmInfo wraps the result returned from SCM#getScmInfo which
- * contains clusterId and the SCM Id.
- */
-public final class ScmInfo {
-  private String clusterId;
-  private String scmId;
-
-  /**
-   * Builder for ScmInfo.
-   */
-  public static class Builder {
-    private String clusterId;
-    private String scmId;
-
-    /**
-     * sets the cluster id.
-     * @param cid clusterId to be set
-     * @return Builder for ScmInfo
-     */
-    public Builder setClusterId(String cid) {
-      this.clusterId = cid;
-      return this;
-    }
-
-    /**
-     * sets the scmId.
-     * @param id scmId
-     * @return Builder for scmInfo
-     */
-    public Builder setScmId(String id) {
-      this.scmId = id;
-      return this;
-    }
-
-    public ScmInfo build() {
-      return new ScmInfo(clusterId, scmId);
-    }
-  }
-
-  private ScmInfo(String clusterId, String scmId) {
-    this.clusterId = clusterId;
-    this.scmId = scmId;
-  }
-
-  /**
-   * Gets the clusterId from the Version file.
-   * @return ClusterId
-   */
-  public String getClusterId() {
-    return clusterId;
-  }
-
-  /**
-   * Gets the SCM Id from the Version file.
-   * @return SCM Id
-   */
-  public String getScmId() {
-    return scmId;
-  }
-}

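[Editor's sketch] Usage is the usual builder pattern; the IDs below are placeholders, since real values come from the SCM Version file:

public final class ScmInfoDemo {
  public static void main(String[] args) {
    ScmInfo info = new ScmInfo.Builder()
        .setClusterId("CID-example")   // placeholder cluster id
        .setScmId("SCM-example")       // placeholder SCM id
        .build();
    System.out.println(info.getClusterId() + " / " + info.getScmId());
  }
}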
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClient.java
deleted file mode 100644
index bde9064..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClient.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.scm;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import io.netty.bootstrap.Bootstrap;
-import io.netty.channel.Channel;
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.nio.NioEventLoopGroup;
-import io.netty.channel.socket.nio.NioSocketChannel;
-import io.netty.handler.logging.LogLevel;
-import io.netty.handler.logging.LoggingHandler;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.OzoneClientUtils;
-import org.apache.hadoop.scm.container.common.helpers.Pipeline;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
-
-import java.io.IOException;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.List;
-import java.util.concurrent.Semaphore;
-
-/**
- * A Client for the storageContainer protocol.
- */
-public class XceiverClient extends XceiverClientSpi {
-  static final Logger LOG = LoggerFactory.getLogger(XceiverClient.class);
-  private final Pipeline pipeline;
-  private final Configuration config;
-  private Channel channel;
-  private Bootstrap b;
-  private EventLoopGroup group;
-  private final Semaphore semaphore;
-
-  /**
-   * Constructs a client that can communicate with the Container framework on
-   * data nodes.
-   *
-   * @param pipeline - Pipeline that defines the machines.
-   * @param config -- Ozone Config
-   */
-  public XceiverClient(Pipeline pipeline, Configuration config) {
-    super();
-    Preconditions.checkNotNull(pipeline);
-    Preconditions.checkNotNull(config);
-    this.pipeline = pipeline;
-    this.config = config;
-    this.semaphore =
-        new Semaphore(OzoneClientUtils.getMaxOutstandingRequests(config));
-  }
-
-  @Override
-  public void connect() throws Exception {
-    if (channel != null && channel.isActive()) {
-      throw new IOException("This client is already connected to a host.");
-    }
-
-    group = new NioEventLoopGroup();
-    b = new Bootstrap();
-    b.group(group)
-        .channel(NioSocketChannel.class)
-        .handler(new LoggingHandler(LogLevel.INFO))
-        .handler(new XceiverClientInitializer(this.pipeline, semaphore));
-    DatanodeID leader = this.pipeline.getLeader();
-
-    // read port from the data node, on failure use default configured
-    // port.
-    int port = leader.getContainerPort();
-    if (port == 0) {
-      port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
-    }
-    LOG.debug("Connecting to server Port : " + port);
-    channel = b.connect(leader.getHostName(), port).sync().channel();
-  }
-
-  /**
-   * Returns whether the Xceiver client is connected to a server.
-   *
-   * @return True if the connection is alive, false otherwise.
-   */
-  @VisibleForTesting
-  public boolean isConnected() {
-    return channel.isActive();
-  }
-
-  @Override
-  public void close() {
-    if (group != null) {
-      group.shutdownGracefully().awaitUninterruptibly();
-    }
-  }
-
-  @Override
-  public Pipeline getPipeline() {
-    return pipeline;
-  }
-
-  @Override
-  public ContainerProtos.ContainerCommandResponseProto sendCommand(
-      ContainerProtos.ContainerCommandRequestProto request) throws IOException {
-    try {
-      if ((channel == null) || (!channel.isActive())) {
-        throw new IOException("This channel is not connected.");
-      }
-      XceiverClientHandler handler =
-          channel.pipeline().get(XceiverClientHandler.class);
-
-      return handler.sendCommand(request);
-    } catch (ExecutionException | InterruptedException e) {
-      /**
-       * In case the netty channel handler throws an exception,
-       * the exception thrown will be wrapped within {@link ExecutionException}.
-       * Unwrapping here so that the original exception gets passed
-       * to the client.
-       */
-      if (e instanceof ExecutionException) {
-        Throwable cause = e.getCause();
-        if (cause instanceof IOException) {
-          throw (IOException) cause;
-        }
-      }
-      throw new IOException(
-          "Unexpected exception during execution:" + e.getMessage());
-    }
-  }
-
-  /**
-   * Sends a given command to the server and gets a waitable future back.
-   *
-   * @param request Request
-   * @return Response to the command
-   * @throws IOException
-   */
-  @Override
-  public CompletableFuture<ContainerProtos.ContainerCommandResponseProto>
-      sendCommandAsync(ContainerProtos.ContainerCommandRequestProto request)
-      throws IOException, ExecutionException, InterruptedException {
-    if ((channel == null) || (!channel.isActive())) {
-      throw new IOException("This channel is not connected.");
-    }
-    XceiverClientHandler handler =
-        channel.pipeline().get(XceiverClientHandler.class);
-    return handler.sendCommandAsync(request);
-  }
-
-  /**
-   * Create a pipeline.
-   *
-   * @param pipelineID - Name of the pipeline.
-   * @param datanodes - Datanodes
-   */
-  @Override
-  public void createPipeline(String pipelineID, List<DatanodeID> datanodes)
-      throws IOException {
-    // For stand alone pipeline, there is no notion called setup pipeline.
-    return;
-  }
-
-  /**
-   * Returns pipeline Type.
-   *
-   * @return - Stand Alone as the type.
-   */
-  @Override
-  public OzoneProtos.ReplicationType getPipelineType() {
-    return OzoneProtos.ReplicationType.STAND_ALONE;
-  }
-}

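[Editor's sketch] The lifecycle implied by the code above: connect to the pipeline leader, issue a synchronous command, and always close to shut the netty event loop down. The Pipeline and the request proto are assumed to come from elsewhere; the demo class is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;

public final class XceiverClientDemo {
  public static ContainerProtos.ContainerCommandResponseProto call(
      Pipeline pipeline, Configuration conf,
      ContainerProtos.ContainerCommandRequestProto request) throws Exception {
    XceiverClient client = new XceiverClient(pipeline, conf);
    client.connect();                      // dials the pipeline leader
    try {
      return client.sendCommand(request);  // synchronous round trip
    } finally {
      client.close();                      // shuts down the event loop group
    }
  }
}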
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientHandler.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientHandler.java
deleted file mode 100644
index ac2cf1a..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientHandler.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.scm;
-
-import com.google.common.base.Preconditions;
-
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.SimpleChannelInboundHandler;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-
-import org.apache.hadoop.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Iterator;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.Semaphore;
-
-/**
- * Netty client handler.
- */
-public class XceiverClientHandler extends
-    SimpleChannelInboundHandler<ContainerCommandResponseProto> {
-
-  static final Logger LOG = LoggerFactory.getLogger(XceiverClientHandler.class);
-  private final ConcurrentMap<String, ResponseFuture> responses =
-      new ConcurrentHashMap<>();
-
-  private final Pipeline pipeline;
-  private volatile Channel channel;
-  private XceiverClientMetrics metrics;
-  private final Semaphore semaphore;
-
-  /**
-   * Constructs a client that can communicate to a container server.
-   */
-  public XceiverClientHandler(Pipeline pipeline, Semaphore semaphore) {
-    super(false);
-    Preconditions.checkNotNull(pipeline);
-    this.pipeline = pipeline;
-    this.metrics = XceiverClientManager.getXceiverClientMetrics();
-    this.semaphore = semaphore;
-  }
-
-  /**
-   * <strong>Please keep in mind that this method will be renamed to {@code
-   * messageReceived(ChannelHandlerContext, I)} in 5.0.</strong>
-   * <p>
-   * Is called for each message of type {@link ContainerProtos
-   * .ContainerCommandResponseProto}.
-   *
-   * @param ctx the {@link ChannelHandlerContext} which this {@link
-   * SimpleChannelInboundHandler} belongs to
-   * @param msg the message to handle
-   * @throws Exception is thrown if an error occurred
-   */
-  @Override
-  public void channelRead0(ChannelHandlerContext ctx,
-      ContainerProtos.ContainerCommandResponseProto msg)
-      throws Exception {
-    Preconditions.checkNotNull(msg);
-    metrics.decrPendingContainerOpsMetrics(msg.getCmdType());
-
-    String key = msg.getTraceID();
-    ResponseFuture response = responses.remove(key);
-    semaphore.release();
-
-    if (response != null) {
-      response.getFuture().complete(msg);
-
-      long requestTime = response.getRequestTime();
-      metrics.addContainerOpsLatency(msg.getCmdType(),
-          Time.monotonicNowNanos() - requestTime);
-    } else {
-      LOG.error("A reply received for message that was not queued. trace " +
-          "ID: {}", msg.getTraceID());
-    }
-  }
-
-  @Override
-  public void channelRegistered(ChannelHandlerContext ctx) {
-    LOG.debug("channelRegistered: Connected to ctx");
-    channel = ctx.channel();
-  }
-
-  @Override
-  public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
-    LOG.info("Exception in client " + cause.toString());
-    Iterator<String> keyIterator = responses.keySet().iterator();
-    while (keyIterator.hasNext()) {
-      ResponseFuture response = responses.remove(keyIterator.next());
-      response.getFuture().completeExceptionally(cause);
-      semaphore.release();
-    }
-    ctx.close();
-  }
-
-  /**
-   * Since netty is async, we send a work request and then wait until a response
-   * appears in the reply queue. This is a simple sync interface for clients. We
-   * should consider building async interfaces for clients if this turns out to
-   * be a performance bottleneck.
-   *
-   * @param request - request.
-   * @return -- response
-   */
-
-  public ContainerCommandResponseProto sendCommand(
-      ContainerProtos.ContainerCommandRequestProto request)
-      throws ExecutionException, InterruptedException {
-    Future<ContainerCommandResponseProto> future = sendCommandAsync(request);
-    return future.get();
-  }
-
-  /**
-   * sendCommandAsync queues a command to the Netty subsystem and returns a
-   * CompletableFuture. This Future is marked completed in channelRead0
-   * when the call comes back.
-   * @param request - Request to execute
-   * @return CompletableFuture
-   */
-  public CompletableFuture<ContainerCommandResponseProto> sendCommandAsync(
-      ContainerProtos.ContainerCommandRequestProto request)
-      throws InterruptedException {
-
-    // Throw an exception if the request doesn't have a trace ID
-    if (StringUtils.isEmpty(request.getTraceID())) {
-      throw new IllegalArgumentException("Invalid trace ID");
-    }
-
-    // Setting the datanode ID in the commands, so that we can distinguish
-    // commands when the cluster simulator is running.
-    if(!request.hasDatanodeID()) {
-      throw new IllegalArgumentException("Invalid Datanode ID");
-    }
-
-    metrics.incrPendingContainerOpsMetrics(request.getCmdType());
-
-    CompletableFuture<ContainerCommandResponseProto> future
-        = new CompletableFuture<>();
-    ResponseFuture response = new ResponseFuture(future,
-        Time.monotonicNowNanos());
-    semaphore.acquire();
-    ResponseFuture previous = responses.putIfAbsent(
-        request.getTraceID(), response);
-    if (previous != null) {
-      LOG.error("Command with Trace already exists. Ignoring this command. " +
-              "{}. Previous Command: {}", request.getTraceID(),
-          previous.toString());
-      throw new IllegalStateException("Duplicate trace ID. Command with this " +
-          "trace ID is already executing. Please ensure that " +
-          "trace IDs are not reused. ID: " + request.getTraceID());
-    }
-
-    channel.writeAndFlush(request);
-    return response.getFuture();
-  }
-
-  /**
-   * Class wraps response future info.
-   */
-  static class ResponseFuture {
-    private final long requestTime;
-    private final CompletableFuture<ContainerCommandResponseProto> future;
-
-    ResponseFuture(CompletableFuture<ContainerCommandResponseProto> future,
-        long requestTime) {
-      this.future = future;
-      this.requestTime = requestTime;
-    }
-
-    public long getRequestTime() {
-      return requestTime;
-    }
-
-    public CompletableFuture<ContainerCommandResponseProto> getFuture() {
-      return future;
-    }
-  }
-}

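[Editor's sketch] The handler correlates replies to requests purely by trace ID, which is why sendCommandAsync rejects requests without one and treats duplicate trace IDs as a hard error. A sketch of an async caller honoring that contract; building the request proto itself is assumed to happen elsewhere:

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;

public final class AsyncCallDemo {
  public static void callAsync(XceiverClient client,
      ContainerProtos.ContainerCommandRequestProto request) throws Exception {
    // The request must carry a unique trace ID and a datanode ID,
    // or sendCommandAsync throws IllegalArgumentException.
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
        client.sendCommandAsync(request);
    future.thenAccept(reply ->
        System.out.println("completed: " + reply.getTraceID()));
  }
}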
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientInitializer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientInitializer.java
deleted file mode 100644
index 6aac960..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientInitializer.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.scm;
-
-import io.netty.channel.ChannelInitializer;
-import io.netty.channel.ChannelPipeline;
-import io.netty.channel.socket.SocketChannel;
-import io.netty.handler.codec.protobuf.ProtobufDecoder;
-import io.netty.handler.codec.protobuf.ProtobufEncoder;
-import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-import io.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-import org.apache.hadoop.scm.container.common.helpers.Pipeline;
-
-import java.util.concurrent.Semaphore;
-
-/**
- * Setup the netty pipeline.
- */
-public class XceiverClientInitializer extends
-    ChannelInitializer<SocketChannel> {
-  private final Pipeline pipeline;
-  private final Semaphore semaphore;
-
-  /**
-   * Constructs an Initializer for the client pipeline.
-   * @param pipeline  - Pipeline.
-   */
-  public XceiverClientInitializer(Pipeline pipeline, Semaphore semaphore) {
-    this.pipeline = pipeline;
-    this.semaphore = semaphore;
-  }
-
-  /**
-   * This method will be called once when the Channel is registered. After
-   * the method returns this instance will be removed from the
-   * ChannelPipeline of the Channel.
-   *
-   * @param ch   Channel which was registered.
-   * @throws Exception is thrown if an error occurs. In that case the
-   *                   Channel will be closed.
-   */
-  @Override
-  protected void initChannel(SocketChannel ch) throws Exception {
-    ChannelPipeline p = ch.pipeline();
-
-    p.addLast(new ProtobufVarint32FrameDecoder());
-    p.addLast(new ProtobufDecoder(ContainerProtos
-        .ContainerCommandResponseProto.getDefaultInstance()));
-
-    p.addLast(new ProtobufVarint32LengthFieldPrepender());
-    p.addLast(new ProtobufEncoder());
-
-    p.addLast(new XceiverClientHandler(this.pipeline, this.semaphore));
-
-  }
-}

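[Editor's sketch] The wire format is length-prefixed protobuf: a varint32 frame decoder plus ProtobufDecoder inbound, the matching prepender plus encoder outbound. A server speaking this format would mirror the codec; the class below is an illustrative sketch with the request handler left abstract:

import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.protobuf.ProtobufDecoder;
import io.netty.handler.codec.protobuf.ProtobufEncoder;
import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
import io.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;

public class ServerInitializerSketch extends ChannelInitializer<SocketChannel> {
  private final ChannelHandler requestHandler;

  public ServerInitializerSketch(ChannelHandler requestHandler) {
    this.requestHandler = requestHandler;
  }

  @Override
  protected void initChannel(SocketChannel ch) {
    ChannelPipeline p = ch.pipeline();
    // Inbound: strip the varint32 length prefix, then decode requests.
    p.addLast(new ProtobufVarint32FrameDecoder());
    p.addLast(new ProtobufDecoder(
        ContainerProtos.ContainerCommandRequestProto.getDefaultInstance()));
    // Outbound: prepend the length prefix, then encode responses.
    p.addLast(new ProtobufVarint32LengthFieldPrepender());
    p.addLast(new ProtobufEncoder());
    p.addLast(requestHandler);
  }
}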
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java
deleted file mode 100644
index 161cdce..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.scm;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.Callable;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.RemovalListener;
-import com.google.common.cache.RemovalNotification;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
-import org.apache.hadoop.scm.container.common.helpers.Pipeline;
-
-import static org.apache.hadoop.scm.ScmConfigKeys
-    .SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT;
-import static org.apache.hadoop.scm.ScmConfigKeys
-    .SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY;
-import static org.apache.hadoop.scm.ScmConfigKeys
-    .SCM_CONTAINER_CLIENT_MAX_SIZE_KEY;
-import static org.apache.hadoop.scm.ScmConfigKeys
-    .SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneProtos
-    .ReplicationType.RATIS;
-
-/**
- * XceiverClientManager is responsible for the lifecycle of XceiverClient
- * instances.  Callers use this class to acquire an XceiverClient instance
- * connected to the desired container pipeline.  When done, the caller also uses
- * this class to release the previously acquired XceiverClient instance.
- *
- *
- * This class caches connection to container for reuse purpose, such that
- * accessing same container frequently will be through the same connection
- * without reestablishing connection. But the connection will be closed if
- * not being used for a period of time.
- */
-public class XceiverClientManager implements Closeable {
-
-  //TODO : change this to SCM configuration class
-  private final Configuration conf;
-  private final Cache<String, XceiverClientSpi> clientCache;
-  private final boolean useRatis;
-
-  private static XceiverClientMetrics metrics;
-  /**
-   * Creates a new XceiverClientManager.
-   *
-   * @param conf configuration
-   */
-  public XceiverClientManager(Configuration conf) {
-    Preconditions.checkNotNull(conf);
-    int maxSize = conf.getInt(SCM_CONTAINER_CLIENT_MAX_SIZE_KEY,
-        SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT);
-    long staleThresholdMs = conf.getTimeDuration(
-        SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY,
-        SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT, TimeUnit.MILLISECONDS);
-    this.useRatis = conf.getBoolean(
-        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
-        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
-    this.conf = conf;
-    this.clientCache = CacheBuilder.newBuilder()
-        .expireAfterAccess(staleThresholdMs, TimeUnit.MILLISECONDS)
-        .maximumSize(maxSize)
-        .removalListener(
-            new RemovalListener<String, XceiverClientSpi>() {
-            @Override
-            public void onRemoval(
-                RemovalNotification<String, XceiverClientSpi>
-                  removalNotification) {
-              synchronized (clientCache) {
-                // Mark the entry as evicted
-                XceiverClientSpi info = removalNotification.getValue();
-                info.setEvicted();
-              }
-            }
-          }).build();
-  }
-
-  @VisibleForTesting
-  public Cache<String, XceiverClientSpi> getClientCache() {
-    return clientCache;
-  }
-
-  /**
-   * Acquires a XceiverClientSpi connected to a container capable of
-   * storing the specified key.
-   *
-   * If there is already a cached XceiverClientSpi, simply return
-   * the cached one; otherwise create a new one.
-   *
-   * @param pipeline the container pipeline for the client connection
-   * @return XceiverClientSpi connected to a container
-   * @throws IOException if a XceiverClientSpi cannot be acquired
-   */
-  public XceiverClientSpi acquireClient(Pipeline pipeline)
-      throws IOException {
-    Preconditions.checkNotNull(pipeline);
-    Preconditions.checkArgument(pipeline.getMachines() != null);
-    Preconditions.checkArgument(!pipeline.getMachines().isEmpty());
-
-    synchronized (clientCache) {
-      XceiverClientSpi info = getClient(pipeline);
-      info.incrementReference();
-      return info;
-    }
-  }
-
-  /**
-   * Releases a XceiverClientSpi after use.
-   *
-   * @param client client to release
-   */
-  public void releaseClient(XceiverClientSpi client) {
-    Preconditions.checkNotNull(client);
-    synchronized (clientCache) {
-      client.decrementReference();
-    }
-  }
-
-  private XceiverClientSpi getClient(Pipeline pipeline)
-      throws IOException {
-    String containerName = pipeline.getContainerName();
-    try {
-      return clientCache.get(containerName,
-          new Callable<XceiverClientSpi>() {
-          @Override
-          public XceiverClientSpi call() throws Exception {
-            XceiverClientSpi client = pipeline.getType() == RATIS ?
-                    XceiverClientRatis.newXceiverClientRatis(pipeline, conf)
-                    : new XceiverClient(pipeline, conf);
-            client.connect();
-            return client;
-          }
-        });
-    } catch (Exception e) {
-      throw new IOException(
-          "Exception getting XceiverClient: " + e.toString(), e);
-    }
-  }
-
-  /**
-   * Close and remove all the cached clients.
-   */
-  public void close() {
-    //closing is done through RemovalListener
-    clientCache.invalidateAll();
-    clientCache.cleanUp();
-
-    if (metrics != null) {
-      metrics.unRegister();
-    }
-  }
-
-  /**
-   * Tells us if Ratis is enabled for this cluster.
-   * @return True if Ratis is enabled.
-   */
-  public boolean isUseRatis() {
-    return useRatis;
-  }
-
-  /**
-   * Returns the replication factor: THREE when Ratis is enabled,
-   * otherwise ONE.
-   * @return ReplicationFactor
-   */
-  public  OzoneProtos.ReplicationFactor getFactor() {
-    if(isUseRatis()) {
-      return OzoneProtos.ReplicationFactor.THREE;
-    }
-    return OzoneProtos.ReplicationFactor.ONE;
-  }
-
-  /**
-   * Returns the default replication type.
-   * @return Ratis or Standalone
-   */
-  public OzoneProtos.ReplicationType getType() {
-    // TODO : Fix me and make Ratis default before release.
-    // TODO: Remove this as replication factor and type are pipeline properties
-    if (isUseRatis()) {
-      return OzoneProtos.ReplicationType.RATIS;
-    }
-    return OzoneProtos.ReplicationType.STAND_ALONE;
-  }
-
-  /**
-   * Get xceiver client metric.
-   */
-  public static synchronized XceiverClientMetrics getXceiverClientMetrics() {
-    if (metrics == null) {
-      metrics = XceiverClientMetrics.create();
-    }
-
-    return metrics;
-  }
-}
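
For context on the manager's cache semantics above: acquireClient() bumps a
reference count and releaseClient() drops it, while the Guava RemovalListener
only marks an evicted entry, so a connection in use survives eviction and is
closed on the last release. A minimal, hypothetical caller (not part of this
commit; it assumes the pre-move org.apache.hadoop.scm classes are on the
classpath):

    import java.io.IOException;

    import org.apache.hadoop.scm.XceiverClientManager;
    import org.apache.hadoop.scm.XceiverClientSpi;
    import org.apache.hadoop.scm.container.common.helpers.Pipeline;

    // Hypothetical usage sketch: every acquireClient() is paired with a
    // releaseClient(), since close() only runs once the refcount is zero
    // AND the cache entry has been evicted.
    class XceiverClientUsageSketch {
      void withClient(XceiverClientManager manager, Pipeline pipeline)
          throws IOException {
        XceiverClientSpi client = manager.acquireClient(pipeline);
        try {
          // issue container commands via client.sendCommand(...)
        } finally {
          manager.releaseClient(client);
        }
      }
    }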

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientMetrics.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientMetrics.java
deleted file mode 100644
index 6359db1..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientMetrics.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.scm;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MutableRate;
-
-/**
- * The client metrics for the Storage Container protocol.
- */
-@InterfaceAudience.Private
-@Metrics(about = "Storage Container Client Metrics", context = "dfs")
-public class XceiverClientMetrics {
-  public static final String SOURCE_NAME = XceiverClientMetrics.class
-      .getSimpleName();
-
-  private @Metric MutableCounterLong pendingOps;
-  private MutableCounterLong[] pendingOpsArray;
-  private MutableRate[] containerOpsLatency;
-  private MetricsRegistry registry;
-
-  public XceiverClientMetrics() {
-    int numEnumEntries = ContainerProtos.Type.values().length;
-    this.registry = new MetricsRegistry(SOURCE_NAME);
-
-    this.pendingOpsArray = new MutableCounterLong[numEnumEntries];
-    this.containerOpsLatency = new MutableRate[numEnumEntries];
-    for (int i = 0; i < numEnumEntries; i++) {
-      pendingOpsArray[i] = registry.newCounter(
-          "numPending" + ContainerProtos.Type.valueOf(i + 1),
-          "number of pending" + ContainerProtos.Type.valueOf(i + 1) + " ops",
-          (long) 0);
-
-      containerOpsLatency[i] = registry.newRate(
-          ContainerProtos.Type.valueOf(i + 1) + "Latency",
-          "latency of " + ContainerProtos.Type.valueOf(i + 1)
-          + " ops");
-    }
-  }
-
-  public static XceiverClientMetrics create() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    return ms.register(SOURCE_NAME, "Storage Container Client Metrics",
-        new XceiverClientMetrics());
-  }
-
-  public void incrPendingContainerOpsMetrics(ContainerProtos.Type type) {
-    pendingOps.incr();
-    pendingOpsArray[type.ordinal()].incr();
-  }
-
-  public void decrPendingContainerOpsMetrics(ContainerProtos.Type type) {
-    pendingOps.incr(-1);
-    pendingOpsArray[type.ordinal()].incr(-1);
-  }
-
-  public void addContainerOpsLatency(ContainerProtos.Type type,
-      long latencyNanos) {
-    containerOpsLatency[type.ordinal()].add(latencyNanos);
-  }
-
-  public long getContainerOpsMetrics(ContainerProtos.Type type) {
-    return pendingOpsArray[type.ordinal()].value();
-  }
-
-  public void unRegister() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    ms.unregisterSource(SOURCE_NAME);
-  }
-}
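
The incr/decr/latency methods above imply a bracketing pattern around each
container operation. A hypothetical helper illustrating that pattern (the
class and method names here are illustrative, not from this tree):

    import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
    import org.apache.hadoop.scm.XceiverClientMetrics;

    // Hypothetical sketch: bump the pending counters before the op, then
    // record latency (in nanoseconds) and decrement the pending counters
    // when it completes, matching the counters registered per op type.
    class MetricsBracketSketch {
      static void recordOp(XceiverClientMetrics metrics,
          ContainerProtos.Type op, Runnable request) {
        metrics.incrPendingContainerOpsMetrics(op);
        long start = System.nanoTime();
        try {
          request.run();
        } finally {
          metrics.decrPendingContainerOpsMetrics(op);
          metrics.addContainerOpsLatency(op, System.nanoTime() - start);
        }
      }
    }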

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientRatis.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientRatis.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientRatis.java
deleted file mode 100644
index 3bc70ed..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientRatis.java
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.scm;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.ozone.client.OzoneClientUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
-import org.apache.hadoop.scm.container.common.helpers.Pipeline;
-import org.apache.ratis.RatisHelper;
-import org.apache.ratis.client.RaftClient;
-import org.apache.ratis.protocol.RaftClientReply;
-import org.apache.ratis.protocol.RaftGroup;
-import org.apache.ratis.protocol.RaftPeer;
-import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.rpc.SupportedRpcType;
-import org.apache.ratis.shaded.com.google.protobuf.ByteString;
-import org.apache.ratis.shaded.com.google.protobuf.ShadedProtoUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Objects;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CompletionException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- * An implementation of {@link XceiverClientSpi} using Ratis.
- * The underlying RPC mechanism can be chosen via the constructor.
- */
-public final class XceiverClientRatis extends XceiverClientSpi {
-  static final Logger LOG = LoggerFactory.getLogger(XceiverClientRatis.class);
-
-  public static XceiverClientRatis newXceiverClientRatis(
-      Pipeline pipeline, Configuration ozoneConf) {
-    final String rpcType = ozoneConf.get(
-        ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
-        ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
-    final int maxOutstandingRequests =
-        OzoneClientUtils.getMaxOutstandingRequests(ozoneConf);
-    return new XceiverClientRatis(pipeline,
-        SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests);
-  }
-
-  private final Pipeline pipeline;
-  private final RpcType rpcType;
-  private final AtomicReference<RaftClient> client = new AtomicReference<>();
-  private final int maxOutstandingRequests;
-
-  /**
-   * Constructs a client.
-   */
-  private XceiverClientRatis(Pipeline pipeline, RpcType rpcType,
-      int maxOutStandingChunks) {
-    super();
-    this.pipeline = pipeline;
-    this.rpcType = rpcType;
-    this.maxOutstandingRequests = maxOutStandingChunks;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  public void createPipeline(String clusterId, List<DatanodeID> datanodes)
-      throws IOException {
-    RaftGroup group = RatisHelper.newRaftGroup(datanodes);
-    LOG.debug("initializing pipeline:{} with nodes:{}", clusterId,
-        group.getPeers());
-    reinitialize(datanodes, group);
-  }
-
-  /**
-   * Returns Ratis as pipeline Type.
-   *
-   * @return - Ratis
-   */
-  @Override
-  public OzoneProtos.ReplicationType getPipelineType() {
-    return OzoneProtos.ReplicationType.RATIS;
-  }
-
-  private void reinitialize(List<DatanodeID> datanodes, RaftGroup group)
-      throws IOException {
-    if (datanodes.isEmpty()) {
-      return;
-    }
-
-    IOException exception = null;
-    for (DatanodeID d : datanodes) {
-      try {
-        reinitialize(d, group);
-      } catch (IOException ioe) {
-        if (exception == null) {
-          exception = new IOException(
-              "Failed to reinitialize some of the RaftPeer(s)", ioe);
-        } else {
-          exception.addSuppressed(ioe);
-        }
-      }
-    }
-    if (exception != null) {
-      throw exception;
-    }
-  }
-
-  /**
-   * Adds a new peer to the Ratis ring.
-   *
-   * @param datanode - new datanode
-   * @param group    - Raft group
-   * @throws IOException - on failure.
-   */
-  private void reinitialize(DatanodeID datanode, RaftGroup group)
-      throws IOException {
-    final RaftPeer p = RatisHelper.toRaftPeer(datanode);
-    try (RaftClient client = RatisHelper.newRaftClient(rpcType, p)) {
-      client.reinitialize(group, p.getId());
-    } catch (IOException ioe) {
-      LOG.error("Failed to reinitialize RaftPeer:{} datanode: {}  ",
-          p, datanode, ioe);
-      throw new IOException("Failed to reinitialize RaftPeer " + p
-          + "(datanode=" + datanode + ")", ioe);
-    }
-  }
-
-  @Override
-  public Pipeline getPipeline() {
-    return pipeline;
-  }
-
-  @Override
-  public void connect() throws Exception {
-    LOG.debug("Connecting to pipeline:{} leader:{}",
-        getPipeline().getPipelineName(),
-        RatisHelper.toRaftPeerId(pipeline.getLeader()));
-    // TODO : XceiverClient ratis should pass the config value of
-    // maxOutstandingRequests so as to set the upper bound on max no of async
-    // requests to be handled by raft client
-    if (!client.compareAndSet(null,
-        RatisHelper.newRaftClient(rpcType, getPipeline()))) {
-      throw new IllegalStateException("Client is already connected.");
-    }
-  }
-
-  @Override
-  public void close() {
-    final RaftClient c = client.getAndSet(null);
-    if (c != null) {
-      try {
-        c.close();
-      } catch (IOException e) {
-        throw new IllegalStateException(e);
-      }
-    }
-  }
-
-  private RaftClient getClient() {
-    return Objects.requireNonNull(client.get(), "client is null");
-  }
-
-  private boolean isReadOnly(ContainerCommandRequestProto proto) {
-    switch (proto.getCmdType()) {
-    case ReadContainer:
-    case ReadChunk:
-    case ListKey:
-    case GetKey:
-    case GetSmallFile:
-    case ListContainer:
-    case ListChunk:
-      return true;
-    case CloseContainer:
-    case WriteChunk:
-    case UpdateContainer:
-    case CompactChunk:
-    case CreateContainer:
-    case DeleteChunk:
-    case DeleteContainer:
-    case DeleteKey:
-    case PutKey:
-    case PutSmallFile:
-    default:
-      return false;
-    }
-  }
-
-  private RaftClientReply sendRequest(ContainerCommandRequestProto request)
-      throws IOException {
-    boolean isReadOnlyRequest = isReadOnly(request);
-    ByteString byteString =
-        ShadedProtoUtil.asShadedByteString(request.toByteArray());
-    LOG.debug("sendCommand {} {}", isReadOnlyRequest, request);
-    final RaftClientReply reply =  isReadOnlyRequest ?
-        getClient().sendReadOnly(() -> byteString) :
-        getClient().send(() -> byteString);
-    LOG.debug("reply {} {}", isReadOnlyRequest, reply);
-    return reply;
-  }
-
-  private CompletableFuture<RaftClientReply> sendRequestAsync(
-      ContainerCommandRequestProto request) throws IOException {
-    boolean isReadOnlyRequest = isReadOnly(request);
-    ByteString byteString =
-        ShadedProtoUtil.asShadedByteString(request.toByteArray());
-    LOG.debug("sendCommandAsync {} {}", isReadOnlyRequest, request);
-    return isReadOnlyRequest ? getClient().sendReadOnlyAsync(() -> byteString) :
-        getClient().sendAsync(() -> byteString);
-  }
-
-  @Override
-  public ContainerCommandResponseProto sendCommand(
-      ContainerCommandRequestProto request) throws IOException {
-    final RaftClientReply reply = sendRequest(request);
-    Preconditions.checkState(reply.isSuccess());
-    return ContainerCommandResponseProto.parseFrom(
-        ShadedProtoUtil.asByteString(reply.getMessage().getContent()));
-  }
-
-  /**
-   * Sends a given command to the server and gets a waitable future back.
-   *
-   * @param request Request
-   * @return Response to the command
-   * @throws IOException
-   */
-  @Override
-  public CompletableFuture<ContainerCommandResponseProto> sendCommandAsync(
-      ContainerCommandRequestProto request)
-      throws IOException, ExecutionException, InterruptedException {
-    return sendRequestAsync(request).whenComplete((reply, e) ->
-          LOG.debug("received reply {} for request: {} exception: {}", request,
-              reply, e))
-        .thenApply(reply -> {
-          try {
-            return ContainerCommandResponseProto.parseFrom(
-                ShadedProtoUtil.asByteString(reply.getMessage().getContent()));
-          } catch (InvalidProtocolBufferException e) {
-            throw new CompletionException(e);
-          }
-        });
-  }
-}
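
To make the read-only dispatch above concrete: commands matched by
isReadOnly() (ReadChunk, GetKey, and so on) go through Ratis' read-only path
with no log entry, while mutating commands are replicated to the quorum
before the returned future completes. A hypothetical async caller (the
wrapper class is illustrative, not from this commit):

    import java.util.concurrent.CompletableFuture;

    import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
    import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerCommandResponseProto;
    import org.apache.hadoop.scm.XceiverClientSpi;

    // Hypothetical sketch: submit an async container command and observe
    // the decoded reply; decode failures surface as CompletionException.
    class AsyncCommandSketch {
      CompletableFuture<ContainerCommandResponseProto> submit(
          XceiverClientSpi client, ContainerCommandRequestProto request)
          throws Exception {
        return client.sendCommandAsync(request)
            .whenComplete((response, t) -> {
              if (t != null) {
                System.err.println("container command failed: " + t);
              }
            });
      }
    }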

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientSpi.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientSpi.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientSpi.java
deleted file mode 100644
index 3711a51..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientSpi.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.scm;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
-import org.apache.hadoop.scm.container.common.helpers.Pipeline;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * A Client for the storageContainer protocol.
- */
-public abstract class XceiverClientSpi implements Closeable {
-
-  private final AtomicInteger referenceCount;
-  private boolean isEvicted;
-
-  XceiverClientSpi() {
-    this.referenceCount = new AtomicInteger(0);
-    this.isEvicted = false;
-  }
-
-  void incrementReference() {
-    this.referenceCount.incrementAndGet();
-  }
-
-  void decrementReference() {
-    this.referenceCount.decrementAndGet();
-    cleanup();
-  }
-
-  void setEvicted() {
-    isEvicted = true;
-    cleanup();
-  }
-
-  // Close the xceiverClient only when both conditions hold:
-  // 1) there are no outstanding references to the client, and
-  // 2) it has been evicted from the cache.
-  private void cleanup() {
-    if (referenceCount.get() == 0 && isEvicted) {
-      close();
-    }
-  }
-
-  @VisibleForTesting
-  public int getRefcount() {
-    return referenceCount.get();
-  }
-
-  /**
-   * Connects to the leader in the pipeline.
-   */
-  public abstract void connect() throws Exception;
-
-  @Override
-  public abstract void close();
-
-  /**
-   * Returns the pipeline of machines that host the container used by this
-   * client.
-   *
-   * @return pipeline of machines that host the container
-   */
-  public abstract Pipeline getPipeline();
-
-  /**
-   * Sends a given command to the server and gets the reply back.
-   * @param request Request
-   * @return Response to the command
-   * @throws IOException
-   */
-  public abstract ContainerCommandResponseProto sendCommand(
-      ContainerCommandRequestProto request) throws IOException;
-
-  /**
-   * Sends a given command to the server and gets a waitable future back.
-   *
-   * @param request Request
-   * @return Response to the command
-   * @throws IOException
-   */
-  public abstract CompletableFuture<ContainerCommandResponseProto>
-      sendCommandAsync(ContainerCommandRequestProto request)
-      throws IOException, ExecutionException, InterruptedException;
-
-  /**
-   * Create a pipeline.
-   *
-   * @param pipelineID - Name of the pipeline.
-   * @param datanodes - Datanodes
-   */
-  public abstract void createPipeline(String pipelineID,
-      List<DatanodeID> datanodes) throws IOException;
-
-  /**
-   * Returns pipeline Type.
-   *
-   * @return - {Stand_Alone, Ratis or Chained}
-   */
-  public abstract OzoneProtos.ReplicationType getPipelineType();
-}
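
The reference-counting contract above means close() is deferred until a
client is both unreferenced and evicted. A test-style sketch of that
lifecycle (hypothetical; the lifecycle methods are package-private, hence
the package declaration):

    package org.apache.hadoop.scm;

    // Hypothetical sketch: eviction alone does not close a client that is
    // still referenced; the last decrementReference() triggers cleanup().
    class XceiverClientLifecycleSketch {
      static void lifecycle(XceiverClientSpi client) {
        client.incrementReference();  // refcount = 1
        client.setEvicted();          // evicted, but refcount > 0: stays open
        client.decrementReference();  // refcount hits 0 while evicted:
                                      // cleanup() calls close()
      }
    }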

