http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
deleted file mode 100644
index 3a5da72..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto
-    .ErrorCode;
-
-/**
- * Response to Datanode Register call.
- */
-public class RegisteredCommand {
-  private String datanodeUUID;
-  private String clusterID;
-  private ErrorCode error;
-  private String hostname;
-  private String ipAddress;
-
-  public RegisteredCommand(final ErrorCode error, final String datanodeUUID,
-      final String clusterID) {
-    this(error, datanodeUUID, clusterID, null, null);
-  }
-  public RegisteredCommand(final ErrorCode error, final String datanodeUUID,
-      final String clusterID, final String hostname, final String ipAddress) {
-    this.datanodeUUID = datanodeUUID;
-    this.clusterID = clusterID;
-    this.error = error;
-    this.hostname = hostname;
-    this.ipAddress = ipAddress;
-  }
-
-  /**
-   * Returns a new builder.
-   *
-   * @return - Builder
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Returns datanode UUID.
-   *
-   * @return - Datanode ID.
-   */
-  public String getDatanodeUUID() {
-    return datanodeUUID;
-  }
-
-  /**
-   * Returns cluster ID.
-   *
-   * @return - ClusterID
-   */
-  public String getClusterID() {
-    return clusterID;
-  }
-
-  /**
-   * Returns ErrorCode.
-   *
-   * @return - ErrorCode
-   */
-  public ErrorCode getError() {
-    return error;
-  }
-
-  /**
-   * Returns the hostname.
-   *
-   * @return - hostname
-   */
-  public String getHostName() {
-    return hostname;
-  }
-
-  /**
-   * Returns the ipAddress of the datanode.
-   */
-  public String getIpAddress() {
-    return ipAddress;
-  }
-
-  /**
-   * Gets the protobuf message of this object.
-   *
-   * @return A protobuf message.
-   */
-  public byte[] getProtoBufMessage() {
-    SCMRegisteredResponseProto.Builder builder =
-        SCMRegisteredResponseProto.newBuilder()
-            .setClusterID(this.clusterID)
-            .setDatanodeUUID(this.datanodeUUID)
-            .setErrorCode(this.error);
-    if (hostname != null && ipAddress != null) {
-      builder.setHostname(hostname).setIpAddress(ipAddress);
-    }
-    return builder.build().toByteArray();
-  }
-
-  /**
-   * A builder class to verify all values are sane.
-   */
-  public static class Builder {
-    private String datanodeUUID;
-    private String clusterID;
-    private ErrorCode error;
-    private String ipAddress;
-    private String hostname;
-
-    /**
-     * Sets the datanode UUID.
-     *
-     * @param dnUUID - datanode UUID
-     * @return Builder
-     */
-    public Builder setDatanodeUUID(String dnUUID) {
-      this.datanodeUUID = dnUUID;
-      return this;
-    }
-
-    /**
-     * Create this object from a Protobuf message.
-     *
-     * @param response - RegisteredCmdResponseProto
-     * @return RegisteredCommand
-     */
-    public RegisteredCommand getFromProtobuf(SCMRegisteredResponseProto
-        response) {
-      Preconditions.checkNotNull(response);
-      if (response.hasHostname() && response.hasIpAddress()) {
-        return new RegisteredCommand(response.getErrorCode(),
-            response.getDatanodeUUID(), response.getClusterID(),
-            response.getHostname(), response.getIpAddress());
-      } else {
-        return new RegisteredCommand(response.getErrorCode(),
-            response.getDatanodeUUID(), response.getClusterID());
-      }
-    }
-
-    /**
-     * Sets cluster ID.
-     *
-     * @param cluster - clusterID
-     * @return Builder
-     */
-    public Builder setClusterID(String cluster) {
-      this.clusterID = cluster;
-      return this;
-    }
-
-    /**
-     * Sets Error code.
-     *
-     * @param errorCode - error code
-     * @return Builder
-     */
-    public Builder setErrorCode(ErrorCode errorCode) {
-      this.error = errorCode;
-      return this;
-    }
-
-    /**
-     * Sets the hostname.
-     */
-    public Builder setHostname(String host) {
-      this.hostname = host;
-      return this;
-    }
-
-    public Builder setIpAddress(String ipAddr) {
-      this.ipAddress = ipAddr;
-      return this;
-    }
-
-    /**
-     * Build the command object.
-     *
-     * @return RegisteredCommand
-     */
-    public RegisteredCommand build() {
-      if (this.error == ErrorCode.success && (this.datanodeUUID == null
-          || this.datanodeUUID.isEmpty() || this.clusterID == null
-          || this.clusterID.isEmpty())) {
-        throw new IllegalArgumentException("On success, RegisteredCommand "
-            + "needs datanodeUUID and ClusterID.");
-      }
-      if (hostname != null && ipAddress != null) {
-        return new RegisteredCommand(this.error, this.datanodeUUID,
-            this.clusterID, this.hostname, this.ipAddress);
-      } else {
-        return new RegisteredCommand(this.error, this.datanodeUUID,
-            this.clusterID);
-      }
-    }
-  }
-}
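
For reference, a minimal sketch of how the removed Builder API fits
together; the values below are illustrative placeholders, not taken from
this patch:

    RegisteredCommand cmd = RegisteredCommand.newBuilder()
        .setErrorCode(ErrorCode.success)
        .setDatanodeUUID("datanode-uuid")      // placeholder value
        .setClusterID("cluster-id")            // placeholder value
        .setHostname("dn1.example.com")
        .setIpAddress("10.0.0.1")
        .build();                              // validates uuid/cluster on success
    byte[] wire = cmd.getProtoBufMessage();    // SCMRegisteredResponseProto bytes

Note that build() rejects a success response without datanodeUUID and
clusterID, and getProtoBufMessage() only serializes hostname/ipAddress when
both are set.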

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java
deleted file mode 100644
index 8530285..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
-
-import java.util.List;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto
-    .Builder;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
-
-import com.google.common.base.Preconditions;
-
-/**
- * SCM command to request replication of a container.
- */
-public class ReplicateContainerCommand
-    extends SCMCommand<ReplicateContainerCommandProto> {
-
-  private final long containerID;
-  private final List<DatanodeDetails> sourceDatanodes;
-
-  public ReplicateContainerCommand(long containerID,
-      List<DatanodeDetails> sourceDatanodes) {
-    super();
-    this.containerID = containerID;
-    this.sourceDatanodes = sourceDatanodes;
-  }
-
-  // Should be called only for protobuf conversion
-  public ReplicateContainerCommand(long containerID,
-      List<DatanodeDetails> sourceDatanodes, long id) {
-    super(id);
-    this.containerID = containerID;
-    this.sourceDatanodes = sourceDatanodes;
-  }
-
-  @Override
-  public Type getType() {
-    return SCMCommandProto.Type.replicateContainerCommand;
-  }
-
-  @Override
-  public byte[] getProtoBufMessage() {
-    return getProto().toByteArray();
-  }
-
-  public ReplicateContainerCommandProto getProto() {
-    Builder builder = ReplicateContainerCommandProto.newBuilder()
-        .setCmdId(getId())
-        .setContainerID(containerID);
-    for (DatanodeDetails dd : sourceDatanodes) {
-      builder.addSources(dd.getProtoBufMessage());
-    }
-    return builder.build();
-  }
-
-  public static ReplicateContainerCommand getFromProtobuf(
-      ReplicateContainerCommandProto protoMessage) {
-    Preconditions.checkNotNull(protoMessage);
-
-    List<DatanodeDetails> datanodeDetails =
-        protoMessage.getSourcesList()
-            .stream()
-            .map(DatanodeDetails::getFromProtoBuf)
-            .collect(Collectors.toList());
-
-    return new ReplicateContainerCommand(protoMessage.getContainerID(),
-        datanodeDetails, protoMessage.getCmdId());
-
-  }
-
-  public long getContainerID() {
-    return containerID;
-  }
-
-  public List<DatanodeDetails> getSourceDatanodes() {
-    return sourceDatanodes;
-  }
-}
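
A short sketch of the protobuf round trip implemented above; dn1, dn2 and
the container id 42 are assumed placeholders (java.util.Arrays is used for
brevity):

    // Assumes dn1 and dn2 are DatanodeDetails of nodes holding replicas.
    ReplicateContainerCommand cmd =
        new ReplicateContainerCommand(42L, Arrays.asList(dn1, dn2));
    ReplicateContainerCommandProto proto = cmd.getProto();
    ReplicateContainerCommand decoded =
        ReplicateContainerCommand.getFromProtobuf(proto);
    // The three-argument constructor used by getFromProtobuf preserves the
    // original command id instead of generating a new one.
    assert decoded.getId() == cmd.getId();
    assert decoded.getContainerID() == 42L;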

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
deleted file mode 100644
index 09f361d..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
-
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-
-import static org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ReregisterCommandProto;
-
-/**
- * Informs a datanode to register itself with SCM again.
- */
-public class ReregisterCommand extends
-    SCMCommand<ReregisterCommandProto>{
-
-  /**
-   * Returns the type of this command.
-   *
-   * @return Type
-   */
-  @Override
-  public SCMCommandProto.Type getType() {
-    return SCMCommandProto.Type.reregisterCommand;
-  }
-
-  /**
-   * Gets the protobuf message of this object.
-   *
-   * @return A protobuf message.
-   */
-  @Override
-  public byte[] getProtoBufMessage() {
-    return getProto().toByteArray();
-  }
-
-  /**
-   * Command ids are not tracked for ReregisterCommand, so this always
-   * returns 0.
-   *
-   * @return cmdId.
-   */
-  @Override
-  public long getId() {
-    return 0;
-  }
-
-  public ReregisterCommandProto getProto() {
-    return ReregisterCommandProto
-        .newBuilder()
-        .build();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
deleted file mode 100644
index 5773bf1..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
-
-import com.google.protobuf.GeneratedMessage;
-import org.apache.hadoop.hdds.HddsIdFactory;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload;
-
-/**
- * Base class for SCM commands that handles conversion between the Java
- * representation and the protobuf wire format.
- * @param <T> the protobuf message type corresponding to this command
- */
-public abstract class SCMCommand<T extends GeneratedMessage> implements
-    IdentifiableEventPayload {
-  private long id;
-
-  SCMCommand() {
-    this.id = HddsIdFactory.getLongId();
-  }
-
-  SCMCommand(long id) {
-    this.id = id;
-  }
-  /**
-   * Returns the type of this command.
-   * @return Type
-   */
-  public abstract SCMCommandProto.Type getType();
-
-  /**
-   * Gets the protobuf message of this object.
-   * @return A protobuf message.
-   */
-  public abstract byte[] getProtoBufMessage();
-
-  /**
-   * Gets the commandId of this object.
-   * @return the command id.
-   */
-  public long getId() {
-    return id;
-  }
-
-}
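
The base class above assigns each command a fresh long id from
HddsIdFactory unless an id is supplied, the path used when decoding from
protobuf. A hypothetical minimal subclass illustrating the contract (it
must live in the same package, since the constructors are package-private;
the name and reuse of the reregister message type are invented for the
example):

    public class PingCommand extends SCMCommand<ReregisterCommandProto> {
      @Override
      public SCMCommandProto.Type getType() {
        return SCMCommandProto.Type.reregisterCommand;  // reusing an existing type
      }
      @Override
      public byte[] getProtoBufMessage() {
        return ReregisterCommandProto.newBuilder().build().toByteArray();
      }
    }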

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java
deleted file mode 100644
index 7083c1b..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
-/**
- Set of classes that help in protobuf conversions.
- **/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java
deleted file mode 100644
index a718fa7..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.protocol;
-
-/**
- * This package contains classes for HDDS protocol definitions.
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
deleted file mode 100644
index 4e1e27e..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisterRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
-import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
-
-import java.io.Closeable;
-import java.io.IOException;
-
-/**
- * This class is the client-side translator to translate the requests made on
- * the {@link StorageContainerDatanodeProtocol} interface to the RPC server
- * implementing {@link StorageContainerDatanodeProtocolPB}.
- */
-public class StorageContainerDatanodeProtocolClientSideTranslatorPB
-    implements StorageContainerDatanodeProtocol, ProtocolTranslator, Closeable {
-
-  /**
-   * RpcController is not used and hence is set to null.
-   */
-  private static final RpcController NULL_RPC_CONTROLLER = null;
-  private final StorageContainerDatanodeProtocolPB rpcProxy;
-
-  /**
-   * Constructs a Client side interface that calls into SCM datanode protocol.
-   *
-   * @param rpcProxy - Proxy for RPC.
-   */
-  public StorageContainerDatanodeProtocolClientSideTranslatorPB(
-      StorageContainerDatanodeProtocolPB rpcProxy) {
-    this.rpcProxy = rpcProxy;
-  }
-
-  /**
-   * Closes this stream and releases any system resources associated with it. If
-   * the stream is already closed then invoking this method has no effect.
-   * <p>
-   * <p> As noted in {@link AutoCloseable#close()}, cases where the close may
-   * fail require careful attention. It is strongly advised to relinquish the
-   * underlying resources and to internally <em>mark</em> the {@code Closeable}
-   * as closed, prior to throwing the {@code IOException}.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override
-  public void close() throws IOException {
-    RPC.stopProxy(rpcProxy);
-  }
-
-  /**
-   * Return the proxy object underlying this protocol translator.
-   *
-   * @return the proxy object underlying this protocol translator.
-   */
-  @Override
-  public Object getUnderlyingProxyObject() {
-    return rpcProxy;
-  }
-
-  /**
-   * Returns SCM version.
-   *
-   * @param unused - set to null and unused.
-   * @return Version info.
-   */
-  @Override
-  public SCMVersionResponseProto getVersion(SCMVersionRequestProto
-      unused) throws IOException {
-    SCMVersionRequestProto request =
-        SCMVersionRequestProto.newBuilder().build();
-    final SCMVersionResponseProto response;
-    try {
-      response = rpcProxy.getVersion(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException ex) {
-      throw ProtobufHelper.getRemoteException(ex);
-    }
-    return response;
-  }
-
-  /**
-   * Sent by the datanode to SCM.
-   *
-   * @param heartbeat node heartbeat
-   * @throws IOException
-   */
-
-  @Override
-  public SCMHeartbeatResponseProto sendHeartbeat(
-      SCMHeartbeatRequestProto heartbeat) throws IOException {
-    final SCMHeartbeatResponseProto resp;
-    try {
-      resp = rpcProxy.sendHeartbeat(NULL_RPC_CONTROLLER, heartbeat);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    return resp;
-  }
-
-  /**
-   * Register Datanode.
-   *
-   * @param datanodeDetailsProto - Datanode Details
-   * @param nodeReport - Node Report.
-   * @param containerReportsRequestProto - Container Reports.
-   * @return SCM Command.
-   */
-  @Override
-  public SCMRegisteredResponseProto register(
-      DatanodeDetailsProto datanodeDetailsProto, NodeReportProto nodeReport,
-      ContainerReportsProto containerReportsRequestProto,
-      PipelineReportsProto pipelineReportsProto)
-      throws IOException {
-    SCMRegisterRequestProto.Builder req =
-        SCMRegisterRequestProto.newBuilder();
-    req.setDatanodeDetails(datanodeDetailsProto);
-    req.setContainerReport(containerReportsRequestProto);
-    req.setPipelineReports(pipelineReportsProto);
-    req.setNodeReport(nodeReport);
-    final SCMRegisteredResponseProto response;
-    try {
-      response = rpcProxy.register(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    return response;
-  }
-
-}
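
Typical use of this translator, sketched under the assumption that rpcProxy
is a StorageContainerDatanodeProtocolPB proxy obtained via
RPC.getProtocolProxy (as ContainerTestUtils.createEndpoint, later in this
patch, does) and that the caller propagates IOException:

    try (StorageContainerDatanodeProtocolClientSideTranslatorPB client =
        new StorageContainerDatanodeProtocolClientSideTranslatorPB(rpcProxy)) {
      // The request argument is documented as unused; the translator builds
      // its own SCMVersionRequestProto internally.
      SCMVersionResponseProto version = client.getVersion(null);
    }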

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
deleted file mode 100644
index 9b28b5a..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos
-    .StorageContainerDatanodeProtocolService;
-import org.apache.hadoop.ipc.ProtocolInfo;
-
-/**
- * Protocol used from a datanode to StorageContainerManager.  This extends
- * the Protocol Buffers service interface to add Hadoop-specific annotations.
- */
-
-@ProtocolInfo(protocolName =
-    "org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol",
-    protocolVersion = 1)
-public interface StorageContainerDatanodeProtocolPB extends
-    StorageContainerDatanodeProtocolService.BlockingInterface {
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
deleted file mode 100644
index 8622332..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisterRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
-
-import java.io.IOException;
-
-/**
- * This class is the server-side translator that forwards requests received on
- * {@link StorageContainerDatanodeProtocolPB} to the {@link
- * StorageContainerDatanodeProtocol} server implementation.
- */
-public class StorageContainerDatanodeProtocolServerSideTranslatorPB
-    implements StorageContainerDatanodeProtocolPB {
-
-  private final StorageContainerDatanodeProtocol impl;
-
-  public StorageContainerDatanodeProtocolServerSideTranslatorPB(
-      StorageContainerDatanodeProtocol impl) {
-    this.impl = impl;
-  }
-
-  @Override
-  public SCMVersionResponseProto getVersion(RpcController controller,
-      SCMVersionRequestProto request)
-      throws ServiceException {
-    try {
-      return impl.getVersion(request);
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public SCMRegisteredResponseProto register(RpcController controller,
-      SCMRegisterRequestProto request) throws ServiceException {
-    try {
-      ContainerReportsProto containerRequestProto = request
-          .getContainerReport();
-      NodeReportProto dnNodeReport = request.getNodeReport();
-      PipelineReportsProto pipelineReport = request.getPipelineReports();
-      return impl.register(request.getDatanodeDetails(), dnNodeReport,
-          containerRequestProto, pipelineReport);
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public SCMHeartbeatResponseProto sendHeartbeat(RpcController controller,
-      SCMHeartbeatRequestProto request) throws ServiceException {
-    try {
-      return impl.sendHeartbeat(request);
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-}
\ No newline at end of file
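
The server-side translator is wired into the RPC layer as a reflective
blocking service. A sketch, assuming impl is some
StorageContainerDatanodeProtocol implementation (SCMTestUtils.startScmRpcServer,
later in this patch, uses exactly this pattern):

    BlockingService service =
        StorageContainerDatanodeProtocolService.newReflectiveBlockingService(
            new StorageContainerDatanodeProtocolServerSideTranslatorPB(impl));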

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
deleted file mode 100644
index 378a8f3..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.protocolPB;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
deleted file mode 100644
index 982029c..0000000
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ /dev/null
@@ -1,386 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for an *unstable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.hdds.protocol.proto";
-
-option java_outer_classname = "StorageContainerDatanodeProtocolProtos";
-
-option java_generic_services = true;
-
-option java_generate_equals_and_hash = true;
-
-package hadoop.hdds;
-
-import "hdds.proto";
-
-/**
- * Request for version info of the software stack on the server.
- */
-message SCMVersionRequestProto {}
-
-/**
-* Generic response that is sent to a version request. This allows keys to be
-* added on the fly and the protocol to remain stable.
-*/
-message SCMVersionResponseProto {
-  required uint32 softwareVersion = 1;
-  repeated hadoop.hdds.KeyValue keys = 2;
-}
-
-message SCMRegisterRequestProto {
-  required DatanodeDetailsProto datanodeDetails = 1;
-  required NodeReportProto nodeReport = 2;
-  required ContainerReportsProto containerReport = 3;
-  required PipelineReportsProto pipelineReports = 4;
-}
-
-/**
- * Datanode ID returned by the SCM. This is similar to name node
- * registration of a datanode.
- */
-message SCMRegisteredResponseProto {
-  enum ErrorCode {
-    success = 1;
-    errorNodeNotPermitted = 2;
-  }
-  required ErrorCode errorCode = 1;
-  required string datanodeUUID = 2;
-  required string clusterID = 3;
-  optional SCMNodeAddressList addressList = 4;
-  optional string hostname = 5;
-  optional string ipAddress = 6;
-}
-
-/**
-* This message is sent by the datanode to indicate that it is alive or it is
-* registering with the node manager.
-*/
-message SCMHeartbeatRequestProto {
-  required DatanodeDetailsProto datanodeDetails = 1;
-  optional NodeReportProto nodeReport = 2;
-  optional ContainerReportsProto containerReport = 3;
-  repeated CommandStatusReportsProto commandStatusReports = 4;
-  optional ContainerActionsProto containerActions = 5;
-  optional PipelineActionsProto pipelineActions = 6;
-  optional PipelineReportsProto pipelineReports = 7;
-}
-
-/*
- * A group of commands for the datanode to execute
- */
-message SCMHeartbeatResponseProto {
-  required string datanodeUUID = 1;
-  repeated SCMCommandProto commands = 2;
-}
-
-message SCMNodeAddressList {
-  repeated string addressList = 1;
-}
-
-/**
-* This message is sent along with the heartbeat to report datanode
-* storage utilization to SCM.
-*/
-message NodeReportProto {
-  repeated StorageReportProto storageReport = 1;
-}
-
-message StorageReportProto {
-  required string storageUuid = 1;
-  required string storageLocation = 2;
-  optional uint64 capacity = 3 [default = 0];
-  optional uint64 scmUsed = 4 [default = 0];
-  optional uint64 remaining = 5 [default = 0];
-  optional StorageTypeProto storageType = 6 [default = DISK];
-  optional bool failed = 7 [default = false];
-}
-
-/**
- * Types of recognized storage media.
- */
-enum StorageTypeProto {
-  DISK = 1;
-  SSD = 2;
-  ARCHIVE = 3;
-  RAM_DISK = 4;
-  PROVIDED = 5;
-}
-
-message ContainerReportsProto {
-  repeated ContainerInfo reports = 1;
-}
-
-message CommandStatusReportsProto {
-  repeated CommandStatus cmdStatus = 1;
-}
-
-message CommandStatus {
-  enum Status {
-    PENDING = 1;
-    EXECUTED = 2;
-    FAILED = 3;
-  }
-  required int64 cmdId = 1;
-  required Status status = 2 [default = PENDING];
-  required SCMCommandProto.Type type = 3;
-  optional string msg = 4;
-  optional ContainerBlocksDeletionACKProto blockDeletionAck = 5;
-}
-
-message ContainerActionsProto {
-  repeated ContainerAction containerActions = 1;
-}
-
-message ContainerAction {
-  enum Action {
-    CLOSE = 1;
-  }
-
-  enum Reason {
-    CONTAINER_FULL = 1;
-  }
-
-  required int64 containerID = 1;
-  required Action action = 2;
-  optional Reason reason = 3;
-}
-
-message PipelineReport {
-  required PipelineID pipelineID = 1;
-}
-
-message PipelineReportsProto {
-  repeated PipelineReport pipelineReport = 1;
-}
-
-message PipelineActionsProto {
-  repeated PipelineAction pipelineActions = 1;
-}
-
-message ClosePipelineInfo {
-  enum Reason {
-    PIPELINE_FAILED = 1;
-  }
-  required PipelineID pipelineID = 1;
-  optional Reason reason = 3;
-  optional string detailedReason = 4;
-}
-
-message PipelineAction {
-  enum Action {
-    CLOSE = 1;
-  }
-
-  /**
-   * Action will be used to identify the correct pipeline action.
-   */
-  required Action action = 1;
-  optional ClosePipelineInfo closePipeline = 2;
-}
-
-/**
-A container report contains the following information.
-*/
-message ContainerInfo {
-  required int64 containerID = 1;
-  optional int64 size = 2;
-  optional int64 used = 3;
-  optional int64 keyCount = 4;
-  // TODO: move the io count to separate message
-  optional int64 readCount = 5;
-  optional int64 writeCount = 6;
-  optional int64 readBytes = 7;
-  optional int64 writeBytes = 8;
-  optional string finalhash = 9;
-  optional hadoop.hdds.LifeCycleState state = 10;
-  optional int64 deleteTransactionId = 11;
-}
-
-/*
- * These are commands returned by SCM to the datanode to execute.
- */
-message SCMCommandProto {
-  enum Type {
-    reregisterCommand = 1;
-    deleteBlocksCommand = 2;
-    closeContainerCommand = 3;
-    deleteContainerCommand = 4;
-    replicateContainerCommand = 5;
-  }
-  // TODO: once we start using protoc 3.x, refactor this message using "oneof"
-  required Type commandType = 1;
-  optional ReregisterCommandProto reregisterCommandProto = 2;
-  optional DeleteBlocksCommandProto deleteBlocksCommandProto = 3;
-  optional CloseContainerCommandProto closeContainerCommandProto = 4;
-  optional DeleteContainerCommandProto deleteContainerCommandProto = 5;
-  optional ReplicateContainerCommandProto replicateContainerCommandProto = 6;
-}
-
-/**
- * SCM informs a datanode to register itself again.
- * On receiving this command, the datanode will transition to the REGISTER state.
- */
-message ReregisterCommandProto {}
-
-
-// HB response from SCM, contains a list of block deletion transactions.
-message DeleteBlocksCommandProto {
-  repeated DeletedBlocksTransaction deletedBlocksTransactions = 1;
-  required int64 cmdId = 3;
-}
-
-// The deleted blocks which are stored in deletedBlock.db of scm.
- // We don't use BlockID because this only contains multiple localIDs
-// of the same containerID.
-message DeletedBlocksTransaction {
-  required int64 txID = 1;
-  required int64 containerID = 2;
-  repeated int64 localID = 3;
-  // the retry count of sending the delete command to the datanode.
-  required int32 count = 4;
-}
-
-// ACK message datanode sent to SCM, contains the result of
-// block deletion transactions.
-message ContainerBlocksDeletionACKProto {
-  message DeleteBlockTransactionResult {
-    required int64 txID = 1;
-    required int64 containerID = 2;
-    required bool success = 3;
-  }
-  repeated DeleteBlockTransactionResult results = 1;
-  required string dnId = 2;
-}
-
-/**
-This command asks the datanode to close a specific container.
-*/
-message CloseContainerCommandProto {
-  required int64 containerID = 1;
-  required hadoop.hdds.ReplicationType replicationType = 2;
-  required int64 cmdId = 3;
-  required PipelineID pipelineID = 4;
-}
-
-/**
-This command asks the datanode to delete a specific container.
-*/
-message DeleteContainerCommandProto {
-  required int64 containerID = 1;
-  required int64 cmdId = 2;
-}
-
-/**
-This command asks the datanode to replicate a container from specific sources.
-*/
-message ReplicateContainerCommandProto {
-  required int64 containerID = 1;
-  repeated DatanodeDetailsProto sources = 2;
-  required int64 cmdId = 3;
-}
-
-/**
- * Protocol used from a datanode to StorageContainerManager.
- *
- * Please see the request and response messages for details of the RPC calls.
- *
- * Here is a simple state diagram that shows how a datanode would boot up and
- * communicate with SCM.
- *
- *           -----------------------
- *          |         Start         |
- *           ---------- ------------
- *                     |
- *                     |
- *                     |
- *                     |
- *                     |
- *                     |
- *                     |
- *           ----------v-------------
- *          |   Searching for  SCM    ------------
- *           ---------- -------------             |
- *                     |                          |
- *                     |                          |
- *                     |                ----------v-------------
- *                     |               | Register if needed     |
- *                     |                ----------- ------------
- *                     |                           |
- *                     v                           |
- *            ----------- ----------------         |
- *  ---------   Heartbeat state           <--------
- * |          --------^-------------------
- * |                  |
- * |                  |
- * |                  |
- * |                  |
- * |                  |
- * |                  |
- * |                  |
- *  ------------------
- *
- *
- *
- * Here is how this protocol is used by the datanode. When a datanode boots up
- * it moves into a state called SEARCHING_SCM. In this state the datanode is
- * trying to establish communication with the SCM. The addresses of the SCMs
- * are retrieved from the configuration information.
- *
- * In the SEARCHING_SCM state, the only RPC call made by the datanode is a
- * getVersion call to SCM. Once any of the SCMs reply, the datanode checks
- * whether it has a locally persisted datanode ID. If it does, the datanode is
- * already registered with some SCM. If this file is not found, the datanode
- * assumes that it needs to do a registration.
- *
- * If registration is needed, the datanode moves into the REGISTER state. It
- * will send a register call with the DatanodeDetailsProto data structure and
- * persist that info.
- *
- * The response to the command contains the clusterID. The datanode persists
- * this information as well and then moves into the heartbeat state.
- *
- * Once in the heartbeat state, the datanode sends heartbeats and container
- * reports to SCM and processes commands issued by SCM until it is shut down.
- *
- */
-service StorageContainerDatanodeProtocolService {
-
-  /**
-  * Gets the version information from the SCM.
-  */
-  rpc getVersion (SCMVersionRequestProto) returns (SCMVersionResponseProto);
-
-  /**
-  * Registers a data node with SCM.
-  */
-  rpc register (SCMRegisterRequestProto) returns (SCMRegisteredResponseProto);
-
-  /**
-   * Send heartbeat from datanode to SCM. Heartbeats under SCM look more
-   * like a lifeline protocol than heartbeats under HDFS. In other words, they
-   * are extremely lightweight and carry no data payload.
-   */
-  rpc sendHeartbeat (SCMHeartbeatRequestProto) returns (SCMHeartbeatResponseProto);
-
-}
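
As a sketch of the generated Java API for the messages above, here is a
minimal heartbeat as a datanode might assemble it; datanodeDetails and the
storage values are assumed placeholders:

    SCMHeartbeatRequestProto heartbeat = SCMHeartbeatRequestProto.newBuilder()
        .setDatanodeDetails(datanodeDetails)        // this node's details proto
        .setNodeReport(NodeReportProto.newBuilder()
            .addStorageReport(StorageReportProto.newBuilder()
                .setStorageUuid("storage-uuid")     // placeholder values
                .setStorageLocation("/data/hdds")
                .setCapacity(1000000L)
                .setScmUsed(200000L)
                .setRemaining(800000L)))
        .build();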

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider b/hadoop-hdds/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider
deleted file mode 100644
index 2e103fe..0000000
--- a/hadoop-hdds/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainerProvider

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
deleted file mode 100644
index 923440e..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
-import org.apache.hadoop.ozone.protocolPB
-    .StorageContainerDatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import java.net.InetSocketAddress;
-
-/**
- * Helper utility to test containers.
- */
-public final class ContainerTestUtils {
-
-  private ContainerTestUtils() {
-  }
-
-  /**
-   * Creates an Endpoint class for testing purpose.
-   *
-   * @param conf - Conf
-   * @param address - InetSocketAddress
-   * @param rpcTimeout - rpcTimeOut
-   * @return EndPoint
-   * @throws Exception
-   */
-  public static EndpointStateMachine createEndpoint(Configuration conf,
-      InetSocketAddress address, int rpcTimeout) throws Exception {
-    RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class,
-        ProtobufRpcEngine.class);
-    long version =
-        RPC.getProtocolVersion(StorageContainerDatanodeProtocolPB.class);
-
-    StorageContainerDatanodeProtocolPB rpcProxy = RPC.getProtocolProxy(
-        StorageContainerDatanodeProtocolPB.class, version,
-        address, UserGroupInformation.getCurrentUser(), conf,
-        NetUtils.getDefaultSocketFactory(conf), rpcTimeout,
-        RetryPolicies.TRY_ONCE_THEN_FAIL).getProxy();
-
-    StorageContainerDatanodeProtocolClientSideTranslatorPB rpcClient =
-        new StorageContainerDatanodeProtocolClientSideTranslatorPB(rpcProxy);
-    return new EndpointStateMachine(address, rpcClient, conf);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
deleted file mode 100644
index a24f096..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common;
-
-import com.google.protobuf.BlockingService;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos
-    .StorageContainerDatanodeProtocolService;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
-import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
-import org.apache.hadoop.ozone.protocolPB
-    .StorageContainerDatanodeProtocolServerSideTranslatorPB;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.ServerSocket;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-
-/**
- * Test Endpoint class.
- */
-public final class SCMTestUtils {
-  /**
-   * Never constructed.
-   */
-  private SCMTestUtils() {
-  }
-
-  /**
-   * Starts an RPC server, if configured.
-   *
-   * @param conf configuration
-   * @param addr configured address of RPC server
-   * @param protocol RPC protocol provided by RPC server
-   * @param instance RPC protocol implementation instance
-   * @param handlerCount RPC server handler count
-   * @return RPC server
-   * @throws IOException if there is an I/O error while creating RPC server
-   */
-  private static RPC.Server startRpcServer(Configuration conf,
-      InetSocketAddress addr, Class<?>
-      protocol, BlockingService instance, int handlerCount)
-      throws IOException {
-    RPC.Server rpcServer = new RPC.Builder(conf)
-        .setProtocol(protocol)
-        .setInstance(instance)
-        .setBindAddress(addr.getHostString())
-        .setPort(addr.getPort())
-        .setNumHandlers(handlerCount)
-        .setVerbose(false)
-        .setSecretManager(null)
-        .build();
-
-    DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer);
-    return rpcServer;
-  }
-
-
-  /**
-   * Start Datanode RPC server.
-   */
-  public static RPC.Server startScmRpcServer(Configuration configuration,
-      StorageContainerDatanodeProtocol server,
-      InetSocketAddress rpcServerAddress, int handlerCount) throws
-      IOException {
-    RPC.setProtocolEngine(configuration,
-        StorageContainerDatanodeProtocolPB.class,
-        ProtobufRpcEngine.class);
-
-    BlockingService scmDatanodeService =
-        StorageContainerDatanodeProtocolService.
-            newReflectiveBlockingService(
-                new StorageContainerDatanodeProtocolServerSideTranslatorPB(
-                    server));
-
-    RPC.Server scmServer = startRpcServer(configuration, rpcServerAddress,
-        StorageContainerDatanodeProtocolPB.class, scmDatanodeService,
-        handlerCount);
-
-    scmServer.start();
-    return scmServer;
-  }
-
-  public static InetSocketAddress getReuseableAddress() throws IOException {
-    try (ServerSocket socket = new ServerSocket(0)) {
-      socket.setReuseAddress(true);
-      int port = socket.getLocalPort();
-      String addr = InetAddress.getLoopbackAddress().getHostAddress();
-      return new InetSocketAddress(addr, port);
-    }
-  }
-
-  public static OzoneConfiguration getConf() {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(HDDS_DATANODE_DIR_KEY, GenericTestUtils
-        .getRandomizedTempPath());
-    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, GenericTestUtils
-        .getRandomizedTempPath());
-    return conf;
-  }
-
-  public static OzoneConfiguration getOzoneConf() {
-    return new OzoneConfiguration();
-  }
-
-}
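
For context: a typical endpoint test wires these helpers together roughly as follows. This is only a sketch, assuming a mock protocol implementation such as the ScmTestMock in the next file; it is not part of the patch.

    // Start a mock SCM server on a free loopback port, run the test, stop it.
    OzoneConfiguration conf = SCMTestUtils.getConf();
    InetSocketAddress address = SCMTestUtils.getReuseableAddress();
    ScmTestMock mock = new ScmTestMock();
    RPC.Server server =
        SCMTestUtils.startScmRpcServer(conf, mock, address, 10);
    try {
      // Exercise the datanode endpoint against the mock SCM here.
    } finally {
      server.stop();
    }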

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
deleted file mode 100644
index 3e45596..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
+++ /dev/null
@@ -1,353 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatus;
-import org.apache.hadoop.hdds.scm.VersionInfo;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerInfo;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
-import org.apache.hadoop.ozone.protocol.VersionResponse;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * SCM RPC mock class.
- */
-public class ScmTestMock implements StorageContainerDatanodeProtocol {
-  private int rpcResponseDelay;
-  private AtomicInteger heartbeatCount = new AtomicInteger(0);
-  private AtomicInteger rpcCount = new AtomicInteger(0);
-  private AtomicInteger containerReportsCount = new AtomicInteger(0);
-  private String clusterId;
-  private String scmId;
-
-  public ScmTestMock() {
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-  }
-
-  // Map of datanode to containers
-  private Map<DatanodeDetails, Map<Long, ContainerInfo>> nodeContainers =
-      new HashMap<>();
-  private Map<DatanodeDetails, NodeReportProto> nodeReports = new HashMap<>();
-  private AtomicInteger commandStatusReport = new AtomicInteger(0);
-  private List<CommandStatus> cmdStatusList = new LinkedList<>();
-  private List<SCMCommandProto> scmCommandRequests = new LinkedList<>();
-  /**
-   * Returns the number of heartbeats made to this class.
-   *
-   * @return int
-   */
-  public int getHeartbeatCount() {
-    return heartbeatCount.get();
-  }
-
-  /**
-   * Returns the number of RPC calls made to this mock class instance.
-   *
-   * @return - Number of RPC calls serviced by this class.
-   */
-  public int getRpcCount() {
-    return rpcCount.get();
-  }
-
-  /**
-   * Gets the RPC response delay.
-   *
-   * @return delay in milliseconds.
-   */
-  public int getRpcResponseDelay() {
-    return rpcResponseDelay;
-  }
-
-  /**
-   * Sets the RPC response delay.
-   *
-   * @param rpcResponseDelay - delay in milliseconds.
-   */
-  public void setRpcResponseDelay(int rpcResponseDelay) {
-    this.rpcResponseDelay = rpcResponseDelay;
-  }
-
-  /**
-   * Returns the number of container reports the server has seen.
-   * @return int
-   */
-  public int getContainerReportsCount() {
-    return containerReportsCount.get();
-  }
-
-  /**
-   * Returns the number of containers that have been reported so far.
-   * @return - count of reported containers.
-   */
-  public long getContainerCount() {
-    return nodeContainers.values().parallelStream()
-        .mapToLong(Map::size).sum();
-  }
-
-  /**
-   * Get the number of keys reported from container reports.
-   * @return - number of keys reported.
-   */
-  public long getKeyCount() {
-    return nodeContainers.values().parallelStream()
-        .flatMap(containerMap -> containerMap.values().stream())
-        .mapToLong(ContainerInfo::getKeyCount).sum();
-  }
-
-  /**
-   * Get the number of bytes used from container reports.
-   * @return - number of bytes used.
-   */
-  public long getBytesUsed() {
-    return nodeContainers.values().parallelStream()
-        .flatMap(containerMap -> containerMap.values().stream())
-        .mapToLong(ContainerInfo::getUsed).sum();
-  }
-
-  /**
-   * Returns SCM version.
-   *
-   * @return Version info.
-   */
-  @Override
-  public StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto
-      getVersion(StorageContainerDatanodeProtocolProtos
-      .SCMVersionRequestProto unused) throws IOException {
-    rpcCount.incrementAndGet();
-    sleepIfNeeded();
-    VersionInfo versionInfo = VersionInfo.getLatestVersion();
-    return VersionResponse.newBuilder()
-        .setVersion(versionInfo.getVersion())
-        .addValue(VersionInfo.DESCRIPTION_KEY, versionInfo.getDescription())
-        .addValue(OzoneConsts.SCM_ID, scmId)
-        .addValue(OzoneConsts.CLUSTER_ID, clusterId)
-        .build().getProtobufMessage();
-  }
-
-  private void sleepIfNeeded() {
-    if (getRpcResponseDelay() > 0) {
-      try {
-        Thread.sleep(getRpcResponseDelay());
-      } catch (InterruptedException ex) {
-        // Restore the interrupt status so callers can observe it.
-        Thread.currentThread().interrupt();
-      }
-    }
-  }
-
-  /**
-   * Used by data node to send a Heartbeat.
-   *
-   * @param heartbeat - node heartbeat.
-   * @return - SCMHeartbeatResponseProto
-   * @throws IOException
-   */
-  @Override
-  public StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto
-      sendHeartbeat(SCMHeartbeatRequestProto heartbeat) throws IOException {
-    rpcCount.incrementAndGet();
-    heartbeatCount.incrementAndGet();
-    if (heartbeat.getCommandStatusReportsCount() != 0) {
-      for (CommandStatusReportsProto statusReport : heartbeat
-          .getCommandStatusReportsList()) {
-        cmdStatusList.addAll(statusReport.getCmdStatusList());
-        commandStatusReport.incrementAndGet();
-      }
-    }
-    sleepIfNeeded();
-    return SCMHeartbeatResponseProto.newBuilder().addAllCommands(
-        scmCommandRequests)
-        .setDatanodeUUID(heartbeat.getDatanodeDetails().getUuid())
-        .build();
-  }
-
-  /**
-   * Register Datanode.
-   *
-   * @param datanodeDetailsProto DatanodeDetailsProto.
-   * @return SCM Command.
-   */
-  @Override
-  public StorageContainerDatanodeProtocolProtos
-      .SCMRegisteredResponseProto register(
-          DatanodeDetailsProto datanodeDetailsProto,
-          NodeReportProto nodeReport,
-          ContainerReportsProto containerReportsRequestProto,
-          PipelineReportsProto pipelineReportsProto)
-      throws IOException {
-    rpcCount.incrementAndGet();
-    updateNodeReport(datanodeDetailsProto, nodeReport);
-    updateContainerReport(containerReportsRequestProto, datanodeDetailsProto);
-    sleepIfNeeded();
-    return StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto
-        .newBuilder().setClusterID(clusterId)
-        .setDatanodeUUID(datanodeDetailsProto.getUuid()).setErrorCode(
-            StorageContainerDatanodeProtocolProtos
-                .SCMRegisteredResponseProto.ErrorCode.success).build();
-  }
-
-  /**
-   * Updates the stored node report for the given datanode.
-   * @param datanodeDetailsProto datanode details
-   * @param nodeReport node report to store
-   */
-  public void updateNodeReport(DatanodeDetailsProto datanodeDetailsProto,
-      NodeReportProto nodeReport) {
-    DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf(
-        datanodeDetailsProto);
-    NodeReportProto.Builder nodeReportProto = NodeReportProto.newBuilder();
-
-    List<StorageReportProto> storageReports =
-        nodeReport.getStorageReportList();
-
-    for (StorageReportProto report : storageReports) {
-      nodeReportProto.addStorageReport(report);
-    }
-
-    nodeReports.put(datanode, nodeReportProto.build());
-  }
-
-  /**
-   * Updates the container report for the given datanode.
-   *
-   * @param reports container report
-   * @param datanodeDetails datanode info
-   * @throws IOException
-   */
-  public void updateContainerReport(
-      StorageContainerDatanodeProtocolProtos.ContainerReportsProto reports,
-      DatanodeDetailsProto datanodeDetails) throws IOException {
-    Preconditions.checkNotNull(reports);
-    containerReportsCount.incrementAndGet();
-    DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf(
-        datanodeDetails);
-    if (reports.getReportsCount() > 0) {
-      Map<Long, ContainerInfo> containers = nodeContainers
-          .computeIfAbsent(datanode, key -> new LinkedHashMap<>());
-      for (ContainerInfo report : reports.getReportsList()) {
-        containers.put(report.getContainerID(), report);
-      }
-    }
-  }
-
-  /**
-   * Returns the number of storage reports of a datanode.
-   * @param datanodeDetails datanode to look up
-   * @return count of storage reports of the datanode
-   */
-  public int getNodeReportsCount(DatanodeDetails datanodeDetails) {
-    return nodeReports.get(datanodeDetails).getStorageReportCount();
-  }
-
-  /**
-   * Returns the number of containers of a datanode.
-   * @param datanodeDetails datanode to look up
-   * @return count of containers of the datanode
-   */
-  public int getContainerCountsForDatanode(DatanodeDetails datanodeDetails) {
-    Map<Long, ContainerInfo> cr = nodeContainers.get(datanodeDetails);
-    if (cr != null) {
-      return cr.size();
-    }
-    return 0;
-  }
-
-  /**
-   * Resets the mock SCM so a test gets a fresh start without rebuilding it.
-   */
-  public void reset() {
-    heartbeatCount.set(0);
-    rpcCount.set(0);
-    containerReportsCount.set(0);
-    nodeContainers.clear();
-  }
-
-  public int getCommandStatusReportCount() {
-    return commandStatusReport.get();
-  }
-
-  public List<CommandStatus> getCmdStatusList() {
-    return cmdStatusList;
-  }
-
-  public List<SCMCommandProto> getScmCommandRequests() {
-    return scmCommandRequests;
-  }
-
-  public void clearScmCommandRequests() {
-    scmCommandRequests.clear();
-  }
-
-  public void addScmCommandRequest(SCMCommandProto scmCmd) {
-    scmCommandRequests.add(scmCmd);
-  }
-
-  /**
-   * Set scmId.
-   * @param id scmId to set
-   */
-  public void setScmId(String id) {
-    this.scmId = id;
-  }
-
-  /**
-   * Get scmId.
-   * @return scmId
-   */
-  public String getScmId() {
-    return scmId;
-  }
-}
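
A usage sketch for the mock above (someCommand stands in for any prebuilt SCMCommandProto, and the heartbeats are assumed to be driven by whatever endpoint the test starts against the mock; this is not part of the patch):

    ScmTestMock mock = new ScmTestMock();
    mock.setRpcResponseDelay(100);           // simulate a slow SCM (ms)
    mock.addScmCommandRequest(someCommand);  // queued for the next heartbeat
    // ... drive heartbeats through the endpoint under test ...
    Assert.assertTrue(mock.getHeartbeatCount() > 0);
    mock.reset();                            // fresh counters for the next case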

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java
deleted file mode 100644
index a4e0028..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common;
-
-import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * This class tests ChunkLayOutVersion.
- */
-public class TestChunkLayOutVersion {
-
-  @Test
-  public void testChunkLayOutVersion() {
-    // Check the latest version and its description.
-    Assert.assertEquals(1, ChunkLayOutVersion.getLatestVersion().getVersion());
-    Assert.assertEquals("Data without checksums.", ChunkLayOutVersion
-        .getLatestVersion().getDescription());
-    Assert.assertEquals(1, ChunkLayOutVersion.getAllVersions().length);
-  }
-
-}
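
The assertions above only pin down the shape of the ChunkLayOutVersion API. A hypothetical minimal implementation consistent with them (names inferred from the test, not from the real class) could look like:

    public final class ChunkLayOutVersion {
      // Registry of all known layout versions, oldest first.
      private static final ChunkLayOutVersion[] VERSIONS = {
          new ChunkLayOutVersion(1, "Data without checksums.")
      };

      private final int version;
      private final String description;

      private ChunkLayOutVersion(int version, String description) {
        this.version = version;
        this.description = description;
      }

      // The latest version is the last entry in the registry.
      public static ChunkLayOutVersion getLatestVersion() {
        return VERSIONS[VERSIONS.length - 1];
      }

      public static ChunkLayOutVersion[] getAllVersions() {
        return VERSIONS.clone();
      }

      public int getVersion() {
        return version;
      }

      public String getDescription() {
        return description;
      }
    }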

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java
deleted file mode 100644
index 5cabef2..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * This class tests DatanodeLayOutVersion.
- */
-public class TestDatanodeLayOutVersion {
-
-  @Test
-  public void testDatanodeLayOutVersion() {
-    // Check the latest version and its description.
-    Assert.assertEquals(1, DataNodeLayoutVersion.getLatestVersion()
-        .getVersion());
-    Assert.assertEquals("HDDS Datanode LayOut Version 1", DataNodeLayoutVersion
-        .getLatestVersion().getDescription());
-    Assert.assertEquals(1, DataNodeLayoutVersion.getAllVersions().length);
-  }
-}

