http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
deleted file mode 100644
index 23ba00b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MutableRate;
-import org.apache.hadoop.metrics2.lib.MetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MutableQuantiles;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-
-/**
- *
- * This class is for maintaining  the various Storage Container
- * DataNode statistics and publishing them through the metrics interfaces.
- * This also registers the JMX MBean for RPC.
- * <p>
- * This class has a number of metrics variables that are publicly accessible;
- * these variables (objects) have methods to update their values;
- *  for example:
- *  <p> {@link #numOps}.inc()
- *
- */
-@InterfaceAudience.Private
-@Metrics(about="Storage Container DataNode Metrics", context="dfs")
-public class ContainerMetrics {
-  @Metric private MutableCounterLong numOps;
-  private MutableCounterLong[] numOpsArray;
-  private MutableCounterLong[] opsBytesArray;
-  private MutableRate[] opsLatency;
-  private MutableQuantiles[][] opsLatQuantiles;
-  private MetricsRegistry registry = null;
-
-  public ContainerMetrics(int[] intervals) {
-    int numEnumEntries = ContainerProtos.Type.values().length;
-    final int len = intervals.length;
-    this.numOpsArray = new MutableCounterLong[numEnumEntries];
-    this.opsBytesArray = new MutableCounterLong[numEnumEntries];
-    this.opsLatency = new MutableRate[numEnumEntries];
-    this.opsLatQuantiles = new MutableQuantiles[numEnumEntries][len];
-    this.registry = new MetricsRegistry("StorageContainerMetrics");
-    for (int i = 0; i < numEnumEntries; i++) {
-      numOpsArray[i] = registry.newCounter(
-          "num" + ContainerProtos.Type.valueOf(i + 1),
-          "number of " + ContainerProtos.Type.valueOf(i + 1) + " ops",
-          (long) 0);
-      opsBytesArray[i] = registry.newCounter(
-          "bytes" + ContainerProtos.Type.valueOf(i + 1),
-          "bytes used by " + ContainerProtos.Type.valueOf(i + 1) + "op",
-          (long) 0);
-      opsLatency[i] = registry.newRate(
-          "latency" + ContainerProtos.Type.valueOf(i + 1),
-          ContainerProtos.Type.valueOf(i + 1) + " op");
-
-      for (int j = 0; j < len; j++) {
-        int interval = intervals[j];
-        String quantileName = ContainerProtos.Type.valueOf(i + 1) + "Nanos"
-            + interval + "s";
-        opsLatQuantiles[i][j] = registry.newQuantiles(quantileName,
-            "latency of Container ops", "ops", "latency", interval);
-      }
-    }
-  }
-
-  public static ContainerMetrics create(Configuration conf) {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    // Percentile measurement is off by default, by watching no intervals
-    int[] intervals =
-             conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
-    return ms.register("StorageContainerMetrics",
-                       "Storage Container Node Metrics",
-                       new ContainerMetrics(intervals));
-  }
-
-  public void incContainerOpcMetrics(ContainerProtos.Type type){
-    numOps.incr();
-    numOpsArray[type.ordinal()].incr();
-  }
-
-  public long getContainerOpsMetrics(ContainerProtos.Type type){
-    return numOpsArray[type.ordinal()].value();
-  }
-
-  public void incContainerOpsLatencies(ContainerProtos.Type type,
-                                       long latencyNanos) {
-    opsLatency[type.ordinal()].add(latencyNanos);
-    for (MutableQuantiles q: opsLatQuantiles[type.ordinal()]) {
-      q.add(latencyNanos);
-    }
-  }
-
-  public void incContainerBytesStats(ContainerProtos.Type type, long bytes) {
-    opsBytesArray[type.ordinal()].incr(bytes);
-  }
-
-  public long getContainerBytesMetrics(ContainerProtos.Type type){
-    return opsBytesArray[type.ordinal()].value();
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
deleted file mode 100644
index 81c41bb..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerInfo;
-
-/**
- * Container Report iterates the closed containers and sends a container report
- * to SCM.
- */
-public class ContainerReport {
-  private static final int UNKNOWN = -1;
-  private final String containerName;
-  private final String finalhash;
-  private long size;
-  private long keyCount;
-  private long bytesUsed;
-  private long readCount;
-  private long writeCount;
-  private long readBytes;
-  private long writeBytes;
-  private long containerID;
-
-  public long getContainerID() {
-    return containerID;
-  }
-
-  public void setContainerID(long containerID) {
-    this.containerID = containerID;
-  }
-
-
-
-
-  /**
-   * Constructs the ContainerReport.
-   *
-   * @param containerName - Container Name.
-   * @param finalhash - Final Hash.
-   */
-  public ContainerReport(String containerName, String finalhash) {
-    this.containerName = containerName;
-    this.finalhash = finalhash;
-    this.size = UNKNOWN;
-    this.keyCount = UNKNOWN;
-    this.bytesUsed = 0L;
-    this.readCount = 0L;
-    this.readBytes = 0L;
-    this.writeCount = 0L;
-    this.writeBytes = 0L;
-  }
-
-  /**
-   * Gets a containerReport from protobuf class.
-   *
-   * @param info - ContainerInfo.
-   * @return - ContainerReport.
-   */
-  public static ContainerReport getFromProtoBuf(ContainerInfo info) {
-    Preconditions.checkNotNull(info);
-    ContainerReport report = new ContainerReport(info.getContainerName(),
-        info.getFinalhash());
-    if (info.hasSize()) {
-      report.setSize(info.getSize());
-    }
-    if (info.hasKeyCount()) {
-      report.setKeyCount(info.getKeyCount());
-    }
-    if (info.hasUsed()) {
-      report.setBytesUsed(info.getUsed());
-    }
-    if (info.hasReadCount()) {
-      report.setReadCount(info.getReadCount());
-    }
-    if (info.hasReadBytes()) {
-      report.setReadBytes(info.getReadBytes());
-    }
-    if (info.hasWriteCount()) {
-      report.setWriteCount(info.getWriteCount());
-    }
-    if (info.hasWriteBytes()) {
-      report.setWriteBytes(info.getWriteBytes());
-    }
-
-    report.setContainerID(info.getContainerID());
-    return report;
-  }
-
-  /**
-   * Gets the container name.
-   *
-   * @return - Name
-   */
-  public String getContainerName() {
-    return containerName;
-  }
-
-  /**
-   * Returns the final signature for this container.
-   *
-   * @return - hash
-   */
-  public String getFinalhash() {
-    return finalhash;
-  }
-
-  /**
-   * Returns a positive number it is a valid number, -1 if not known.
-   *
-   * @return size or -1
-   */
-  public long getSize() {
-    return size;
-  }
-
-  /**
-   * Sets the size of the container on disk.
-   *
-   * @param size - int
-   */
-  public void setSize(long size) {
-    this.size = size;
-  }
-
-  /**
-   * Gets number of keys in the container if known.
-   *
-   * @return - Number of keys or -1 for not known.
-   */
-  public long getKeyCount() {
-    return keyCount;
-  }
-
-  /**
-   * Sets the key count.
-   *
-   * @param keyCount - Key Count
-   */
-  public void setKeyCount(long keyCount) {
-    this.keyCount = keyCount;
-  }
-
-  public long getReadCount() {
-    return readCount;
-  }
-
-  public void setReadCount(long readCount) {
-    this.readCount = readCount;
-  }
-
-  public long getWriteCount() {
-    return writeCount;
-  }
-
-  public void setWriteCount(long writeCount) {
-    this.writeCount = writeCount;
-  }
-
-  public long getReadBytes() {
-    return readBytes;
-  }
-
-  public void setReadBytes(long readBytes) {
-    this.readBytes = readBytes;
-  }
-
-  public long getWriteBytes() {
-    return writeBytes;
-  }
-
-  public void setWriteBytes(long writeBytes) {
-    this.writeBytes = writeBytes;
-  }
-
-  public long getBytesUsed() {
-    return bytesUsed;
-  }
-
-  public void setBytesUsed(long bytesUsed) {
-    this.bytesUsed = bytesUsed;
-  }
-
-  /**
-   * Gets a containerInfo protobuf message from ContainerReports.
-   *
-   * @return ContainerInfo
-   */
-  public ContainerInfo getProtoBufMessage() {
-    return ContainerInfo.newBuilder()
-        .setContainerName(this.getContainerName())
-        .setKeyCount(this.getKeyCount())
-        .setSize(this.getSize())
-        .setUsed(this.getBytesUsed())
-        .setReadCount(this.getReadCount())
-        .setReadBytes(this.getReadBytes())
-        .setWriteCount(this.getWriteCount())
-        .setWriteBytes(this.getWriteBytes())
-        .setFinalhash(this.getFinalhash())
-        .setContainerID(this.getContainerID())
-        .build();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
deleted file mode 100644
index f78e646..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ /dev/null
@@ -1,477 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
-import org.apache.hadoop.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.utils.MetadataStore;
-import org.apache.hadoop.utils.MetadataStoreBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.apache.commons.io.FilenameUtils.removeExtension;
-import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
-    .Result.INVALID_ARGUMENT;
-import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
-    .Result.UNABLE_TO_FIND_DATA_DIR;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_META;
-
-/**
- * A set of helper functions to create proper responses.
- */
-public final class ContainerUtils {
-
-  private ContainerUtils() {
-    //never constructed.
-  }
-
-  /**
-   * Returns a CreateContainer Response. This call is used by create and delete
-   * containers which have null success responses.
-   *
-   * @param msg Request
-   * @return Response.
-   */
-  public static ContainerProtos.ContainerCommandResponseProto
-      getContainerResponse(ContainerProtos.ContainerCommandRequestProto msg) {
-    ContainerProtos.ContainerCommandResponseProto.Builder builder =
-        getContainerResponse(msg, ContainerProtos.Result.SUCCESS, "");
-    return builder.build();
-  }
-
-  /**
-   * Returns a ReadContainer Response.
-   *
-   * @param msg Request
-   * @param containerData - data
-   * @return Response.
-   */
-  public static ContainerProtos.ContainerCommandResponseProto
-      getReadContainerResponse(ContainerProtos.ContainerCommandRequestProto msg,
-      ContainerData containerData) {
-    Preconditions.checkNotNull(containerData);
-
-    ContainerProtos.ReadContainerResponseProto.Builder response =
-        ContainerProtos.ReadContainerResponseProto.newBuilder();
-    response.setContainerData(containerData.getProtoBufMessage());
-
-    ContainerProtos.ContainerCommandResponseProto.Builder builder =
-        getContainerResponse(msg, ContainerProtos.Result.SUCCESS, "");
-    builder.setReadContainer(response);
-    return builder.build();
-  }
-
-  /**
-   * We found a command type but no associated payload for the command. Hence
-   * return malformed Command as response.
-   *
-   * @param msg - Protobuf message.
-   * @param result - result
-   * @param message - Error message.
-   * @return ContainerCommandResponseProto - MALFORMED_REQUEST.
-   */
-  public static ContainerProtos.ContainerCommandResponseProto.Builder
-      getContainerResponse(ContainerProtos.ContainerCommandRequestProto msg,
-      ContainerProtos.Result result, String message) {
-    return
-        ContainerProtos.ContainerCommandResponseProto.newBuilder()
-            .setCmdType(msg.getCmdType())
-            .setTraceID(msg.getTraceID())
-            .setResult(result)
-            .setMessage(message);
-  }
-
-  /**
-   * Logs the error and returns a response to the caller.
-   *
-   * @param log - Logger
-   * @param ex - Exception
-   * @param msg - Request Object
-   * @return Response
-   */
-  public static ContainerProtos.ContainerCommandResponseProto logAndReturnError(
-      Logger log, StorageContainerException ex,
-      ContainerProtos.ContainerCommandRequestProto msg) {
-    log.info("Operation: {} : Trace ID: {} : Message: {} : Result: {}",
-        msg.getCmdType().name(), msg.getTraceID(),
-        ex.getMessage(), ex.getResult().getValueDescriptor().getName());
-    return getContainerResponse(msg, ex.getResult(), ex.getMessage()).build();
-  }
-
-  /**
-   * Logs the error and returns a response to the caller.
-   *
-   * @param log - Logger
-   * @param ex - Exception
-   * @param msg - Request Object
-   * @return Response
-   */
-  public static ContainerProtos.ContainerCommandResponseProto logAndReturnError(
-      Logger log, RuntimeException ex,
-      ContainerProtos.ContainerCommandRequestProto msg) {
-    log.info("Operation: {} : Trace ID: {} : Message: {} ",
-        msg.getCmdType().name(), msg.getTraceID(), ex.getMessage());
-    return getContainerResponse(msg, INVALID_ARGUMENT, ex.getMessage()).build();
-  }
-
-  /**
-   * We found a command type but no associated payload for the command. Hence
-   * return malformed Command as response.
-   *
-   * @param msg - Protobuf message.
-   * @return ContainerCommandResponseProto - MALFORMED_REQUEST.
-   */
-  public static ContainerProtos.ContainerCommandResponseProto
-      malformedRequest(ContainerProtos.ContainerCommandRequestProto msg) {
-    return getContainerResponse(msg, ContainerProtos.Result.MALFORMED_REQUEST,
-        "Cmd type does not match the payload.").build();
-  }
-
-  /**
-   * We found a command type that is not supported yet.
-   *
-   * @param msg - Protobuf message.
-   * @return ContainerCommandResponseProto - MALFORMED_REQUEST.
-   */
-  public static ContainerProtos.ContainerCommandResponseProto
-      unsupportedRequest(ContainerProtos.ContainerCommandRequestProto msg) {
-    return getContainerResponse(msg, ContainerProtos.Result.UNSUPPORTED_REQUEST,
-        "Server does not support this command yet.").build();
-  }
-
-  /**
-   * get containerName from a container file.
-   *
-   * @param containerFile - File
-   * @return Name of the container.
-   */
-  public static String getContainerNameFromFile(File containerFile) {
-    Preconditions.checkNotNull(containerFile);
-    return Paths.get(containerFile.getParent()).resolve(
-        removeExtension(containerFile.getName())).toString();
-  }
-
-  /**
-   * Verifies that this in indeed a new container.
-   *
-   * @param containerFile - Container File to verify
-   * @param metadataFile - metadata File to verify
-   * @throws IOException
-   */
-  public static void verifyIsNewContainer(File containerFile, File metadataFile)
-      throws IOException {
-    Logger log = LoggerFactory.getLogger(ContainerManagerImpl.class);
-    if (containerFile.exists()) {
-      log.error("container already exists on disk. File: {}",
-          containerFile.toPath());
-      throw new FileAlreadyExistsException("container already exists on " +
-          "disk.");
-    }
-
-    if (metadataFile.exists()) {
-      log.error("metadata found on disk, but missing container. Refusing to" +
-          " write this container. File: {} ", metadataFile.toPath());
-      throw new FileAlreadyExistsException(("metadata found on disk, but " +
-          "missing container. Refusing to write this container."));
-    }
-
-    File parentPath = new File(containerFile.getParent());
-
-    if (!parentPath.exists() && !parentPath.mkdirs()) {
-      log.error("Unable to create parent path. Path: {}",
-          parentPath.toString());
-      throw new IOException("Unable to create container directory.");
-    }
-
-    if (!containerFile.createNewFile()) {
-      log.error("creation of a new container file failed. File: {}",
-          containerFile.toPath());
-      throw new IOException("creation of a new container file failed.");
-    }
-
-    if (!metadataFile.createNewFile()) {
-      log.error("creation of the metadata file failed. File: {}",
-          metadataFile.toPath());
-      throw new IOException("creation of a new container file failed.");
-    }
-  }
-
-  public static String getContainerDbFileName(String containerName) {
-    return containerName + OzoneConsts.DN_CONTAINER_DB;
-  }
-
-  /**
-   * creates a Metadata DB for the specified container.
-   *
-   * @param containerPath - Container Path.
-   * @throws IOException
-   */
-  public static Path createMetadata(Path containerPath, String containerName,
-      Configuration conf)
-      throws IOException {
-    Logger log = LoggerFactory.getLogger(ContainerManagerImpl.class);
-    Preconditions.checkNotNull(containerPath);
-    Path metadataPath = containerPath.resolve(OzoneConsts.CONTAINER_META_PATH);
-    if (!metadataPath.toFile().mkdirs()) {
-      log.error("Unable to create directory for metadata storage. Path: {}",
-          metadataPath);
-      throw new IOException("Unable to create directory for metadata storage." 
+
-          " Path: " + metadataPath);
-    }
-    MetadataStore store = MetadataStoreBuilder.newBuilder()
-        .setConf(conf)
-        .setCreateIfMissing(true)
-        .setDbFile(metadataPath
-            .resolve(getContainerDbFileName(containerName)).toFile())
-        .build();
-
-    // we close since the SCM pre-creates containers.
-    // we will open and put Db handle into a cache when keys are being created
-    // in a container.
-
-    store.close();
-
-    Path dataPath = containerPath.resolve(OzoneConsts.CONTAINER_DATA_PATH);
-    if (!dataPath.toFile().mkdirs()) {
-
-      // If we failed to create data directory, we cleanup the
-      // metadata directory completely. That is, we will delete the
-      // whole directory including LevelDB file.
-      log.error("Unable to create directory for data storage. cleaning up the" 
+
-              " container path: {} dataPath: {}",
-          containerPath, dataPath);
-      FileUtils.deleteDirectory(containerPath.toFile());
-      throw new IOException("Unable to create directory for data storage." +
-          " Path: " + dataPath);
-    }
-    return metadataPath;
-  }
-
-  /**
-   * Returns Metadata location.
-   *
-   * @param containerData - Data
-   * @param location - Path
-   * @return Path
-   */
-  public static File getMetadataFile(ContainerData containerData,
-      Path location) {
-    return location.resolve(containerData
-        .getContainerName().concat(CONTAINER_META))
-        .toFile();
-  }
-
-  /**
-   * Returns container file location.
-   *
-   * @param containerData - Data
-   * @param location - Root path
-   * @return Path
-   */
-  public static File getContainerFile(ContainerData containerData,
-      Path location) {
-    return location.resolve(containerData
-        .getContainerName().concat(CONTAINER_EXTENSION))
-        .toFile();
-  }
-
-  /**
-   * Container metadata directory -- here is where the level DB lives.
-   *
-   * @param cData - cData.
-   * @return Path to the parent directory where the DB lives.
-   */
-  public static Path getMetadataDirectory(ContainerData cData) {
-    Path dbPath = Paths.get(cData.getDBPath());
-    Preconditions.checkNotNull(dbPath);
-    Preconditions.checkState(dbPath.toString().length() > 0);
-    return dbPath.getParent();
-  }
-
-  /**
-   * Returns the path where data or chunks live for a given container.
-   *
-   * @param cData - cData container
-   * @return - Path
-   * @throws StorageContainerException
-   */
-  public static Path getDataDirectory(ContainerData cData)
-      throws StorageContainerException {
-    Path path = getMetadataDirectory(cData);
-    Preconditions.checkNotNull(path);
-    Path parentPath = path.getParent();
-    if (parentPath == null) {
-      throw new StorageContainerException("Unable to get Data directory."
-          + path, UNABLE_TO_FIND_DATA_DIR);
-    }
-    return parentPath.resolve(OzoneConsts.CONTAINER_DATA_PATH);
-  }
-
-  /**
-   * remove Container if it is empty.
-   * <p/>
-   * There are three things we need to delete.
-   * <p/>
-   * 1. Container file and metadata file. 2. The Level DB file 3. The path that
-   * we created on the data location.
-   *
-   * @param containerData - Data of the container to remove.
-   * @param conf - configuration of the cluster.
-   * @param forceDelete - whether this container should be deleted forcibly.
-   * @throws IOException
-   */
-  public static void removeContainer(ContainerData containerData,
-      Configuration conf, boolean forceDelete) throws IOException {
-    Preconditions.checkNotNull(containerData);
-    Path dbPath = Paths.get(containerData.getDBPath());
-
-    MetadataStore db = KeyUtils.getDB(containerData, conf);
-    // If the container is not empty and cannot be deleted forcibly,
-    // then throw a SCE to stop deleting.
-    if(!forceDelete && !db.isEmpty()) {
-      throw new StorageContainerException(
-          "Container cannot be deleted because it is not empty.",
-          ContainerProtos.Result.ERROR_CONTAINER_NOT_EMPTY);
-    }
-    // Close the DB connection and remove the DB handler from cache
-    KeyUtils.removeDB(containerData, conf);
-
-    // Delete the DB File.
-    FileUtils.forceDelete(dbPath.toFile());
-    dbPath = dbPath.getParent();
-
-    // Delete all Metadata in the Data directories for this containers.
-    if (dbPath != null) {
-      FileUtils.deleteDirectory(dbPath.toFile());
-      dbPath = dbPath.getParent();
-    }
-
-    // now delete the container directory, this means that all key data dirs
-    // will be removed too.
-    if (dbPath != null) {
-      FileUtils.deleteDirectory(dbPath.toFile());
-    }
-
-    // Delete the container metadata from the metadata locations.
-    String rootPath = getContainerNameFromFile(new File(containerData
-        .getContainerPath()));
-    Path containerPath = Paths.get(rootPath.concat(CONTAINER_EXTENSION));
-    Path metaPath = Paths.get(rootPath.concat(CONTAINER_META));
-
-    FileUtils.forceDelete(containerPath.toFile());
-    FileUtils.forceDelete(metaPath.toFile());
-  }
-
-  /**
-   * Write datanode ID protobuf messages to an ID file.
-   * The old ID file will be overwritten.
-   *
-   * @param ids A set of {@link DatanodeID}
-   * @param path Local ID file path
-   * @throws IOException When read/write error occurs
-   */
-  private synchronized static void writeDatanodeIDs(List<DatanodeID> ids,
-      File path) throws IOException {
-    if (path.exists()) {
-      if (!path.delete() || !path.createNewFile()) {
-        throw new IOException("Unable to overwrite the datanode ID file.");
-      }
-    } else {
-      if(!path.getParentFile().exists() &&
-          !path.getParentFile().mkdirs()) {
-        throw new IOException("Unable to create datanode ID directories.");
-      }
-    }
-    try (FileOutputStream out = new FileOutputStream(path)) {
-      for (DatanodeID id : ids) {
-        HdfsProtos.DatanodeIDProto dnId = id.getProtoBufMessage();
-        dnId.writeDelimitedTo(out);
-      }
-    }
-  }
-
-  /**
-   * Persistent a {@link DatanodeID} to a local file.
-   * It reads the IDs first and append a new entry only if the ID is new.
-   * This is to avoid on some dirty environment, this file gets too big.
-   *
-   * @throws IOException when read/write error occurs
-   */
-  public synchronized static void writeDatanodeIDTo(DatanodeID dnID,
-      File path) throws IOException {
-    List<DatanodeID> ids = ContainerUtils.readDatanodeIDsFrom(path);
-    // Only create or overwrite the file
-    // if the ID doesn't exist in the ID file
-    for (DatanodeID id : ids) {
-      if (id.getProtoBufMessage()
-          .equals(dnID.getProtoBufMessage())) {
-        return;
-      }
-    }
-    ids.add(dnID);
-    writeDatanodeIDs(ids, path);
-  }
-
-  /**
-   * Read {@link DatanodeID} from a local ID file and return a set of
-   * datanode IDs. If the ID file doesn't exist, an empty set is returned.
-   *
-   * @param path ID file local path
-   * @return A set of {@link DatanodeID}
-   * @throws IOException If the id file is malformed or other I/O exceptions
-   */
-  public synchronized static List<DatanodeID> readDatanodeIDsFrom(File path)
-      throws IOException {
-    List<DatanodeID> ids = new ArrayList<DatanodeID>();
-    if (!path.exists()) {
-      return ids;
-    }
-    try(FileInputStream in = new FileInputStream(path)) {
-      while(in.available() > 0) {
-        try {
-          HdfsProtos.DatanodeIDProto id =
-              HdfsProtos.DatanodeIDProto.parseDelimitedFrom(in);
-          ids.add(DatanodeID.getFromProtoBuf(id));
-        } catch (IOException e) {
-          throw new IOException("Failed to parse Datanode ID from "
-              + path.getAbsolutePath(), e);
-        }
-      }
-    }
-    return ids;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java
deleted file mode 100644
index de5e2d0..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import com.google.common.collect.Maps;
-import org.apache.hadoop.ozone.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-/**
- * A helper class to wrap the info about under deletion container blocks.
- */
-public final class DeletedContainerBlocksSummary {
-
-  private final List<DeletedBlocksTransaction> blocks;
-  // key : txID
-  // value : times of this tx has been processed
-  private final Map<Long, Integer> txSummary;
-  // key : container name
-  // value : the number of blocks need to be deleted in this container
-  // if the message contains multiple entries for same block,
-  // blocks will be merged
-  private final Map<String, Integer> blockSummary;
-  // total number of blocks in this message
-  private int numOfBlocks;
-
-  private DeletedContainerBlocksSummary(List<DeletedBlocksTransaction> blocks) {
-    this.blocks = blocks;
-    txSummary = Maps.newHashMap();
-    blockSummary = Maps.newHashMap();
-    blocks.forEach(entry -> {
-      txSummary.put(entry.getTxID(), entry.getCount());
-      if (blockSummary.containsKey(entry.getContainerName())) {
-        blockSummary.put(entry.getContainerName(),
-            blockSummary.get(entry.getContainerName())
-                + entry.getBlockIDCount());
-      } else {
-        blockSummary.put(entry.getContainerName(), entry.getBlockIDCount());
-      }
-      numOfBlocks += entry.getBlockIDCount();
-    });
-  }
-
-  public static DeletedContainerBlocksSummary getFrom(
-      List<DeletedBlocksTransaction> blocks) {
-    return new DeletedContainerBlocksSummary(blocks);
-  }
-
-  public int getNumOfBlocks() {
-    return numOfBlocks;
-  }
-
-  public int getNumOfContainers() {
-    return blockSummary.size();
-  }
-
-  public String getTXIDs() {
-    return String.join(",", txSummary.keySet()
-        .stream().map(String::valueOf).collect(Collectors.toList()));
-  }
-
-  public String getTxIDSummary() {
-    List<String> txSummaryEntry = txSummary.entrySet().stream()
-        .map(entry -> entry.getKey() + "(" + entry.getValue() + ")")
-        .collect(Collectors.toList());
-    return "[" + String.join(",", txSummaryEntry) + "]";
-  }
-
-  @Override public String toString() {
-    StringBuffer sb = new StringBuffer();
-    for (DeletedBlocksTransaction blks : blocks) {
-      sb.append(" ")
-          .append("TXID=")
-          .append(blks.getTxID())
-          .append(", ")
-          .append("TimesProceed=")
-          .append(blks.getCount())
-          .append(", ")
-          .append(blks.getContainerName())
-          .append(" : [")
-          .append(String.join(",", blks.getBlockIDList())).append("]")
-          .append("\n");
-    }
-    return sb.toString();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java
deleted file mode 100644
index 27731c7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.ByteString;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-
-/**
- * File Utils are helper routines used by putSmallFile and getSmallFile
- * RPCs.
- */
-public final class FileUtils {
-  /**
-   * Never Constructed.
-   */
-  private FileUtils() {
-  }
-
-  /**
-   * Gets a response for the putSmallFile RPC.
-   * @param msg - ContainerCommandRequestProto
-   * @return - ContainerCommandResponseProto
-   */
-  public static ContainerProtos.ContainerCommandResponseProto
-      getPutFileResponse(ContainerProtos.ContainerCommandRequestProto msg) {
-    ContainerProtos.PutSmallFileResponseProto.Builder getResponse =
-        ContainerProtos.PutSmallFileResponseProto.newBuilder();
-    ContainerProtos.ContainerCommandResponseProto.Builder builder =
-        ContainerUtils.getContainerResponse(msg, ContainerProtos.Result
-            .SUCCESS, "");
-    builder.setCmdType(ContainerProtos.Type.PutSmallFile);
-    builder.setPutSmallFile(getResponse);
-    return  builder.build();
-  }
-
-  /**
-   * Gets a response to the read small file call.
-   * @param msg - Msg
-   * @param data  - Data
-   * @param info  - Info
-   * @return    Response.
-   */
-  public static ContainerProtos.ContainerCommandResponseProto
-      getGetSmallFileResponse(ContainerProtos.ContainerCommandRequestProto msg,
-      byte[] data, ChunkInfo info) {
-    Preconditions.checkNotNull(msg);
-
-    ContainerProtos.ReadChunkResponseProto.Builder readChunkresponse =
-        ContainerProtos.ReadChunkResponseProto.newBuilder();
-    readChunkresponse.setChunkData(info.getProtoBufMessage());
-    readChunkresponse.setData(ByteString.copyFrom(data));
-    readChunkresponse.setPipeline(msg.getGetSmallFile().getKey().getPipeline());
-
-    ContainerProtos.GetSmallFileResponseProto.Builder getSmallFile =
-        ContainerProtos.GetSmallFileResponseProto.newBuilder();
-    getSmallFile.setData(readChunkresponse.build());
-    ContainerProtos.ContainerCommandResponseProto.Builder builder =
-        ContainerUtils.getContainerResponse(msg, ContainerProtos.Result
-            .SUCCESS, "");
-    builder.setCmdType(ContainerProtos.Type.GetSmallFile);
-    builder.setGetSmallFile(getSmallFile);
-    return builder.build();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
deleted file mode 100644
index 639de16..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-/**
- * Helper class to convert Protobuf to Java classes.
- */
-public class KeyData {
-  private final String containerName;
-  private final String keyName;
-  private final Map<String, String> metadata;
-
-  /**
-   * Please note : when we are working with keys, we don't care what they point
-   * to. So we We don't read chunkinfo nor validate them. It is responsibility
-   * of higher layer like ozone. We just read and write data from network.
-   */
-  private List<ContainerProtos.ChunkInfo> chunks;
-
-  /**
-   * Constructs a KeyData Object.
-   *
-   * @param containerName
-   * @param keyName
-   */
-  public KeyData(String containerName, String keyName) {
-    this.containerName = containerName;
-    this.keyName = keyName;
-    this.metadata = new TreeMap<>();
-  }
-
-  /**
-   * Returns a keyData object from the protobuf data.
-   *
-   * @param data - Protobuf data.
-   * @return - KeyData
-   * @throws IOException
-   */
-  public static KeyData getFromProtoBuf(ContainerProtos.KeyData data) throws
-      IOException {
-    KeyData keyData = new KeyData(data.getContainerName(), data.getName());
-    for (int x = 0; x < data.getMetadataCount(); x++) {
-      keyData.addMetadata(data.getMetadata(x).getKey(),
-          data.getMetadata(x).getValue());
-    }
-    keyData.setChunks(data.getChunksList());
-    return keyData;
-  }
-
-  /**
-   * Returns a Protobuf message from KeyData.
-   * @return Proto Buf Message.
-   */
-  public ContainerProtos.KeyData getProtoBufMessage() {
-    ContainerProtos.KeyData.Builder builder =
-        ContainerProtos.KeyData.newBuilder();
-    builder.setContainerName(this.containerName);
-    builder.setName(this.getKeyName());
-    builder.addAllChunks(this.chunks);
-    for (Map.Entry<String, String> entry : metadata.entrySet()) {
-      OzoneProtos.KeyValue.Builder keyValBuilder =
-          OzoneProtos.KeyValue.newBuilder();
-      builder.addMetadata(keyValBuilder.setKey(entry.getKey())
-          .setValue(entry.getValue()).build());
-    }
-    return builder.build();
-  }
-
-  /**
-   * Adds metadata.
-   *
-   * @param key   - Key
-   * @param value - Value
-   * @throws IOException
-   */
-  public synchronized void addMetadata(String key, String value) throws
-      IOException {
-    if (this.metadata.containsKey(key)) {
-      throw new IOException("This key already exists. Key " + key);
-    }
-    metadata.put(key, value);
-  }
-
-  public synchronized Map<String, String> getMetadata() {
-    return Collections.unmodifiableMap(this.metadata);
-  }
-
-  /**
-   * Returns value of a key.
-   */
-  public synchronized String getValue(String key) {
-    return metadata.get(key);
-  }
-
-  /**
-   * Deletes a metadata entry from the map.
-   *
-   * @param key - Key
-   */
-  public synchronized void deleteKey(String key) {
-    metadata.remove(key);
-  }
-
-  /**
-   * Returns chunks list.
-   *
-   * @return list of chunkinfo.
-   */
-  public List<ContainerProtos.ChunkInfo> getChunks() {
-    return chunks;
-  }
-
-  /**
-   * Returns container Name.
-   * @return String.
-   */
-  public String getContainerName() {
-    return containerName;
-  }
-
-  /**
-   * Returns KeyName.
-   * @return String.
-   */
-  public String getKeyName() {
-    return keyName;
-  }
-
-  /**
-   * Sets Chunk list.
-   *
-   * @param chunks - List of chunks.
-   */
-  public void setChunks(List<ContainerProtos.ChunkInfo> chunks) {
-    this.chunks = chunks;
-  }
-
-  /**
-   * Get the total size of chunks allocated for the key.
-   * @return total size of the key.
-   */
-  public long getSize() {
-    return chunks.parallelStream().mapToLong(e->e.getLen()).sum();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
deleted file mode 100644
index b1c2b00..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-import org.apache.hadoop.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
-import org.apache.hadoop.utils.MetadataStore;
-
-import java.io.IOException;
-import java.nio.charset.Charset;
-
-import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
-    .Result.UNABLE_TO_READ_METADATA_DB;
-import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
-    .Result.NO_SUCH_KEY;
-
-/**
- * Utils functions to help key functions.
- */
-public final class KeyUtils {
-  public static final String ENCODING_NAME = "UTF-8";
-  public static final Charset ENCODING = Charset.forName(ENCODING_NAME);
-
-  /**
-   * Never Constructed.
-   */
-  private KeyUtils() {
-  }
-
-  /**
-   * Get a DB handler for a given container.
-   * If the handler doesn't exist in cache yet, first create one and
-   * add into cache. This function is called with containerManager
-   * ReadLock held.
-   *
-   * @param container container.
-   * @param conf configuration.
-   * @return MetadataStore handle.
-   * @throws StorageContainerException
-   */
-  public static MetadataStore getDB(ContainerData container,
-      Configuration conf) throws StorageContainerException {
-    Preconditions.checkNotNull(container);
-    ContainerCache cache = ContainerCache.getInstance(conf);
-    Preconditions.checkNotNull(cache);
-    try {
-      return cache.getDB(container.getContainerName(), container.getDBPath());
-    } catch (IOException ex) {
-      String message =
-          String.format("Unable to open DB. DB Name: %s, Path: %s. ex: %s",
-          container.getContainerName(), container.getDBPath(), ex.getMessage());
-      throw new StorageContainerException(message, UNABLE_TO_READ_METADATA_DB);
-    }
-  }
-
-  /**
-   * Remove a DB handler from cache.
-   *
-   * @param container - Container data.
-   * @param conf - Configuration.
-   */
-  public static void removeDB(ContainerData container,
-      Configuration conf) {
-    Preconditions.checkNotNull(container);
-    ContainerCache cache = ContainerCache.getInstance(conf);
-    Preconditions.checkNotNull(cache);
-    cache.removeDB(container.getContainerName());
-  }
-  /**
-   * Shutdown all DB Handles.
-   *
-   * @param cache - Cache for DB Handles.
-   */
-  @SuppressWarnings("unchecked")
-  public static void shutdownCache(ContainerCache cache)  {
-    cache.shutdownCache();
-  }
-
-  /**
-   * Returns successful keyResponse.
-   * @param msg - Request.
-   * @return Response.
-   */
-  public static ContainerProtos.ContainerCommandResponseProto
-      getKeyResponse(ContainerProtos.ContainerCommandRequestProto msg) {
-    return ContainerUtils.getContainerResponse(msg);
-  }
-
-
-  public static ContainerProtos.ContainerCommandResponseProto
-      getKeyDataResponse(ContainerProtos.ContainerCommandRequestProto msg,
-      KeyData data) {
-    ContainerProtos.GetKeyResponseProto.Builder getKey = ContainerProtos
-        .GetKeyResponseProto.newBuilder();
-    getKey.setKeyData(data.getProtoBufMessage());
-    ContainerProtos.ContainerCommandResponseProto.Builder builder =
-        ContainerUtils.getContainerResponse(msg, ContainerProtos.Result
-            .SUCCESS, "");
-    builder.setGetKey(getKey);
-    return  builder.build();
-  }
-
-  /**
-   * Parses the key name from a bytes array.
-   * @param bytes key name in bytes.
-   * @return key name string.
-   */
-  public static String getKeyName(byte[] bytes) {
-    return new String(bytes, ENCODING);
-  }
-
-  /**
-   * Parses the {@link KeyData} from a bytes array.
-   *
-   * @param bytes key data in bytes.
-   * @return key data.
-   * @throws IOException if the bytes array is malformed or invalid.
-   */
-  public static KeyData getKeyData(byte[] bytes) throws IOException {
-    try {
-      ContainerProtos.KeyData kd = ContainerProtos.KeyData.parseFrom(bytes);
-      KeyData data = KeyData.getFromProtoBuf(kd);
-      return data;
-    } catch (IOException e) {
-      throw new StorageContainerException("Failed to parse key data from the" +
-              " bytes array.", NO_SUCH_KEY);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java
deleted file mode 100644
index 21f31e1..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-/**
- Contains protocol buffer helper classes and utilites used in
- impl.
- **/
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
deleted file mode 100644
index 9052df7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.impl;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkUtils;
-import org.apache.hadoop.ozone.container.common.interfaces.ChunkManager;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
-import org.apache.hadoop.scm.container.common.helpers.Pipeline;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.file.Files;
-import java.nio.file.StandardCopyOption;
-import java.security.NoSuchAlgorithmException;
-import java.util.concurrent.ExecutionException;
-
-import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
-    .Result.CONTAINER_INTERNAL_ERROR;
-import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
-    .Result.UNSUPPORTED_REQUEST;
-
-/**
- * An implementation of ChunkManager that is used by default in ozone.
- */
-public class ChunkManagerImpl implements ChunkManager {
-  static final Logger LOG =
-      LoggerFactory.getLogger(ChunkManagerImpl.class);
-
-  private final ContainerManager containerManager;
-
-  /**
-   * Constructs a ChunkManager.
-   *
-   * @param manager - ContainerManager.
-   */
-  public ChunkManagerImpl(ContainerManager manager) {
-    this.containerManager = manager;
-  }
-
-  /**
-   * writes a given chunk.
-   *
-   * @param pipeline - Name and the set of machines that make this container.
-   * @param keyName - Name of the Key.
-   * @param info - ChunkInfo.
-   * @throws StorageContainerException
-   */
-  @Override
-  public void writeChunk(Pipeline pipeline, String keyName, ChunkInfo info,
-      byte[] data, ContainerProtos.Stage stage)
-      throws StorageContainerException {
-    // we don't want container manager to go away while we are writing chunks.
-    containerManager.readLock();
-
-    // TODO : Take keyManager Write lock here.
-    try {
-      Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
-      String containerName = pipeline.getContainerName();
-      Preconditions.checkNotNull(containerName,
-          "Container name cannot be null");
-      ContainerData container =
-          containerManager.readContainer(containerName);
-      File chunkFile = ChunkUtils.validateChunk(pipeline, container, info);
-      File tmpChunkFile = getTmpChunkFile(chunkFile, info);
-
-      LOG.debug("writing chunk:{} chunk stage:{} chunk file:{} tmp chunk file",
-          info.getChunkName(), stage, chunkFile, tmpChunkFile);
-      switch (stage) {
-      case WRITE_DATA:
-        ChunkUtils.writeData(tmpChunkFile, info, data);
-        break;
-      case COMMIT_DATA:
-        commitChunk(tmpChunkFile, chunkFile, containerName, info.getLen());
-        break;
-      case COMBINED:
-        // directly write to the chunk file
-        long oldSize = chunkFile.length();
-        ChunkUtils.writeData(chunkFile, info, data);
-        long newSize = chunkFile.length();
-        containerManager.incrBytesUsed(containerName, newSize - oldSize);
-        containerManager.incrWriteCount(containerName);
-        containerManager.incrWriteBytes(containerName, info.getLen());
-        break;
-      }
-    } catch (ExecutionException | NoSuchAlgorithmException | IOException e) {
-      LOG.error("write data failed. error: {}", e);
-      throw new StorageContainerException("Internal error: ", e,
-          CONTAINER_INTERNAL_ERROR);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      LOG.error("write data failed. error: {}", e);
-      throw new StorageContainerException("Internal error: ", e,
-          CONTAINER_INTERNAL_ERROR);
-    } finally {
-      containerManager.readUnlock();
-    }
-  }
-
-  // Create a temporary file in the same container directory
-  // in the format "<chunkname>.tmp"
-  private static File getTmpChunkFile(File chunkFile, ChunkInfo info)
-      throws StorageContainerException {
-    return new File(chunkFile.getParent(),
-        chunkFile.getName() +
-            OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER +
-            OzoneConsts.CONTAINER_TEMPORARY_CHUNK_PREFIX);
-  }
-
-  // Commit the chunk by renaming the temporary chunk file to chunk file
-  private void commitChunk(File tmpChunkFile, File chunkFile,
-      String containerName, long chunkLen) throws IOException {
-    long sizeDiff = tmpChunkFile.length() - chunkFile.length();
-    // It is safe to replace here as the earlier chunk if existing should be
-    // caught as part of validateChunk
-    Files.move(tmpChunkFile.toPath(), chunkFile.toPath(),
-        StandardCopyOption.REPLACE_EXISTING);
-    containerManager.incrBytesUsed(containerName, sizeDiff);
-    containerManager.incrWriteCount(containerName);
-    containerManager.incrWriteBytes(containerName, chunkLen);
-  }
-
-  /**
-   * reads the data defined by a chunk.
-   *
-   * @param pipeline - container pipeline.
-   * @param keyName - Name of the Key
-   * @param info - ChunkInfo.
-   * @return byte array
-   * @throws StorageContainerException
-   * TODO: Right now we do not support partial reads and writes of chunks.
-   * TODO: Explore if we need to do that for ozone.
-   */
-  @Override
-  public byte[] readChunk(Pipeline pipeline, String keyName, ChunkInfo info)
-      throws StorageContainerException {
-    containerManager.readLock();
-    try {
-      Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
-      String containerName = pipeline.getContainerName();
-      Preconditions.checkNotNull(containerName,
-          "Container name cannot be null");
-      ContainerData container =
-          containerManager.readContainer(containerName);
-      File chunkFile = ChunkUtils.getChunkFile(pipeline, container, info);
-      ByteBuffer data =  ChunkUtils.readData(chunkFile, info);
-      containerManager.incrReadCount(containerName);
-      containerManager.incrReadBytes(containerName, chunkFile.length());
-      return data.array();
-    } catch (ExecutionException | NoSuchAlgorithmException e) {
-      LOG.error("read data failed. error: {}", e);
-      throw new StorageContainerException("Internal error: ",
-          e, CONTAINER_INTERNAL_ERROR);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      LOG.error("read data failed. error: {}", e);
-      throw new StorageContainerException("Internal error: ",
-          e, CONTAINER_INTERNAL_ERROR);
-    } finally {
-      containerManager.readUnlock();
-    }
-  }
-
-  /**
-   * Deletes a given chunk.
-   *
-   * @param pipeline - Pipeline.
-   * @param keyName - Key Name
-   * @param info - Chunk Info
-   * @throws StorageContainerException
-   */
-  @Override
-  public void deleteChunk(Pipeline pipeline, String keyName, ChunkInfo info)
-      throws StorageContainerException {
-    containerManager.readLock();
-    try {
-      Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
-      String containerName = pipeline.getContainerName();
-      Preconditions.checkNotNull(containerName,
-          "Container name cannot be null");
-      File chunkFile = ChunkUtils.getChunkFile(pipeline, containerManager
-          .readContainer(containerName), info);
-      if ((info.getOffset() == 0) && (info.getLen() == chunkFile.length())) {
-        long chunkFileSize = chunkFile.length();
-        FileUtil.fullyDelete(chunkFile);
-        containerManager.decrBytesUsed(containerName, chunkFileSize);
-      } else {
-        LOG.error("Not Supported Operation. Trying to delete a " +
-            "chunk that is in shared file. chunk info : " + info.toString());
-        throw new StorageContainerException("Not Supported Operation. " +
-            "Trying to delete a chunk that is in shared file. chunk info : "
-            + info.toString(), UNSUPPORTED_REQUEST);
-      }
-    } finally {
-      containerManager.readUnlock();
-    }
-  }
-
-  /**
-   * Shutdown the chunkManager.
-   *
-   * In the chunkManager we haven't acquired any resources, so nothing to do
-   * here. This call is made with containerManager Writelock held.
-   */
-  @Override
-  public void shutdown() {
-    Preconditions.checkState(this.containerManager.hasWriteLock());
-  }
-}
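Editor's note: the COMMIT_DATA branch of the removed writeChunk()/commitChunk() pair above is a plain write-to-temporary-file-then-rename scheme. The standalone sketch below illustrates the same pattern in isolation; the class, method, and file names are hypothetical and only the java.nio.file calls are real, so it is not the Ozone code itself.

// Standalone illustration of the write-then-commit pattern used above.
// All names here are hypothetical; only java.nio.file APIs are real.
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public final class TmpFileCommitSketch {

  // Write the payload to "<target>.tmp" first, so a crash mid-write never
  // leaves a partially written chunk at the final location.
  static void writeToTmp(Path target, byte[] data) throws IOException {
    Path tmp = target.resolveSibling(target.getFileName() + ".tmp");
    Files.write(tmp, data);
  }

  // Commit by moving the temporary file over the target. REPLACE_EXISTING
  // mirrors the deleted commitChunk(); earlier validation is expected to
  // have caught an unexpected pre-existing chunk.
  static void commit(Path target) throws IOException {
    Path tmp = target.resolveSibling(target.getFileName() + ".tmp");
    Files.move(tmp, target, StandardCopyOption.REPLACE_EXISTING);
  }

  public static void main(String[] args) throws IOException {
    Path chunk = Path.of("chunk_0001");
    writeToTmp(chunk, "hello chunk".getBytes());
    commit(chunk);
    System.out.println("committed " + chunk + " (" + Files.size(chunk) + " bytes)");
  }
}

REPLACE_EXISTING is what the deleted commitChunk() used; adding StandardCopyOption.ATOMIC_MOVE would make the rename atomic where the filesystem supports it, but the removed code did not request it.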

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
deleted file mode 100644
index b842493..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.impl;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.interfaces
-    .ContainerLocationManager;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerLocationManagerMXBean;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.ObjectName;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.LinkedList;
-import java.util.List;
-
-/**
- * A class that tells the ContainerManager where to place the containers.
- * Please note : There is *no* one-to-one correlation between metadata
- * Locations and data Locations.
- *
- *  For example : A user could map all container files to an
- *  SSD but leave data/metadata on a bunch of other disks.
- */
-public class ContainerLocationManagerImpl implements ContainerLocationManager,
-    ContainerLocationManagerMXBean {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerLocationManagerImpl.class);
-
-  private final List<ContainerStorageLocation> dataLocations;
-  private int currentIndex;
-  private final List<StorageLocation> metadataLocations;
-  private final ObjectName jmxbean;
-
-  /**
-   * Constructs a Location Manager.
-   * @param metadataLocations  - Refers to the metadataLocations
-   * where we store the container metadata.
-   * @param dataDirs - Locations where we store the actual
-   * data or chunk files.
-   * @param conf - configuration.
-   * @throws IOException
-   */
-  public ContainerLocationManagerImpl(List<StorageLocation> metadataLocations,
-      List<StorageLocation> dataDirs, Configuration conf)
-      throws IOException {
-    dataLocations = new LinkedList<>();
-    for (StorageLocation dataDir : dataDirs) {
-      dataLocations.add(new ContainerStorageLocation(dataDir, conf));
-    }
-    this.metadataLocations = metadataLocations;
-    jmxbean = MBeans.register("OzoneDataNode",
-        ContainerLocationManager.class.getSimpleName(), this);
-  }
-
-  /**
-   * Returns the path where the container should be placed from a set of
-   * metadataLocations.
-   *
-   * @return A path where we should place this container and metadata.
-   * @throws IOException
-   */
-  @Override
-  public Path getContainerPath()
-      throws IOException {
-    Preconditions.checkState(metadataLocations.size() > 0);
-    int index = currentIndex % metadataLocations.size();
-    return Paths.get(metadataLocations.get(index).getNormalizedUri());
-  }
-
-  /**
-   * Returns the path where the container data files are stored.
-   *
-   * @return  a path where we place the LevelDB and data files of a container.
-   * @throws IOException
-   */
-  @Override
-  public Path getDataPath(String containerName) throws IOException {
-    Path currentPath = Paths.get(
-        dataLocations.get(currentIndex++ % dataLocations.size())
-            .getNormalizedUri());
-    currentPath = currentPath.resolve(OzoneConsts.CONTAINER_PREFIX);
-    return currentPath.resolve(containerName);
-  }
-
-  @Override
-  public StorageLocationReport[] getLocationReport() throws IOException {
-    StorageLocationReport[] reports =
-        new StorageLocationReport[dataLocations.size()];
-    for (int idx = 0; idx < dataLocations.size(); idx++) {
-      ContainerStorageLocation loc = dataLocations.get(idx);
-      long scmUsed = 0;
-      long remaining = 0;
-      try {
-        scmUsed = loc.getScmUsed();
-        remaining = loc.getAvailable();
-      } catch (IOException ex) {
-        LOG.warn("Failed to get scmUsed and remaining for container " +
-            "storage location {}", loc.getNormalizedUri(), ex);
-        // reset scmUsed and remaining if df/du failed.
-        scmUsed = 0;
-        remaining = 0;
-      }
-
-      // TODO: handle failed storage
-      // For now, include storage report for location that failed to get df/du.
-      StorageLocationReport r = new StorageLocationReport(
-          loc.getStorageUuId(), false, loc.getCapacity(),
-          scmUsed, remaining);
-      reports[idx] = r;
-    }
-    return reports;
-  }
-
-  /**
-   * Supports clean shutdown of container location du threads.
-   *
-   * @throws IOException
-   */
-  @Override
-  public void shutdown() throws IOException {
-    for (ContainerStorageLocation loc: dataLocations) {
-      loc.shutdown();
-    }
-    MBeans.unregister(jmxbean);
-  }
-}
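Editor's note: getDataPath() above spreads new containers across the configured data directories with a simple currentIndex++ modulo round robin. The sketch below shows that placement idea on its own; the class name, the hard-coded "containers" subdirectory, and the synchronization are assumptions for illustration, not the Ozone ContainerLocationManager API.

// Standalone sketch of the round-robin placement idea in getDataPath() above.
// Names are hypothetical; this is not the Ozone ContainerLocationManager API.
import java.nio.file.Path;
import java.util.List;

public final class RoundRobinPlacementSketch {
  private final List<Path> dataDirs;
  private int currentIndex;

  RoundRobinPlacementSketch(List<Path> dataDirs) {
    if (dataDirs.isEmpty()) {
      throw new IllegalArgumentException("need at least one data dir");
    }
    this.dataDirs = dataDirs;
  }

  // Each call advances the index, so consecutive containers land on
  // consecutive data directories (modulo the number of directories).
  synchronized Path pathFor(String containerName) {
    Path dir = dataDirs.get(currentIndex++ % dataDirs.size());
    return dir.resolve("containers").resolve(containerName);
  }

  public static void main(String[] args) {
    RoundRobinPlacementSketch p = new RoundRobinPlacementSketch(
        List.of(Path.of("/data1"), Path.of("/data2"), Path.of("/data3")));
    for (int i = 0; i < 5; i++) {
      System.out.println(p.pathFor("c" + i));
    }
  }
}

Unlike the removed class, the sketch synchronizes pathFor(); that is one way to keep the shared index consistent if several threads request paths at once, and the subdirectory name is purely illustrative.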

