http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
deleted file mode 100644
index 05a632e..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ /dev/null
@@ -1,492 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.client.io;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.FSExceptionMessages;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.Result;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos.ReplicationType;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos.ReplicationFactor;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
-import org.apache.hadoop.scm.XceiverClientManager;
-import org.apache.hadoop.scm.XceiverClientSpi;
-import org.apache.hadoop.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.scm.protocolPB
-    .StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.scm.storage.ChunkOutputStream;
-import org.apache.hadoop.scm.storage.ContainerProtocolCalls;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Maintains a list of ChunkOutputStream instances and writes to them based
- * on the current offset.
- *
- * Note that one call may write to multiple containers. If the write to the
- * first container succeeds but a later one fails, the writes that already
- * succeeded are not rolled back.
- *
- * TODO : currently does not support multi-threaded access.
- */
-public class ChunkGroupOutputStream extends OutputStream {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ChunkGroupOutputStream.class);
-
-  // array list's get(index) is O(1)
-  private final ArrayList<ChunkOutputStreamEntry> streamEntries;
-  private int currentStreamIndex;
-  private long byteOffset;
-  private final KeySpaceManagerProtocolClientSideTranslatorPB ksmClient;
-  private final
-      StorageContainerLocationProtocolClientSideTranslatorPB scmClient;
-  private final KsmKeyArgs keyArgs;
-  private final int openID;
-  private final XceiverClientManager xceiverClientManager;
-  private final int chunkSize;
-  private final String requestID;
-  private boolean closed;
-
-  /**
-   * A constructor for testing purposes only.
-   */
-  @VisibleForTesting
-  public ChunkGroupOutputStream() {
-    streamEntries = new ArrayList<>();
-    ksmClient = null;
-    scmClient = null;
-    keyArgs = null;
-    openID = -1;
-    xceiverClientManager = null;
-    chunkSize = 0;
-    requestID = null;
-    closed = false;
-  }
-
-  /**
-   * For testing purposes only. Instead of building the output stream from
-   * blocks, it takes an externally provided stream.
-   *
-   * @param outputStream an existing writable output stream
-   * @param length the length of data to write to the stream
-   */
-  @VisibleForTesting
-  public synchronized void addStream(OutputStream outputStream, long length) {
-    streamEntries.add(new ChunkOutputStreamEntry(outputStream, length));
-  }
-
-  @VisibleForTesting
-  public List<ChunkOutputStreamEntry> getStreamEntries() {
-    return streamEntries;
-  }
-
-  public ChunkGroupOutputStream(
-      OpenKeySession handler, XceiverClientManager xceiverClientManager,
-      StorageContainerLocationProtocolClientSideTranslatorPB scmClient,
-      KeySpaceManagerProtocolClientSideTranslatorPB ksmClient,
-      int chunkSize, String requestId, ReplicationFactor factor,
-      ReplicationType type) throws IOException {
-    this.streamEntries = new ArrayList<>();
-    this.currentStreamIndex = 0;
-    this.byteOffset = 0;
-    this.ksmClient = ksmClient;
-    this.scmClient = scmClient;
-    KsmKeyInfo info = handler.getKeyInfo();
-    this.keyArgs = new KsmKeyArgs.Builder()
-        .setVolumeName(info.getVolumeName())
-        .setBucketName(info.getBucketName())
-        .setKeyName(info.getKeyName())
-        .setType(type)
-        .setFactor(factor)
-        .setDataSize(info.getDataSize()).build();
-    this.openID = handler.getId();
-    this.xceiverClientManager = xceiverClientManager;
-    this.chunkSize = chunkSize;
-    this.requestID = requestId;
-    LOG.debug("Expecting open key with one block, but got {}.",
-        info.getKeyLocationVersions().size());
-  }
-
-  /**
-   * When a key is opened, some blocks may already have been allocated to it
-   * for this open session. To make use of these blocks, we need to add them
-   * to the stream entries. However, a key's version also includes blocks
-   * from previous versions; we need to avoid adding those old blocks to the
-   * stream entries, because they should not be picked for write. To do this,
-   * the following method adds only the blocks created in this particular
-   * open version to the stream entries.
-   *
-   * @param version the set of blocks that are pre-allocated.
-   * @param openVersion the version corresponding to the pre-allocation.
-   * @throws IOException
-   */
-  public void addPreallocateBlocks(KsmKeyLocationInfoGroup version,
-      long openVersion) throws IOException {
-    // The server may return any number of blocks (zero or more). Only the
-    // blocks allocated in this open session (i.e. whose createVersion equals
-    // the open session version) are added.
-    for (KsmKeyLocationInfo subKeyInfo : version.getLocationList()) {
-      if (subKeyInfo.getCreateVersion() == openVersion) {
-        checkKeyLocationInfo(subKeyInfo);
-      }
-    }
-  }
-
-  private void checkKeyLocationInfo(KsmKeyLocationInfo subKeyInfo)
-      throws IOException {
-    String containerKey = subKeyInfo.getBlockID();
-    String containerName = subKeyInfo.getContainerName();
-    Pipeline pipeline = scmClient.getContainer(containerName);
-    XceiverClientSpi xceiverClient =
-        xceiverClientManager.acquireClient(pipeline);
-    // create container if needed
-    if (subKeyInfo.getShouldCreateContainer()) {
-      try {
-        ContainerProtocolCalls.createContainer(xceiverClient, requestID);
-        scmClient.notifyObjectStageChange(
-            ObjectStageChangeRequestProto.Type.container,
-            containerName, ObjectStageChangeRequestProto.Op.create,
-            ObjectStageChangeRequestProto.Stage.complete);
-      } catch (StorageContainerException ex) {
-        if (ex.getResult().equals(Result.CONTAINER_EXISTS)) {
-          // the container already exists; this should never happen
-          LOG.debug("Container {} already exists.", containerName);
-        } else {
-          LOG.error("Container creation failed for {}.", containerName, ex);
-          throw ex;
-        }
-      }
-    }
-    streamEntries.add(new ChunkOutputStreamEntry(containerKey,
-        keyArgs.getKeyName(), xceiverClientManager, xceiverClient, requestID,
-        chunkSize, subKeyInfo.getLength()));
-  }
-
-
-  @VisibleForTesting
-  public long getByteOffset() {
-    return byteOffset;
-  }
-
-
-  @Override
-  public synchronized void write(int b) throws IOException {
-    checkNotClosed();
-
-    if (streamEntries.size() <= currentStreamIndex) {
-      Preconditions.checkNotNull(ksmClient);
-      // allocate a new block; if an exception occurs, log an error and
-      // throw it to the caller directly, failing the write.
-      try {
-        allocateNewBlock(currentStreamIndex);
-      } catch (IOException ioe) {
-        LOG.error("Allocate block fail when writing.");
-        throw ioe;
-      }
-    }
-    ChunkOutputStreamEntry entry = streamEntries.get(currentStreamIndex);
-    entry.write(b);
-    if (entry.getRemaining() <= 0) {
-      currentStreamIndex += 1;
-    }
-    byteOffset += 1;
-  }
-
-  /**
-   * Try to write the byte sequence b[off:off+len) to the streams.
-   *
-   * NOTE: Throws an exception if the data cannot fit into the remaining
-   * space, in which case nothing is written.
-   * TODO: may need to revisit this behaviour.
-   *
-   * @param b byte data
-   * @param off starting offset
-   * @param len length to write
-   * @throws IOException
-   */
-  @Override
-  public synchronized void write(byte[] b, int off, int len)
-      throws IOException {
-    checkNotClosed();
-
-    if (b == null) {
-      throw new NullPointerException();
-    }
-    if ((off < 0) || (off > b.length) || (len < 0) ||
-        ((off + len) > b.length) || ((off + len) < 0)) {
-      throw new IndexOutOfBoundsException();
-    }
-    if (len == 0) {
-      return;
-    }
-    int succeededAllocates = 0;
-    while (len > 0) {
-      if (streamEntries.size() <= currentStreamIndex) {
-        Preconditions.checkNotNull(ksmClient);
-        // allocate a new block; if an exception occurs, log an error and
-        // throw it to the caller directly, failing the write.
-        try {
-          allocateNewBlock(currentStreamIndex);
-          succeededAllocates += 1;
-        } catch (IOException ioe) {
-          LOG.error("Allocating more blocks for this write failed; already " +
-              "allocated " + succeededAllocates + " blocks for this write.");
-          throw ioe;
-        }
-      }
-      // in theory, this condition can never be violated due to the check
-      // above; still, do a sanity check.
-      Preconditions.checkArgument(currentStreamIndex < streamEntries.size());
-      ChunkOutputStreamEntry current = streamEntries.get(currentStreamIndex);
-      int writeLen = Math.min(len, (int)current.getRemaining());
-      current.write(b, off, writeLen);
-      if (current.getRemaining() <= 0) {
-        currentStreamIndex += 1;
-      }
-      len -= writeLen;
-      off += writeLen;
-      byteOffset += writeLen;
-    }
-  }
-
-  /**
-   * Contact KSM to get a new block and set it at the given index (e.g. the
-   * first block has index 0, the second has index 1, and so on).
-   *
-   * The returned block is wrapped in a new ChunkOutputStreamEntry for
-   * writing.
-   *
-   * @param index the index of the block.
-   * @throws IOException
-   */
-  private void allocateNewBlock(int index) throws IOException {
-    KsmKeyLocationInfo subKeyInfo = ksmClient.allocateBlock(keyArgs, openID);
-    checkKeyLocationInfo(subKeyInfo);
-  }
-
-  @Override
-  public synchronized void flush() throws IOException {
-    checkNotClosed();
-    for (int i = 0; i <= currentStreamIndex; i++) {
-      streamEntries.get(i).flush();
-    }
-  }
-
-  /**
-   * Commit the key to KSM; this will add the blocks as the new key blocks.
-   *
-   * @throws IOException
-   */
-  @Override
-  public synchronized void close() throws IOException {
-    if (closed) {
-      return;
-    }
-    closed = true;
-    for (ChunkOutputStreamEntry entry : streamEntries) {
-      if (entry != null) {
-        entry.close();
-      }
-    }
-    if (keyArgs != null) {
-      // in test, this could be null
-      keyArgs.setDataSize(byteOffset);
-      ksmClient.commitKey(keyArgs, openID);
-    } else {
-      LOG.warn("Closing ChunkGroupOutputStream, but key args is null");
-    }
-  }
-
-  /**
-   * Builder class of ChunkGroupOutputStream.
-   */
-  public static class Builder {
-    private OpenKeySession openHandler;
-    private XceiverClientManager xceiverManager;
-    private StorageContainerLocationProtocolClientSideTranslatorPB scmClient;
-    private KeySpaceManagerProtocolClientSideTranslatorPB ksmClient;
-    private int chunkSize;
-    private String requestID;
-    private ReplicationType type;
-    private ReplicationFactor factor;
-
-    public Builder setHandler(OpenKeySession handler) {
-      this.openHandler = handler;
-      return this;
-    }
-
-    public Builder setXceiverClientManager(XceiverClientManager manager) {
-      this.xceiverManager = manager;
-      return this;
-    }
-
-    public Builder setScmClient(
-        StorageContainerLocationProtocolClientSideTranslatorPB client) {
-      this.scmClient = client;
-      return this;
-    }
-
-    public Builder setKsmClient(
-        KeySpaceManagerProtocolClientSideTranslatorPB client) {
-      this.ksmClient = client;
-      return this;
-    }
-
-    public Builder setChunkSize(int size) {
-      this.chunkSize = size;
-      return this;
-    }
-
-    public Builder setRequestID(String id) {
-      this.requestID = id;
-      return this;
-    }
-
-    public Builder setType(ReplicationType replicationType) {
-      this.type = replicationType;
-      return this;
-    }
-
-    public Builder setFactor(ReplicationFactor replicationFactor) {
-      this.factor = replicationFactor;
-      return this;
-    }
-
-    public ChunkGroupOutputStream build() throws IOException {
-      return new ChunkGroupOutputStream(openHandler, xceiverManager, scmClient,
-          ksmClient, chunkSize, requestID, factor, type);
-    }
-  }
-
-  private static class ChunkOutputStreamEntry extends OutputStream {
-    private OutputStream outputStream;
-    private final String containerKey;
-    private final String key;
-    private final XceiverClientManager xceiverClientManager;
-    private final XceiverClientSpi xceiverClient;
-    private final String requestId;
-    private final int chunkSize;
-    // total number of bytes that should be written to this stream
-    private final long length;
-    // the current position of this stream 0 <= currentPosition < length
-    private long currentPosition;
-
-    ChunkOutputStreamEntry(String containerKey, String key,
-        XceiverClientManager xceiverClientManager,
-        XceiverClientSpi xceiverClient, String requestId, int chunkSize,
-        long length) {
-      this.outputStream = null;
-      this.containerKey = containerKey;
-      this.key = key;
-      this.xceiverClientManager = xceiverClientManager;
-      this.xceiverClient = xceiverClient;
-      this.requestId = requestId;
-      this.chunkSize = chunkSize;
-
-      this.length = length;
-      this.currentPosition = 0;
-    }
-
-    /**
-     * For testing purposes: takes an externally created stream instance.
-     * @param  outputStream an existing writable output stream
-     * @param  length the length of data to write to the stream
-     */
-    ChunkOutputStreamEntry(OutputStream outputStream, long length) {
-      this.outputStream = outputStream;
-      this.containerKey = null;
-      this.key = null;
-      this.xceiverClientManager = null;
-      this.xceiverClient = null;
-      this.requestId = null;
-      this.chunkSize = -1;
-
-      this.length = length;
-      this.currentPosition = 0;
-    }
-
-    long getLength() {
-      return length;
-    }
-
-    long getRemaining() {
-      return length - currentPosition;
-    }
-
-    private synchronized void checkStream() {
-      if (this.outputStream == null) {
-        this.outputStream = new ChunkOutputStream(containerKey,
-            key, xceiverClientManager, xceiverClient,
-            requestId, chunkSize);
-      }
-    }
-
-    @Override
-    public void write(int b) throws IOException {
-      checkStream();
-      outputStream.write(b);
-      this.currentPosition += 1;
-    }
-
-    @Override
-    public void write(byte[] b, int off, int len) throws IOException {
-      checkStream();
-      outputStream.write(b, off, len);
-      this.currentPosition += len;
-    }
-
-    @Override
-    public void flush() throws IOException {
-      if (this.outputStream != null) {
-        this.outputStream.flush();
-      }
-    }
-
-    @Override
-    public void close() throws IOException {
-      if (this.outputStream != null) {
-        this.outputStream.close();
-      }
-    }
-  }
-
-  /**
-   * Verify that the output stream is open. Non-blocking; this gives
-   * the last state of the {@link #closed} field.
-   * @throws IOException if the connection is closed.
-   */
-  private void checkNotClosed() throws IOException {
-    if (closed) {
-      throw new IOException(
-          ": " + FSExceptionMessages.STREAM_IS_CLOSED + " Key: " + keyArgs
-              .getKeyName());
-    }
-  }
-}
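
A minimal usage sketch of the deleted class, for reference while reviewing: the session, clients, pre-allocated locations, byte array and open version are assumed to come from the surrounding client code, and the chunk size, request id and replication constants are illustrative, not taken from this diff.

    // Sketch only: handler, manager, scmClient, ksmClient, locations,
    // openVersion and data are placeholders supplied by the caller.
    ChunkGroupOutputStream out = new ChunkGroupOutputStream.Builder()
        .setHandler(handler)                // OpenKeySession from an openKey call
        .setXceiverClientManager(manager)
        .setScmClient(scmClient)
        .setKsmClient(ksmClient)
        .setChunkSize(4 * 1024 * 1024)      // illustrative chunk size
        .setRequestID("example-request")    // illustrative request id
        .setType(ReplicationType.RATIS)     // assumed OzoneProtos enum constants
        .setFactor(ReplicationFactor.THREE)
        .build();
    try {
      // Reuse the blocks pre-allocated in this open session; further blocks
      // are allocated on demand as each stream entry fills up.
      out.addPreallocateBlocks(locations, openVersion);
      out.write(data, 0, data.length);
    } finally {
      out.close(); // commits the key to KSM
    }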

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/LengthInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/LengthInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/LengthInputStream.java
deleted file mode 100644
index baf1887..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/LengthInputStream.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.io;
-
-import java.io.FilterInputStream;
-import java.io.InputStream;
-
-/**
- * An input stream with length.
- */
-public class LengthInputStream extends FilterInputStream {
-
-  private final long length;
-
-  /**
-   * Creates a stream.
-   * @param in the underlying input stream.
-   * @param length the length of the stream.
-   */
-  public LengthInputStream(InputStream in, long length) {
-    super(in);
-    this.length = length;
-  }
-
-  /** @return the length. */
-  public long getLength() {
-    return length;
-  }
-
-  public InputStream getWrappedStream() {
-    return in;
-  }
-}
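
A short sketch of why the explicit length matters: callers can size a buffer from metadata instead of probing the stream. The helper below is hypothetical, not part of this diff.

    // Reads exactly getLength() bytes from a LengthInputStream.
    static byte[] readFully(LengthInputStream in) throws java.io.IOException {
      byte[] buf = new byte[(int) in.getLength()];
      int off = 0;
      while (off < buf.length) {
        int n = in.read(buf, off, buf.length - off);
        if (n < 0) {
          throw new java.io.EOFException("stream shorter than declared length");
        }
        off += n;
      }
      return buf;
    }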

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
deleted file mode 100644
index ca6f7aa..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.io;
-
-
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.KeyData;
-
-/**
- * This class contains methods that define the translation between the Ozone
- * domain model and the storage container domain model.
- */
-final class OzoneContainerTranslation {
-
-  /**
-   * Creates key data intended for reading a container key.
-   *
-   * @param containerName container name
-   * @param containerKey container key
-   * @return KeyData intended for reading the container key
-   */
-  public static KeyData containerKeyDataForRead(String containerName,
-      String containerKey) {
-    return KeyData
-        .newBuilder()
-        .setContainerName(containerName)
-        .setName(containerKey)
-        .build();
-  }
-
-  /**
-   * There is no need to instantiate this class.
-   */
-  private OzoneContainerTranslation() {
-  }
-}
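
Since the class is package-private, only code in org.apache.hadoop.ozone.client.io could call it. A minimal sketch with illustrative names:

    // Builds the KeyData proto used to look up a key inside a container;
    // only the two identifying fields are populated.
    KeyData request = OzoneContainerTranslation.containerKeyDataForRead(
        "container-1",            // illustrative container name
        "volume/bucket/key");     // illustrative container key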

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java
deleted file mode 100644
index 3857bd0..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.io;
-
-import org.apache.hadoop.scm.storage.ChunkInputStream;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * OzoneInputStream is used to read data from Ozone.
- * It uses SCM's {@link ChunkInputStream} for reading the data.
- */
-public class OzoneInputStream extends InputStream {
-
-  private final InputStream inputStream;
-
-  /**
-   * Constructs OzoneInputStream with ChunkInputStream.
-   *
-   * @param inputStream the stream to wrap
-   */
-  public OzoneInputStream(InputStream inputStream) {
-    this.inputStream = inputStream;
-  }
-
-  @Override
-  public int read() throws IOException {
-    return inputStream.read();
-  }
-
-  @Override
-  public synchronized void close() throws IOException {
-    inputStream.close();
-  }
-
-  @Override
-  public int available() throws IOException {
-    return inputStream.available();
-  }
-
-  public InputStream getInputStream() {
-    return inputStream;
-  }
-}
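
A sketch of reading through the wrapper (the helper is hypothetical; a real caller would read into a buffer rather than byte by byte):

    // Drains an OzoneInputStream; read() delegates to the wrapped stream.
    static long countBytes(OzoneInputStream in) throws java.io.IOException {
      long total = 0;
      try {
        while (in.read() != -1) {
          total++;
        }
      } finally {
        in.close();
      }
      return total;
    }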

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java
deleted file mode 100644
index 5369220..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.client.io;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-/**
- * OzoneOutputStream is used to write data into Ozone.
- * It uses {@link ChunkGroupOutputStream} for writing the data.
- */
-public class OzoneOutputStream extends OutputStream {
-
-  private final OutputStream outputStream;
-
-  /**
-   * Constructs OzoneOutputStream with ChunkGroupOutputStream.
-   *
-   * @param outputStream the stream to wrap
-   */
-  public OzoneOutputStream(OutputStream outputStream) {
-    this.outputStream = outputStream;
-  }
-
-  @Override
-  public void write(int b) throws IOException {
-    outputStream.write(b);
-  }
-
-  @Override
-  public void write(byte[] b, int off, int len) throws IOException {
-    outputStream.write(b, off, len);
-  }
-
-  @Override
-  public synchronized void flush() throws IOException {
-    outputStream.flush();
-  }
-
-  @Override
-  public synchronized void close() throws IOException {
-    //commitKey can be done here, if needed.
-    outputStream.close();
-  }
-
-  public OutputStream getOutputStream() {
-    return outputStream;
-  }
-}
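
A sketch of the intended write pattern (hypothetical helper): the class is a thin pass-through, and close() is what finalizes the key, since the wrapped ChunkGroupOutputStream commits to KSM on close.

    static void writeKey(OzoneOutputStream out, byte[] data)
        throws java.io.IOException {
      try {
        out.write(data, 0, data.length);
        out.flush();
      } finally {
        out.close(); // triggers the key commit in the wrapped stream
      }
    }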

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java
deleted file mode 100644
index 493ece8..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.io;
-
-/**
- * This package contains Ozone I/O classes.
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/package-info.java
deleted file mode 100644
index 7e2591a..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-/**
- * This package contains Ozone Client classes.
- */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
deleted file mode 100644
index 64a970e..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ /dev/null
@@ -1,323 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.protocol;
-
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.client.BucketArgs;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.OzoneQuota;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.ReplicationFactor;
-import org.apache.hadoop.ozone.client.ReplicationType;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * An implementer of this interface is capable of connecting to an Ozone
- * cluster and performing client operations. The protocol used for
- * communication is determined by the implementation class specified by the
- * property <code>ozone.client.protocol</code>. The built-in implementations
- * include {@link org.apache.hadoop.ozone.client.rpc.RpcClient} for RPC and
- * {@link  org.apache.hadoop.ozone.client.rest.RestClient} for REST.
- */
-public interface ClientProtocol {
-
-  /**
-   * Creates a new Volume.
-   * @param volumeName Name of the Volume
-   * @throws IOException
-   */
-  void createVolume(String volumeName)
-      throws IOException;
-
-  /**
-   * Creates a new Volume with properties set in VolumeArgs.
-   * @param volumeName Name of the Volume
-   * @param args Properties to be set for the Volume
-   * @throws IOException
-   */
-  void createVolume(String volumeName, VolumeArgs args)
-      throws IOException;
-
-  /**
-   * Sets the owner of volume.
-   * @param volumeName Name of the Volume
-   * @param owner to be set for the Volume
-   * @throws IOException
-   */
-  void setVolumeOwner(String volumeName, String owner) throws IOException;
-
-  /**
-   * Set Volume Quota.
-   * @param volumeName Name of the Volume
-   * @param quota Quota to be set for the Volume
-   * @throws IOException
-   */
-  void setVolumeQuota(String volumeName, OzoneQuota quota)
-      throws IOException;
-
-  /**
-   * Returns {@link OzoneVolume}.
-   * @param volumeName Name of the Volume
-   * @return {@link OzoneVolume}
-   * @throws IOException
-   * */
-  OzoneVolume getVolumeDetails(String volumeName)
-      throws IOException;
-
-  /**
-   * Checks if a Volume exists and the user with a role specified has access
-   * to the Volume.
-   * @param volumeName Name of the Volume
-   * @param acl requested acls which needs to be checked for access
-   * @return Boolean - True if the user with a role can access the volume.
-   * This is possible for owners of the volume and admin users
-   * @throws IOException
-   */
-  boolean checkVolumeAccess(String volumeName, OzoneAcl acl)
-      throws IOException;
-
-  /**
-   * Deletes an empty Volume.
-   * @param volumeName Name of the Volume
-   * @throws IOException
-   */
-  void deleteVolume(String volumeName) throws IOException;
-
-  /**
-   * Lists all volumes in the cluster that match the volumePrefix; the size
-   * of the returned list depends on maxListResult. If the volume prefix is
-   * null, all volumes are returned. The caller has to make multiple calls
-   * to read all volumes.
-   *
-   * @param volumePrefix Volume prefix to match
-   * @param prevVolume Starting point of the list, this volume is excluded
-   * @param maxListResult Max number of volumes to return.
-   * @return {@code List<OzoneVolume>}
-   * @throws IOException
-   */
-  List<OzoneVolume> listVolumes(String volumePrefix, String prevVolume,
-                                int maxListResult)
-      throws IOException;
-
-  /**
-   * Lists all volumes in the cluster that are owned by the specified user
-   * and match the volumePrefix; the size of the returned list depends on
-   * maxListResult. If the user is null, volumes owned by the current user
-   * are returned. If the volume prefix is null, all volumes are returned.
-   * The caller has to make multiple calls to read all volumes.
-   *
-   * @param user User Name
-   * @param volumePrefix Volume prefix to match
-   * @param prevVolume Starting point of the list, this volume is excluded
-   * @param maxListResult Max number of volumes to return.
-   * @return {@code List<OzoneVolume>}
-   * @throws IOException
-   */
-  List<OzoneVolume> listVolumes(String user, String volumePrefix,
-                                    String prevVolume, int maxListResult)
-      throws IOException;
-
-  /**
-   * Creates a new Bucket in the Volume.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @throws IOException
-   */
-  void createBucket(String volumeName, String bucketName)
-      throws IOException;
-
-  /**
-   * Creates a new Bucket in the Volume, with properties set in BucketArgs.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param bucketArgs Bucket Arguments
-   * @throws IOException
-   */
-  void createBucket(String volumeName, String bucketName,
-                    BucketArgs bucketArgs)
-      throws IOException;
-
-  /**
-   * Adds ACLs to the Bucket.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param addAcls ACLs to be added
-   * @throws IOException
-   */
-  void addBucketAcls(String volumeName, String bucketName,
-                     List<OzoneAcl> addAcls)
-      throws IOException;
-
-  /**
-   * Removes ACLs from a Bucket.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param removeAcls ACLs to be removed
-   * @throws IOException
-   */
-  void removeBucketAcls(String volumeName, String bucketName,
-                        List<OzoneAcl> removeAcls)
-      throws IOException;
-
-
-  /**
-   * Enables or disables Bucket Versioning.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param versioning True to enable Versioning, False to disable.
-   * @throws IOException
-   */
-  void setBucketVersioning(String volumeName, String bucketName,
-                           Boolean versioning)
-      throws IOException;
-
-  /**
-   * Sets the Storage Class of a Bucket.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param storageType StorageType to be set
-   * @throws IOException
-   */
-  void setBucketStorageType(String volumeName, String bucketName,
-                            StorageType storageType)
-      throws IOException;
-
-  /**
-   * Deletes a bucket if it is empty.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @throws IOException
-   */
-  void deleteBucket(String volumeName, String bucketName)
-      throws IOException;
-
-  /**
-   * Succeeds if the bucket exists and the user has read access to the
-   * bucket; otherwise throws an exception.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @throws IOException
-   */
-  void checkBucketAccess(String volumeName, String bucketName)
-      throws IOException;
-
-  /**
-   * Returns {@link OzoneBucket}.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @return {@link OzoneBucket}
-   * @throws IOException
-   */
-  OzoneBucket getBucketDetails(String volumeName, String bucketName)
-      throws IOException;
-
-  /**
-   * Returns the list of buckets in the Volume that match the bucketPrefix;
-   * the size of the returned list depends on maxListResult. The caller has
-   * to make multiple calls to read all buckets.
-   * @param volumeName Name of the Volume
-   * @param bucketPrefix Bucket prefix to match
-   * @param prevBucket Starting point of the list, this bucket is excluded
-   * @param maxListResult Max number of buckets to return.
-   * @return {@code List<OzoneBucket>}
-   * @throws IOException
-   */
-  List<OzoneBucket> listBuckets(String volumeName, String bucketPrefix,
-                                String prevBucket, int maxListResult)
-      throws IOException;
-
-  /**
-   * Writes a key in an existing bucket.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param keyName Name of the Key
-   * @param size Size of the data
-   * @param type Replication type of the Key
-   * @param factor Replication factor of the Key
-   * @return {@link OzoneOutputStream}
-   * @throws IOException
-   */
-  OzoneOutputStream createKey(String volumeName, String bucketName,
-                              String keyName, long size, ReplicationType type,
-                              ReplicationFactor factor)
-      throws IOException;
-
-  /**
-   * Reads a key from an existing bucket.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param keyName Name of the Key
-   * @return {@link OzoneInputStream}
-   * @throws IOException
-   */
-  OzoneInputStream getKey(String volumeName, String bucketName, String keyName)
-      throws IOException;
-
-
-  /**
-   * Deletes an existing key.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param keyName Name of the Key
-   * @throws IOException
-   */
-  void deleteKey(String volumeName, String bucketName, String keyName)
-      throws IOException;
-
-
-  /**
-   * Returns the list of keys in {Volume/Bucket} that match the keyPrefix;
-   * the size of the returned list depends on maxListResult. The caller has
-   * to make multiple calls to read all keys.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param keyPrefix Key prefix to match
-   * @param prevKey Starting point of the list, this key is excluded
-   * @param maxListResult Max number of keys to return.
-   * @return {@code List<OzoneKey>}
-   * @throws IOException
-   */
-  List<OzoneKey> listKeys(String volumeName, String bucketName,
-                          String keyPrefix, String prevKey, int maxListResult)
-      throws IOException;
-
-
-  /**
-   * Get OzoneKey.
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param keyName Key name
-   * @return {@link OzoneKey}
-   * @throws IOException
-   */
-  OzoneKey getKeyDetails(String volumeName, String bucketName,
-                         String keyName)
-      throws IOException;
-
-  /**
-   * Close and release the resources.
-   */
-  void close() throws IOException;
-
-}
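
The interface implies an end-to-end flow like the sketch below; the volume, bucket and key names are illustrative, "client" is any ClientProtocol implementation (RpcClient or RestClient per the Javadoc above), and the replication constants are assumptions about the client-side enums.

    void putAndGet(ClientProtocol client, byte[] data) throws IOException {
      client.createVolume("vol1");
      client.createBucket("vol1", "bucket1");
      // write a key, then read it back
      try (OzoneOutputStream out = client.createKey("vol1", "bucket1", "key1",
          data.length, ReplicationType.RATIS, ReplicationFactor.THREE)) {
        out.write(data);
      }
      try (OzoneInputStream in = client.getKey("vol1", "bucket1", "key1")) {
        int firstByte = in.read();
      }
    }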

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java
deleted file mode 100644
index f4890a1..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.protocol;
-
-/**
- * This package contains Ozone client protocol library classes.
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java
deleted file mode 100644
index 93b3417..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest;
-
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-
-import java.util.List;
-import java.util.Random;
-
-/**
- * Default selector that randomly picks one of the REST servers from the
- * list.
- */
-public class DefaultRestServerSelector implements RestServerSelector {
-
-  @Override
-  public ServiceInfo getRestServer(List<ServiceInfo> restServices) {
-    return restServices.get(
-        new Random().nextInt(restServices.size()));
-  }
-}
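
The selector is pluggable; a deterministic alternative to the random default could look like the following sketch (the class name is hypothetical, the interface signature is taken from the diff above):

    public class FirstServerSelector implements RestServerSelector {
      @Override
      public ServiceInfo getRestServer(List<ServiceInfo> restServices) {
        // Always pick the first advertised REST server.
        return restServices.get(0);
      }
    }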

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneException.java
deleted file mode 100644
index 953e399..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneException.java
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest;
-
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-
-/**
- * Class that represents various errors returned by the
- * Ozone Layer.
- */
-@InterfaceAudience.Private
-public class OzoneException extends Exception {
-
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(OzoneException.class);
-  private static final ObjectMapper MAPPER;
-
-  static {
-    MAPPER = new ObjectMapper();
-    MAPPER.setVisibility(
-        MAPPER.getSerializationConfig().getDefaultVisibilityChecker()
-            .withCreatorVisibility(JsonAutoDetect.Visibility.NONE)
-            .withFieldVisibility(JsonAutoDetect.Visibility.NONE)
-            .withGetterVisibility(JsonAutoDetect.Visibility.NONE)
-            .withIsGetterVisibility(JsonAutoDetect.Visibility.NONE)
-            .withSetterVisibility(JsonAutoDetect.Visibility.NONE));
-  }
-
-  @JsonProperty("httpCode")
-  private long httpCode;
-  @JsonProperty("shortMessage")
-  private String shortMessage;
-  @JsonProperty("resource")
-  private String resource;
-  @JsonProperty("message")
-  private String message;
-  @JsonProperty("requestID")
-  private String requestId;
-  @JsonProperty("hostName")
-  private String hostID;
-
-  /**
-   * Constructs a new exception with {@code null} as its detail message. The
-   * cause is not initialized, and may subsequently be initialized by a call
-   * to {@link #initCause}.
-   *
-   * This constructor is needed by the JSON serializer.
-   */
-  public OzoneException() {
-  }
-
-
-  /**
-   * Constructor that allows a shortMessage and exception.
-   *
-   * @param httpCode Error Code
-   * @param shortMessage Short Message
-   * @param ex Exception
-   */
-  public OzoneException(long httpCode, String shortMessage, Exception ex) {
-    super(ex);
-    this.message = ex.getMessage();
-    this.shortMessage = shortMessage;
-    this.httpCode = httpCode;
-  }
-
-
-  /**
-   * Constructor that allows a shortMessage.
-   *
-   * @param httpCode Error Code
-   * @param shortMessage Short Message
-   */
-  public OzoneException(long httpCode, String shortMessage) {
-    this.shortMessage = shortMessage;
-    this.httpCode = httpCode;
-  }
-
-  /**
-   * Constructor that allows a shortMessage and long message.
-   *
-   * @param httpCode Error Code
-   * @param shortMessage Short Message
-   * @param message long error message
-   */
-  public OzoneException(long httpCode, String shortMessage, String message) {
-    this.shortMessage = shortMessage;
-    this.message = message;
-    this.httpCode = httpCode;
-  }
-
-  /**
-   * Constructor that allows a shortMessage, a long message and an exception.
-   *
-   * @param httpCode Error code
-   * @param shortMessage Short message
-   * @param message Long error message
-   * @param ex Exception
-   */
-  public OzoneException(long httpCode, String shortMessage,
-      String message, Exception ex) {
-    super(ex);
-    this.shortMessage = shortMessage;
-    this.message = message;
-    this.httpCode = httpCode;
-  }
-
-  /**
-   * Returns the resource that was involved in the error.
-   *
-   * @return String
-   */
-  public String getResource() {
-    return resource;
-  }
-
-  /**
-   * Sets Resource.
-   *
-   * @param resourceName - Name of the Resource
-   */
-  public void setResource(String resourceName) {
-    this.resource = resourceName;
-  }
-
-  /**
-   * Gets a detailed message for the error.
-   *
-   * @return String
-   */
-  public String getMessage() {
-    return message;
-  }
-
-  /**
-   * Sets the error message.
-   *
-   * @param longMessage - Long message
-   */
-  public void setMessage(String longMessage) {
-    this.message = longMessage;
-  }
-
-  /**
-   * Returns request Id.
-   *
-   * @return String
-   */
-  public String getRequestId() {
-    return requestId;
-  }
-
-  /**
-   * Sets request ID.
-   *
-   * @param ozoneRequestId Request ID generated by the Server
-   */
-  public void setRequestId(String ozoneRequestId) {
-    this.requestId = ozoneRequestId;
-  }
-
-  /**
-   * Returns short error string.
-   *
-   * @return String
-   */
-  public String getShortMessage() {
-    return shortMessage;
-  }
-
-  /**
-   * Sets short error string.
-   *
-   * @param shortError Short Error Code
-   */
-  public void setShortMessage(String shortError) {
-    this.shortMessage = shortError;
-  }
-
-  /**
-   * Returns hostID.
-   *
-   * @return String
-   */
-  public String getHostID() {
-    return hostID;
-  }
-
-  /**
-   * Sets host ID.
-   *
-   * @param hostName host Name
-   */
-  public void setHostID(String hostName) {
-    this.hostID = hostName;
-  }
-
-  /**
-   * Returns http error code.
-   *
-   * @return long
-   */
-  public long getHttpCode() {
-    return httpCode;
-  }
-
-  /**
-   * Sets http status.
-   *
-   * @param httpStatus http error code.
-   */
-  public void setHttpCode(long httpStatus) {
-    this.httpCode = httpStatus;
-  }
-
-  /**
-   * Returns a Json String.
-   *
-   * @return JSON representation of the Error
-   */
-  public String toJsonString() {
-    try {
-      return MAPPER.writeValueAsString(this);
-    } catch (IOException ex) {
-      // TODO : Log this error on server side.
-    }
-    // TODO : Replace this with a JSON Object -- That represents this error.
-    return "500 Internal Server Error";
-  }
-
-  /**
-   * Parses an Exception record.
-   *
-   * @param jsonString - Exception in Json format.
-   *
-   * @return OzoneException Object
-   *
-   * @throws IOException
-   */
-  public static OzoneException parse(String jsonString) throws IOException {
-    return READER.readValue(jsonString);
-  }
-}
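
The exception is designed to travel as JSON between the REST server and the client; a round-trip sketch with illustrative values:

    static void roundTrip() throws java.io.IOException {
      OzoneException err = new OzoneException(404, "NotFound", "no such key");
      err.setRequestId("req-1");                           // illustrative
      err.setResource("vol1/bucket1/key1");                // illustrative
      String wire = err.toJsonString();                    // serialize on the server
      OzoneException parsed = OzoneException.parse(wire);  // rebuild on the client
      assert parsed.getHttpCode() == 404;
    }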

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneExceptionMapper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneExceptionMapper.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneExceptionMapper.java
deleted file mode 100644
index 6c479f7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneExceptionMapper.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest;
-
-
-import javax.ws.rs.core.Response;
-import javax.ws.rs.ext.ExceptionMapper;
-
-import org.slf4j.MDC;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- *  Class that maps errors returned by the Object Layer
- *  ({@link OzoneException}) to HTTP responses.
- */
-public class OzoneExceptionMapper implements ExceptionMapper<OzoneException> {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OzoneExceptionMapper.class);
-
-  @Override
-  public Response toResponse(OzoneException exception) {
-    LOG.debug("Returning exception. ex: {}", exception.toJsonString());
-    MDC.clear();
-    return Response.status((int)exception.getHttpCode())
-      .entity(exception.toJsonString()).build();
-  }
-
-}
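
A sketch of what the JAX-RS container does with the mapper once an OzoneException escapes a resource method:

    OzoneExceptionMapper mapper = new OzoneExceptionMapper();
    Response rsp = mapper.toResponse(new OzoneException(400, "BadRequest"));
    // The response status mirrors the exception's HTTP code and the body
    // carries its JSON form.
    assert rsp.getStatus() == 400;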

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
deleted file mode 100644
index c420ce5..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
+++ /dev/null
@@ -1,799 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest;
-
-import com.fasterxml.jackson.core.type.TypeReference;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.BucketArgs;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientUtils;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.OzoneQuota;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.ReplicationFactor;
-import org.apache.hadoop.ozone.client.ReplicationType;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
-import org.apache.hadoop.ozone.client.rest.response.KeyInfo;
-import org.apache.hadoop.ozone.client.rest.response.VolumeInfo;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServicePort;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Time;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpHeaders;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.config.RequestConfig;
-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
-import org.apache.http.client.methods.HttpUriRequest;
-import org.apache.http.client.utils.URIBuilder;
-import org.apache.http.entity.InputStreamEntity;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClients;
-import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
-import org.apache.http.util.EntityUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.PipedInputStream;
-import java.io.PipedOutputStream;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.text.ParseException;
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.FutureTask;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import static java.net.HttpURLConnection.HTTP_CREATED;
-import static java.net.HttpURLConnection.HTTP_OK;
-
-/**
- * Ozone client REST protocol implementation. It uses the REST protocol to
- * connect to the Ozone Handler, which executes client calls.
- */
-public class RestClient implements ClientProtocol {
-
-  private static final String PATH_SEPARATOR = "/";
-  private static final Logger LOG = LoggerFactory.getLogger(RestClient.class);
-
-  private final Configuration conf;
-  private final URI ozoneRestUri;
-  private final CloseableHttpClient httpClient;
-  private final UserGroupInformation ugi;
-  private final OzoneAcl.OzoneACLRights userRights;
-
-  /**
-   * Creates a RestClient instance with the given configuration.
-   * @param conf Configuration
-   * @throws IOException
-   */
-  public RestClient(Configuration conf)
-      throws IOException {
-    try {
-      Preconditions.checkNotNull(conf);
-      this.conf = conf;
-
-      long socketTimeout = conf.getTimeDuration(
-          OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
-          OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
-          TimeUnit.MILLISECONDS);
-      long connectionTimeout = conf.getTimeDuration(
-          OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT,
-          OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT,
-          TimeUnit.MILLISECONDS);
-      int maxConnection = conf.getInt(
-          OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_MAX,
-          OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_DEFAULT);
-
-      int maxConnectionPerRoute = conf.getInt(
-          OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX,
-          OzoneConfigKeys
-              .OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX_DEFAULT
-      );
-
-      /*
-      To make RestClient thread-safe, create the HttpClient with a
-      PoolingHttpClientConnectionManager.
-      */
-      PoolingHttpClientConnectionManager connManager =
-          new PoolingHttpClientConnectionManager();
-      connManager.setMaxTotal(maxConnection);
-      connManager.setDefaultMaxPerRoute(maxConnectionPerRoute);
-
-      this.httpClient = HttpClients.custom()
-          .setConnectionManager(connManager)
-          .setDefaultRequestConfig(
-              RequestConfig.custom()
-              .setSocketTimeout(Math.toIntExact(socketTimeout))
-                  .setConnectTimeout(Math.toIntExact(connectionTimeout))
-                  .build())
-          .build();
-      this.ugi = UserGroupInformation.getCurrentUser();
-      this.userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS,
-          KSMConfigKeys.OZONE_KSM_USER_RIGHTS_DEFAULT);
-
-      // TODO: Add new configuration parameter to configure RestServerSelector.
-      RestServerSelector defaultSelector = new DefaultRestServerSelector();
-      InetSocketAddress restServer =
-          getOzoneRestServerAddress(defaultSelector);
-      URIBuilder uriBuilder = new URIBuilder()
-          .setScheme("http")
-          .setHost(restServer.getHostName())
-          .setPort(restServer.getPort());
-      this.ozoneRestUri = uriBuilder.build();
-
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  private InetSocketAddress getOzoneRestServerAddress(
-      RestServerSelector selector) throws IOException {
-    String httpAddress = conf.get(KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY);
-
-    if (httpAddress == null) {
-      throw new IllegalArgumentException(
-          KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY + " must be defined. See" +
-              " https://wiki.apache.org/hadoop/Ozone#Configuration for" +
-              " details on configuring Ozone.");
-    }
-
-    HttpGet httpGet = new HttpGet("http://" + httpAddress + "/serviceList");
-    HttpEntity entity = executeHttpRequest(httpGet);
-    try {
-      String serviceListJson = EntityUtils.toString(entity);
-
-      ObjectMapper objectMapper = new ObjectMapper();
-      TypeReference<List<ServiceInfo>> serviceInfoReference =
-          new TypeReference<List<ServiceInfo>>() {
-          };
-      List<ServiceInfo> services = objectMapper.readValue(
-          serviceListJson, serviceInfoReference);
-
-      List<ServiceInfo> dataNodeInfos = services.stream().filter(
-          a -> a.getNodeType().equals(OzoneProtos.NodeType.DATANODE))
-          .collect(Collectors.toList());
-
-      ServiceInfo restServer = selector.getRestServer(dataNodeInfos);
-
-      return NetUtils.createSocketAddr(restServer.getHostname() + ":" +
-          restServer.getPort(ServicePort.Type.HTTP));
-    } finally {
-      EntityUtils.consume(entity);
-    }
-  }
-
-  @Override
-  public void createVolume(String volumeName) throws IOException {
-    createVolume(volumeName, VolumeArgs.newBuilder().build());
-  }
-
-  @Override
-  public void createVolume(String volumeName, VolumeArgs volArgs)
-      throws IOException {
-    try {
-      Preconditions.checkNotNull(volumeName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      String owner = volArgs.getOwner() == null ?
-          ugi.getUserName() : volArgs.getOwner();
-      //TODO: support for ACLs has to be done in OzoneHandler (rest server)
-      /**
-      List<OzoneAcl> listOfAcls = new ArrayList<>();
-      //User ACL
-      listOfAcls.add(new OzoneAcl(OzoneAcl.OzoneACLType.USER,
-          owner, userRights));
-      //ACLs from VolumeArgs
-      if(volArgs.getAcls() != null) {
-        listOfAcls.addAll(volArgs.getAcls());
-      }
-       */
-      builder.setPath(PATH_SEPARATOR + volumeName);
-
-      String quota = volArgs.getQuota();
-      if(quota != null) {
-        builder.setParameter(Header.OZONE_QUOTA_QUERY_TAG, quota);
-      }
-
-      HttpPost httpPost = new HttpPost(builder.build());
-      addOzoneHeaders(httpPost);
-      //use admin from VolumeArgs, if it's present
-      if(volArgs.getAdmin() != null) {
-        httpPost.removeHeaders(HttpHeaders.AUTHORIZATION);
-        httpPost.addHeader(HttpHeaders.AUTHORIZATION,
-            Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
-                volArgs.getAdmin());
-      }
-      httpPost.addHeader(Header.OZONE_USER, owner);
-      LOG.info("Creating Volume: {}, with {} as owner and quota set to {}.",
-          volumeName, owner, quota == null ? "default" : quota);
-      EntityUtils.consume(executeHttpRequest(httpPost));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-
-  @Override
-  public void setVolumeOwner(String volumeName, String owner)
-      throws IOException {
-    try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(owner);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName);
-      HttpPut httpPut = new HttpPut(builder.build());
-      addOzoneHeaders(httpPut);
-      httpPut.addHeader(Header.OZONE_USER, owner);
-      EntityUtils.consume(executeHttpRequest(httpPut));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void setVolumeQuota(String volumeName, OzoneQuota quota)
-      throws IOException {
-    try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(quota);
-      String quotaString = quota.toString();
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName);
-      builder.setParameter(Header.OZONE_QUOTA_QUERY_TAG, quotaString);
-      HttpPut httpPut = new HttpPut(builder.build());
-      addOzoneHeaders(httpPut);
-      EntityUtils.consume(executeHttpRequest(httpPut));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public OzoneVolume getVolumeDetails(String volumeName)
-      throws IOException {
-    try {
-      Preconditions.checkNotNull(volumeName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName);
-      builder.setParameter(Header.OZONE_INFO_QUERY_TAG,
-          Header.OZONE_INFO_QUERY_VOLUME);
-      HttpGet httpGet = new HttpGet(builder.build());
-      addOzoneHeaders(httpGet);
-      HttpEntity response = executeHttpRequest(httpGet);
-      VolumeInfo volInfo =
-          VolumeInfo.parse(EntityUtils.toString(response));
-      //TODO: OzoneHandler in datanode has to be modified to send ACLs
-      OzoneVolume volume = new OzoneVolume(conf,
-          this,
-          volInfo.getVolumeName(),
-          volInfo.getCreatedBy(),
-          volInfo.getOwner().getName(),
-          volInfo.getQuota().sizeInBytes(),
-          OzoneClientUtils.formatDateTime(volInfo.getCreatedOn()),
-          null);
-      EntityUtils.consume(response);
-      return volume;
-    } catch (URISyntaxException | ParseException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public boolean checkVolumeAccess(String volumeName, OzoneAcl acl)
-      throws IOException {
-    throw new UnsupportedOperationException("Not yet implemented.");
-  }
-
-  @Override
-  public void deleteVolume(String volumeName) throws IOException {
-    try {
-      Preconditions.checkNotNull(volumeName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName);
-      HttpDelete httpDelete = new HttpDelete(builder.build());
-      addOzoneHeaders(httpDelete);
-      EntityUtils.consume(executeHttpRequest(httpDelete));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public List<OzoneVolume> listVolumes(String volumePrefix, String prevKey,
-                                       int maxListResult)
-      throws IOException {
-    throw new UnsupportedOperationException("Not yet implemented.");
-  }
-
-  @Override
-  public List<OzoneVolume> listVolumes(String user, String volumePrefix,
-                                       String prevKey, int maxListResult)
-      throws IOException {
-    throw new UnsupportedOperationException("Not yet implemented.");
-  }
-
-  @Override
-  public void createBucket(String volumeName, String bucketName)
-      throws IOException {
-    createBucket(volumeName, bucketName, BucketArgs.newBuilder().build());
-  }
-
-  @Override
-  public void createBucket(
-      String volumeName, String bucketName, BucketArgs bucketArgs)
-      throws IOException {
-    try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      Preconditions.checkNotNull(bucketArgs);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      OzoneConsts.Versioning versioning = OzoneConsts.Versioning.DISABLED;
-      if(bucketArgs.getVersioning() != null &&
-          bucketArgs.getVersioning()) {
-        versioning = OzoneConsts.Versioning.ENABLED;
-      }
-      StorageType storageType = bucketArgs.getStorageType() == null ?
-          StorageType.DEFAULT : bucketArgs.getStorageType();
-
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName);
-      HttpPost httpPost = new HttpPost(builder.build());
-      addOzoneHeaders(httpPost);
-
-      //ACLs from BucketArgs
-      if(bucketArgs.getAcls() != null) {
-        for (OzoneAcl acl : bucketArgs.getAcls()) {
-          httpPost.addHeader(
-              Header.OZONE_ACLS, Header.OZONE_ACL_ADD + " " + acl.toString());
-        }
-      }
-      httpPost.addHeader(Header.OZONE_STORAGE_TYPE, storageType.toString());
-      httpPost.addHeader(Header.OZONE_BUCKET_VERSIONING,
-          versioning.toString());
-      LOG.info("Creating Bucket: {}/{}, with Versioning {} and Storage Type" +
-              " set to {}", volumeName, bucketName, versioning,
-          storageType);
-
-      EntityUtils.consume(executeHttpRequest(httpPost));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void addBucketAcls(
-      String volumeName, String bucketName, List<OzoneAcl> addAcls)
-      throws IOException {
-    try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      Preconditions.checkNotNull(addAcls);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName);
-      HttpPut httpPut = new HttpPut(builder.build());
-      addOzoneHeaders(httpPut);
-
-      for (OzoneAcl acl : addAcls) {
-        httpPut.addHeader(
-            Header.OZONE_ACLS, Header.OZONE_ACL_ADD + " " + acl.toString());
-      }
-      EntityUtils.consume(executeHttpRequest(httpPut));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void removeBucketAcls(
-      String volumeName, String bucketName, List<OzoneAcl> removeAcls)
-      throws IOException {
-    try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      Preconditions.checkNotNull(removeAcls);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName);
-      HttpPut httpPut = new HttpPut(builder.build());
-      addOzoneHeaders(httpPut);
-
-      for (OzoneAcl acl : removeAcls) {
-        httpPut.addHeader(
-            Header.OZONE_ACLS, Header.OZONE_ACL_REMOVE + " " + acl.toString());
-      }
-      EntityUtils.consume(executeHttpRequest(httpPut));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void setBucketVersioning(
-      String volumeName, String bucketName, Boolean versioning)
-      throws IOException {
-    try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      Preconditions.checkNotNull(versioning);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName);
-      HttpPut httpPut = new HttpPut(builder.build());
-      addOzoneHeaders(httpPut);
-
-      httpPut.addHeader(Header.OZONE_BUCKET_VERSIONING,
-          getBucketVersioning(versioning).toString());
-      EntityUtils.consume(executeHttpRequest(httpPut));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void setBucketStorageType(
-      String volumeName, String bucketName, StorageType storageType)
-      throws IOException {
-    try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      Preconditions.checkNotNull(storageType);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName);
-      HttpPut httpPut = new HttpPut(builder.build());
-      addOzoneHeaders(httpPut);
-
-      httpPut.addHeader(Header.OZONE_STORAGE_TYPE, storageType.toString());
-      EntityUtils.consume(executeHttpRequest(httpPut));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void deleteBucket(String volumeName, String bucketName)
-      throws IOException {
-    try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName);
-      HttpDelete httpDelete = new HttpDelete(builder.build());
-      addOzoneHeaders(httpDelete);
-      EntityUtils.consume(executeHttpRequest(httpDelete));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void checkBucketAccess(String volumeName, String bucketName)
-      throws IOException {
-    throw new UnsupportedOperationException("Not yet implemented.");
-  }
-
-  @Override
-  public OzoneBucket getBucketDetails(String volumeName, String bucketName)
-      throws IOException {
-    try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName);
-      builder.setParameter(Header.OZONE_INFO_QUERY_TAG,
-          Header.OZONE_INFO_QUERY_BUCKET);
-      HttpGet httpGet = new HttpGet(builder.build());
-      addOzoneHeaders(httpGet);
-      HttpEntity response = executeHttpRequest(httpGet);
-      BucketInfo bucketInfo =
-          BucketInfo.parse(EntityUtils.toString(response));
-      OzoneBucket bucket = new OzoneBucket(conf,
-          this,
-          bucketInfo.getVolumeName(),
-          bucketInfo.getBucketName(),
-          bucketInfo.getAcls(),
-          bucketInfo.getStorageType(),
-          getBucketVersioningFlag(bucketInfo.getVersioning()),
-          OzoneClientUtils.formatDateTime(bucketInfo.getCreatedOn()));
-      EntityUtils.consume(response);
-      return bucket;
-    } catch (URISyntaxException | ParseException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public List<OzoneBucket> listBuckets(String volumeName, String bucketPrefix,
-                                       String prevBucket, int maxListResult)
-      throws IOException {
-    throw new UnsupportedOperationException("Not yet implemented.");
-  }
-
-  /**
-   * Writes a key in an existing bucket.
-   *
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param keyName Name of the Key
-   * @param size Size of the data
-   * @param type Replication Type
-   * @param factor Replication Factor
-   * @return {@link OzoneOutputStream}
-   */
-  @Override
-  public OzoneOutputStream createKey(
-      String volumeName, String bucketName, String keyName, long size,
-      ReplicationType type, ReplicationFactor factor)
-      throws IOException {
-    // TODO: Once ReplicationType and ReplicationFactor are supported in
-    // OzoneHandler (in Datanode), set them in header.
-    try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      Preconditions.checkNotNull(keyName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName +
-          PATH_SEPARATOR + keyName);
-      HttpPut putRequest = new HttpPut(builder.build());
-      addOzoneHeaders(putRequest);
-      PipedInputStream in = new PipedInputStream();
-      OutputStream out = new PipedOutputStream(in);
-      putRequest.setEntity(new InputStreamEntity(in, size));
-      FutureTask<HttpEntity> futureTask =
-          new FutureTask<>(() -> executeHttpRequest(putRequest));
-      new Thread(futureTask).start();
-      OzoneOutputStream outputStream = new OzoneOutputStream(
-          new OutputStream() {
-            @Override
-            public void write(int b) throws IOException {
-              out.write(b);
-            }
-
-            @Override
-            public void close() throws IOException {
-              try {
-                out.close();
-                EntityUtils.consume(futureTask.get());
-              } catch (ExecutionException | InterruptedException e) {
-                throw new IOException(e);
-              }
-            }
-          });
-
-      return outputStream;
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public OzoneInputStream getKey(
-      String volumeName, String bucketName, String keyName)
-      throws IOException {
-    try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      Preconditions.checkNotNull(keyName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName +
-          PATH_SEPARATOR + keyName);
-      HttpGet getRequest = new HttpGet(builder.build());
-      addOzoneHeaders(getRequest);
-      HttpEntity entity = executeHttpRequest(getRequest);
-      PipedInputStream in = new PipedInputStream();
-      OutputStream out = new PipedOutputStream(in);
-      FutureTask<Void> futureTask =
-          new FutureTask<>(() -> {
-            entity.writeTo(out);
-            out.close();
-            return null;
-          });
-      new Thread(futureTask).start();
-      OzoneInputStream inputStream = new OzoneInputStream(
-          new InputStream() {
-
-            @Override
-            public int read() throws IOException {
-              return in.read();
-            }
-
-            @Override
-            public void close() throws IOException {
-              in.close();
-              EntityUtils.consume(entity);
-            }
-          });
-
-      return inputStream;
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void deleteKey(String volumeName, String bucketName, String keyName)
-      throws IOException {
-    try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      Preconditions.checkNotNull(keyName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName + PATH_SEPARATOR + keyName);
-      HttpDelete httpDelete = new HttpDelete(builder.build());
-      addOzoneHeaders(httpDelete);
-      EntityUtils.consume(executeHttpRequest(httpDelete));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public List<OzoneKey> listKeys(String volumeName, String bucketName,
-                                 String keyPrefix, String prevKey,
-                                 int maxListResult)
-      throws IOException {
-    throw new UnsupportedOperationException("Not yet implemented.");
-  }
-
-  @Override
-  public OzoneKey getKeyDetails(
-      String volumeName, String bucketName, String keyName)
-      throws IOException {
-    try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      Preconditions.checkNotNull(keyName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName + PATH_SEPARATOR + keyName);
-      builder.setParameter(Header.OZONE_INFO_QUERY_TAG,
-          Header.OZONE_INFO_QUERY_KEY);
-      HttpGet httpGet = new HttpGet(builder.build());
-      addOzoneHeaders(httpGet);
-      HttpEntity response = executeHttpRequest(httpGet);
-      KeyInfo keyInfo =
-          KeyInfo.parse(EntityUtils.toString(response));
-      OzoneKey key = new OzoneKey(volumeName,
-          bucketName,
-          keyInfo.getKeyName(),
-          keyInfo.getSize(),
-          OzoneClientUtils.formatDateTime(keyInfo.getCreatedOn()),
-          OzoneClientUtils.formatDateTime(keyInfo.getModifiedOn()));
-      EntityUtils.consume(response);
-      return key;
-    } catch (URISyntaxException | ParseException e) {
-      throw new IOException(e);
-    }
-  }
-
-  /**
-   * Adds Ozone headers to http request.
-   *
-   * @param httpRequest Http Request
-   */
-  private void addOzoneHeaders(HttpUriRequest httpRequest) {
-    httpRequest.addHeader(HttpHeaders.AUTHORIZATION,
-        Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
-            ugi.getUserName());
-    httpRequest.addHeader(HttpHeaders.DATE,
-        OzoneClientUtils.formatDateTime(Time.monotonicNow()));
-    httpRequest.addHeader(Header.OZONE_VERSION_HEADER,
-        Header.OZONE_V1_VERSION_HEADER);
-  }
-
-  /**
-   * Sends the HTTP request to the server and returns the response HttpEntity.
-   * It is the responsibility of the caller to consume and close the response
-   * HttpEntity by calling {@code EntityUtils.consume}.
-   *
-   * @param httpUriRequest http request
-   * @throws IOException
-   */
-  private HttpEntity executeHttpRequest(HttpUriRequest httpUriRequest)
-      throws IOException {
-    HttpResponse response = httpClient.execute(httpUriRequest);
-    int errorCode = response.getStatusLine().getStatusCode();
-    HttpEntity entity = response.getEntity();
-    if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) {
-      return entity;
-    }
-    if (entity != null) {
-      throw new IOException(
-          OzoneException.parse(EntityUtils.toString(entity)));
-    } else {
-      throw new IOException("Unexpected null in http payload," +
-          " while processing request");
-    }
-  }
-
-  /**
-   * Converts OzoneConsts.Versioning to a boolean.
-   *
-   * @param version versioning enum value
-   * @return corresponding boolean value
-   */
-  private Boolean getBucketVersioningFlag(
-      OzoneConsts.Versioning version) {
-    if(version != null) {
-      switch(version) {
-      case ENABLED:
-        return true;
-      case NOT_DEFINED:
-      case DISABLED:
-      default:
-        return false;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Converts the bucket versioning flag into OzoneConsts.Versioning.
-   *
-   * @param flag versioning flag
-   * @return corresponding OzoneConsts.Versioning
-   */
-  private OzoneConsts.Versioning getBucketVersioning(Boolean flag) {
-    if(flag != null) {
-      if(flag) {
-        return OzoneConsts.Versioning.ENABLED;
-      } else {
-        return OzoneConsts.Versioning.DISABLED;
-      }
-    }
-    return OzoneConsts.Versioning.NOT_DEFINED;
-  }
-
-  @Override
-  public void close() throws IOException {
-    httpClient.close();
-  }
-}

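Taken as a whole, the deleted RestClient maps each ClientProtocol call onto an
HTTP verb against a DataNode REST endpoint: POST creates volumes and buckets,
PUT mutates them and streams key data, GET reads metadata and key contents,
DELETE removes them. A hedged usage sketch assembled only from the signatures
visible in this diff (the replication enum constants are assumptions):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ozone.client.ReplicationFactor;
    import org.apache.hadoop.ozone.client.ReplicationType;
    import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
    import org.apache.hadoop.ozone.client.rest.RestClient;

    public class RestClientSketch {
      public static void main(String[] args) throws Exception {
        byte[] data = "hello ozone".getBytes(StandardCharsets.UTF_8);
        RestClient client = new RestClient(new Configuration());
        try {
          client.createVolume("vol1");
          client.createBucket("vol1", "bucket1");
          // createKey returns an OzoneOutputStream whose close() completes
          // the underlying HTTP PUT.
          try (OzoneOutputStream out = client.createKey("vol1", "bucket1",
              "key1", data.length, ReplicationType.STAND_ALONE,
              ReplicationFactor.ONE)) {
            out.write(data);
          }
        } finally {
          client.close();
        }
      }
    }
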
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java
deleted file mode 100644
index 54e219b..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest;
-
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-
-import java.util.List;
-
-/**
- * The implementor of this interface should select the REST server which will
- * be used by the client to connect to the Ozone cluster, given a list of
- * REST servers/DataNodes (DataNodes are the ones that host the REST service).
- */
-public interface RestServerSelector {
-
-  /**
-   * Returns the REST Service which will be used by the client for connection.
-   *
-   * @param restServices list of available REST servers
-   * @return ServiceInfo
-   */
-  ServiceInfo getRestServer(List<ServiceInfo> restServices);
-
-}

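RestServerSelector decouples endpoint choice from the client: RestClient's
constructor filters the KSM service list down to DataNodes and hands the
result to the selector. A sketch of an alternative, randomized selector (an
illustrative class, not part of the original patch; the shipped
DefaultRestServerSelector referenced above may behave differently):

    import java.util.List;
    import java.util.Random;
    import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;

    /** Picks a REST endpoint uniformly at random; illustrative only. */
    public class RandomRestServerSelector implements RestServerSelector {
      private final Random random = new Random();

      @Override
      public ServiceInfo getRestServer(List<ServiceInfo> restServices) {
        // Assumes a non-empty list, as produced by RestClient's
        // DATANODE filtering in getOzoneRestServerAddress.
        return restServices.get(random.nextInt(restServices.size()));
      }
    }
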
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/exceptions/package-info.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/exceptions/package-info.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/exceptions/package-info.java
deleted file mode 100644
index 233e788..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/exceptions/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.client.rest.exceptions;
-
-/**
- * This package contains Ozone REST client exception classes.
- */

