http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
new file mode 100644
index 0000000..3156eb2
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone.contract;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+import java.io.IOException;
+
+/**
+ * Ozone contract test for ROOT directory operations.
+ */
+public class ITestOzoneContractRootDir extends
+    AbstractContractRootDirectoryTest {
+
+  @BeforeClass
+  public static void createCluster() throws IOException {
+    OzoneContract.createCluster();
+  }
+
+  @AfterClass
+  public static void teardownCluster() throws IOException {
+    OzoneContract.destroyCluster();
+  }
+
+  @Override
+  protected AbstractFSContract createContract(Configuration conf) {
+    return new OzoneContract(conf);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
new file mode 100644
index 0000000..c4bc0ff
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+
+/**
+ * Ozone contract tests covering file seek.
+ */
+public class ITestOzoneContractSeek extends AbstractContractSeekTest {
+  @BeforeClass
+  public static void createCluster() throws IOException {
+    OzoneContract.createCluster();
+  }
+
+  @AfterClass
+  public static void teardownCluster() throws IOException {
+    OzoneContract.destroyCluster();
+  }
+
+  @Override
+  protected AbstractFSContract createContract(Configuration conf) {
+    return new OzoneContract(conf);
+  }
+}

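A side note on the pattern: each contract test above is a thin binding of an existing hadoop-common contract suite to the Ozone contract, with a shared MiniOzoneCluster managed in @BeforeClass/@AfterClass. Purely as an illustration (not part of this patch), the same three-method skeleton would bind any other suite, for example a hypothetical open() contract test:

  package org.apache.hadoop.fs.ozone.contract;

  import java.io.IOException;

  import org.junit.AfterClass;
  import org.junit.BeforeClass;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
  import org.apache.hadoop.fs.contract.AbstractFSContract;

  /**
   * Hypothetical Ozone contract test for open(); mirrors the classes above.
   */
  public class ITestOzoneContractOpen extends AbstractContractOpenTest {
    @BeforeClass
    public static void createCluster() throws IOException {
      OzoneContract.createCluster();   // shared mini cluster, same as above
    }

    @AfterClass
    public static void teardownCluster() throws IOException {
      OzoneContract.destroyCluster();
    }

    @Override
    protected AbstractFSContract createContract(Configuration conf) {
      return new OzoneContract(conf);
    }
  }
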
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
new file mode 100644
index 0000000..8417e46
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone.contract;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.client.rest.OzoneException;
+import org.apache.hadoop.ozone.web.handlers.BucketArgs;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.junit.Assert;
+
+import java.io.IOException;
+
+/**
+ * The contract of Ozone: only enabled if the test bucket is provided.
+ */
+class OzoneContract extends AbstractFSContract {
+
+  private static MiniOzoneCluster cluster;
+  private static StorageHandler storageHandler;
+  private static final String CONTRACT_XML = "contract/ozone.xml";
+
+  OzoneContract(Configuration conf) {
+    super(conf);
+    //insert the base features
+    addConfResource(CONTRACT_XML);
+  }
+
+  @Override
+  public String getScheme() {
+    return OzoneConsts.OZONE_URI_SCHEME;
+  }
+
+  @Override
+  public Path getTestPath() {
+    Path path = new Path("/test");
+    return path;
+  }
+
+  public static void createCluster() throws IOException {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.addResource(CONTRACT_XML);
+
+    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
+    try {
+      cluster.waitForClusterToBeReady();
+    } catch (Exception e) {
+      throw new IOException(e);
+    }
+    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
+  }
+
+  private void copyClusterConfigs(String configKey) {
+    getConf().set(configKey, cluster.getConf().get(configKey));
+  }
+
+  @Override
+  public FileSystem getTestFileSystem() throws IOException {
+    //assumes cluster is not null
+    Assert.assertNotNull("cluster not created", cluster);
+
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+
+
+    UserArgs userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
+        null, null, null, null);
+    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
+    volumeArgs.setUserName(userName);
+    volumeArgs.setAdminName(adminName);
+    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
+    try {
+      storageHandler.createVolume(volumeArgs);
+      storageHandler.createBucket(bucketArgs);
+    } catch (OzoneException e) {
+      throw new IOException(e.getMessage());
+    }
+
+    String uri = String.format("%s://%s.%s/",
+        OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
+    getConf().set("fs.defaultFS", uri);
+    copyClusterConfigs(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY);
+    copyClusterConfigs(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
+    return FileSystem.get(getConf());
+  }
+
+  public static void destroyCluster() throws IOException {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+}

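For context on how this contract is consumed: getTestFileSystem() above points fs.defaultFS at the freshly created o3://bucket.volume/ URI and copies the KSM and SCM client addresses from the mini cluster, so the object it returns behaves like any other Hadoop FileSystem. A minimal usage sketch, illustrative only and assuming a cluster already created via OzoneContract.createCluster():

  package org.apache.hadoop.fs.ozone.contract;

  import java.io.IOException;

  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  /** Illustrative only; not part of this patch. */
  final class OzoneContractSmokeTest {
    private OzoneContractSmokeTest() { }

    static void smokeTest(OzoneContract contract) throws IOException {
      FileSystem fs = contract.getTestFileSystem();        // bound to o3://bucket.volume/
      Path dir = new Path(contract.getTestPath(), "demo"); // under the /test root
      fs.mkdirs(dir);
      try (FSDataOutputStream out = fs.create(new Path(dir, "hello.txt"))) {
        out.writeBytes("hello ozone");
      }
      System.out.println(fs.getFileStatus(new Path(dir, "hello.txt")).getLen());
    }
  }
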
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/test/resources/contract/ozone.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/resources/contract/ozone.xml b/hadoop-ozone/ozonefs/src/test/resources/contract/ozone.xml
new file mode 100644
index 0000000..fe2075c
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/test/resources/contract/ozone.xml
@@ -0,0 +1,113 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~  or more contributor license agreements.  See the NOTICE file
+  ~  distributed with this work for additional information
+  ~  regarding copyright ownership.  The ASF licenses this file
+  ~  to you under the Apache License, Version 2.0 (the
+  ~  "License"); you may not use this file except in compliance
+  ~  with the License.  You may obtain a copy of the License at
+  ~
+  ~       http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~  Unless required by applicable law or agreed to in writing, software
+  ~  distributed under the License is distributed on an "AS IS" BASIS,
+  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~  See the License for the specific language governing permissions and
+  ~  limitations under the License.
+  -->
+
+<configuration>
+  <!--
+  Ozone is a blobstore, with very different behavior than a classic filesystem.
+  -->
+
+    <property>
+        <name>fs.contract.test.root-tests-enabled</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>fs.contract.test.random-seek-count</name>
+        <value>10</value>
+    </property>
+
+    <property>
+        <name>fs.contract.is-blobstore</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>fs.contract.create-visibility-delayed</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>fs.contract.is-case-sensitive</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>fs.contract.rename-returns-false-if-source-missing</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>fs.contract.rename-remove-dest-if-empty-dir</name>
+        <value>false</value>
+    </property>
+
+    <property>
+        <name>fs.contract.supports-append</name>
+        <value>false</value>
+    </property>
+
+    <property>
+        <name>fs.contract.supports-atomic-directory-delete</name>
+        <value>false</value>
+    </property>
+
+    <property>
+        <name>fs.contract.supports-atomic-rename</name>
+        <value>false</value>
+    </property>
+
+    <property>
+        <name>fs.contract.supports-block-locality</name>
+        <value>false</value>
+    </property>
+
+    <property>
+        <name>fs.contract.supports-concat</name>
+        <value>false</value>
+    </property>
+
+    <property>
+        <name>fs.contract.supports-getfilestatus</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>fs.contract.supports-seek</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>fs.contract.supports-seek-on-closed-file</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>fs.contract.rejects-seek-past-eof</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>fs.contract.supports-strict-exceptions</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>fs.contract.supports-unix-permissions</name>
+        <value>false</value>
+    </property>
+</configuration>

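These fs.contract.* keys are how the shared contract test base decides which cases to run or skip against Ozone: a feature name such as supports-append is looked up on the bound contract under the fs.contract. prefix. A rough sketch of that lookup, assuming the isSupported()/ContractOptions API from hadoop-common's contract framework:

  import org.apache.hadoop.fs.contract.AbstractFSContract;
  import org.apache.hadoop.fs.contract.ContractOptions;

  /** Sketch of a feature probe against a contract such as OzoneContract. */
  final class ContractFeatureProbe {
    private ContractFeatureProbe() { }

    static boolean supportsAppend(AbstractFSContract contract) {
      // Resolves fs.contract.supports-append from the contract's configuration;
      // for the Ozone contract above this evaluates to false.
      return contract.isSupported(ContractOptions.SUPPORTS_APPEND, false);
    }
  }
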
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/ozonefs/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/resources/log4j.properties b/hadoop-ozone/ozonefs/src/test/resources/log4j.properties
new file mode 100644
index 0000000..3bf1619
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/test/resources/log4j.properties
@@ -0,0 +1,23 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=INFO,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
+
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+
+# for debugging low level Ozone operations, uncomment this line
+# log4j.logger.org.apache.hadoop.ozone=DEBUG

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-ozone/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index cffef14..b655088 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -34,6 +34,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <module>client</module>
     <module>ozone-manager</module>
     <module>tools</module>
+    <module>ozonefs</module>
     <module>integration-test</module>
     <module>objectstore-service</module>
     <module>docs</module>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index ed0187b..dfd1eac 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -586,7 +586,11 @@
         <artifactId>hadoop-ozone-docs</artifactId>
         <version>${hdds.version}</version>
       </dependency>
-
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-ozone-filesystem</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-hdds-common</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-ozone/pom.xml b/hadoop-tools/hadoop-ozone/pom.xml
deleted file mode 100644
index a7d0cfa..0000000
--- a/hadoop-tools/hadoop-ozone/pom.xml
+++ /dev/null
@@ -1,174 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-project</artifactId>
-    <version>3.2.0-SNAPSHOT</version>
-    <relativePath>../../hadoop-project</relativePath>
-  </parent>
-  <artifactId>hadoop-ozone-filesystem</artifactId>
-  <name>Apache Hadoop Ozone FileSystem</name>
-  <packaging>jar</packaging>
-
-  <properties>
-    <file.encoding>UTF-8</file.encoding>
-    <downloadSources>true</downloadSources>
-  </properties>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>deplist</id>
-            <phase>compile</phase>
-            <goals>
-              <goal>list</goal>
-            </goals>
-            <configuration>
-              <!-- build a shellprofile -->
-              <outputFile>${project.basedir}/target/hadoop-tools-deps/${project.artifactId}.tools-optional.txt</outputFile>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs-client</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-common</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-scm</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-framework</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-ozone-manager</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-container-service</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-client</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-common</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-objectstore-service</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-client</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-integration-test</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-all</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-distcp</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-distcp</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/Constants.java b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
deleted file mode 100644
index 832a0cb..0000000
--- a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/Constants.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-/**
- * Constants for Ozone FileSystem implementation.
- */
-public final class Constants {
-
-  public static final String OZONE_DEFAULT_USER = "hdfs";
-
-  public static final String OZONE_USER_DIR = "/user";
-
-  /** Local buffer directory. */
-  public static final String BUFFER_DIR_KEY = "fs.ozone.buffer.dir";
-
-  /** Temporary directory. */
-  public static final String BUFFER_TMP_KEY = "hadoop.tmp.dir";
-
-  /** Page size for Ozone listing operation. */
-  public static final int LISTING_PAGE_SIZE = 1024;
-
-  private Constants() {
-
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
deleted file mode 100644
index 4163c13..0000000
--- a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.DelegateToFileSystem;
-import org.apache.hadoop.ozone.OzoneConsts;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-
-/**
- * ozone implementation of AbstractFileSystem.
- * This impl delegates to the OzoneFileSystem
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class OzFs extends DelegateToFileSystem {
-
-  public OzFs(URI theUri, Configuration conf)
-      throws IOException, URISyntaxException {
-    super(theUri, new OzoneFileSystem(), conf,
-        OzoneConsts.OZONE_URI_SCHEME, false);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
deleted file mode 100644
index 4c5c0c8..0000000
--- a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FSInputStream;
-import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * The input stream for Ozone file system.
- *
- * TODO: Make inputStream generic for both rest and rpc clients
- * This class is not thread safe.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public final class OzoneFSInputStream extends FSInputStream {
-
-  private final ChunkGroupInputStream inputStream;
-
-  public OzoneFSInputStream(InputStream inputStream) {
-    this.inputStream = (ChunkGroupInputStream)inputStream;
-  }
-
-  @Override
-  public int read() throws IOException {
-    return inputStream.read();
-  }
-
-  @Override
-  public int read(byte[] b, int off, int len) throws IOException {
-    return inputStream.read(b, off, len);
-  }
-
-  @Override
-  public synchronized void close() throws IOException {
-    inputStream.close();
-  }
-
-  @Override
-  public void seek(long pos) throws IOException {
-    inputStream.seek(pos);
-  }
-
-  @Override
-  public long getPos() throws IOException {
-    return inputStream.getPos();
-  }
-
-  @Override
-  public boolean seekToNewSource(long targetPos) throws IOException {
-    return false;
-  }
-
-  @Override
-  public int available() throws IOException {
-    return inputStream.available();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java
deleted file mode 100644
index faa3628..0000000
--- a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
-
-
-/**
- * The output stream for Ozone file system.
- *
- * TODO: Make outputStream generic for both rest and rpc clients
- * This class is not thread safe.
- */
-public class OzoneFSOutputStream extends OutputStream {
-
-  private final ChunkGroupOutputStream outputStream;
-
-  public OzoneFSOutputStream(OutputStream outputStream) {
-    this.outputStream = (ChunkGroupOutputStream)outputStream;
-  }
-
-  @Override
-  public void write(int b) throws IOException {
-    outputStream.write(b);
-  }
-
-  @Override
-  public void write(byte[] b, int off, int len) throws IOException {
-    outputStream.write(b, off, len);
-  }
-
-  @Override
-  public synchronized void flush() throws IOException {
-    outputStream.flush();
-  }
-
-  @Override
-  public synchronized void close() throws IOException {
-    outputStream.close();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
deleted file mode 100644
index 6906a9d..0000000
--- a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
+++ /dev/null
@@ -1,689 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Objects;
-import java.util.Iterator;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import com.google.common.base.Preconditions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.http.client.utils.URIBuilder;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-
-import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
-import static org.apache.hadoop.fs.ozone.Constants.OZONE_USER_DIR;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
-import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE;
-
-/**
- * The Ozone Filesystem implementation.
- *
- * This subclass is marked as private as code should not be creating it
- * directly; use {@link FileSystem#get(Configuration)} and variants to create
- * one. If cast to {@link OzoneFileSystem}, extra methods and features may be
- * accessed. Consider those private and unstable.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class OzoneFileSystem extends FileSystem {
-  static final Logger LOG = LoggerFactory.getLogger(OzoneFileSystem.class);
-
-  /** The Ozone client for connecting to Ozone server. */
-  private OzoneClient ozoneClient;
-  private ObjectStore objectStore;
-  private OzoneVolume volume;
-  private OzoneBucket bucket;
-  private URI uri;
-  private String userName;
-  private Path workingDir;
-  private ReplicationType replicationType;
-  private ReplicationFactor replicationFactor;
-
-  private static final Pattern URL_SCHEMA_PATTERN =
-      Pattern.compile("(.+)\\.([^\\.]+)");
-
-  @Override
-  public void initialize(URI name, Configuration conf) throws IOException {
-    super.initialize(name, conf);
-    setConf(conf);
-    Objects.requireNonNull(name.getScheme(), "No scheme provided in " + name);
-    assert getScheme().equals(name.getScheme());
-
-    String authority = name.getAuthority();
-
-    Matcher matcher = URL_SCHEMA_PATTERN.matcher(authority);
-
-    if (!matcher.matches()) {
-      throw new IllegalArgumentException("Ozone file system url should be "
-          + "in the form o3://bucket.volume");
-    }
-    String bucketStr = matcher.group(1);
-    String volumeStr = matcher.group(2);
-
-    try {
-      uri = new URIBuilder().setScheme(OZONE_URI_SCHEME)
-          .setHost(authority).build();
-      LOG.trace("Ozone URI for ozfs initialization is " + uri);
-      this.ozoneClient = OzoneClientFactory.getRpcClient(conf);
-      objectStore = ozoneClient.getObjectStore();
-      this.volume = objectStore.getVolume(volumeStr);
-      this.bucket = volume.getBucket(bucketStr);
-      this.replicationType = ReplicationType.valueOf(
-          conf.get(OzoneConfigKeys.OZONE_REPLICATION_TYPE,
-              OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT));
-      this.replicationFactor = ReplicationFactor.valueOf(
-          conf.getInt(OzoneConfigKeys.OZONE_REPLICATION,
-              OzoneConfigKeys.OZONE_REPLICATION_DEFAULT));
-      try {
-        this.userName =
-            UserGroupInformation.getCurrentUser().getShortUserName();
-      } catch (IOException e) {
-        this.userName = OZONE_DEFAULT_USER;
-      }
-      this.workingDir = new Path(OZONE_USER_DIR, this.userName)
-              .makeQualified(this.uri, this.workingDir);
-    } catch (URISyntaxException ue) {
-      final String msg = "Invalid Ozone endpoint " + name;
-      LOG.error(msg, ue);
-      throw new IOException(msg, ue);
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    try {
-      ozoneClient.close();
-    } finally {
-      super.close();
-    }
-  }
-
-  @Override
-  public URI getUri() {
-    return uri;
-  }
-
-  @Override
-  public String getScheme() {
-    return OZONE_URI_SCHEME;
-  }
-
-  @Override
-  public FSDataInputStream open(Path f, int bufferSize) throws IOException {
-    LOG.trace("open() path:{}", f);
-    final FileStatus fileStatus = getFileStatus(f);
-    final String key = pathToKey(f);
-    if (fileStatus.isDirectory()) {
-      throw new FileNotFoundException("Can't open directory " + f + " to read");
-    }
-
-    return new FSDataInputStream(
-        new OzoneFSInputStream(bucket.readKey(key).getInputStream()));
-  }
-
-  @Override
-  public FSDataOutputStream create(Path f, FsPermission permission,
-                                   boolean overwrite, int bufferSize,
-                                   short replication, long blockSize,
-                                   Progressable progress) throws IOException {
-    LOG.trace("create() path:{}", f);
-    final String key = pathToKey(f);
-    final FileStatus status;
-    try {
-      status = getFileStatus(f);
-      if (status.isDirectory()) {
-        throw new FileAlreadyExistsException(f + " is a directory");
-      } else {
-        if (!overwrite) {
-          // path references a file and overwrite is disabled
-          throw new FileAlreadyExistsException(f + " already exists");
-        }
-        LOG.trace("Overwriting file {}", f);
-        deleteObject(key);
-      }
-    } catch (FileNotFoundException ignored) {
-      // check if the parent directory needs to be created
-      Path parent = f.getParent();
-      try {
-        // create all the directories for the parent
-        FileStatus parentStatus = getFileStatus(parent);
-        LOG.trace("parent key:{} status:{}", key, parentStatus);
-      } catch (FileNotFoundException e) {
-        mkdirs(parent);
-      }
-      // This exception needs to ignored as this means that the file currently
-      // does not exists and a new file can thus be created.
-    }
-
-    OzoneOutputStream ozoneOutputStream =
-        bucket.createKey(key, 0, replicationType, replicationFactor);
-    // We pass null to FSDataOutputStream so it won't count writes that
-    // are being buffered to a file
-    return new FSDataOutputStream(
-        new OzoneFSOutputStream(ozoneOutputStream.getOutputStream()), null);
-  }
-
-  @Override
-  public FSDataOutputStream createNonRecursive(Path path,
-      FsPermission permission,
-      EnumSet<CreateFlag> flags,
-      int bufferSize,
-      short replication,
-      long blockSize,
-      Progressable progress) throws IOException {
-    final Path parent = path.getParent();
-    if (parent != null) {
-      // expect this to raise an exception if there is no parent
-      if (!getFileStatus(parent).isDirectory()) {
-        throw new FileAlreadyExistsException("Not a directory: " + parent);
-      }
-    }
-    return create(path, permission, flags.contains(CreateFlag.OVERWRITE),
-        bufferSize, replication, blockSize, progress);
-  }
-
-  @Override
-  public FSDataOutputStream append(Path f, int bufferSize,
-      Progressable progress) throws IOException {
-    throw new UnsupportedOperationException("append() Not implemented by the "
-        + getClass().getSimpleName() + " FileSystem implementation");
-  }
-
-  private class RenameIterator extends OzoneListingIterator {
-    private final String srcKey;
-    private final String dstKey;
-
-    RenameIterator(Path srcPath, Path dstPath)
-        throws IOException {
-      super(srcPath);
-      srcKey = pathToKey(srcPath);
-      dstKey = pathToKey(dstPath);
-      LOG.trace("rename from:{} to:{}", srcKey, dstKey);
-    }
-
-    boolean processKey(String key) throws IOException {
-      String newKeyName = dstKey.concat(key.substring(srcKey.length()));
-      bucket.renameKey(key, newKeyName);
-      return true;
-    }
-  }
-
-  /**
-   * Check whether the source and destination path are valid and then perform
-   * rename from source path to destination path.
-   *
-   * The rename operation is performed by renaming the keys with src as prefix.
-   * For such keys the prefix is changed from src to dst.
-   *
-   * @param src source path for rename
-   * @param dst destination path for rename
-   * @return true if rename operation succeeded or
-   * if the src and dst have the same path and are of the same type
-   * @throws IOException on I/O errors or if the src/dst paths are invalid.
-   */
-  @Override
-  public boolean rename(Path src, Path dst) throws IOException {
-    if (src.equals(dst)) {
-      return true;
-    }
-
-    LOG.trace("rename() from:{} to:{}", src, dst);
-    if (src.isRoot()) {
-      // Cannot rename root of file system
-      LOG.trace("Cannot rename the root of a filesystem");
-      return false;
-    }
-
-    // Cannot rename a directory to its own subdirectory
-    Path dstParent = dst.getParent();
-    while (dstParent != null && !src.equals(dstParent)) {
-      dstParent = dstParent.getParent();
-    }
-    Preconditions.checkArgument(dstParent == null,
-        "Cannot rename a directory to its own subdirectory");
-    // Check if the source exists
-    FileStatus srcStatus;
-    try {
-      srcStatus = getFileStatus(src);
-    } catch (FileNotFoundException fnfe) {
-      // source doesn't exist, return
-      return false;
-    }
-
-    // Check if the destination exists
-    FileStatus dstStatus;
-    try {
-      dstStatus = getFileStatus(dst);
-    } catch (FileNotFoundException fnde) {
-      dstStatus = null;
-    }
-
-    if (dstStatus == null) {
-      // If dst doesn't exist, check whether dst parent dir exists or not
-      // if the parent exists, the source can still be renamed to dst path
-      dstStatus = getFileStatus(dst.getParent());
-      if (!dstStatus.isDirectory()) {
-        throw new IOException(String.format(
-            "Failed to rename %s to %s, %s is a file", src, dst,
-            dst.getParent()));
-      }
-    } else {
-      // if dst exists and source and destination are same,
-      // check both the src and dst are of same type
-      if (srcStatus.getPath().equals(dstStatus.getPath())) {
-        return !srcStatus.isDirectory();
-      } else if (dstStatus.isDirectory()) {
-        // If dst is a directory, rename source as subpath of it.
-        // for example rename /source to /dst will lead to /dst/source
-        dst = new Path(dst, src.getName());
-        FileStatus[] statuses;
-        try {
-          statuses = listStatus(dst);
-        } catch (FileNotFoundException fnde) {
-          statuses = null;
-        }
-
-        if (statuses != null && statuses.length > 0) {
-          // If dst exists and not a directory not empty
-          throw new FileAlreadyExistsException(String.format(
-              "Failed to rename %s to %s, file already exists or not empty!",
-              src, dst));
-        }
-      } else {
-        // If dst is not a directory
-        throw new FileAlreadyExistsException(String.format(
-            "Failed to rename %s to %s, file already exists!", src, dst));
-      }
-    }
-
-    if (srcStatus.isDirectory()) {
-      if (dst.toString().startsWith(src.toString())) {
-        LOG.trace("Cannot rename a directory to a subdirectory of self");
-        return false;
-      }
-    }
-    RenameIterator iterator = new RenameIterator(src, dst);
-    return iterator.iterate();
-  }
-
-  private class DeleteIterator extends OzoneListingIterator {
-    private boolean recursive;
-    DeleteIterator(Path f, boolean recursive)
-        throws IOException {
-      super(f);
-      this.recursive = recursive;
-      if (getStatus().isDirectory()
-          && !this.recursive
-          && listStatus(f).length != 0) {
-        throw new PathIsNotEmptyDirectoryException(f.toString());
-      }
-    }
-
-    boolean processKey(String key) throws IOException {
-      if (key.equals("")) {
-        LOG.trace("Skipping deleting root directory");
-        return true;
-      } else {
-        LOG.trace("deleting key:" + key);
-        boolean succeed = deleteObject(key);
-        // if recursive delete is requested ignore the return value of
-        // deleteObject and issue deletes for other keys.
-        return recursive || succeed;
-      }
-    }
-  }
-
-  @Override
-  public boolean delete(Path f, boolean recursive) throws IOException {
-    LOG.trace("delete() path:{} recursive:{}", f, recursive);
-    try {
-      DeleteIterator iterator = new DeleteIterator(f, recursive);
-      return iterator.iterate();
-    } catch (FileNotFoundException e) {
-      LOG.debug("Couldn't delete {} - does not exist", f);
-      return false;
-    }
-  }
-
-  private class ListStatusIterator extends OzoneListingIterator {
-    private  List<FileStatus> statuses = new ArrayList<>(LISTING_PAGE_SIZE);
-    private Path f;
-
-    ListStatusIterator(Path f) throws IOException  {
-      super(f);
-      this.f = f;
-    }
-
-    boolean processKey(String key) throws IOException {
-      Path keyPath = new Path(OZONE_URI_DELIMITER + key);
-      if (key.equals(getPathKey())) {
-        if (pathIsDirectory()) {
-          return true;
-        } else {
-          statuses.add(getFileStatus(keyPath));
-          return true;
-        }
-      }
-      // left with only subkeys now
-      if (pathToKey(keyPath.getParent()).equals(pathToKey(f))) {
-        // skip keys which are for subdirectories of the directory
-        statuses.add(getFileStatus(keyPath));
-      }
-      return true;
-    }
-
-    FileStatus[] getStatuses() {
-      return statuses.toArray(new FileStatus[statuses.size()]);
-    }
-  }
-
-  @Override
-  public FileStatus[] listStatus(Path f) throws IOException {
-    LOG.trace("listStatus() path:{}", f);
-    ListStatusIterator iterator = new ListStatusIterator(f);
-    iterator.iterate();
-    return iterator.getStatuses();
-  }
-
-  @Override
-  public void setWorkingDirectory(Path newDir) {
-    workingDir = newDir;
-  }
-
-  @Override
-  public Path getWorkingDirectory() {
-    return workingDir;
-  }
-
-  /**
-   * Check whether the path is valid and then create directories.
-   * Directory is represented using a key with no value.
-   * All the non-existent parent directories are also created.
-   *
-   * @param path directory path to be created
-   * @return true if directory exists or created successfully.
-   * @throws IOException
-   */
-  private boolean mkdir(Path path) throws IOException {
-    Path fPart = path;
-    Path prevfPart = null;
-    do {
-      LOG.trace("validating path:{}", fPart);
-      try {
-        FileStatus fileStatus = getFileStatus(fPart);
-        if (fileStatus.isDirectory()) {
-          // If path exists and a directory, exit
-          break;
-        } else {
-          // Found a file here, rollback and delete newly created directories
-          LOG.trace("Found a file with same name as directory, path:{}", fPart);
-          if (prevfPart != null) {
-            delete(prevfPart, true);
-          }
-          throw new FileAlreadyExistsException(String.format(
-              "Can't make directory for path '%s', it is a file.", fPart));
-        }
-      } catch (FileNotFoundException fnfe) {
-        LOG.trace("creating directory for fpart:{}", fPart);
-        String key = pathToKey(fPart);
-        String dirKey = addTrailingSlashIfNeeded(key);
-        if (!createDirectory(dirKey)) {
-          // Directory creation failed here,
-          // rollback and delete newly created directories
-          LOG.trace("Directory creation failed, path:{}", fPart);
-          if (prevfPart != null) {
-            delete(prevfPart, true);
-          }
-          return false;
-        }
-      }
-      prevfPart = fPart;
-      fPart = fPart.getParent();
-    } while (fPart != null);
-    return true;
-  }
-
-  @Override
-  public boolean mkdirs(Path f, FsPermission permission) throws IOException {
-    LOG.trace("mkdir() path:{} ", f);
-    String key = pathToKey(f);
-    if (StringUtils.isEmpty(key)) {
-      return false;
-    }
-    return mkdir(f);
-  }
-
-  @Override
-  public FileStatus getFileStatus(Path f) throws IOException {
-    LOG.trace("getFileStatus() path:{}", f);
-    Path qualifiedPath = f.makeQualified(uri, workingDir);
-    String key = pathToKey(qualifiedPath);
-
-    if (key.length() == 0) {
-      return new FileStatus(0, true, 1, 0,
-          bucket.getCreationTime(), qualifiedPath);
-    }
-
-    // consider this a file and get key status
-    OzoneKey meta = getKeyInfo(key);
-    if (meta == null) {
-      key = addTrailingSlashIfNeeded(key);
-      meta = getKeyInfo(key);
-    }
-
-    if (meta == null) {
-      LOG.trace("File:{} not found", f);
-      throw new FileNotFoundException(f + ": No such file or directory!");
-    } else if (isDirectory(meta)) {
-      return new FileStatus(0, true, 1, 0,
-          meta.getModificationTime(), qualifiedPath);
-    } else {
-      //TODO: Fetch replication count from ratis config
-      return new FileStatus(meta.getDataSize(), false, 1,
-            getDefaultBlockSize(f), meta.getModificationTime(), qualifiedPath);
-    }
-  }
-
-  /**
-   * Helper method to fetch the key metadata info.
-   * @param key key whose metadata information needs to be fetched
-   * @return metadata info of the key
-   */
-  private OzoneKey getKeyInfo(String key) {
-    try {
-      return bucket.getKey(key);
-    } catch (IOException e) {
-      LOG.trace("Key:{} does not exists", key);
-      return null;
-    }
-  }
-
-  /**
-   * Helper method to check if an Ozone key is representing a directory.
-   * @param key key to be checked as a directory
-   * @return true if key is a directory, false otherwise
-   */
-  private boolean isDirectory(OzoneKey key) {
-    LOG.trace("key name:{} size:{}", key.getName(),
-        key.getDataSize());
-    return key.getName().endsWith(OZONE_URI_DELIMITER)
-        && (key.getDataSize() == 0);
-  }
-
-  /**
-   * Helper method to create an directory specified by key name in bucket.
-   * @param keyName key name to be created as directory
-   * @return true if the key is created, false otherwise
-   */
-  private boolean createDirectory(String keyName) {
-    try {
-      LOG.trace("creating dir for key:{}", keyName);
-      bucket.createKey(keyName, 0, replicationType, replicationFactor).close();
-      return true;
-    } catch (IOException ioe) {
-      LOG.error("create key failed for key:{}", keyName, ioe);
-      return false;
-    }
-  }
-
-  /**
-   * Helper method to delete an object specified by key name in bucket.
-   * @param keyName key name to be deleted
-   * @return true if the key is deleted, false otherwise
-   */
-  private boolean deleteObject(String keyName) {
-    LOG.trace("issuing delete for key" + keyName);
-    try {
-      bucket.deleteKey(keyName);
-      return true;
-    } catch (IOException ioe) {
-      LOG.error("delete key failed " + ioe.getMessage());
-      return false;
-    }
-  }
-
-  /**
-   * Turn a path (relative or otherwise) into an Ozone key.
-   *
-   * @param path the path of the file.
-   * @return the key of the object that represents the file.
-   */
-  public String pathToKey(Path path) {
-    Objects.requireNonNull(path, "Path can not be null!");
-    if (!path.isAbsolute()) {
-      path = new Path(workingDir, path);
-    }
-    // removing leading '/' char
-    String key = path.toUri().getPath().substring(1);
-    LOG.trace("path for key:{} is:{}", key, path);
-    return key;
-  }
-
-  /**
-   * Add trailing delimiter to path if it is already not present.
-   *
-   * @param key the ozone Key which needs to be appended
-   * @return delimiter appended key
-   */
-  private String addTrailingSlashIfNeeded(String key) {
-    if (StringUtils.isNotEmpty(key) && !key.endsWith(OZONE_URI_DELIMITER)) {
-      return key + OZONE_URI_DELIMITER;
-    } else {
-      return key;
-    }
-  }
-
-  @Override
-  public String toString() {
-    return "OzoneFileSystem{URI=" + uri + ", "
-        + "workingDir=" + workingDir + ", "
-        + "userName=" + userName + ", "
-        + "statistics=" + statistics
-        + "}";
-  }
-
-  private abstract class OzoneListingIterator {
-    private final Path path;
-    private final FileStatus status;
-    private String pathKey;
-    private Iterator<OzoneKey> keyIterator;
-
-    OzoneListingIterator(Path path)
-        throws IOException {
-      this.path = path;
-      this.status = getFileStatus(path);
-      this.pathKey = pathToKey(path);
-      if (status.isDirectory()) {
-        this.pathKey = addTrailingSlashIfNeeded(pathKey);
-      }
-      keyIterator = bucket.listKeys(pathKey);
-    }
-
-    abstract boolean processKey(String key) throws IOException;
-
-    // iterates all the keys in the particular path
-    boolean iterate() throws IOException {
-      LOG.trace("Iterating path {}", path);
-      if (status.isDirectory()) {
-        LOG.trace("Iterating directory:{}", pathKey);
-        while (keyIterator.hasNext()) {
-          OzoneKey key = keyIterator.next();
-          LOG.trace("iterating key:{}", key.getName());
-          if (!processKey(key.getName())) {
-            return false;
-          }
-        }
-        return true;
-      } else {
-        LOG.trace("iterating file:{}", path);
-        return processKey(pathKey);
-      }
-    }
-
-    String getPathKey() {
-      return pathKey;
-    }
-
-    boolean pathIsDirectory() {
-      return status.isDirectory();
-    }
-
-    FileStatus getStatus() {
-      return status;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
deleted file mode 100644
index 93e82c3..0000000
--- a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Ozone Filesystem.
- *
- * Except for the exceptions, it should all be hidden as implementation 
details.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-package org.apache.hadoop.fs.ozone;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
----------------------------------------------------------------------
diff --git 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
 
b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
deleted file mode 100644
index ad21f28..0000000
--- 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.Arrays;
-
-/**
- * Test OzoneFSInputStream by reading through multiple interfaces.
- */
-public class TestOzoneFSInputStream {
-  private static MiniOzoneCluster cluster = null;
-  private static FileSystem fs;
-  private static StorageHandler storageHandler;
-  private static Path filePath = null;
-  private static byte[] data = null;
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "distributed"
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setLong(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB, 10);
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(10)
-        .build();
-    cluster.waitForClusterToBeReady();
-    storageHandler =
-        new ObjectStoreHandler(conf).getStorageHandler();
-
-    // create a volume and a bucket to be used by OzoneFileSystem
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    UserArgs userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    // Fetch the host and port for File System init
-    DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails();
-    int port = datanodeDetails
-        .getPort(DatanodeDetails.Port.Name.REST).getValue();
-    String host = datanodeDetails.getHostName();
-
-    // Set the fs.defaultFS and start the filesystem
-    String uri = String.format("%s://%s.%s/",
-        OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
-    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, uri);
-    fs =  FileSystem.get(conf);
-    int fileLen = 100 * 1024 * 1024;
-    data = DFSUtil.string2Bytes(RandomStringUtils.randomAlphanumeric(fileLen));
-    filePath = new Path("/" + RandomStringUtils.randomAlphanumeric(5));
-    try (FSDataOutputStream stream = fs.create(filePath)) {
-      stream.write(data);
-    }
-  }
-
-  /**
-   * Shutdown MiniDFSCluster.
-   */
-  @AfterClass
-  public static void shutdown() throws IOException {
-    fs.close();
-    storageHandler.close();
-    cluster.shutdown();
-  }
-
-  @Test
-  public void testO3FSSingleByteRead() throws IOException {
-    FSDataInputStream inputStream = fs.open(filePath);
-    byte[] value = new byte[data.length];
-    int i = 0;
-    while(true) {
-      int val = inputStream.read();
-      if (val == -1) {
-        break;
-      }
-      value[i] = (byte)val;
-      Assert.assertEquals("value mismatch at:" + i, value[i], data[i]);
-      i++;
-    }
-    Assert.assertEquals(i, data.length);
-    Assert.assertTrue(Arrays.equals(value, data));
-    inputStream.close();
-  }
-
-  @Test
-  public void testO3FSMultiByteRead() throws IOException {
-    FSDataInputStream inputStream = fs.open(filePath);
-    byte[] value = new byte[data.length];
-    byte[] tmp = new byte[1* 1024 *1024];
-    int i = 0;
-    while(true) {
-      int val = inputStream.read(tmp);
-      if (val == -1) {
-        break;
-      }
-      System.arraycopy(tmp, 0, value, i * tmp.length, tmp.length);
-      i++;
-    }
-    Assert.assertEquals(i * tmp.length, data.length);
-    Assert.assertTrue(Arrays.equals(value, data));
-    inputStream.close();
-  }
-}
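
The removed TestOzoneFSInputStream checks the same 100 MB payload through two read patterns: byte-at-a-time read() and bulk read(byte[]) with a 1 MB buffer. The sketch below replays both loops against an in-memory stream instead of a MiniOzoneCluster; it is an illustration only, and it copies just the number of bytes each read(byte[]) call reports, since a stream is free to fill less than the whole buffer.

// Sketch of the two read patterns exercised by the removed test, run against
// an in-memory stream for illustration.
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.Random;

public final class ReadLoopSketch {

  public static void main(String[] args) throws IOException {
    byte[] data = new byte[1 << 20];
    new Random(42).nextBytes(data);

    // Single-byte reads: read() returns -1 at end of stream.
    byte[] single = new byte[data.length];
    try (InputStream in = new ByteArrayInputStream(data)) {
      int i = 0;
      int val;
      while ((val = in.read()) != -1) {
        single[i++] = (byte) val;
      }
      System.out.println("single-byte read ok: " + Arrays.equals(single, data));
    }

    // Chunked reads: read(byte[]) reports how many bytes it actually filled,
    // which may be fewer than the buffer length, so copy only that many.
    byte[] chunked = new byte[data.length];
    byte[] tmp = new byte[64 * 1024];
    try (InputStream in = new ByteArrayInputStream(data)) {
      int pos = 0;
      int n;
      while ((n = in.read(tmp)) != -1) {
        System.arraycopy(tmp, 0, chunked, pos, n);
        pos += n;
      }
      System.out.println("chunked read ok: " + Arrays.equals(chunked, data));
    }
  }
}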

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
----------------------------------------------------------------------
diff --git 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
 
b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
deleted file mode 100644
index a225702..0000000
--- 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.Arrays;
-import java.util.Collection;
-
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.junit.After;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Time;
-
-import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test OzoneFileSystem Interfaces.
- *
- * This test will test the various interfaces i.e.
- * create, read, write, getFileStatus
- */
-@RunWith(Parameterized.class)
-public class TestOzoneFileInterfaces {
-
-  private String rootPath;
-  private String userName;
-
-  /**
-   * Parameter class to set absolute url/defaultFS handling.
-   * <p>
-   * Hadoop file systems could be used in multiple ways: Using the defaultfs
-   * and file path without the schema, or use absolute url-s even with
-   * different defaultFS. This parameter matrix would test both the use cases.
-   */
-  @Parameters
-  public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][] {{false, true}, {true, false}});
-  }
-
-  private boolean setDefaultFs;
-
-  private boolean useAbsolutePath;
-
-  private static MiniOzoneCluster cluster = null;
-
-  private static FileSystem fs;
-
-  private static StorageHandler storageHandler;
-
-  public TestOzoneFileInterfaces(boolean setDefaultFs,
-      boolean useAbsolutePath) {
-    this.setDefaultFs = setDefaultFs;
-    this.useAbsolutePath = useAbsolutePath;
-  }
-
-  @Before
-  public void init() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(3)
-        .build();
-    cluster.waitForClusterToBeReady();
-    storageHandler =
-        new ObjectStoreHandler(conf).getStorageHandler();
-
-    // create a volume and a bucket to be used by OzoneFileSystem
-    userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    UserArgs userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    rootPath = String
-        .format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucketName,
-            volumeName);
-    if (setDefaultFs) {
-      // Set the fs.defaultFS and start the filesystem
-      conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
-      fs = FileSystem.get(conf);
-    } else {
-      fs = FileSystem.get(new URI(rootPath + "/test.txt"), conf);
-    }
-  }
-
-  @After
-  public void teardown() throws IOException {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-    IOUtils.closeQuietly(fs);
-    IOUtils.closeQuietly(storageHandler);
-  }
-
-  @Test
-  public void testFileSystemInit() throws IOException {
-    if (setDefaultFs) {
-      assertTrue(
-          "The initialized file system is not OzoneFileSystem but " +
-              fs.getClass(),
-          fs instanceof OzoneFileSystem);
-      assertEquals(OzoneConsts.OZONE_URI_SCHEME, fs.getUri().getScheme());
-    }
-  }
-
-  @Test
-  public void testOzFsReadWrite() throws IOException {
-    long currentTime = Time.now();
-    int stringLen = 20;
-    String data = RandomStringUtils.randomAlphanumeric(stringLen);
-    String filePath = RandomStringUtils.randomAlphanumeric(5);
-    Path path = createPath("/" + filePath);
-    try (FSDataOutputStream stream = fs.create(path)) {
-      stream.writeBytes(data);
-    }
-
-    FileStatus status = fs.getFileStatus(path);
-    // The timestamp of the newly created file should always be greater than
-    // the time when the test was started
-    assertTrue("Modification time has not been recorded: " + status,
-        status.getModificationTime() > currentTime);
-
-    try (FSDataInputStream inputStream = fs.open(path)) {
-      byte[] buffer = new byte[stringLen];
-      inputStream.readFully(0, buffer);
-      String out = new String(buffer, 0, buffer.length);
-      assertEquals(data, out);
-    }
-  }
-
-
-  @Test
-  public void testDirectory() throws IOException {
-    String dirPath = RandomStringUtils.randomAlphanumeric(5);
-    Path path = createPath("/" + dirPath);
-    assertTrue("Makedirs returned with false for the path " + path,
-        fs.mkdirs(path));
-
-    FileStatus status = fs.getFileStatus(path);
-    assertTrue("The created path is not directory.", status.isDirectory());
-
-    assertEquals(0, status.getLen());
-
-    FileStatus[] statusList = fs.listStatus(createPath("/"));
-    assertEquals(1, statusList.length);
-    assertEquals(status, statusList[0]);
-
-    FileStatus statusRoot = fs.getFileStatus(createPath("/"));
-    assertTrue("Root dir (/) is not a directory.", status.isDirectory());
-    assertEquals(0, status.getLen());
-
-
-  }
-
-  @Test
-  public void testPathToKey() throws Exception {
-    OzoneFileSystem ozoneFs = (OzoneFileSystem) TestOzoneFileInterfaces.fs;
-
-    assertEquals("a/b/1", ozoneFs.pathToKey(new Path("/a/b/1")));
-
-    assertEquals("user/" + getCurrentUser() + "/key1/key2",
-        ozoneFs.pathToKey(new Path("key1/key2")));
-
-    assertEquals("key1/key2",
-        ozoneFs.pathToKey(new Path("o3://test1/key1/key2")));
-  }
-
-  private String getCurrentUser() {
-    try {
-      return UserGroupInformation.getCurrentUser().getShortUserName();
-    } catch (IOException e) {
-      return OZONE_DEFAULT_USER;
-    }
-  }
-
-  private Path createPath(String relativePath) {
-    if (useAbsolutePath) {
-      return new Path(
-          rootPath + (relativePath.startsWith("/") ? "" : "/") + relativePath);
-    } else {
-      return new Path(relativePath);
-    }
-  }
-}
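
TestOzoneFileInterfaces relies on JUnit 4's Parameterized runner so that every test method runs once per (setDefaultFs, useAbsolutePath) combination. A minimal sketch of that runner pattern, with the cluster setup replaced by a stub body, might look like this:

// Sketch of the JUnit 4 Parameterized pattern used by the removed suite;
// the parameter names mirror the original but the test body is a stub.
import java.util.Arrays;
import java.util.Collection;

import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

@RunWith(Parameterized.class)
public class ParameterizedSketch {

  @Parameters
  public static Collection<Object[]> data() {
    // Each array is one run of the class: {setDefaultFs, useAbsolutePath}.
    return Arrays.asList(new Object[][] {{false, true}, {true, false}});
  }

  private final boolean setDefaultFs;
  private final boolean useAbsolutePath;

  public ParameterizedSketch(boolean setDefaultFs, boolean useAbsolutePath) {
    this.setDefaultFs = setDefaultFs;
    this.useAbsolutePath = useAbsolutePath;
  }

  @Test
  public void runsOncePerParameterRow() {
    // In this particular matrix exactly one flag is set per row.
    Assert.assertTrue(setDefaultFs ^ useAbsolutePath);
  }
}

Each Object[] row returned by data() is passed to the constructor, so the fixture fields stay fixed for one full run of the class and the two rows cover both the defaultFS and the absolute-URI access styles.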

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
----------------------------------------------------------------------
diff --git 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
 
b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
deleted file mode 100644
index dd54315..0000000
--- 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-
-/**
- * Ozone contract tests creating files.
- */
-public class ITestOzoneContractCreate extends AbstractContractCreateTest {
-
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-    OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new OzoneContract(conf);
-  }
-}
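
Every contract suite removed in this commit shares one shape: a static cluster is brought up once per class in @BeforeClass, reused by each inherited contract test, and shut down in @AfterClass, while createContract() hands the base class an OzoneContract bound to that cluster. A generic sketch of that class-level lifecycle, with a hypothetical SharedResource standing in for the MiniOzoneCluster, is:

// Generic sketch of the per-class fixture lifecycle used by the contract
// suites in this commit. SharedResource is a stand-in for the cluster.
import java.io.Closeable;
import java.io.IOException;

import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

public class ClassLevelLifecycleSketch {

  /** Stand-in for an expensive shared fixture such as a mini cluster. */
  static final class SharedResource implements Closeable {
    boolean running = true;

    @Override
    public void close() {
      running = false;
    }
  }

  private static SharedResource cluster;

  @BeforeClass
  public static void createCluster() throws IOException {
    cluster = new SharedResource();   // started once for the whole class
  }

  @AfterClass
  public static void teardownCluster() throws IOException {
    cluster.close();                  // torn down after the last test method
  }

  @Test
  public void resourceIsAvailableToEveryTest() {
    Assert.assertTrue(cluster.running);
  }
}

Starting the cluster per class rather than per test keeps the expensive setup out of each individual case, which is why the same three methods reappear verbatim in every ITestOzoneContract* class in this diff.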

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
----------------------------------------------------------------------
diff --git 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
 
b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
deleted file mode 100644
index f0a3d8d..0000000
--- 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-
-/**
- * Ozone contract tests covering deletes.
- */
-public class ITestOzoneContractDelete extends AbstractContractDeleteTest {
-
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-    OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new OzoneContract(conf);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java
----------------------------------------------------------------------
diff --git 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java
 
b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java
deleted file mode 100644
index 134a9ad..0000000
--- 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-
-
-/**
- * Contract test suite covering S3A integration with DistCp.
- * Uses the block output stream, buffered to disk. This is the
- * recommended output mechanism for DistCP due to its scalability.
- */
-public class ITestOzoneContractDistCp extends AbstractContractDistCpTest {
-
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-    OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected OzoneContract createContract(Configuration conf) {
-    return new OzoneContract(conf);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
----------------------------------------------------------------------
diff --git 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
 
b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
deleted file mode 100644
index 98bbb14..0000000
--- 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-
-/**
- * Ozone contract tests covering getFileStatus.
- */
-public class ITestOzoneContractGetFileStatus
-    extends AbstractContractGetFileStatusTest {
-
-
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-    OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new OzoneContract(conf);
-  }
-
-  @Override
-  public void teardown() throws Exception {
-    getLog().info("FS details {}", getFileSystem());
-    super.teardown();
-  }
-
-  @Override
-  protected Configuration createConfiguration() {
-    return super.createConfiguration();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
----------------------------------------------------------------------
diff --git 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
 
b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
deleted file mode 100644
index bc0de5d..0000000
--- 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-
-/**
- * Test dir operations on Ozone.
- */
-public class ITestOzoneContractMkdir extends AbstractContractMkdirTest {
-
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-    OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new OzoneContract(conf);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
----------------------------------------------------------------------
diff --git 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
 
b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
deleted file mode 100644
index 0bc57d4..0000000
--- 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-
-/**
- * Ozone contract tests opening files.
- */
-public class ITestOzoneContractOpen extends AbstractContractOpenTest {
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-    OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new OzoneContract(conf);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e16e5b30/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
----------------------------------------------------------------------
diff --git 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
 
b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
deleted file mode 100644
index 8ce1d1b..0000000
--- 
a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-
-/**
- * Ozone contract tests covering rename.
- */
-public class ITestOzoneContractRename extends AbstractContractRenameTest {
-
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws IOException {
-    OzoneContract.destroyCluster();
-  }
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new OzoneContract(conf);
-  }
-
-}


