http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
index ea83502..ca09050 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
@@ -43,8 +43,7 @@ public class VersionInfo {
     String versionInfoFile = component + "-version-info.properties";
     InputStream is = null;
     try {
-      is = ThreadUtil.getResourceAsStream(VersionInfo.class.getClassLoader(),
-          versionInfoFile);
+      is = ThreadUtil.getResourceAsStream(versionInfoFile);
       info.load(is);
     } catch (IOException ex) {
       LoggerFactory.getLogger(getClass()).warn("Could not read '" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj b/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj
index ac3767b..1119069 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj
+++ b/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj
@@ -71,7 +71,6 @@
   <PropertyGroup>
     <SnappyLib Condition="Exists('$(CustomSnappyPrefix)\snappy.dll')">$(CustomSnappyPrefix)</SnappyLib>
     <SnappyLib Condition="Exists('$(CustomSnappyPrefix)\lib\snappy.dll') And '$(SnappyLib)' == ''">$(CustomSnappyPrefix)\lib</SnappyLib>
-    <SnappyLib Condition="Exists('$(CustomSnappyPrefix)\bin\snappy.dll') And '$(SnappyLib)' == ''">$(CustomSnappyPrefix)\bin</SnappyLib>
     <SnappyLib Condition="Exists('$(CustomSnappyLib)') And '$(SnappyLib)' == ''">$(CustomSnappyLib)</SnappyLib>
     <SnappyInclude Condition="Exists('$(CustomSnappyPrefix)\snappy.h')">$(CustomSnappyPrefix)</SnappyInclude>
     <SnappyInclude Condition="Exists('$(CustomSnappyPrefix)\include\snappy.h') And '$(SnappyInclude)' == ''">$(CustomSnappyPrefix)\include</SnappyInclude>
@@ -83,7 +82,6 @@
   <PropertyGroup>
     <IsalLib Condition="Exists('$(CustomIsalPrefix)\isa-l.dll')">$(CustomIsalPrefix)</IsalLib>
     <IsalLib Condition="Exists('$(CustomIsalPrefix)\lib\isa-l.dll') And '$(IsalLib)' == ''">$(CustomIsalPrefix)\lib</IsalLib>
-    <IsalLib Condition="Exists('$(CustomIsalPrefix)\bin\isa-l.dll') And '$(IsalLib)' == ''">$(CustomIsalPrefix)\bin</IsalLib>
     <IsalLib Condition="Exists('$(CustomIsalLib)') And '$(IsalLib)' == ''">$(CustomIsalLib)</IsalLib>
     <IsalEnabled Condition="'$(IsalLib)' != ''">true</IsalEnabled>
   </PropertyGroup>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c
index abff7ea..c7984a3 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c
@@ -27,12 +27,8 @@
 #ifdef UNIX
 static EVP_CIPHER_CTX * (*dlsym_EVP_CIPHER_CTX_new)(void);
 static void (*dlsym_EVP_CIPHER_CTX_free)(EVP_CIPHER_CTX *);
-#if OPENSSL_API_COMPAT < 0x10100000L && OPENSSL_VERSION_NUMBER >= 0x10100000L
-static int (*dlsym_EVP_CIPHER_CTX_reset)(EVP_CIPHER_CTX *);
-#else
 static int (*dlsym_EVP_CIPHER_CTX_cleanup)(EVP_CIPHER_CTX *);
 static void (*dlsym_EVP_CIPHER_CTX_init)(EVP_CIPHER_CTX *);
-#endif
 static int (*dlsym_EVP_CIPHER_CTX_set_padding)(EVP_CIPHER_CTX *, int);
 static int (*dlsym_EVP_CIPHER_CTX_test_flags)(const EVP_CIPHER_CTX *, int);
 static int (*dlsym_EVP_CIPHER_CTX_block_size)(const EVP_CIPHER_CTX *);
@@ -127,16 +123,10 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_initIDs
                       "EVP_CIPHER_CTX_new");
   LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_free, env, openssl,  \
                       "EVP_CIPHER_CTX_free");
-#if OPENSSL_API_COMPAT < 0x10100000L && OPENSSL_VERSION_NUMBER >= 0x10100000L
-  LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_reset, env, openssl,  \
-                      "EVP_CIPHER_CTX_reset");
-#else
   LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_cleanup, env, openssl,  \
                       "EVP_CIPHER_CTX_cleanup");
   LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_init, env, openssl,  \
                       "EVP_CIPHER_CTX_init");
-#endif
-
   LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_set_padding, env, openssl,  \
                       "EVP_CIPHER_CTX_set_padding");
   LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_test_flags, env, openssl,  \
@@ -281,11 +271,7 @@ JNIEXPORT jlong JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_init
   (*env)->ReleaseByteArrayElements(env, key, jKey, 0);
   (*env)->ReleaseByteArrayElements(env, iv, jIv, 0);
   if (rc == 0) {
-#if OPENSSL_API_COMPAT < 0x10100000L && OPENSSL_VERSION_NUMBER >= 0x10100000L
-    dlsym_EVP_CIPHER_CTX_reset(context);
-#else
     dlsym_EVP_CIPHER_CTX_cleanup(context);
-#endif
     THROW(env, "java/lang/InternalError", "Error in EVP_CipherInit_ex.");
     return (jlong)0;
   }
@@ -348,11 +334,7 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_update
   int output_len = 0;
   if (!dlsym_EVP_CipherUpdate(context, output_bytes, &output_len,  \
       input_bytes, input_len)) {
-#if OPENSSL_API_COMPAT < 0x10100000L && OPENSSL_VERSION_NUMBER >= 0x10100000L
-    dlsym_EVP_CIPHER_CTX_reset(context);
-#else
     dlsym_EVP_CIPHER_CTX_cleanup(context);
-#endif
     THROW(env, "java/lang/InternalError", "Error in EVP_CipherUpdate.");
     return 0;
   }
@@ -394,11 +376,7 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_doFinal
   
   int output_len = 0;
   if (!dlsym_EVP_CipherFinal_ex(context, output_bytes, &output_len)) {
-#if OPENSSL_API_COMPAT < 0x10100000L && OPENSSL_VERSION_NUMBER >= 0x10100000L
-    dlsym_EVP_CIPHER_CTX_reset(context);
-#else
     dlsym_EVP_CIPHER_CTX_cleanup(context);
-#endif
     THROW(env, "java/lang/InternalError", "Error in EVP_CipherFinal_ex.");
     return 0;
   }
@@ -418,16 +396,6 @@ JNIEXPORT jstring JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_getLibrary
     (JNIEnv *env, jclass clazz) 
 {
 #ifdef UNIX
-#if OPENSSL_API_COMPAT < 0x10100000L && OPENSSL_VERSION_NUMBER >= 0x10100000L
-  if (dlsym_EVP_CIPHER_CTX_reset) {
-    Dl_info dl_info;
-    if(dladdr(
-        dlsym_EVP_CIPHER_CTX_reset,
-        &dl_info)) {
-      return (*env)->NewStringUTF(env, dl_info.dli_fname);
-    }
-  }
-#else
   if (dlsym_EVP_CIPHER_CTX_init) {
     Dl_info dl_info;
     if(dladdr(
@@ -436,7 +404,6 @@ JNIEXPORT jstring JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_getLibrary
       return (*env)->NewStringUTF(env, dl_info.dli_fname);
     }
   }
-#endif
 
   return (*env)->NewStringUTF(env, HADOOP_OPENSSL_LIBRARY);
 #endif

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 8f54464..f1d68cd 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.conf;
 
-import java.io.BufferedInputStream;
 import java.io.BufferedWriter;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -2420,34 +2419,4 @@ public class TestConfiguration {
       System.setOut(output);
     }
   }
-
-  /**
-   * Test race conditions between clone() and getProps().
-   * Test for race conditions in the way Hadoop handles the Configuration
-   * class. The scenario is the following. Let's assume that there are two
-   * threads sharing the same Configuration class. One adds some resources
-   * to the configuration, while the other one clones it. Resources are
-   * loaded lazily in a deferred call to loadResources(). If the cloning
-   * happens after adding the resources but before parsing them, some temporary
-   * resources like input stream pointers are cloned. Eventually both copies
-   * will load the same input stream resources.
-   * One parses the input stream XML and closes it updating it's own copy of
-   * the resource. The other one has another pointer to the same input stream.
-   * When it tries to load it, it will crash with a stream closed exception.
-   */
-  @Test
-  public void testResourceRace() {
-    InputStream is =
-        new BufferedInputStream(new ByteArrayInputStream(
-            "<configuration></configuration>".getBytes()));
-    Configuration config = new Configuration();
-    // Thread 1
-    config.addResource(is);
-    // Thread 2
-    Configuration confClone = new Configuration(conf);
-    // Thread 2
-    confClone.get("firstParse");
-    // Thread 1
-    config.get("secondParse");
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
index 1f37f74..e3a4a12 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
@@ -43,7 +43,7 @@ public final class FileContextTestHelper {
    * Create a context with test root relative to the test directory
    */
   public FileContextTestHelper() {
-    this(GenericTestUtils.getRandomizedTestDir().getPath());
+    this(GenericTestUtils.getRandomizedTestDir().getAbsolutePath());
   }
 
   /**
@@ -83,7 +83,7 @@ public final class FileContextTestHelper {
         absTestRootDir = testRootDir;
       } else {
         absTestRootDir = fc.getWorkingDirectory().toString() + "/"
-            + new Path(testRootDir).toUri();
+            + testRootDir;
       }
     }
     return absTestRootDir;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index 54d015a..d2cbca0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -228,9 +228,9 @@ public class ContractTestUtils extends Assert {
   public static void verifyFileContents(FileSystem fs,
                                         Path path,
                                         byte[] original) throws IOException {
-    assertIsFile(fs, path);
     FileStatus stat = fs.getFileStatus(path);
     String statText = stat.toString();
+    assertTrue("not a file " + statText, stat.isFile());
     assertEquals("wrong length " + statText, original.length, stat.getLen());
     byte[] bytes = readDataset(fs, path, original.length);
     compareByteArrays(original, bytes, original.length);
@@ -854,36 +854,6 @@ public class ContractTestUtils extends Assert {
   }
 
   /**
-   * Assert that a varargs list of paths exist.
-   * @param fs filesystem
-   * @param message message for exceptions
-   * @param paths paths
-   * @throws IOException IO failure
-   */
-  public static void assertPathsExist(FileSystem fs,
-      String message,
-      Path... paths) throws IOException {
-    for (Path path : paths) {
-      assertPathExists(fs, message, path);
-    }
-  }
-
-  /**
-   * Assert that a varargs list of paths do not exist.
-   * @param fs filesystem
-   * @param message message for exceptions
-   * @param paths paths
-   * @throws IOException IO failure
-   */
-  public static void assertPathsDoNotExist(FileSystem fs,
-      String message,
-      Path... paths) throws IOException {
-    for (Path path : paths) {
-      assertPathDoesNotExist(fs, message, path);
-    }
-  }
-
-  /**
    * Create a dataset for use in the tests; all data is in the range
    * base to (base+modulo-1) inclusive.
    * @param len length of data

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 53eb2be..61b0271 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -44,7 +44,6 @@ import org.apache.commons.lang.RandomStringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Appender;
@@ -249,7 +248,7 @@ public abstract class GenericTestUtils {
    * @return the absolute directory for tests. Caller is expected to create it.
    */
   public static File getRandomizedTestDir() {
-    return new File(getRandomizedTempPath());
+    return new File(getRandomizedTempPath()).getAbsoluteFile();
   }
 
   /**
@@ -260,9 +259,7 @@ public abstract class GenericTestUtils {
    * @return a string to use in paths
    */
   public static String getTempPath(String subpath) {
-    String prop = (Path.WINDOWS) ? DEFAULT_TEST_DATA_PATH
-        : System.getProperty(SYSPROP_TEST_DATA_DIR, DEFAULT_TEST_DATA_PATH);
-
+    String prop = System.getProperty(SYSPROP_TEST_DATA_DIR, DEFAULT_TEST_DATA_PATH);
     if (prop.isEmpty()) {
       // corner case: property is there but empty
       prop = DEFAULT_TEST_DATA_PATH;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
index cb4bf7e..1817a13 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
@@ -28,7 +28,6 @@ import javax.servlet.ServletContextListener;
 import com.codahale.metrics.JmxReporter;
 import com.codahale.metrics.Meter;
 import com.codahale.metrics.MetricRegistry;
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.CachingKeyProvider;
@@ -160,11 +159,6 @@ public class KMSWebApp implements ServletContextListener {
       }
       KeyProvider keyProvider =
           KeyProviderFactory.get(new URI(providerString), kmsConf);
-      Preconditions.checkNotNull(keyProvider, String.format("No" +
-              " KeyProvider has been initialized, please" +
-              " check whether %s '%s' is configured correctly in" +
-              " kms-site.xml.", KMSConfiguration.KEY_PROVIDER_URI,
-          providerString));
       if (kmsConf.getBoolean(KMSConfiguration.KEY_CACHE_ENABLE,
           KMSConfiguration.KEY_CACHE_ENABLE_DEFAULT)) {
         long keyTimeOutMillis =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 0875328..af7b540 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2910,7 +2910,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @param num Number of threads for hedged reads thread pool.
    * If zero, skip hedged reads thread pool creation.
    */
-  private static synchronized void initThreadsNumForHedgedReads(int num) {
+  private synchronized void initThreadsNumForHedgedReads(int num) {
     if (num <= 0 || HEDGED_READ_THREAD_POOL != null) return;
     HEDGED_READ_THREAD_POOL = new ThreadPoolExecutor(1, num, 60,
         TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
index 52d9257..fa0e174 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
@@ -31,11 +31,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   <properties>
     <require.fuse>false</require.fuse>
-    <require.libwebhdfs>false</require.libwebhdfs>
-    <require.valgrind>false</require.valgrind>
-    <native_ctest_args></native_ctest_args>
-    <native_cmake_args></native_cmake_args>
-    <native_make_args></native_make_args>
     <hadoop.component>hdfs</hadoop.component>
   </properties>
 
@@ -90,7 +85,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
             <exclude>src/main/native/config/*</exclude>
             <exclude>src/main/native/m4/*</exclude>
             <exclude>src/main/native/fuse-dfs/util/tree.h</exclude>
-            <exclude>src/main/native/libhdfspp/third_party/**</exclude>
             <exclude>src/contrib/**</exclude>
           </excludes>
         </configuration>
@@ -144,16 +138,17 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                 </goals>
                 <configuration>
                   <target>
+                    <condition property="generator" value="Visual Studio 10" else="Visual Studio 10 Win64">
+                      <equals arg1="Win32" arg2="${env.PLATFORM}" />
+                    </condition>
                     <mkdir dir="${project.build.directory}/native"/>
                     <exec executable="cmake" dir="${project.build.directory}/native"
                           failonerror="true">
-                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DHADOOP_BUILD=1 -DREQUIRE_FUSE=${require.fuse} -DREQUIRE_VALGRIND=${require.valgrind} -A '${env.PLATFORM}'"/>
-                      <arg line="${native_cmake_args}"/>
+                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_FUSE=${require.fuse} -G '${generator}'"/>
                     </exec>
                     <exec executable="msbuild" dir="${project.build.directory}/native"
                           failonerror="true">
                       <arg line="ALL_BUILD.vcxproj /nologo /p:Configuration=RelWithDebInfo /p:LinkIncremental=false"/>
-                      <arg line="${native_make_args}"/>
                     </exec>
                     <!-- Copy for inclusion in distribution. -->
                     <copy todir="${project.build.directory}/bin">
@@ -172,15 +167,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                     <property name="compile_classpath" refid="maven.compile.classpath"/>
                     <property name="test_classpath" refid="maven.test.classpath"/>
                     <exec executable="ctest" failonerror="true" dir="${project.build.directory}/native">
-                      <arg line="--output-on-failure"/>
-                      <arg line="${native_ctest_args}"/>
                       <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
                       <!-- HADOOP_HOME required to find winutils. -->
                       <env key="HADOOP_HOME" value="${hadoop.common.build.dir}"/>
                       <!-- Make sure hadoop.dll and jvm.dll are on PATH. -->
                       <env key="PATH" value="${env.PATH};${hadoop.common.build.dir}/bin;${java.home}/jre/bin/server;${java.home}/bin/server"/>
-                      <!-- Make sure libhadoop.so is on LD_LIBRARY_PATH. -->
-                      <env key="LD_LIBRARY_PATH" value="${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib"/>
                     </exec>
                   </target>
                 </configuration>
@@ -201,90 +192,31 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <build>
         <plugins>
           <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-antrun-plugin</artifactId>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-maven-plugins</artifactId>
             <executions>
               <execution>
-                <id>make</id>
+                <id>cmake-compile</id>
                 <phase>compile</phase>
-                <goals><goal>run</goal></goals>
-                <configuration>
-                  <target>
-                    <mkdir dir="${project.build.directory}"/>
-                    <exec executable="cmake" dir="${project.build.directory}" failonerror="true">
-                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model}  -DHADOOP_BUILD=1 -DREQUIRE_LIBWEBHDFS=${require.libwebhdfs} -DREQUIRE_FUSE=${require.fuse} -DREQUIRE_VALGRIND=${require.valgrind} "/>
-                      <arg line="${native_cmake_args}"/>
-                    </exec>
-                    <exec executable="make" dir="${project.build.directory}" failonerror="true">
-                      <arg line="${native_make_args}"/>
-                    </exec>
-                  </target>
-                </configuration>
-              </execution>
-              <execution>
-                <id>native_tests</id>
-                <phase>test</phase>
-                <goals><goal>run</goal></goals>
+                <goals><goal>cmake-compile</goal></goals>
                 <configuration>
-                  <skip>${skipTests}</skip>
-                  <target>
-                    <property name="compile_classpath" refid="maven.compile.classpath"/>
-                    <property name="test_classpath" refid="maven.test.classpath"/>
-                    <exec executable="ctest" failonerror="true" dir="${project.build.directory}/">
-                      <arg line="--output-on-failure"/>
-                      <arg line="${native_ctest_args}"/>
-                      <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
-                      <!-- Make sure libhadoop.so is on LD_LIBRARY_PATH. -->
-                      <env key="LD_LIBRARY_PATH" value="${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib"/>
-                    </exec>
-                  </target>
+                  <source>${basedir}/src</source>
+                  <vars>
+                    <GENERATED_JAVAH>${project.build.directory}/native/javah</GENERATED_JAVAH>
+                    <JVM_ARCH_DATA_MODEL>${sun.arch.data.model}</JVM_ARCH_DATA_MODEL>
+                    <REQUIRE_FUSE>${require.fuse}</REQUIRE_FUSE>
+                  </vars>
+                  <output>${project.build.directory}</output>
                 </configuration>
               </execution>
             </executions>
           </plugin>
-        </plugins>
-      </build>
-    </profile>
-    <profile>
-      <id>test-patch</id>
-      <activation>
-        <activeByDefault>false</activeByDefault>
-      </activation>
-      <properties>
-        <runningWithNative>true</runningWithNative>
-      </properties>
-      <build>
-        <plugins>
           <plugin>
             <groupId>org.apache.maven.plugins</groupId>
             <artifactId>maven-antrun-plugin</artifactId>
             <executions>
               <execution>
-                <id>make_altern</id>
-                <phase>compile</phase>
-                <goals><goal>run</goal></goals>
-                <configuration>
-                  <target>
-                    <mkdir dir="${project.build.directory}/altern"/>
-                    <condition property="c_compiler" value="clang" else="gcc">
-                      <contains string="${env.CC}" substring="gcc"/>
-                    </condition>
-                    <condition property="cxx_compiler" value="clang++" else="g++">
-                      <contains string="${env.CXX}" substring="g++"/>
-                    </condition>
-                    <exec executable="cmake" dir="${project.build.directory}/altern" failonerror="true">
-                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/altern/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model}  -DHADOOP_BUILD=1 -DREQUIRE_LIBWEBHDFS=${require.libwebhdfs} -DREQUIRE_FUSE=${require.fuse} -DREQUIRE_VALGRIND=${require.valgrind} "/>
-                      <arg line="-DCMAKE_C_COMPILER=${c_compiler} -DCMAKE_CXX_COMPILER=${cxx_compiler}"/>
-                      <arg line="${native_cmake_args}"/>
-                    </exec>
-                    <exec executable="make" dir="${project.build.directory}/altern" failonerror="true">
-                      <arg line="${native_make_args}"/>
-                    </exec>
-                  </target>
-                </configuration>
-              </execution>
-              <execution>
-                <id>native_tests_altern</id>
+                <id>native_tests</id>
                 <phase>test</phase>
                 <goals><goal>run</goal></goals>
                 <configuration>
@@ -292,26 +224,14 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                   <target>
                     <property name="compile_classpath" refid="maven.compile.classpath"/>
                     <property name="test_classpath" refid="maven.test.classpath"/>
-                    <exec executable="ctest" failonerror="true" dir="${project.build.directory}/altern">
-                      <arg line="--output-on-failure"/>
-                      <arg line="${native_ctest_args}"/>
+                    <exec executable="ctest" failonerror="true" dir="${project.build.directory}/">
                       <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
                       <!-- Make sure libhadoop.so is on LD_LIBRARY_PATH. -->
-                      <env key="LD_LIBRARY_PATH" value="${env.LD_LIBRARY_PATH}:${project.build.directory}/altern/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib"/>
+                      <env key="LD_LIBRARY_PATH" value="${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib"/>
                     </exec>
                   </target>
                 </configuration>
               </execution>
-              <execution>
-                <id>clean_altern</id>
-                <phase>test</phase>
-                <goals><goal>run</goal></goals>
-                <configuration>
-                  <target>
-                    <delete dir="${project.build.directory}/altern" includeemptydirs="true"/>
-                  </target>
-                </configuration>
-              </execution>
             </executions>
           </plugin>
         </plugins>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
index a3f8f2d..aa690e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
@@ -58,11 +58,19 @@ if(WIN32)
     # Omit unneeded headers.
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DWIN32_LEAN_AND_MEAN")
     set(OS_DIR ${CMAKE_SOURCE_DIR}/main/native/libhdfs/os/windows)
-    set(OUT_DIR target/bin)
+
+    # IMPORTANT: OUT_DIR MUST be relative to maven's
+    # project.build.directory (=target) and match dist-copynativelibs
+    # in order to be in a release
+    set(OUT_DIR bin)
 else()
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden")
     set(OS_DIR ${CMAKE_SOURCE_DIR}/main/native/libhdfs/os/posix)
-    set(OUT_DIR target/usr/local/lib)
+
+    # IMPORTANT: OUT_DIR MUST be relative to maven's
+    # project.build.directory (=target) and match dist-copynativelibs
+    # in order to be in a release
+    set(OUT_DIR native/target/usr/local/lib)
 endif()
 
 # Configure JNI.
@@ -90,11 +98,6 @@ endfunction()
 
 add_subdirectory(main/native/libhdfs)
 add_subdirectory(main/native/libhdfs-tests)
-add_subdirectory(main/native/libhdfspp)
-
-if(REQUIRE_LIBWEBHDFS)
-    add_subdirectory(contrib/libwebhdfs)
-endif()
 
 # Find Linux FUSE
 if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
index 6938109..b36ef76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
@@ -182,16 +182,6 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
         }
         (*env)->DeleteLocalRef(env, val.l);
     }
-    if (conf->numDataNodes) {
-        jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
-                "numDataNodes", "(I)L" MINIDFS_CLUSTER_BUILDER ";", conf->numDataNodes);
-        if (jthr) {
-            printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: "
-                                  "Builder::numDataNodes");
-            goto error;
-        }
-    }
-    (*env)->DeleteLocalRef(env, val.l);
     jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
             "build", "()L" MINIDFS_CLUSTER ";");
     if (jthr) {
@@ -301,7 +291,7 @@ int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
     jthrowable jthr;
     int ret = 0;
     const char *host;
-
+    
     if (!env) {
         fprintf(stderr, "nmdHdfsConnect: getJNIEnv failed\n");
         return -EIO;
@@ -316,7 +306,7 @@ int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
         return -EIO;
     }
     jNameNode = jVal.l;
-
+    
     // Then get the http address (InetSocketAddress) of the NameNode
     jthr = invokeMethod(env, &jVal, INSTANCE, jNameNode, HADOOP_NAMENODE,
                         "getHttpAddress", "()L" JAVA_INETSOCKETADDRESS ";");
@@ -327,7 +317,7 @@ int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
         goto error_dlr_nn;
     }
     jAddress = jVal.l;
-
+    
     jthr = invokeMethod(env, &jVal, INSTANCE, jAddress,
                         JAVA_INETSOCKETADDRESS, "getPort", "()I");
     if (jthr) {
@@ -337,7 +327,7 @@ int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
         goto error_dlr_addr;
     }
     *port = jVal.i;
-
+    
     jthr = invokeMethod(env, &jVal, INSTANCE, jAddress, JAVA_INETSOCKETADDRESS,
                         "getHostName", "()Ljava/lang/String;");
     if (jthr) {
@@ -349,12 +339,12 @@ int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
     host = (*env)->GetStringUTFChars(env, jVal.l, NULL);
     *hostName = strdup(host);
     (*env)->ReleaseStringUTFChars(env, jVal.l, host);
-
+    
 error_dlr_addr:
     (*env)->DeleteLocalRef(env, jAddress);
 error_dlr_nn:
     (*env)->DeleteLocalRef(env, jNameNode);
-
+    
     return ret;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.h
index 628180f..ce8b1cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.h
@@ -26,7 +26,7 @@ extern  "C" {
 #endif
 
 struct hdfsBuilder;
-struct NativeMiniDfsCluster;
+struct NativeMiniDfsCluster; 
 
 /**
  * Represents a configuration to use for creating a Native MiniDFSCluster
@@ -51,11 +51,6 @@ struct NativeMiniDfsConf {
      * Nonzero if we should configure short circuit.
      */
     jboolean configureShortCircuit;
-
-    /**
-     * The number of datanodes in MiniDfsCluster
-     */
-    jint numDataNodes;
 };
 
 /**
@@ -101,13 +96,13 @@ void nmdFree(struct NativeMiniDfsCluster* cl);
  *
  * @return          the port, or a negative error code
  */
-int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl);
+int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl); 
 
 /**
  * Get the http address that's in use by the given (non-HA) nativeMiniDfs
  *
  * @param cl        The initialized NativeMiniDfsCluster
- * @param port      Used to capture the http port of the NameNode
+ * @param port      Used to capture the http port of the NameNode 
  *                  of the NativeMiniDfsCluster
  * @param hostName  Used to capture the http hostname of the NameNode
  *                  of the NativeMiniDfsCluster

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c
deleted file mode 100644
index dca4782..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "expect.h"
-#include "hdfs/hdfs.h"
-#include "hdfspp/hdfs_ext.h"
-#include "native_mini_dfs.h"
-#include "os/thread.h"
-
-#include <errno.h>
-#include <inttypes.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#define TO_STR_HELPER(X) #X
-#define TO_STR(X) TO_STR_HELPER(X)
-
-#define TLH_MAX_THREADS 10000
-
-#define TLH_MAX_DNS 16
-
-#define TLH_DEFAULT_BLOCK_SIZE 1048576
-
-#define TLH_DEFAULT_DFS_REPLICATION 3
-
-#define TLH_DEFAULT_IPC_CLIENT_CONNECT_MAX_RETRIES 100
-
-#define TLH_DEFAULT_IPC_CLIENT_CONNECT_RETRY_INTERVAL_MS 5
-
-#ifndef RANDOM_ERROR_RATIO
-#define RANDOM_ERROR_RATIO 1000000000
-#endif
-
-struct tlhThreadInfo {
-  /** Thread index */
-  int threadIdx;
-  /** 0 = thread was successful; error code otherwise */
-  int success;
-  /** thread identifier */
-  thread theThread;
-  /** fs, shared with other threads **/
-  hdfsFS hdfs;
-  /** Filename */
-  const char *fileNm;
-
-};
-
-static int hdfsNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs,
-                               const char *username)
-{
-  int ret;
-  tPort port;
-  hdfsFS hdfs;
-  struct hdfsBuilder *bld;
-
-  port = (tPort)nmdGetNameNodePort(cl);
-  if (port < 0) {
-    fprintf(stderr, "hdfsNameNodeConnect: nmdGetNameNodePort "
-            "returned error %d\n", port);
-    return port;
-  }
-  bld = hdfsNewBuilder();
-  if (!bld)
-    return -ENOMEM;
-  hdfsBuilderSetForceNewInstance(bld);
-  hdfsBuilderSetNameNode(bld, "localhost");
-  hdfsBuilderSetNameNodePort(bld, port);
-  hdfsBuilderConfSetStr(bld, "dfs.block.size",
-                        TO_STR(TLH_DEFAULT_BLOCK_SIZE));
-  hdfsBuilderConfSetStr(bld, "dfs.blocksize",
-                        TO_STR(TLH_DEFAULT_BLOCK_SIZE));
-  hdfsBuilderConfSetStr(bld, "dfs.replication",
-                        TO_STR(TLH_DEFAULT_DFS_REPLICATION));
-  hdfsBuilderConfSetStr(bld, "ipc.client.connect.max.retries",
-                        TO_STR(TLH_DEFAULT_IPC_CLIENT_CONNECT_MAX_RETRIES));
-  hdfsBuilderConfSetStr(bld, "ipc.client.connect.retry.interval",
-                        TO_STR(TLH_DEFAULT_IPC_CLIENT_CONNECT_RETRY_INTERVAL_MS));
-  if (username) {
-    hdfsBuilderSetUserName(bld, username);
-  }
-  hdfs = hdfsBuilderConnect(bld);
-  if (!hdfs) {
-    ret = -errno;
-    return ret;
-  }
-  *fs = hdfs;
-  return 0;
-}
-
-static int hdfsWriteData(hdfsFS hdfs, const char *dirNm,
-                         const char *fileNm, tSize fileSz)
-{
-  hdfsFile file;
-  int ret, expected;
-  const char *content;
-
-  content = fileNm;
-
-  if (hdfsExists(hdfs, dirNm) == 0) {
-    EXPECT_ZERO(hdfsDelete(hdfs, dirNm, 1));
-  }
-  EXPECT_ZERO(hdfsCreateDirectory(hdfs, dirNm));
-
-  file = hdfsOpenFile(hdfs, fileNm, O_WRONLY, 0, 0, 0);
-  EXPECT_NONNULL(file);
-
-  expected = (int)strlen(content);
-  tSize sz = 0;
-  while (sz < fileSz) {
-    ret = hdfsWrite(hdfs, file, content, expected);
-    if (ret < 0) {
-      ret = errno;
-      fprintf(stderr, "hdfsWrite failed and set errno %d\n", ret);
-      return ret;
-    }
-    if (ret != expected) {
-      fprintf(stderr, "hdfsWrite was supposed to write %d bytes, but "
-              "it wrote %d\n", ret, expected);
-      return EIO;
-    }
-    sz += ret;
-  }
-  EXPECT_ZERO(hdfsFlush(hdfs, file));
-  EXPECT_ZERO(hdfsHSync(hdfs, file));
-  EXPECT_ZERO(hdfsCloseFile(hdfs, file));
-  return 0;
-}
-
-static int fileEventCallback1(const char * event, const char * cluster, const char * file, int64_t value, int64_t cookie)
-{
-  char * randomErrRatioStr = getenv("RANDOM_ERROR_RATIO");
-  int64_t randomErrRatio = RANDOM_ERROR_RATIO;
-  if (randomErrRatioStr) randomErrRatio = (int64_t)atoi(randomErrRatioStr);
-  if (randomErrRatio == 0) return DEBUG_SIMULATE_ERROR;
-  else if (randomErrRatio < 0) return LIBHDFSPP_EVENT_OK;
-  return random() % randomErrRatio == 0 ? DEBUG_SIMULATE_ERROR : LIBHDFSPP_EVENT_OK;
-}
-
-static int fileEventCallback2(const char * event, const char * cluster, const char * file, int64_t value, int64_t cookie)
-{
-  /* no op */
-  return LIBHDFSPP_EVENT_OK;
-}
-
-static int doTestHdfsMiniStress(struct tlhThreadInfo *ti, int randomErr)
-{
-  char tmp[4096];
-  hdfsFile file;
-  int ret, expected;
-  hdfsFileInfo *fileInfo;
-  uint64_t readOps, nErrs=0;
-  tOffset seekPos;
-  const char *content;
-
-  content = ti->fileNm;
-  expected = (int)strlen(content);
-
-  fileInfo = hdfsGetPathInfo(ti->hdfs, ti->fileNm);
-  EXPECT_NONNULL(fileInfo);
-
-  file = hdfsOpenFile(ti->hdfs, ti->fileNm, O_RDONLY, 0, 0, 0);
-  EXPECT_NONNULL(file);
-
-  libhdfspp_file_event_callback callback = (randomErr != 0) ? &fileEventCallback1 : &fileEventCallback2;
-
-  hdfsPreAttachFileMonitor(callback, 0);
-
-  fprintf(stderr, "testHdfsMiniStress(threadIdx=%d): starting read loop\n",
-          ti->threadIdx);
-  for (readOps=0; readOps < 1000; ++readOps) {
-    EXPECT_ZERO(hdfsCloseFile(ti->hdfs, file));
-    file = hdfsOpenFile(ti->hdfs, ti->fileNm, O_RDONLY, 0, 0, 0);
-    EXPECT_NONNULL(file);
-    seekPos = (((double)random()) / RAND_MAX) * (fileInfo->mSize - expected);
-    seekPos = (seekPos / expected) * expected;
-    ret = hdfsSeek(ti->hdfs, file, seekPos);
-    if (ret < 0) {
-      ret = errno;
-      fprintf(stderr, "hdfsSeek to %"PRIu64" failed and set"
-              " errno %d\n", seekPos, ret);
-      ++nErrs;
-      continue;
-    }
-    ret = hdfsRead(ti->hdfs, file, tmp, expected);
-    if (ret < 0) {
-      ret = errno;
-      fprintf(stderr, "hdfsRead failed and set errno %d\n", ret);
-      ++nErrs;
-      continue;
-    }
-    if (ret != expected) {
-      fprintf(stderr, "hdfsRead was supposed to read %d bytes, but "
-              "it read %d\n", ret, expected);
-      ++nErrs;
-      continue;
-    }
-    ret = memcmp(content, tmp, expected);
-    if (ret) {
-      fprintf(stderr, "hdfsRead result (%.*s) does not match expected (%.*s)",
-              expected, tmp, expected, content);
-      ++nErrs;
-      continue;
-    }
-  }
-  EXPECT_ZERO(hdfsCloseFile(ti->hdfs, file));
-  fprintf(stderr, "testHdfsMiniStress(threadIdx=%d): finished read loop\n",
-          ti->threadIdx);
-  EXPECT_ZERO(nErrs);
-  return 0;
-}
-
-static int testHdfsMiniStressImpl(struct tlhThreadInfo *ti)
-{
-  fprintf(stderr, "testHdfsMiniStress(threadIdx=%d): starting\n",
-          ti->threadIdx);
-  EXPECT_NONNULL(ti->hdfs);
-  // Error injection on, some failures are expected in the read path.
-  // The expectation is that any memory stomps will cascade and cause
-  // the following test to fail.  Ideally RPC errors would be seperated
-  // from BlockReader errors (RPC is expected to recover from disconnects).
-  doTestHdfsMiniStress(ti, 1);
-  // No error injection
-  EXPECT_ZERO(doTestHdfsMiniStress(ti, 0));
-  return 0;
-}
-
-static void testHdfsMiniStress(void *v)
-{
-  struct tlhThreadInfo *ti = (struct tlhThreadInfo*)v;
-  int ret = testHdfsMiniStressImpl(ti);
-  ti->success = ret;
-}
-
-static int checkFailures(struct tlhThreadInfo *ti, int tlhNumThreads)
-{
-  int i, threadsFailed = 0;
-  const char *sep = "";
-
-  for (i = 0; i < tlhNumThreads; i++) {
-    if (ti[i].success != 0) {
-      threadsFailed = 1;
-    }
-  }
-  if (!threadsFailed) {
-    fprintf(stderr, "testLibHdfsMiniStress: all threads succeeded.  SUCCESS.\n");
-    return EXIT_SUCCESS;
-  }
-  fprintf(stderr, "testLibHdfsMiniStress: some threads failed: [");
-  for (i = 0; i < tlhNumThreads; i++) {
-    if (ti[i].success != 0) {
-      fprintf(stderr, "%s%d", sep, i);
-      sep = ", ";
-    }
-  }
-  fprintf(stderr, "].  FAILURE.\n");
-  return EXIT_FAILURE;
-}
-
-/**
- * Test intended to stress libhdfs client with concurrent requests. Currently focused
- * on concurrent reads.
- */
-int main(void)
-{
-  int i, tlhNumThreads;
-  char *dirNm, *fileNm;
-  tSize fileSz;
-  const char *tlhNumThreadsStr, *tlhNumDNsStr;
-  hdfsFS hdfs = NULL;
-  struct NativeMiniDfsCluster* tlhCluster;
-  struct tlhThreadInfo ti[TLH_MAX_THREADS];
-  struct NativeMiniDfsConf conf = {
-      1, /* doFormat */
-  };
-
-  dirNm = "/tlhMiniStressData";
-  fileNm = "/tlhMiniStressData/file";
-  fileSz = 2*1024*1024;
-
-  tlhNumDNsStr = getenv("TLH_NUM_DNS");
-  if (!tlhNumDNsStr) {
-    tlhNumDNsStr = "1";
-  }
-  conf.numDataNodes = atoi(tlhNumDNsStr);
-  if ((conf.numDataNodes <= 0) || (conf.numDataNodes > TLH_MAX_DNS)) {
-    fprintf(stderr, "testLibHdfsMiniStress: must have a number of datanodes "
-            "between 1 and %d inclusive, not %d\n",
-            TLH_MAX_DNS, conf.numDataNodes);
-    return EXIT_FAILURE;
-  }
-
-  tlhNumThreadsStr = getenv("TLH_NUM_THREADS");
-  if (!tlhNumThreadsStr) {
-    tlhNumThreadsStr = "8";
-  }
-  tlhNumThreads = atoi(tlhNumThreadsStr);
-  if ((tlhNumThreads <= 0) || (tlhNumThreads > TLH_MAX_THREADS)) {
-    fprintf(stderr, "testLibHdfsMiniStress: must have a number of threads "
-            "between 1 and %d inclusive, not %d\n",
-            TLH_MAX_THREADS, tlhNumThreads);
-    return EXIT_FAILURE;
-  }
-  memset(&ti[0], 0, sizeof(ti));
-  for (i = 0; i < tlhNumThreads; i++) {
-    ti[i].threadIdx = i;
-  }
-
-  tlhCluster = nmdCreate(&conf);
-  EXPECT_NONNULL(tlhCluster);
-  EXPECT_ZERO(nmdWaitClusterUp(tlhCluster));
-
-  EXPECT_ZERO(hdfsNameNodeConnect(tlhCluster, &hdfs, NULL));
-
-  // Single threaded writes for now.
-  EXPECT_ZERO(hdfsWriteData(hdfs, dirNm, fileNm, fileSz));
-
-  // Multi-threaded reads.
-  for (i = 0; i < tlhNumThreads; i++) {
-    ti[i].theThread.start = testHdfsMiniStress;
-    ti[i].theThread.arg = &ti[i];
-    ti[i].hdfs = hdfs;
-    ti[i].fileNm = fileNm;
-    EXPECT_ZERO(threadCreate(&ti[i].theThread));
-  }
-  for (i = 0; i < tlhNumThreads; i++) {
-    EXPECT_ZERO(threadJoin(&ti[i].theThread));
-  }
-
-  EXPECT_ZERO(hdfsDisconnect(hdfs));
-  EXPECT_ZERO(nmdShutdown(tlhCluster));
-  nmdFree(tlhCluster);
-  return checkFailures(ti, tlhNumThreads);
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_threaded.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_threaded.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_threaded.c
index 343e05a..8d4b743 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_threaded.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_threaded.c
@@ -30,7 +30,6 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <limits.h>
 
 #define TO_STR_HELPER(X) #X
 #define TO_STR(X) TO_STR_HELPER(X)
@@ -57,7 +56,7 @@ static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs
     tPort port;
     hdfsFS hdfs;
     struct hdfsBuilder *bld;
-
+    
     port = (tPort)nmdGetNameNodePort(cl);
     if (port < 0) {
         fprintf(stderr, "hdfsSingleNameNodeConnect: nmdGetNameNodePort "
@@ -93,12 +92,13 @@ static int doTestGetDefaultBlockSize(hdfsFS fs, const char *path)
 
     blockSize = hdfsGetDefaultBlockSize(fs);
     if (blockSize < 0) {
-        fprintf(stderr, "hdfsGetDefaultBlockSize failed with error %d\n", errno);
-        return -1;
+        ret = errno;
+        fprintf(stderr, "hdfsGetDefaultBlockSize failed with error %d\n", ret);
+        return ret;
     } else if (blockSize != TLH_DEFAULT_BLOCK_SIZE) {
         fprintf(stderr, "hdfsGetDefaultBlockSize got %"PRId64", but we "
                 "expected %d\n", blockSize, TLH_DEFAULT_BLOCK_SIZE);
-        return -1;
+        return EIO;
     }
 
     blockSize = hdfsGetDefaultBlockSizeAtPath(fs, path);
@@ -109,7 +109,7 @@ static int doTestGetDefaultBlockSize(hdfsFS fs, const char *path)
         return ret;
     } else if (blockSize != TLH_DEFAULT_BLOCK_SIZE) {
         fprintf(stderr, "hdfsGetDefaultBlockSizeAtPath(%s) got "
-                "%"PRId64", but we expected %d\n",
+                "%"PRId64", but we expected %d\n", 
                 path, blockSize, TLH_DEFAULT_BLOCK_SIZE);
         return EIO;
     }
@@ -157,19 +157,12 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
 
     EXPECT_ZERO(doTestGetDefaultBlockSize(fs, paths->prefix));
 
-    /* There is no such directory.
-     * Check that errno is set to ENOENT
-     */
-    char invalid_path[] = "/some_invalid/path";
-    EXPECT_NULL_WITH_ERRNO(hdfsListDirectory(fs, invalid_path, &numEntries), ENOENT);
-
     /* There should be no entry in the directory. */
     errno = EACCES; // see if errno is set to 0 on success
     EXPECT_NULL_WITH_ERRNO(hdfsListDirectory(fs, paths->prefix, &numEntries), 0);
     if (numEntries != 0) {
         fprintf(stderr, "hdfsListDirectory set numEntries to "
                 "%d on empty directory.", numEntries);
-        return EIO;
     }
 
     /* There should not be any file to open for reading. */
@@ -197,45 +190,19 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
     }
     if (ret != expected) {
         fprintf(stderr, "hdfsWrite was supposed to write %d bytes, but "
-                "it wrote %d\n", expected, ret);
+                "it wrote %d\n", ret, expected);
         return EIO;
     }
     EXPECT_ZERO(hdfsFlush(fs, file));
     EXPECT_ZERO(hdfsHSync(fs, file));
     EXPECT_ZERO(hdfsCloseFile(fs, file));
 
-    EXPECT_ZERO(doTestGetDefaultBlockSize(fs, paths->file1));
-
     /* There should be 1 entry in the directory. */
-    hdfsFileInfo * dirList = hdfsListDirectory(fs, paths->prefix, &numEntries);
-    EXPECT_NONNULL(dirList);
+    EXPECT_NONNULL(hdfsListDirectory(fs, paths->prefix, &numEntries));
     if (numEntries != 1) {
         fprintf(stderr, "hdfsListDirectory set numEntries to "
                 "%d on directory containing 1 file.", numEntries);
     }
-    hdfsFreeFileInfo(dirList, numEntries);
-
-    /* Create many files for ListDirectory to page through */
-    char listDirTest[PATH_MAX];
-    strcpy(listDirTest, paths->prefix);
-    strcat(listDirTest, "/for_list_test/");
-    EXPECT_ZERO(hdfsCreateDirectory(fs, listDirTest));
-    int nFile;
-    for (nFile = 0; nFile < 10000; nFile++) {
-      char filename[PATH_MAX];
-      snprintf(filename, PATH_MAX, "%s/many_files_%d", listDirTest, nFile);
-      file = hdfsOpenFile(fs, filename, O_WRONLY, 0, 0, 0);
-      EXPECT_NONNULL(file);
-      EXPECT_ZERO(hdfsCloseFile(fs, file));
-    }
-    dirList = hdfsListDirectory(fs, listDirTest, &numEntries);
-    EXPECT_NONNULL(dirList);
-    hdfsFreeFileInfo(dirList, numEntries);
-    if (numEntries != 10000) {
-        fprintf(stderr, "hdfsListDirectory set numEntries to "
-                "%d on directory containing 10000 files.", numEntries);
-        return EIO;
-    }
 
     /* Let's re-open the file for reading */
     file = hdfsOpenFile(fs, paths->file1, O_RDONLY, 0, 0, 0);
@@ -279,8 +246,8 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
     EXPECT_ZERO(memcmp(paths->prefix, tmp, expected));
     EXPECT_ZERO(hdfsCloseFile(fs, file));
 
-    //Non-recursive delete fails
-    EXPECT_NONZERO(hdfsDelete(fs, paths->prefix, 0));
+    // TODO: Non-recursive delete should fail?
+    //EXPECT_NONZERO(hdfsDelete(fs, prefix, 0));
     EXPECT_ZERO(hdfsCopy(fs, paths->file1, fs, paths->file2));
 
     EXPECT_ZERO(hdfsChown(fs, paths->file2, NULL, NULL));
@@ -307,17 +274,6 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
 
     snprintf(tmp, sizeof(tmp), "%s/nonexistent-file-name", paths->prefix);
     EXPECT_NEGATIVE_ONE_WITH_ERRNO(hdfsChown(fs, tmp, "ha3", NULL), ENOENT);
-
-    //Test case: File does not exist
-    EXPECT_NULL_WITH_ERRNO(hdfsGetPathInfo(fs, invalid_path), ENOENT);
-
-    //Test case: No permission to access parent directory
-    EXPECT_ZERO(hdfsChmod(fs, paths->prefix, 0));
-    //reconnect as user "SomeGuy" and verify that we get permission errors
-    hdfsFS fs2 = NULL;
-    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs2, "SomeGuy"));
-    EXPECT_NULL_WITH_ERRNO(hdfsGetPathInfo(fs2, paths->file2), EACCES);
-    EXPECT_ZERO(hdfsDisconnect(fs2));
     return 0;
 }
 
@@ -329,8 +285,6 @@ static int testHdfsOperationsImpl(struct tlhThreadInfo *ti)
     fprintf(stderr, "testHdfsOperations(threadIdx=%d): starting\n",
         ti->threadIdx);
     EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, NULL));
-    if (!fs)
-        return 1;
     EXPECT_ZERO(setupPaths(ti, &paths));
     // test some operations
     EXPECT_ZERO(doTestHdfsOperations(ti, fs, &paths));
@@ -341,8 +295,6 @@ static int testHdfsOperationsImpl(struct tlhThreadInfo *ti)
     EXPECT_ZERO(hdfsDisconnect(fs));
     // reconnect to do the final delete.
     EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, NULL));
-    if (!fs)
-        return 1;
     EXPECT_ZERO(hdfsDelete(fs, paths.prefix, 1));
     EXPECT_ZERO(hdfsDisconnect(fs));
     return 0;
@@ -373,7 +325,7 @@ static int checkFailures(struct tlhThreadInfo *ti, int tlhNumThreads)
     for (i = 0; i < tlhNumThreads; i++) {
         if (ti[i].success != 0) {
             fprintf(stderr, "%s%d", sep, i);
-            sep = ", ";
+            sep = ", "; 
         }
     }
     fprintf(stderr, "].  FAILURE.\n");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMake/FindCyrusSASL.cmake
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMake/FindCyrusSASL.cmake b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMake/FindCyrusSASL.cmake
deleted file mode 100644
index 8ce027f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMake/FindCyrusSASL.cmake
+++ /dev/null
@@ -1,49 +0,0 @@
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# - Find Cyrus SASL (sasl.h, libsasl2.so)
-#
-# This module defines
-#  CYRUS_SASL_INCLUDE_DIR, directory containing headers
-#  CYRUS_SASL_SHARED_LIB, path to Cyrus SASL's shared library
-#  CYRUS_SASL_FOUND, whether Cyrus SASL and its plugins have been found
-#
-# N.B: we do _not_ include sasl in thirdparty, for a fairly subtle reason. The
-# TLDR version is that newer versions of cyrus-sasl (>=2.1.26) have a bug fix
-# for https://bugzilla.cyrusimap.org/show_bug.cgi?id=3590, but that bug fix
-# relied on a change both on the plugin side and on the library side. If you
-# then try to run the new version of sasl (e.g from our thirdparty tree) with
-# an older version of a plugin (eg from RHEL6 install), you'll get a SASL_NOMECH
-# error due to this bug.
-#
-# In practice, Cyrus-SASL is so commonly used and generally non-ABI-breaking that
-# we should be OK to depend on the host installation.
-
-# Note that this is modified from the version that was copied from our
-# friends at the Kudu project.  The original version implicitly required
-# the Cyrus SASL.  This version will only complain if REQUIRED is added.
-
-
-find_path(CYRUS_SASL_INCLUDE_DIR sasl/sasl.h)
-find_library(CYRUS_SASL_SHARED_LIB sasl2)
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(CYRUS_SASL DEFAULT_MSG
-  CYRUS_SASL_SHARED_LIB CYRUS_SASL_INCLUDE_DIR)
-
-MARK_AS_ADVANCED(CYRUS_SASL_INCLUDE_DIR CYRUS_SASL_SHARED_LIB)
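
For reference, a minimal, hypothetical consumer of the module removed above; the project, target, and source names are placeholders, while the variable names are exactly the ones the module documents:

``` cmake
# Sketch only: picking up Cyrus SASL through FindCyrusSASL.cmake.
cmake_minimum_required(VERSION 2.8)
project(sasl_consumer CXX)

# Assume the module lives in a CMake/ subdirectory, as it did in libhdfspp.
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake")

find_package(CyrusSASL)            # non-fatal if absent; add REQUIRED to make it fatal

add_executable(sasl_demo main.cc)  # main.cc is a placeholder source file

if(CYRUS_SASL_FOUND)
  include_directories(${CYRUS_SASL_INCLUDE_DIR})
  target_link_libraries(sasl_demo ${CYRUS_SASL_SHARED_LIB})
endif()
```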

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMake/FindGSasl.cmake
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMake/FindGSasl.cmake
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMake/FindGSasl.cmake
deleted file mode 100644
index 57588ad..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMake/FindGSasl.cmake
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# - Try to find the GNU sasl library (gsasl)
-#
-# Once done this will define
-#
-#  GSASL_FOUND - System has gnutls
-#  GSASL_INCLUDE_DIR - The gnutls include directory
-#  GSASL_LIBRARIES - The libraries needed to use gnutls
-#  GSASL_DEFINITIONS - Compiler switches required for using gnutls
-
-
-IF (GSASL_INCLUDE_DIR AND GSASL_LIBRARIES)
-  # in cache already
-  SET(GSasl_FIND_QUIETLY TRUE)
-ENDIF (GSASL_INCLUDE_DIR AND GSASL_LIBRARIES)
-
-FIND_PATH(GSASL_INCLUDE_DIR gsasl.h)
-
-FIND_LIBRARY(GSASL_LIBRARIES gsasl)
-
-INCLUDE(FindPackageHandleStandardArgs)
-
-# handle the QUIETLY and REQUIRED arguments and set GSASL_FOUND to TRUE if
-# all listed variables are TRUE
-FIND_PACKAGE_HANDLE_STANDARD_ARGS(GSASL DEFAULT_MSG GSASL_LIBRARIES GSASL_INCLUDE_DIR)
-
-MARK_AS_ADVANCED(GSASL_INCLUDE_DIR GSASL_LIBRARIES)
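
As with the Cyrus module, this finder is consumed through find_package(GSasl); the libhdfspp CMakeLists.txt removed below also notes that either probe can be switched off with CMAKE_DISABLE_FIND_PACKAGE_&lt;Name&gt;. A sketch of forcing the GSASL path (project and module-path details are illustrative):

``` cmake
# Sketch: force the GSASL code path by skipping the Cyrus SASL probe.
# CMAKE_DISABLE_FIND_PACKAGE_<Name> is a standard CMake switch; names are case sensitive.
cmake_minimum_required(VERSION 2.8)
project(gsasl_consumer CXX)
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake")

set(CMAKE_DISABLE_FIND_PACKAGE_CyrusSASL TRUE)

find_package(CyrusSASL)   # skipped entirely because of the variable above
find_package(GSasl)       # defines GSASL_FOUND, GSASL_INCLUDE_DIR, GSASL_LIBRARIES

if(GSASL_FOUND)
  include_directories(${GSASL_INCLUDE_DIR})
  add_definitions(-DUSE_SASL -DUSE_GSASL)   # same flags the removed CMakeLists.txt sets
endif()
```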

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
deleted file mode 100644
index 94b1b56..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
+++ /dev/null
@@ -1,297 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# If cmake variable HDFSPP_LIBRARY_ONLY is set, then tests, examples, and
-# tools will not be built. This allows for faster builds of the libhdfspp
-# library alone, avoids looking for a JDK, valgrind, and gmock, and
-# prevents the generation of multiple binaries that might not be relevant
-# to other projects during normal use.
-# Example of cmake invocation with HDFSPP_LIBRARY_ONLY enabled:
-# cmake -DHDFSPP_LIBRARY_ONLY=1
-
-project (libhdfspp)
-
-cmake_minimum_required(VERSION 2.8)
-
-enable_testing()
-include (CTest)
-
-SET(BUILD_SHARED_HDFSPP TRUE CACHE STRING "BUILD_SHARED_HDFSPP defaulting to 'TRUE'")
-SET(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake" ${CMAKE_MODULE_PATH})
-
-# If there's a better way to inform FindCyrusSASL.cmake, let's make this cleaner:
-SET(CMAKE_PREFIX_PATH "${CMAKE_PREFIX_PATH};${CYRUS_SASL_DIR};${GSASL_DIR};$ENV{PROTOBUF_HOME}")
-
-# Specify PROTOBUF_HOME so that find_package picks up the correct version
-SET(CMAKE_PREFIX_PATH "${CMAKE_PREFIX_PATH};$ENV{PROTOBUF_HOME}")
-
-find_package(Doxygen)
-find_package(OpenSSL REQUIRED)
-find_package(Protobuf REQUIRED)
-find_package(CyrusSASL)
-find_package(GSasl)
-find_package(Threads)
-
-include(CheckCXXSourceCompiles)
-
-# Check if thread_local is supported
-unset (THREAD_LOCAL_SUPPORTED CACHE)
-set (CMAKE_REQUIRED_DEFINITIONS "-std=c++11")
-set (CMAKE_REQUIRED_LIBRARIES ${CMAKE_THREAD_LIBS_INIT})
-check_cxx_source_compiles(
-    "#include <thread>
-    int main(void) {
-      thread_local int s;
-      return 0;
-    }"
-    THREAD_LOCAL_SUPPORTED)
-if (NOT THREAD_LOCAL_SUPPORTED)
-  message(FATAL_ERROR
-  "FATAL ERROR: The required feature thread_local storage is not supported by your compiler. \
-  Known compilers that support this feature: GCC, Visual Studio, Clang (community version), \
-  Clang (version for iOS 9 and later).")
-endif (NOT THREAD_LOCAL_SUPPORTED)
-
-# Check if PROTOC library was compiled with the compatible compiler by trying
-# to compile some dummy code
-unset (PROTOC_IS_COMPATIBLE CACHE)
-set (CMAKE_REQUIRED_INCLUDES ${PROTOBUF_INCLUDE_DIRS})
-set (CMAKE_REQUIRED_LIBRARIES ${PROTOBUF_LIBRARY} ${PROTOBUF_PROTOC_LIBRARY})
-check_cxx_source_compiles(
-    "#include <google/protobuf/io/printer.h>
-    #include <string>
-    int main(void) {
-      ::google::protobuf::io::ZeroCopyOutputStream *out = NULL;
-      ::google::protobuf::io::Printer printer(out, '$');
-      printer.PrintRaw(std::string(\"test\"));
-      return 0;
-    }"
-    PROTOC_IS_COMPATIBLE)
-if (NOT PROTOC_IS_COMPATIBLE)
-  message(WARNING
-  "WARNING: the Protocol Buffers Library and the Libhdfs++ Library must both be compiled \
-  with the same (or compatible) compiler. Normally only the same major versions of the same \
-  compiler are compatible with each other.")
-endif (NOT PROTOC_IS_COMPATIBLE)
-
-find_program(MEMORYCHECK_COMMAND valgrind HINTS ${VALGRIND_DIR} )
-set(MEMORYCHECK_COMMAND_OPTIONS "--trace-children=yes --leak-check=full --error-exitcode=1")
-message(STATUS "valgrind location: ${MEMORYCHECK_COMMAND}")
-
-if (REQUIRE_VALGRIND AND MEMORYCHECK_COMMAND MATCHES "MEMORYCHECK_COMMAND-NOTFOUND" )
-  message(FATAL_ERROR "valgrind was required but not found.  "
-                      "The path can be included via a -DVALGRIND_DIR=... flag passed to CMake.")
-endif (REQUIRE_VALGRIND AND MEMORYCHECK_COMMAND MATCHES "MEMORYCHECK_COMMAND-NOTFOUND" )
-
-# Find the SASL library to use.  If you don't want to require a sasl library,
-#    define -DNO_SASL=1 in your cmake call
-# Prefer Cyrus SASL, but use GSASL if it is found
-# Note that the packages can be disabled by setting CMAKE_DISABLE_FIND_PACKAGE_GSasl or
-#    CMAKE_DISABLE_FIND_PACKAGE_CyrusSASL, respectively (case sensitive)
-set (SASL_LIBRARIES)
-set (SASL_INCLUDE_DIR)
-if (NOT NO_SASL)
-    if (CYRUS_SASL_FOUND)
-        message(STATUS "Using Cyrus SASL; link with ${CYRUS_SASL_SHARED_LIB}")
-        set (SASL_INCLUDE_DIR ${CYRUS_SASL_INCLUDE_DIR})
-        set (SASL_LIBRARIES ${CYRUS_SASL_SHARED_LIB})
-        set (CMAKE_USING_CYRUS_SASL 1)
-        add_definitions(-DUSE_SASL -DUSE_CYRUS_SASL)
-    else (CYRUS_SASL_FOUND)
-        if (REQUIRE_CYRUS_SASL)
-          message(FATAL_ERROR "Cyrus SASL was required but not found.  "
-                                "The path can be included via a -DCYRUS_SASL_DIR=... flag passed to CMake.")
-        endif (REQUIRE_CYRUS_SASL)
-
-        # If we didn't pick Cyrus, use GSASL instead
-        if (GSASL_FOUND)
-          message(STATUS "Using GSASL; link with ${GSASL_LIBRARIES}")
-          set (SASL_INCLUDE_DIR ${GSASL_INCLUDE_DIR})
-          set (SASL_LIBRARIES ${GSASL_LIBRARIES})
-          set (CMAKE_USING_GSASL 1)
-          add_definitions(-DUSE_SASL -DUSE_GSASL)
-        else (GSASL_FOUND)
-          if (REQUIRE_GSASL)
-            message(FATAL_ERROR "GSASL was required but not found.  "
-                                "The path can be included via a -DGSASL_DIR=... flag passed to CMake.")
-          endif (REQUIRE_GSASL)
-
-          # No SASL was found, but NO_SASL was not defined
-          message(FATAL_ERROR "Could not find a SASL library (GSASL (gsasl) or Cyrus SASL (libsasl2)).  "
-                            "Install/configure one of them or define NO_SASL=1 in your cmake call")
-        endif (GSASL_FOUND)
-    endif (CYRUS_SASL_FOUND)
-else (NOT NO_SASL)
-    message(STATUS "Compiling with NO SASL SUPPORT")
-endif (NOT NO_SASL)
-
-add_definitions(-DASIO_STANDALONE -DASIO_CPP11_DATE_TIME)
-
-# Disable optimizations if compiling debug
-set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0")
-set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0")
-
-if(UNIX)
-set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -pedantic -std=c++11 -g -fPIC -fno-strict-aliasing")
-set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -fPIC -fno-strict-aliasing")
-endif()
-
-if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
-    add_definitions(-DASIO_HAS_STD_ADDRESSOF -DASIO_HAS_STD_ARRAY -DASIO_HAS_STD_ATOMIC -DASIO_HAS_CSTDINT -DASIO_HAS_STD_SHARED_PTR -DASIO_HAS_STD_TYPE_TRAITS -DASIO_HAS_VARIADIC_TEMPLATES -DASIO_HAS_STD_FUNCTION -DASIO_HAS_STD_CHRONO -DASIO_HAS_STD_SYSTEM_ERROR)
-endif ()
-
-# Mac OS 10.7 and later deprecates most of the methods in OpenSSL.
-# Add -Wno-deprecated-declarations to avoid the warnings.
-if(APPLE)
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++ -Wno-deprecated-declarations -Wno-unused-local-typedef")
-endif()
-
-if(DOXYGEN_FOUND)
-configure_file(${CMAKE_CURRENT_SOURCE_DIR}/doc/Doxyfile.in ${CMAKE_CURRENT_BINARY_DIR}/doc/Doxyfile @ONLY)
-add_custom_target(doc ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/doc/Doxyfile
-                  WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
-                  COMMENT "Generating API documentation with Doxygen" VERBATIM)
-endif(DOXYGEN_FOUND)
-
-
-# Copy files from the hadoop tree into the output/extern directory if
-#    they've changed
-function (copy_on_demand input_src_glob input_dest_dir)
-  get_filename_component(src_glob ${input_src_glob} REALPATH)
-  get_filename_component(dest_dir ${input_dest_dir} REALPATH)
-  get_filename_component(src_dir ${src_glob} PATH)
-  message(STATUS "Syncing ${src_glob} to ${dest_dir}")
-
-  file(GLOB_RECURSE src_files ${src_glob})
-  foreach(src_path ${src_files})
-    file(RELATIVE_PATH relative_src ${src_dir} ${src_path})
-    set(dest_path "${dest_dir}/${relative_src}")
-    add_custom_command(TARGET copy_hadoop_files
-     COMMAND ${CMAKE_COMMAND} -E copy_if_different "${src_path}" "${dest_path}"
-    )
-  endforeach()
-endfunction()
-
-# If we're building in the hadoop tree, pull the Hadoop files that
-#     libhdfspp depends on.  This allows us to ensure that
-#     the distribution will have a consistent set of headers and
-#     .proto files
-if(HADOOP_BUILD)
-    set(HADOOP_IMPORT_DIR ${PROJECT_BINARY_DIR}/extern)
-    get_filename_component(HADOOP_IMPORT_DIR ${HADOOP_IMPORT_DIR} REALPATH)
-
-  add_custom_target(copy_hadoop_files ALL)
-
-  # Gather the Hadoop files and resources that libhdfs++ needs to build
-  copy_on_demand(../libhdfs/include/*.h* ${HADOOP_IMPORT_DIR}/include)
-  copy_on_demand(${CMAKE_CURRENT_LIST_DIR}/../../../../../hadoop-hdfs-client/src/main/proto/*.proto ${HADOOP_IMPORT_DIR}/proto/hdfs)
-  copy_on_demand(${CMAKE_CURRENT_LIST_DIR}/../../../../../../hadoop-common-project/hadoop-common/src/main/proto/*.proto ${HADOOP_IMPORT_DIR}/proto/hadoop)
-  copy_on_demand(${CMAKE_CURRENT_LIST_DIR}/../../../../../../hadoop-common-project/hadoop-common/src/test/proto/*.proto ${HADOOP_IMPORT_DIR}/proto/hadoop_test)
-else(HADOOP_BUILD)
-  set(HADOOP_IMPORT_DIR ${CMAKE_CURRENT_LIST_DIR}/extern)
-endif(HADOOP_BUILD)
-
-# Paths to find the imported files
-set(PROTO_HDFS_DIR         ${HADOOP_IMPORT_DIR}/proto/hdfs)
-set(PROTO_HADOOP_DIR       ${HADOOP_IMPORT_DIR}/proto/hadoop)
-set(PROTO_HADOOP_TEST_DIR  ${HADOOP_IMPORT_DIR}/proto/hadoop_test)
-
-include_directories(
-  include
-  lib
-  ${HADOOP_IMPORT_DIR}/include
-)
-
-include_directories( SYSTEM
-  ${PROJECT_BINARY_DIR}/lib/proto
-  third_party/asio-1.10.2/include
-  third_party/rapidxml-1.13
-  third_party/gmock-1.7.0
-  third_party/tr2
-  third_party/protobuf
-  third_party/uriparser2
-  ${OPENSSL_INCLUDE_DIR}
-  ${SASL_INCLUDE_DIR}
-  ${PROTOBUF_INCLUDE_DIRS}
-)
-
-
-add_subdirectory(third_party/gmock-1.7.0)
-add_subdirectory(third_party/uriparser2)
-add_subdirectory(lib)
-if(NOT HDFSPP_LIBRARY_ONLY)
-    add_subdirectory(tests)
-    add_subdirectory(examples)
-    add_subdirectory(tools)
-endif()
-
-# create an empty file; hadoop_add_dual_library wraps add_library which
-# requires at least one file as an argument
-set(EMPTY_FILE_CC ${CMAKE_CURRENT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/empty.cc)
-file(WRITE ${EMPTY_FILE_CC} "")
-
-# Build the output libraries
-if(NEED_LINK_DL)
-   set(LIB_DL dl)
-endif()
-
-set(LIBHDFSPP_VERSION "0.1.0")
-set(LIBHDFSPP_ALL_OBJECTS $<TARGET_OBJECTS:bindings_c_obj> $<TARGET_OBJECTS:fs_obj> $<TARGET_OBJECTS:rpc_obj> $<TARGET_OBJECTS:reader_obj> $<TARGET_OBJECTS:proto_obj> $<TARGET_OBJECTS:connection_obj> $<TARGET_OBJECTS:common_obj> $<TARGET_OBJECTS:uriparser2_obj>)
-if (HADOOP_BUILD)
-  hadoop_add_dual_library(hdfspp ${EMPTY_FILE_CC} ${LIBHDFSPP_ALL_OBJECTS})
-  hadoop_target_link_dual_libraries(hdfspp
-    ${LIB_DL}
-    ${PROTOBUF_LIBRARY}
-    ${OPENSSL_LIBRARIES}
-    ${SASL_LIBRARIES}
-    ${CMAKE_THREAD_LIBS_INIT}
-  )
-  set_target_properties(hdfspp PROPERTIES SOVERSION ${LIBHDFSPP_VERSION})
-else (HADOOP_BUILD)
-  add_library(hdfspp_static STATIC ${EMPTY_FILE_CC} ${LIBHDFSPP_ALL_OBJECTS})
-  target_link_libraries(hdfspp_static
-    ${LIB_DL}
-    ${PROTOBUF_LIBRARY}
-    ${OPENSSL_LIBRARIES}
-    ${SASL_LIBRARIES}
-    ${CMAKE_THREAD_LIBS_INIT}
-    )
-  if(BUILD_SHARED_HDFSPP)
-    add_library(hdfspp SHARED ${EMPTY_FILE_CC} ${LIBHDFSPP_ALL_OBJECTS})
-    set_target_properties(hdfspp PROPERTIES SOVERSION ${LIBHDFSPP_VERSION})
-  endif(BUILD_SHARED_HDFSPP)
-endif (HADOOP_BUILD)
-
-# Set up make install targets
-# Can be installed to a particular location via "make DESTDIR=... install"
-file(GLOB_RECURSE LIBHDFSPP_HEADER_FILES "${CMAKE_CURRENT_LIST_DIR}/include/*.h*")
-file(GLOB_RECURSE LIBHDFS_HEADER_FILES "${HADOOP_IMPORT_DIR}/include/*.h*")
-install(FILES ${LIBHDFSPP_HEADER_FILES} DESTINATION include/hdfspp)
-install(FILES ${LIBHDFS_HEADER_FILES} DESTINATION include/hdfs)
-
-install(TARGETS hdfspp_static ARCHIVE DESTINATION lib)
-if(BUILD_SHARED_HDFSPP)
-  install(TARGETS hdfspp LIBRARY DESTINATION lib)
-endif(BUILD_SHARED_HDFSPP)
-
-add_custom_target(
-    InstallToBuildDirectory
-    COMMAND "${CMAKE_MAKE_PROGRAM}" install DESTDIR=${PROJECT_BINARY_DIR}/output
-)
-set(LIBHDFSPP_DIR ${PROJECT_BINARY_DIR}/output)
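
The install rules above put headers under include/hdfspp and include/hdfs and the libraries under lib, so a downstream project can link the result roughly as follows (a sketch assuming a conventional install prefix; the target and source names are placeholders, and the cat example further down takes the same approach via LIBHDFSPP_DIR):

``` cmake
# Hypothetical downstream CMakeLists.txt consuming an installed libhdfs++.
cmake_minimum_required(VERSION 2.8)
project(hdfspp_consumer CXX)

# Wherever "make DESTDIR=... install" (or a plain install) put the files.
set(LIBHDFSPP_DIR "/usr/local" CACHE STRING "libhdfs++ install prefix")

include_directories(${LIBHDFSPP_DIR}/include)
link_directories(${LIBHDFSPP_DIR}/lib)

add_executable(list_files list_files.cc)   # list_files.cc is a placeholder
# The static archive carries no transitive link info, so the protobuf, OpenSSL,
# SASL and thread libraries wired up above may also have to be listed here.
target_link_libraries(list_files hdfspp_static)
```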

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CONTRIBUTING.md
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CONTRIBUTING.md
deleted file mode 100644
index d36a38e..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CONTRIBUTING.md
+++ /dev/null
@@ -1,161 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Libhdfs++ Coding Standards
-==========================
-
-* Libhdfs++ Coding Standards
-    * Introduction
-    * Automated Formatting
-    * Explicit Scoping
-    * Comments
-    * Portability
-
-
-Introduction
-------------
-
-The foundation of the libhdfs++ project's coding standards
-is Google's C++ style guide. It can be found here:
-
-<a href="https://google.github.io/styleguide/cppguide.html">https://google.github.io/styleguide/cppguide.html</a>
-
-There are several small restrictions adopted from Sun's Java
-standards and Hadoop convention on top of Google's that must
-also be followed as well as portability requirements.
-
-Automated Formatting
---------------------
-
-Prior to submitting a patch for code review use llvm's formatting tool, clang-format, on the .h, .c, and .cc files included in the patch.  Use the -style=google switch when doing so.
-
-Example presubmission usage:
-
-``` shell
-cat my_source_file.cc | clang-format -style=google > temp_file.cc
-#optionally diff the source and temp file to get an idea what changed
-mv temp_file.cc my_source_file.cc
-```
-
-* note: On some Linux distributions clang-format already exists in repositories but doesn't show up without an appended version number.  On Ubuntu you'll find it with:
-``` shell
-   "apt-get install clang-format-3.6"
-```
-
-Explicit Block Scopes
----------------------
-
-Always add brackets around conditional and loop bodies, even if the body could fit on a single line.
-
-__BAD__:
-``` c
-if (foo)
-  Bar();
-
-if (foo)
-  Bar();
-else
-  Baz();
-
-for (int i=0; i<10; i++)
-  Bar(i);
-```
-__GOOD__:
-``` c
-if (foo) {
-  Bar();
-}
-
-if (foo) {
-  Bar();
-} else {
-  Baz();
-}
-
-for (int i=0; i<10; i++) {
-  Bar(i);
-}
-```
-
-Comments
---------
-
-Use the /\* comment \*/ style to maintain consistency with the rest of the Hadoop code base.
-
-__BAD__:
-``` c
-//this is a bad single line comment
-/*
-  this is a bad block comment
-*/
-```
-__GOOD__:
-``` c
-/* this is a single line comment */
-
-/**
- * This is a block comment.  Note that nothing is on the first
- * line of the block.
- **/
-```
-
-Portability
------------
-
-Please make sure you write code that is portable.
-
-* All code must be able to build using GCC and LLVM.
-    * In the future we hope to support other compilers as well.
-* Don't make assumptions about endianness or architecture.
-    * Don't do clever things with pointers or intrinsics.
-* Don't write code that could force a non-aligned word access.
-    * This causes performance issues on most architectures and isn't supported at all on some.
-    * Generally the compiler will prevent this unless you are doing clever things with pointers e.g. abusing placement new or reinterpreting a pointer into a pointer to a wider type.
-* If a type needs to be a specific width make sure to specify it.
-    * `int32_t my_32_bit_wide_int`
-* Avoid using compiler dependent pragmas or attributes.
-    * If there is a justified and unavoidable reason for using these you must document why. See examples below.
-
-__BAD__:
-``` c
-struct Foo {
-  int32_t x_;
-  char y_;
-  int32_t z_;
-  char w_;
-} __attribute__((packed));
-/**
- * "I didn't profile and identify that this is causing
- * significant memory overhead but I want to pack it to
- * save 6 bytes"
- **/
-```
-__NECESSARY__: Still not good but required for short-circuit reads.
-``` c
-struct FileDescriptorMessage {
-  struct cmsghdr msg_;
-  int file_descriptors_[2];
-} __attribute__((packed));
-/**
- * This is actually needed for short circuit reads.
- * "struct cmsghdr" is well defined on UNIX systems.
- * This mechanism relies on the fact that any passed
- * ancillary data is _directly_ following the cmsghdr.
- * The kernel interprets any padding as real data.
- **/
-```

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/doc/Doxyfile.in
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/doc/Doxyfile.in
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/doc/Doxyfile.in
deleted file mode 100644
index 8f6194f..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/doc/Doxyfile.in
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-DOXYFILE_ENCODING      = UTF-8
-PROJECT_NAME           = "libhdfspp"
-OUTPUT_DIRECTORY       = doc
-TAB_SIZE               = 2
-MARKDOWN_SUPPORT       = YES
-BUILTIN_STL_SUPPORT    = YES
-
-
-INPUT                  = @PROJECT_SOURCE_DIR@/doc/mainpage.dox \
-                         @PROJECT_SOURCE_DIR@/include/libhdfspp \
-                         @PROJECT_SOURCE_DIR@/lib/common/continuation \
-
-INPUT_ENCODING         = UTF-8
-RECURSIVE              = NO
-
-GENERATE_HTML          = YES
-GENERATE_LATEX         = NO
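
The template above only becomes a usable Doxyfile once the @...@ placeholders are substituted. The CMakeLists.txt removed earlier does this inside its if(DOXYGEN_FOUND) block; restated as a compact sketch:

``` cmake
# configure_file expands @PROJECT_SOURCE_DIR@ and friends at configure time;
# @ONLY leaves any ${...} references in the template untouched.
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/doc/Doxyfile.in
               ${CMAKE_CURRENT_BINARY_DIR}/doc/Doxyfile @ONLY)

# An optional "doc" target then runs Doxygen on the generated file.
add_custom_target(doc ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/doc/Doxyfile
                  WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
                  COMMENT "Generating API documentation with Doxygen" VERBATIM)
```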

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/doc/mainpage.dox
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/doc/mainpage.dox
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/doc/mainpage.dox
deleted file mode 100644
index 46532f2..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/doc/mainpage.dox
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
-\mainpage libhdfs++
-
-libhdfs++ is a modern implementation of the HDFS client in C++11. It is
-optimized for Massively Parallel Processing (MPP) applications that
-access thousands of files concurrently in HDFS.
-
-*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/CMakeLists.txt
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/CMakeLists.txt
deleted file mode 100644
index ca3a2c7..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/CMakeLists.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-add_subdirectory(c)
-add_subdirectory(cc)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/CMakeLists.txt
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/CMakeLists.txt
deleted file mode 100644
index a73d2bc..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/CMakeLists.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-add_subdirectory(cat)
-add_subdirectory(connect_cancel)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a542fb3/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/cat/CMakeLists.txt
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/cat/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/cat/CMakeLists.txt
deleted file mode 100644
index 41a9ee8..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/cat/CMakeLists.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Default LIBHDFSPP_DIR to the default install location.  You can override
-#    it by adding -DLIBHDFSPP_DIR=... to your cmake invocation
-set(LIBHDFSPP_DIR CACHE STRING ${CMAKE_INSTALL_PREFIX})
-
-include_directories( ${LIBHDFSPP_DIR}/include )
-link_directories( ${LIBHDFSPP_DIR}/lib )
-
-add_executable(cat_c cat.c)
-target_link_libraries(cat_c hdfspp_static uriparser2)
\ No newline at end of file

