http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc
new file mode 100644
index 0000000..19d95b4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc
@@ -0,0 +1,771 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hdfspp_mini_dfs.h"
+#include "hdfspp/hdfs_ext.h"
+
+#include <cstring>
+#include <chrono>
+#include <exception>
+#include <cstdlib>
+#include <limits>
+
+namespace hdfs {
+
+class HdfsExtTest: public ::testing::Test {
+public:
+  MiniCluster cluster;
+};
+
+// Make sure we can set up a mini-cluster and connect to it
+TEST_F(HdfsExtTest, TestGetBlockLocations) {
+  HdfsHandle connection = cluster.connect_c();
+  EXPECT_NE(nullptr, connection.handle());
+
+  hdfsBlockLocations * blocks = nullptr;
+
+  // Free a null pointer
+  int result = hdfsFreeBlockLocations(blocks);
+  EXPECT_EQ(0, result);
+
+  // Test a nonexistent file
+  EXPECT_EQ(-1, hdfsGetBlockLocations(connection, "non_extant_file", &blocks));  // Should be an error
+  EXPECT_EQ((int) std::errc::no_such_file_or_directory, errno);
+
+  // Test an existing file
+  std::string filename = connection.newFile(1024);
+  result = hdfsGetBlockLocations(connection, filename.c_str(), &blocks);
+  EXPECT_EQ(0, result);
+
+  EXPECT_EQ(1024, blocks->fileLength);
+  EXPECT_EQ(1, blocks->num_blocks);
+  EXPECT_EQ(0, blocks->isUnderConstruction);
+  EXPECT_NE(0, blocks->isLastBlockComplete);
+  EXPECT_EQ(1024, blocks->blocks->num_bytes);
+  EXPECT_EQ(0, blocks->blocks->start_offset);
+  EXPECT_EQ(1, blocks->blocks->num_locations);
+  EXPECT_NE(nullptr, blocks->blocks->locations->hostname);
+  EXPECT_NE(nullptr, blocks->blocks->locations->ip_address);
+  EXPECT_NE(nullptr, blocks->blocks->locations->network_location);
+  EXPECT_NE(0, blocks->blocks->locations->xfer_port);
+
+  result = hdfsFreeBlockLocations(blocks);
+  EXPECT_EQ(0, result);
+
+}
+
+
+// Writing a file to the filesystem and checking the used space
+TEST_F(HdfsExtTest, TestGetUsed) {
+  using namespace std::chrono;
+
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+
+  // File system's used space before writing
+  tOffset used_before_write;
+  EXPECT_GE(used_before_write = hdfsGetUsed(fs), 0);
+
+  // Write to a file
+  tOffset fileSize = 1024;
+  std::string filename = connection.newFile(fileSize);
+
+  //hdfsGetUsed() must be called in a loop until the filesystem's
+  //refresh interval has elapsed and the used space is updated.
+  //Time out after 3 minutes.
+  tOffset used_after_write;
+  tOffset difference;
+  minutes beginTime = duration_cast<minutes>(
+      system_clock::now().time_since_epoch());
+  minutes currentTime;
+  do{
+    EXPECT_GE(used_after_write = hdfsGetUsed(fs), 0);
+    difference = used_after_write - used_before_write;
+    currentTime = duration_cast<minutes>(
+          system_clock::now().time_since_epoch());
+  } while (difference == 0 && currentTime.count() - beginTime.count() < 3);
+
+  //More than fileSize bytes should have been added to the used space
+  EXPECT_GT(difference, fileSize);
+  //Additional metadata may be counted in the used space,
+  //but the total should stay under double the fileSize
+  EXPECT_LT(difference, fileSize * 2);
+
+}
+
+
+//Testing allow, disallow, create, and delete snapshot
+TEST_F(HdfsExtTest, TestSnapshotOperations) {
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+
+  //argument 'path' is NULL
+  EXPECT_EQ(-1, hdfsAllowSnapshot(fs, nullptr));
+  EXPECT_EQ((int) std::errc::invalid_argument, errno);
+  EXPECT_EQ(-1, hdfsCreateSnapshot(fs, nullptr, "Bad"));
+  EXPECT_EQ((int) std::errc::invalid_argument, errno);
+  EXPECT_EQ(-1, hdfsDeleteSnapshot(fs, nullptr, "Bad"));
+  EXPECT_EQ((int) std::errc::invalid_argument, errno);
+  EXPECT_EQ(-1, hdfsRenameSnapshot(fs, nullptr, "Bad", "Bad"));
+  EXPECT_EQ((int) std::errc::invalid_argument, errno);
+  EXPECT_EQ(-1, hdfsDisallowSnapshot(fs, nullptr));
+  EXPECT_EQ((int) std::errc::invalid_argument, errno);
+
+  //argument 'name' is NULL for deletion
+  EXPECT_EQ(-1, hdfsDeleteSnapshot(fs, "/dir/", nullptr));
+  EXPECT_EQ((int) std::errc::invalid_argument, errno);
+
+  //Path not found
+  std::string path = "/wrong/dir/";
+  EXPECT_EQ(-1, hdfsAllowSnapshot(fs, path.c_str()));
+  EXPECT_EQ((int) std::errc::no_such_file_or_directory, errno);
+  EXPECT_EQ(-1, hdfsCreateSnapshot(fs, path.c_str(), "Bad"));
+  EXPECT_EQ((int) std::errc::no_such_file_or_directory, errno);
+  EXPECT_EQ(-1, hdfsDeleteSnapshot(fs, path.c_str(), "Bad"));
+  EXPECT_EQ((int) std::errc::no_such_file_or_directory, errno);
+  EXPECT_EQ(-1, hdfsRenameSnapshot(fs, path.c_str(), "Bad", "Bad"));
+  EXPECT_EQ((int) std::errc::no_such_file_or_directory, errno);
+  EXPECT_EQ(-1, hdfsDisallowSnapshot(fs, path.c_str()));
+  EXPECT_EQ((int) std::errc::no_such_file_or_directory, errno);
+
+  //Not a directory
+  path = connection.newFile(1024); //1024 byte file
+  EXPECT_EQ(-1, hdfsAllowSnapshot(fs, path.c_str()));
+  EXPECT_EQ((int) std::errc::not_a_directory, errno);
+  EXPECT_EQ(-1, hdfsCreateSnapshot(fs, path.c_str(), "Bad"));
+  EXPECT_EQ((int) std::errc::not_a_directory, errno);
+  EXPECT_EQ(-1, hdfsDeleteSnapshot(fs, path.c_str(), "Bad"));
+  EXPECT_EQ((int) std::errc::not_a_directory, errno);
+  EXPECT_EQ(-1, hdfsRenameSnapshot(fs, path.c_str(), "Bad", "Bad"));
+  EXPECT_EQ((int) std::errc::not_a_directory, errno);
+  EXPECT_EQ(-1, hdfsDisallowSnapshot(fs, path.c_str()));
+  EXPECT_EQ((int) std::errc::not_a_directory, errno);
+
+  //Not snapshottable directory
+  std::string dirName = connection.newDir();
+  EXPECT_EQ(0, hdfsDisallowSnapshot(fs, dirName.c_str()));
+  EXPECT_EQ(-1, hdfsCreateSnapshot(fs, dirName.c_str(), "Bad"));
+  EXPECT_EQ((int) std::errc::invalid_argument, errno);
+
+  //Verify snapshot created
+  EXPECT_EQ(0, hdfsAllowSnapshot(fs, dirName.c_str()));
+  EXPECT_EQ(0, hdfsCreateSnapshot(fs, dirName.c_str(), "Good"));
+  std::string snapDir = dirName + ".snapshot/";
+  int size;
+  hdfsFileInfo *file_infos;
+  EXPECT_NE(nullptr, file_infos = hdfsListDirectory(fs, snapDir.c_str(), &size));
+  EXPECT_EQ(1, size);
+  EXPECT_STREQ("Good", file_infos[0].mName);
+  hdfsFreeFileInfo(file_infos, 1);
+
+  //Verify snapshot renamed
+  EXPECT_EQ(0, hdfsRenameSnapshot(fs, dirName.c_str(), "Good", "Best"));
+
+  //Verify snapshot deleted
+  EXPECT_EQ(0, hdfsDeleteSnapshot(fs, dirName.c_str(), "Best"));
+  EXPECT_EQ(nullptr, file_infos = hdfsListDirectory(fs, snapDir.c_str(), &size));
+  EXPECT_EQ(0, size);
+  hdfsFreeFileInfo(file_infos, 0);
+}
+
+//Testing creating directories
+TEST_F(HdfsExtTest, TestMkdirs) {
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+
+  //Correct operation
+  EXPECT_EQ(0, hdfsCreateDirectory(fs, "/myDir123"));
+
+  //TODO Should return error if directory already exists?
+  //EXPECT_EQ(-1, hdfsCreateDirectory(fs, "/myDir123"));
+  //EXPECT_EQ((int) std::errc::file_exists, errno);
+
+  //Creating directory on a path of the existing file
+  std::string path = connection.newFile(1024); //1024 byte file
+  EXPECT_EQ(-1, hdfsCreateDirectory(fs, path.c_str()));
+  EXPECT_EQ((int) std::errc::file_exists, errno);
+}
+
+//Testing deleting files and directories
+TEST_F(HdfsExtTest, TestDelete) {
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+
+  //Path not found
+  EXPECT_EQ(-1, hdfsDelete(fs, "/wrong_path", 1));
+  EXPECT_EQ((int) std::errc::no_such_file_or_directory, errno);
+
+  EXPECT_EQ(0, hdfsCreateDirectory(fs, "/myDir"));
+  std::string path = connection.newFile("/myDir", 1024); //1024 byte file
+
+  //Non-recursive delete should fail on a non-empty directory:
+  //errno is ENOTEMPTY (39) for libhdfs++, or 255 for libhdfs
+  EXPECT_EQ(-1, hdfsDelete(fs, "/myDir", 0));
+  EXPECT_EQ((int) std::errc::directory_not_empty, errno);
+
+  //Correct operation
+  EXPECT_EQ(0, hdfsDelete(fs, "/myDir", 1));
+}
+
+//Testing renaming files and directories
+TEST_F(HdfsExtTest, TestRename) {
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+
+  //Creating directory with two files
+  EXPECT_EQ(0, hdfsCreateDirectory(fs, "/myDir"));
+  std::string file1 = connection.newFile("/myDir", 1024); //1024 byte file
+  std::string file2 = connection.newFile("/myDir", 1024); //1024 byte file
+  std::string file3 = connection.newFile(1024); //1024 byte file
+
+  //Path not found
+  EXPECT_EQ(-1, hdfsRename(fs, "/wrong_path", "/new_name"));
+  EXPECT_EQ((int) std::errc::invalid_argument, errno);
+
+  //No parent directory in new path
+  EXPECT_EQ(-1, hdfsRename(fs, file1.c_str(), "/wrong_parent/new_name"));
+  EXPECT_EQ((int ) std::errc::invalid_argument, errno);
+
+  //New name already exists in the folder
+  EXPECT_EQ(-1, hdfsRename(fs, file1.c_str(), file2.c_str()));
+  EXPECT_EQ((int ) std::errc::invalid_argument, errno);
+
+  //Correct operation
+  EXPECT_EQ(0, hdfsRename(fs, file1.c_str(), "/myDir/new_awesome_name"));
+  EXPECT_EQ(0, hdfsRename(fs, file3.c_str(), "/myDir/another_file"));
+  EXPECT_EQ(0, hdfsRename(fs, "/myDir", "/new_awesome_dir"));
+
+  //Verification
+  int numEntries;
+  hdfsFileInfo * dirList = hdfsListDirectory(fs, "/new_awesome_dir", &numEntries);
+  EXPECT_NE(nullptr, dirList);
+  EXPECT_EQ(3, numEntries);
+  hdfsFreeFileInfo(dirList, 3);
+}
+
+//Testing Chmod and Chown
+TEST_F(HdfsExtTest, TestChmodChown) {
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+
+  //Path not found
+  std::string path = "/wrong/dir/";
+  EXPECT_EQ(-1, hdfsChmod(fs, path.c_str(), 0777));
+  EXPECT_EQ((int ) std::errc::no_such_file_or_directory, errno);
+  EXPECT_EQ(-1, hdfsChown(fs, path.c_str(), "foo", "bar"));
+  EXPECT_EQ((int ) std::errc::no_such_file_or_directory, errno);
+
+  //Wrong arguments
+  path = connection.newFile(1024); //1024 byte file
+  EXPECT_EQ(-1, hdfsChmod(fs, nullptr, 0777));
+  EXPECT_EQ((int ) std::errc::invalid_argument, errno);
+  EXPECT_EQ(-1, hdfsChmod(fs, path.c_str(), 07777));
+  EXPECT_EQ((int ) std::errc::invalid_argument, errno);
+  EXPECT_EQ(-1, hdfsChmod(fs, path.c_str(), -1));
+  EXPECT_EQ((int ) std::errc::invalid_argument, errno);
+  EXPECT_EQ(-1, hdfsChown(fs, nullptr, "foo", "bar"));
+  EXPECT_EQ((int ) std::errc::invalid_argument, errno);
+
+  //Permission denied
+  HdfsHandle connection2 = cluster.connect_c("OtherGuy");
+  hdfsFS fs2 = connection2.handle();
+  EXPECT_EQ(-1, hdfsChmod(fs2, path.c_str(), 0123));
+  EXPECT_EQ((int ) std::errc::permission_denied, errno);
+  EXPECT_EQ(-1, hdfsChown(fs2, path.c_str(), "cool", "nice"));
+  EXPECT_EQ((int ) std::errc::permission_denied, errno);
+
+  //Verify Chmod and Chown worked
+  EXPECT_EQ(0, hdfsChmod(fs, path.c_str(), 0123));
+  EXPECT_EQ(0, hdfsChown(fs, path.c_str(), "cool", "nice"));
+  hdfsFileInfo *file_info;
+  EXPECT_NE(nullptr, file_info = hdfsGetPathInfo(fs, path.c_str()));
+  EXPECT_EQ(0123, file_info->mPermissions);
+  EXPECT_STREQ("cool", file_info->mOwner);
+  EXPECT_STREQ("nice", file_info->mGroup);
+  hdfsFreeFileInfo(file_info, 1);
+}
+
+//Testing EOF
+TEST_F(HdfsExtTest, TestEOF) {
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+  //Write to a file
+  errno = 0;
+  int size = 256;
+  std::string path = "/eofTest";
+  hdfsFile file = hdfsOpenFile(fs, path.c_str(), O_WRONLY, 0, 0, 0);
+  EXPECT_NE(nullptr, file);
+  void * buf = malloc(size);
+  memset(buf, ' ', size);
+  EXPECT_EQ(size, hdfsWrite(fs, file, buf, size));
+  free(buf);
+  EXPECT_EQ(0, hdfsCloseFile(fs, file));
+  //libhdfs file operations work, but sometimes set errno to ENOENT (2)
+
+  //Test normal reading (no EOF)
+  char buffer[300];
+  file = hdfsOpenFile(fs, path.c_str(), O_RDONLY, 0, 0, 0);
+  EXPECT_EQ(size, hdfsPread(fs, file, 0, buffer, sizeof(buffer)));
+  //Read executes correctly, but causes a warning (captured in HDFS-10595)
+  //and sets errno to EINPROGRESS (115): Operation now in progress
+
+  //Test reading at offset past the EOF
+  EXPECT_EQ(-1, hdfsPread(fs, file, sizeof(buffer), buffer, sizeof(buffer)));
+  EXPECT_EQ(Status::kInvalidOffset, errno);
+
+  EXPECT_EQ(0, hdfsCloseFile(fs, file));
+}
+
+//Testing hdfsExists
+TEST_F(HdfsExtTest, TestExists) {
+
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+  //Path not found
+  EXPECT_EQ(-1, hdfsExists(fs, "/wrong/dir/"));
+  EXPECT_EQ((int ) std::errc::no_such_file_or_directory, errno);
+
+  //Correct operation
+  std::string pathDir = "/testExistsDir";
+  EXPECT_EQ(0, hdfsCreateDirectory(fs, pathDir.c_str()));
+  EXPECT_EQ(0, hdfsExists(fs, pathDir.c_str()));
+  std::string pathFile = connection.newFile(pathDir.c_str(), 1024);
+  EXPECT_EQ(0, hdfsExists(fs, pathFile.c_str()));
+
+  //Permission denied
+  EXPECT_EQ(0, hdfsChmod(fs, pathDir.c_str(), 0700));
+  HdfsHandle connection2 = cluster.connect_c("OtherGuy");
+  hdfsFS fs2 = connection2.handle();
+  EXPECT_EQ(-1, hdfsExists(fs2, pathFile.c_str()));
+  EXPECT_EQ((int ) std::errc::permission_denied, errno);
+}
+
+//Testing Replication and Time modifications
+TEST_F(HdfsExtTest, TestReplAndTime) {
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+
+  std::string path = "/wrong/dir/";
+
+  //Path not found
+  EXPECT_EQ(-1, hdfsSetReplication(fs, path.c_str(), 3));
+  EXPECT_EQ((int ) std::errc::no_such_file_or_directory, errno);
+  EXPECT_EQ(-1, hdfsUtime(fs, path.c_str(), 1000000, 1000000));
+  EXPECT_EQ((int ) std::errc::no_such_file_or_directory, errno);
+
+  //Correct operation
+  path = connection.newFile(1024);
+  EXPECT_EQ(0, hdfsSetReplication(fs, path.c_str(), 7));
+  EXPECT_EQ(0, hdfsUtime(fs, path.c_str(), 123456789, 987654321));
+  hdfsFileInfo *file_info;
+  EXPECT_NE(nullptr, file_info = hdfsGetPathInfo(fs, path.c_str()));
+  EXPECT_EQ(7, file_info->mReplication);
+  EXPECT_EQ(123456789, file_info->mLastMod);
+  EXPECT_EQ(987654321, file_info->mLastAccess);
+  hdfsFreeFileInfo(file_info, 1);
+
+  //Wrong arguments
+  EXPECT_EQ(-1, hdfsSetReplication(fs, path.c_str(), 0));
+  EXPECT_EQ((int ) std::errc::invalid_argument, errno);
+  EXPECT_EQ(-1, hdfsSetReplication(fs, path.c_str(), 513));
+  EXPECT_EQ((int ) std::errc::invalid_argument, errno);
+
+  //Permission denied
+  EXPECT_EQ(0, hdfsChmod(fs, path.c_str(), 0700));
+  HdfsHandle connection2 = cluster.connect_c("OtherGuy");
+  hdfsFS fs2 = connection2.handle();
+  EXPECT_EQ(-1, hdfsSetReplication(fs2, path.c_str(), 3));
+  EXPECT_EQ((int ) std::errc::permission_denied, errno);
+  EXPECT_EQ(-1, hdfsUtime(fs2, path.c_str(), 111111111, 222222222));
+  EXPECT_EQ((int ) std::errc::permission_denied, errno);
+}
+
+//Testing getting default block size at path
+TEST_F(HdfsExtTest, TestDefaultBlockSize) {
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+
+  //Correct operation (existing path)
+  std::string path = connection.newFile(1024);
+  long block_size = hdfsGetDefaultBlockSizeAtPath(fs, path.c_str());
+  EXPECT_GT(block_size, 0);
+  hdfsFileInfo *file_info;
+  EXPECT_NE(nullptr, file_info = hdfsGetPathInfo(fs, path.c_str()));
+  EXPECT_EQ(block_size, file_info->mBlockSize);
+  hdfsFreeFileInfo(file_info, 1);
+
+  //Non-existing path
+  path = "/wrong/dir/";
+  EXPECT_GT(hdfsGetDefaultBlockSizeAtPath(fs, path.c_str()), 0);
+
+  //No path specified
+  EXPECT_GT(hdfsGetDefaultBlockSize(fs), 0);
+}
+
+//Testing getting hosts
+TEST_F(HdfsExtTest, TestHosts) {
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+
+  char *** hosts = nullptr;
+
+  // Free a null pointer
+  hdfsFreeHosts(hosts);
+  EXPECT_EQ(0, errno);
+
+  // Test a nonexistent file
+  EXPECT_EQ(nullptr, hdfsGetHosts(fs, "/wrong/file/", 0, std::numeric_limits<int64_t>::max()));
+  EXPECT_EQ((int ) std::errc::no_such_file_or_directory, errno);
+
+  // Test an existing file
+  std::string filename = connection.newFile(1024);
+  EXPECT_NE(nullptr, hosts = hdfsGetHosts(fs, filename.c_str(), 0, std::numeric_limits<int64_t>::max()));
+
+  //Make sure there is at least one host
+  EXPECT_NE(nullptr, *hosts);
+  EXPECT_NE(nullptr, **hosts);
+
+  hdfsFreeHosts(hosts);
+  EXPECT_EQ(0, errno);
+
+  //Test invalid arguments
+  EXPECT_EQ(nullptr, hdfsGetHosts(fs, filename.c_str(), 0, std::numeric_limits<int64_t>::max()+1));
+  EXPECT_EQ((int) std::errc::invalid_argument, errno);
+
+  //Test invalid arguments
+  EXPECT_EQ(nullptr, hdfsGetHosts(fs, filename.c_str(), std::numeric_limits<int64_t>::max()+1, std::numeric_limits<int64_t>::max()));
+  EXPECT_EQ((int) std::errc::invalid_argument, errno);
+}
+
+//Testing read statistics
+TEST_F(HdfsExtTest, TestReadStats) {
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+
+  struct hdfsReadStatistics *stats;
+
+  //Write to a file
+  int size = 256;
+  std::string path = "/readStatTest";
+  hdfsFile file = hdfsOpenFile(fs, path.c_str(), O_WRONLY, 0, 0, 0);
+  EXPECT_NE(nullptr, file);
+  void * buf = malloc(size);
+  bzero(buf, size);
+  EXPECT_EQ(size, hdfsWrite(fs, file, buf, size));
+  free(buf);
+  EXPECT_EQ(0, hdfsCloseFile(fs, file));
+
+  //test before reading
+  file = hdfsOpenFile(fs, path.c_str(), O_RDONLY, 0, 0, 0);
+  EXPECT_EQ(0, hdfsFileGetReadStatistics(file, &stats));
+  EXPECT_EQ(0, stats->totalBytesRead);
+  hdfsFileFreeReadStatistics(stats);
+
+  //test after reading
+  char buffer[123];
+  //Read executes correctly, but causes a warning (captured in HDFS-10595)
+  EXPECT_EQ(sizeof(buffer), hdfsRead(fs, file, buffer, sizeof(buffer)));
+  EXPECT_EQ(0, hdfsFileGetReadStatistics(file, &stats));
+  EXPECT_EQ(sizeof(buffer), stats->totalBytesRead);
+  EXPECT_EQ(sizeof(buffer), stats->totalLocalBytesRead);
+  EXPECT_EQ(0, hdfsReadStatisticsGetRemoteBytesRead(stats));
+  hdfsFileFreeReadStatistics(stats);
+
+  //test after clearing
+  EXPECT_EQ(0, hdfsFileClearReadStatistics(file));
+  EXPECT_EQ(0, hdfsFileGetReadStatistics(file, &stats));
+  EXPECT_EQ(0, stats->totalBytesRead);
+  hdfsFileFreeReadStatistics(stats);
+
+  EXPECT_EQ(0, hdfsCloseFile(fs, file));
+  EXPECT_EQ(0, errno);
+}
+
+//Testing working directory
+TEST_F(HdfsExtTest, TestWorkingDirectory) {
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+
+  //Correct operation of setter and getter
+  std::string pathDir = "/testWorkDir/";
+  EXPECT_EQ(0, hdfsCreateDirectory(fs, pathDir.c_str()));
+  std::string pathFile = connection.newFile(pathDir.c_str(), 1024);
+  EXPECT_EQ(0, hdfsSetWorkingDirectory(fs, pathDir.c_str()));
+  char array[100];
+  EXPECT_STREQ(pathDir.c_str(), hdfsGetWorkingDirectory(fs, array, 100));
+
+  //Get relative path
+  std::size_t slashPos = pathFile.find_last_of("/");
+  std::string fileName = pathFile.substr(slashPos + 1);
+
+  //Testing various functions with relative path:
+
+  //hdfsGetDefaultBlockSizeAtPath
+  EXPECT_GT(hdfsGetDefaultBlockSizeAtPath(fs, fileName.c_str()), 0);
+
+  //hdfsSetReplication
+  EXPECT_EQ(0, hdfsSetReplication(fs, fileName.c_str(), 7));
+
+  //hdfsUtime
+  EXPECT_EQ(0, hdfsUtime(fs, fileName.c_str(), 123456789, 987654321));
+
+  //hdfsExists
+  EXPECT_EQ(0, hdfsExists(fs, fileName.c_str()));
+
+  //hdfsGetPathInfo
+  hdfsFileInfo *file_info;
+  EXPECT_NE(nullptr, file_info = hdfsGetPathInfo(fs, fileName.c_str()));
+  hdfsFreeFileInfo(file_info, 1);
+
+  //hdfsOpenFile
+  hdfsFile file;
+  file = hdfsOpenFile(fs, fileName.c_str(), O_RDONLY, 0, 0, 0);
+  EXPECT_EQ(0, hdfsCloseFile(fs, file));
+
+  //hdfsCreateDirectory
+  EXPECT_EQ(0, hdfsCreateDirectory(fs, "newDir"));
+
+  //add another file
+  std::string fileName2 = connection.newFile(pathDir + "/newDir", 1024);
+
+  //hdfsListDirectory
+  int numEntries;
+  hdfsFileInfo * dirList;
+  EXPECT_NE(nullptr, dirList = hdfsListDirectory(fs, "newDir", &numEntries));
+  EXPECT_EQ(1, numEntries);
+  hdfsFreeFileInfo(dirList, 1);
+
+  //hdfsChmod
+  EXPECT_EQ(0, hdfsChmod(fs, fileName.c_str(), 0777));
+
+  //hdfsChown
+  EXPECT_EQ(0, hdfsChown(fs, fileName.c_str(), "cool", "nice"));
+
+  //hdfsDisallowSnapshot
+  EXPECT_EQ(0, hdfsDisallowSnapshot(fs, "newDir"));
+
+  //hdfsAllowSnapshot
+  EXPECT_EQ(0, hdfsAllowSnapshot(fs, "newDir"));
+
+  //hdfsCreateSnapshot
+  EXPECT_EQ(0, hdfsCreateSnapshot(fs, "newDir", "Some"));
+
+  //hdfsDeleteSnapshot
+  EXPECT_EQ(0, hdfsDeleteSnapshot(fs, "newDir", "Some"));
+
+  //hdfsGetBlockLocations
+  hdfsBlockLocations * blocks = nullptr;
+  EXPECT_EQ(0, hdfsGetBlockLocations(connection, fileName.c_str(), &blocks));
+  hdfsFreeBlockLocations(blocks);
+
+  //hdfsGetHosts
+  char *** hosts;
+  EXPECT_NE(nullptr, hosts = hdfsGetHosts(fs, fileName.c_str(), 0, std::numeric_limits<int64_t>::max()));
+  hdfsFreeHosts(hosts);
+
+  //hdfsRename
+  EXPECT_EQ(0, hdfsRename(fs, fileName.c_str(), "new_file_name"));
+
+  //hdfsDelete
+  EXPECT_EQ(0, hdfsDelete(fs, "new_file_name", 0));
+}
+
+
+// Flags used to test event handlers
+static int connect_callback_invoked = 0;
+int basic_fs_callback(const char *event, const char *cluster, int64_t value, int64_t cookie) {
+  (void)cluster;
+  (void)value;
+  if(::strstr(FS_NN_CONNECT_EVENT, event) && cookie == 0xFFF0) {
+    connect_callback_invoked = 1;
+  }
+  return LIBHDFSPP_EVENT_OK;
+}
+
+// Make sure event handler gets called during connect
+TEST_F(HdfsExtTest, TestConnectEvent) {
+  connect_callback_invoked = 0;
+  hdfsPreAttachFSMonitor(basic_fs_callback, 0xFFF0);
+
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+  EXPECT_EQ(connect_callback_invoked, 1);
+}
+
+int throwing_fs_callback(const char *event, const char *cluster, int64_t value, int64_t cookie) {
+  (void)cluster;
+  (void)value;
+  if(::strstr(FS_NN_CONNECT_EVENT, event) && cookie == 0xFFF1) {
+    connect_callback_invoked = 1;
+    throw std::runtime_error("Throwing in callbacks is a bad thing.");
+  }
+  return LIBHDFSPP_EVENT_OK;
+}
+
+// Make sure throwing in the connect event handler doesn't prevent connection
+TEST_F(HdfsExtTest, TestConnectEventThrow) {
+  connect_callback_invoked = 0;
+  hdfsPreAttachFSMonitor(throwing_fs_callback, 0xFFF1);
+
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+  EXPECT_EQ(connect_callback_invoked, 1);
+}
+
+int char_throwing_fs_callback(const char *event, const char *cluster, int64_t value, int64_t cookie) {
+  (void)cluster;
+  (void)value;
+  if(::strstr(FS_NN_CONNECT_EVENT, event) && cookie == 0xFFF2) {
+    connect_callback_invoked = 1;
+    throw "Throwing non std::exceptions in callbacks is even worse.";
+  }
+  return LIBHDFSPP_EVENT_OK;
+}
+
+TEST_F(HdfsExtTest, TestConnectEventThrowChar) {
+  connect_callback_invoked = 0;
+  hdfsPreAttachFSMonitor(char_throwing_fs_callback, 0xFFF2);
+
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+  EXPECT_EQ(connect_callback_invoked, 1);
+}
+
+// Make sure throwing in the read event handler doesn't prevent reads
+int read_handler_invokation_count = 0;
+int basic_read_event_handler(const char *event, const char *cluster, const char *file,
+                             int64_t value, int64_t cookie)
+{
+  (void)cluster;
+  (void)file;
+  (void)value;
+  if(::strstr(FILE_DN_READ_EVENT, event) && cookie == 0xFFF3) {
+    read_handler_invokation_count += 1;
+  }
+  return LIBHDFSPP_EVENT_OK;
+}
+
+// Testing that the read handler is called.
+// Note: this counts calls to async_read rather than hdfsPread.
+//  A call to hdfs(P)Read that doesn't span blocks/packets typically
+//  invokes async_read 6 times, 4 more than strictly required
+//  (improvement tracked in HDFS-11266).
+TEST_F(HdfsExtTest, TestReadEvent) {
+  read_handler_invokation_count = 0;
+  hdfsPreAttachFileMonitor(basic_read_event_handler, 0xFFF3);
+
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+  //Write to a file
+  errno = 0;
+  int size = 256;
+  std::string path = "/readEventTest";
+  hdfsFile file = hdfsOpenFile(fs, path.c_str(), O_WRONLY, 0, 0, 0);
+  EXPECT_NE(nullptr, file);
+  void * buf = malloc(size);
+  memset(buf, ' ', size);
+  EXPECT_EQ(size, hdfsWrite(fs, file, buf, size));
+  free(buf);
+  EXPECT_EQ(0, hdfsCloseFile(fs, file));
+
+  //Test that read counters are getting incremented
+  char buffer[300];
+  file = hdfsOpenFile(fs, path.c_str(), O_RDONLY, 0, 0, 0);
+  EXPECT_EQ(size, hdfsPread(fs, file, 0, buffer, sizeof(buffer)));
+  EXPECT_EQ(read_handler_invokation_count, 6);
+
+  EXPECT_EQ(size, hdfsPread(fs, file, 0, buffer, sizeof(buffer)));
+  EXPECT_EQ(read_handler_invokation_count, 12);
+
+
+  EXPECT_EQ(0, hdfsCloseFile(fs, file));
+}
+
+int throwing_read_event_handler(const char *event, const char *cluster, const char *file,
+                             int64_t value, int64_t cookie)
+{
+  (void)cluster;
+  (void)file;
+  (void)value;
+  if(::strstr(FILE_DN_READ_EVENT, event) && cookie == 0xFFF4) {
+    read_handler_invokation_count += 1;
+    throw std::runtime_error("Throwing here is a bad idea, but shouldn't break reads");
+  }
+  return LIBHDFSPP_EVENT_OK;
+}
+
+// Testing that reads can be done when event handler throws.
+TEST_F(HdfsExtTest, TestReadEventThrow) {
+  read_handler_invokation_count = 0;
+  hdfsPreAttachFileMonitor(throwing_read_event_handler, 0xFFF4);
+
+  HdfsHandle connection = cluster.connect_c();
+  hdfsFS fs = connection.handle();
+  EXPECT_NE(nullptr, fs);
+  //Write to a file
+  errno = 0;
+  int size = 256;
+  std::string path = "/readEventTest";
+  hdfsFile file = hdfsOpenFile(fs, path.c_str(), O_WRONLY, 0, 0, 0);
+  EXPECT_NE(nullptr, file);
+  void * buf = malloc(size);
+  memset(buf, ' ', size);
+  EXPECT_EQ(size, hdfsWrite(fs, file, buf, size));
+  free(buf);
+  EXPECT_EQ(0, hdfsCloseFile(fs, file));
+
+  //Test that read counters are getting incremented
+  char buffer[300];
+  file = hdfsOpenFile(fs, path.c_str(), O_RDONLY, 0, 0, 0);
+  EXPECT_EQ(size, hdfsPread(fs, file, 0, buffer, sizeof(buffer)));
+  EXPECT_EQ(read_handler_invokation_count, 6);
+
+  EXPECT_EQ(size, hdfsPread(fs, file, 0, buffer, sizeof(buffer)));
+  EXPECT_EQ(read_handler_invokation_count, 12);
+
+
+  EXPECT_EQ(0, hdfsCloseFile(fs, file));
+}
+
+
+} // end namespace hdfs
+
+int main(int argc, char *argv[]) {
+  // The following line must be executed to initialize Google Mock
+  // (and Google Test) before running the tests.
+  ::testing::InitGoogleMock(&argc, argv);
+  int exit_code = RUN_ALL_TESTS();
+  google::protobuf::ShutdownProtobufLibrary();
+
+  return exit_code;
+}
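
For reference, the extension API exercised above can also be driven outside
of gtest by a small standalone client. The following is only a sketch: the
NameNode host and port are placeholders, and it restricts itself to calls
that appear in this patch (hdfsNewBuilder, hdfsBuilderConnect,
hdfsGetBlockLocations, hdfsGetLastError).

    #include "hdfspp/hdfs_ext.h"
    #include <stdio.h>

    int main(void) {
      struct hdfsBuilder *bld = hdfsNewBuilder();
      hdfsBuilderSetNameNode(bld, "localhost");   // placeholder host
      hdfsBuilderSetNameNodePort(bld, 9820);      // placeholder port
      hdfsFS fs = hdfsBuilderConnect(bld);        // consumes the builder
      if (!fs) return 1;

      struct hdfsBlockLocations *blocks = NULL;
      if (hdfsGetBlockLocations(fs, "/some/file", &blocks) == 0) {
        printf("blocks: %lld\n", (long long)blocks->num_blocks);
        hdfsFreeBlockLocations(blocks);
      } else {
        char err[4096];
        hdfsGetLastError(err, sizeof(err));  // see hdfspp_errors.cc below
        fprintf(stderr, "hdfsGetBlockLocations: %s\n", err);
      }
      hdfsDisconnect(fs);
      return 0;
    }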

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ioservice_test.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ioservice_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ioservice_test.cc
new file mode 100644
index 0000000..5ee9789
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ioservice_test.cc
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/hdfs_ioservice.h"
+
+#include <future>
+#include <functional>
+#include <thread>
+#include <string>
+
+#include <gmock/gmock.h>
+
+using ::testing::_;
+using ::testing::InvokeArgument;
+using ::testing::Return;
+
+using namespace hdfs;
+
+// Make sure IoService spins up specified number of threads
+TEST(IoServiceTest, InitThreads) {
+#ifndef DISABLE_CONCURRENT_WORKERS
+  std::shared_ptr<IoServiceImpl> service = std::static_pointer_cast<IoServiceImpl>(IoService::MakeShared());
+  EXPECT_NE(service, nullptr);
+
+  unsigned int thread_count = 4;
+  unsigned int result_thread_count = service->InitWorkers(thread_count);
+  EXPECT_EQ(thread_count, result_thread_count);
+
+  service->Stop();
+#else
+  #pragma message("DISABLE_CONCURRENT_WORKERS is defined so hdfs_ioservice_test will compile out the InitThreads test")
+#endif
+}
+
+// Make sure IoService defaults to logical thread count
+TEST(IoServiceTest, InitDefaultThreads) {
+#ifndef DISABLE_CONCURRENT_WORKERS
+  std::shared_ptr<IoServiceImpl> service = std::static_pointer_cast<IoServiceImpl>(IoService::MakeShared());
+  EXPECT_NE(service, nullptr);
+
+  unsigned int thread_count = std::thread::hardware_concurrency();
+  unsigned int result_thread_count = service->InitDefaultWorkers();
+  EXPECT_EQ(thread_count, result_thread_count);
+
+  service->Stop();
+#else
+  #pragma message("DISABLE_CONCURRENT_WORKERS is defined so hdfs_ioservice_test will compile out the InitDefaultThreads test")
+#endif
+}
+
+
+// Check IoService::PostTask
+TEST(IoServiceTest, SimplePost) {
+  std::shared_ptr<IoServiceImpl> service = std::static_pointer_cast<IoServiceImpl>(IoService::MakeShared());
+  EXPECT_NE(service, nullptr);
+
+  unsigned int thread_count = std::thread::hardware_concurrency();
+  unsigned int result_thread_count = service->InitDefaultWorkers();
+#ifndef DISABLE_CONCURRENT_WORKERS
+  EXPECT_EQ(thread_count, result_thread_count);
+#else
+  (void)thread_count;
+  (void)result_thread_count;
+#endif
+  // Like with the C synchronous shims, a promise/future is needed to block until the async call completes.
+  auto promise = std::make_shared<std::promise<std::string>>();
+  std::future<std::string> future = promise->get_future();
+
+  // this will get invoked on a worker thread
+  std::function<void()> example_callback = [promise](){
+    promise->set_value("hello from IoService");
+  };
+  service->PostTask(example_callback);
+
+  // block until worker thread finishes
+  std::string result = future.get();
+  EXPECT_EQ(result, "hello from IoService");
+
+  service->Stop();
+
+}
+
+int main(int argc, char *argv[]) {
+  // The following line must be executed to initialize Google Mock
+  // (and Google Test) before running the tests.
+  ::testing::InitGoogleMock(&argc, argv);
+  int exit_code = RUN_ALL_TESTS();
+  google::protobuf::ShutdownProtobufLibrary();
+  return exit_code;
+}
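
The promise/future pattern in SimplePost is the usual way to make
PostTask() block until completion. A reusable helper might look like the
sketch below; run_sync is illustrative, not part of the libhdfs++ API, and
assumes <memory> is available alongside the headers included above.

    template <typename Functor>
    void run_sync(std::shared_ptr<hdfs::IoService> service, Functor f) {
      auto done = std::make_shared<std::promise<void>>();
      std::future<void> fut = done->get_future();
      service->PostTask([f, done]() {
        f();                // runs on an IoService worker thread
        done->set_value();  // unblocks the waiting caller
      });
      fut.get();            // block until the task has run
    }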

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
new file mode 100644
index 0000000..54d4cf6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
@@ -0,0 +1,524 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "libhdfs_wrapper.h"
+#include "libhdfspp_wrapper.h"
+#include "hdfs/hdfs.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* Shim structs and functions that delegate to libhdfspp and libhdfs. */
+struct hdfs_internal {
+  libhdfs_hdfsFS libhdfsRep;
+  libhdfspp_hdfsFS libhdfsppRep;
+};
+typedef struct hdfs_internal* hdfsFS;
+
+struct hdfsFile_internal {
+  libhdfs_hdfsFile libhdfsRep;
+  libhdfspp_hdfsFile libhdfsppRep;
+};
+typedef struct hdfsFile_internal* hdfsFile;
+
+struct hdfsBuilder {
+  struct hdfsBuilder *  libhdfs_builder;
+  struct hdfsBuilder * libhdfspp_builder;
+};
+
+#define REPORT_FUNCTION_NOT_IMPLEMENTED                     \
+  fprintf(stderr, "%s failed: function not implemented by " \
+    "libhdfs++ test shim\n", __PRETTY_FUNCTION__);
+
+int hdfsFileIsOpenForWrite(hdfsFile file) {
+  return libhdfs_hdfsFileIsOpenForWrite(file->libhdfsRep);
+}
+
+int hdfsFileGetReadStatistics(hdfsFile file, struct hdfsReadStatistics **stats) {
+  //We do not track which bytes were remote or local, so we assume all are local
+  int ret = libhdfspp_hdfsFileGetReadStatistics(file->libhdfsppRep, (struct libhdfspp_hdfsReadStatistics **)stats);
+  if(!ret) {
+    (*stats)->totalLocalBytesRead = (*stats)->totalBytesRead;
+  }
+  return ret;
+}
+
+int64_t hdfsReadStatisticsGetRemoteBytesRead(const struct hdfsReadStatistics *stats) {
+  return libhdfspp_hdfsReadStatisticsGetRemoteBytesRead((struct libhdfspp_hdfsReadStatistics *)stats);
+}
+
+int hdfsFileClearReadStatistics(hdfsFile file) {
+  return libhdfspp_hdfsFileClearReadStatistics(file->libhdfsppRep);
+}
+
+void hdfsFileFreeReadStatistics(struct hdfsReadStatistics *stats) {
+  libhdfspp_hdfsFileFreeReadStatistics((struct libhdfspp_hdfsReadStatistics *)stats);
+}
+
+hdfsFS hdfsConnectAsUser(const char* nn, tPort port, const char *user) {
+  hdfsFS ret = calloc(1, sizeof(struct hdfs_internal));
+  ret->libhdfsRep = libhdfs_hdfsConnectAsUser(nn, port, user);
+  if (!ret->libhdfsRep) {
+    free(ret);
+    return NULL;
+  }
+  ret->libhdfsppRep = libhdfspp_hdfsConnectAsUser(nn, port, user);
+  if (!ret->libhdfsppRep) {
+    libhdfs_hdfsDisconnect(ret->libhdfsRep);
+    free(ret);
+    return NULL;
+  }
+  return ret;
+}
+
+hdfsFS hdfsConnect(const char* nn, tPort port) {
+  hdfsFS ret = calloc(1, sizeof(struct hdfs_internal));
+  ret->libhdfsRep = libhdfs_hdfsConnect(nn, port);
+  if (!ret->libhdfsRep) {
+    free(ret);
+    return NULL;
+  }
+  ret->libhdfsppRep = libhdfspp_hdfsConnect(nn, port);
+  if (!ret->libhdfsppRep) {
+    libhdfs_hdfsDisconnect(ret->libhdfsRep);
+    free(ret);
+    return NULL;
+  }
+  return ret;
+}
+
+hdfsFS hdfsConnectAsUserNewInstance(const char* nn, tPort port, const char *user) {
+  hdfsFS ret = calloc(1, sizeof(struct hdfs_internal));
+  ret->libhdfsRep = libhdfs_hdfsConnectAsUserNewInstance(nn, port, user);
+  if (!ret->libhdfsRep) {
+    free(ret);
+    return NULL;
+  }
+  ret->libhdfsppRep = libhdfspp_hdfsConnectAsUserNewInstance(nn, port, user);
+  if (!ret->libhdfsppRep) {
+    libhdfs_hdfsDisconnect(ret->libhdfsRep);
+    free(ret);
+    return NULL;
+  }
+  return ret;
+}
+
+hdfsFS hdfsConnectNewInstance(const char* nn, tPort port) {
+  hdfsFS ret = calloc(1, sizeof(struct hdfs_internal));
+  ret->libhdfsRep = libhdfs_hdfsConnectNewInstance(nn, port);
+  if (!ret->libhdfsRep) {
+    free(ret);
+    return NULL;
+  }
+  ret->libhdfsppRep = libhdfspp_hdfsConnectNewInstance(nn, port);
+  if (!ret->libhdfsppRep) {
+    libhdfs_hdfsDisconnect(ret->libhdfsRep);
+    free(ret);
+    return NULL;
+  }
+  return ret;
+}
+
+hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld) {
+  hdfsFS ret = calloc(1, sizeof(struct hdfs_internal));
+  ret->libhdfsRep = libhdfs_hdfsBuilderConnect(bld->libhdfs_builder);
+  if (!ret->libhdfsRep) {
+    free(ret);
+    return NULL;
+  }
+  /* Destroys bld object. */
+  ret->libhdfsppRep = libhdfspp_hdfsBuilderConnect(bld->libhdfspp_builder);
+  if (!ret->libhdfsppRep) {
+    libhdfs_hdfsDisconnect(ret->libhdfsRep);
+    free(ret);
+    return NULL;
+  }
+  return ret;
+}
+
+struct hdfsBuilder *hdfsNewBuilder(void) {
+  struct hdfsBuilder * result = calloc(1, sizeof(struct hdfsBuilder));
+  result->libhdfs_builder = libhdfs_hdfsNewBuilder();
+  result->libhdfspp_builder = libhdfspp_hdfsNewBuilder();
+  return result;
+}
+
+void hdfsBuilderSetForceNewInstance(struct hdfsBuilder *bld) {
+  libhdfs_hdfsBuilderSetForceNewInstance(bld->libhdfs_builder);
+  libhdfspp_hdfsBuilderSetForceNewInstance(bld->libhdfspp_builder);
+}
+
+void hdfsBuilderSetNameNode(struct hdfsBuilder *bld, const char *nn) {
+  libhdfs_hdfsBuilderSetNameNode(bld->libhdfs_builder, nn);
+  libhdfspp_hdfsBuilderSetNameNode(bld->libhdfspp_builder, nn);
+}
+
+void hdfsBuilderSetNameNodePort(struct hdfsBuilder *bld, tPort port) {
+  libhdfs_hdfsBuilderSetNameNodePort(bld->libhdfs_builder, port);
+  libhdfspp_hdfsBuilderSetNameNodePort(bld->libhdfspp_builder, port);
+}
+
+void hdfsBuilderSetUserName(struct hdfsBuilder *bld, const char *userName) {
+  libhdfs_hdfsBuilderSetUserName(bld->libhdfs_builder, userName);
+  libhdfspp_hdfsBuilderSetUserName(bld->libhdfspp_builder, userName);
+}
+
+void hdfsBuilderSetKerbTicketCachePath(struct hdfsBuilder *bld,
+                               const char *kerbTicketCachePath) {
+  REPORT_FUNCTION_NOT_IMPLEMENTED
+}
+
+void hdfsFreeBuilder(struct hdfsBuilder *bld) {
+  libhdfs_hdfsFreeBuilder(bld->libhdfs_builder);
+  libhdfspp_hdfsFreeBuilder(bld->libhdfspp_builder);
+  free(bld);
+}
+
+int hdfsBuilderConfSetStr(struct hdfsBuilder *bld, const char *key,
+                          const char *val) {
+  int ret = libhdfs_hdfsBuilderConfSetStr(bld->libhdfs_builder, key, val);
+  if (ret) {
+    return ret;
+  }
+  ret = libhdfspp_hdfsBuilderConfSetStr(bld->libhdfspp_builder, key, val);
+  if (ret) {
+    return ret;
+  }
+  return 0;
+}
+
+int hdfsConfGetStr(const char *key, char **val) {
+  return libhdfspp_hdfsConfGetStr(key, val);
+}
+
+int hdfsConfGetInt(const char *key, int32_t *val) {
+  return libhdfspp_hdfsConfGetInt(key, val);
+}
+
+void hdfsConfStrFree(char *val) {
+  libhdfspp_hdfsConfStrFree(val);
+}
+
+int hdfsDisconnect(hdfsFS fs) {
+  int ret1 = libhdfs_hdfsDisconnect(fs->libhdfsRep);
+  int ret2 = libhdfspp_hdfsDisconnect(fs->libhdfsppRep);
+  free(fs);
+  if (ret1){
+    return ret1;
+  } else if (ret2){
+    return ret2;
+  } else {
+    return 0;
+  }
+}
+
+hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
+                      int bufferSize, short replication, tSize blocksize) {
+  hdfsFile ret = calloc(1, sizeof(struct hdfsFile_internal));
+  /* Currently only open libhdf++ for reads. */
+  ret->libhdfsppRep = 0;
+  if (flags == O_RDONLY) {
+    ret->libhdfsppRep = libhdfspp_hdfsOpenFile(fs->libhdfsppRep, path, flags,
+        bufferSize, replication, blocksize);
+  }
+  ret->libhdfsRep = libhdfs_hdfsOpenFile(fs->libhdfsRep, path,
+      flags, bufferSize, replication, blocksize);
+  if (!ret->libhdfsRep) {
+    free(ret);
+    ret = NULL;
+  }
+  return ret;
+}
+
+int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength) {
+  return libhdfs_hdfsTruncateFile(fs->libhdfsRep, path, newlength);
+}
+
+int hdfsUnbufferFile(hdfsFile file) {
+  return libhdfs_hdfsUnbufferFile(file->libhdfsRep);
+}
+
+int hdfsCloseFile(hdfsFS fs, hdfsFile file) {
+  int ret;
+  if (file->libhdfsppRep) {
+    libhdfspp_hdfsCloseFile(fs->libhdfsppRep, file->libhdfsppRep);
+  }
+  ret = libhdfs_hdfsCloseFile(fs->libhdfsRep, file->libhdfsRep);
+  free(file);
+  return ret;
+}
+
+int hdfsExists(hdfsFS fs, const char *path) {
+  return libhdfspp_hdfsExists(fs->libhdfsppRep, path);
+}
+
+int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos) {
+  int ret1 = libhdfs_hdfsSeek(fs->libhdfsRep, file->libhdfsRep, desiredPos);
+  int ret2 = libhdfspp_hdfsSeek(fs->libhdfsppRep, file->libhdfsppRep, desiredPos);
+  if (ret1) {
+    return ret1;
+  } else if (ret2) {
+    return ret2;
+  } else {
+    return 0;
+  }
+}
+
+tOffset hdfsTell(hdfsFS fs, hdfsFile file) {
+  tOffset ret1 = libhdfs_hdfsTell(fs->libhdfsRep, file->libhdfsRep);
+  tOffset ret2 = libhdfspp_hdfsTell(fs->libhdfsppRep, file->libhdfsppRep);
+  if (ret1 != ret2) {
+    errno = EIO;
+    return -1;
+  } else {
+    return ret1;
+  }
+}
+
+tSize hdfsRead(hdfsFS fs, hdfsFile file, void* buffer, tSize length) {
+  // Read to update stats.
+  tSize nRead = libhdfs_hdfsRead(fs->libhdfsRep, file->libhdfsRep, buffer, length);
+  // Clear to avoid false positives.
+  if (nRead > 0) memset(buffer, 0, nRead);
+  return libhdfspp_hdfsRead(fs->libhdfsppRep, file->libhdfsppRep, buffer, length);
+}
+
+tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position,
+                void* buffer, tSize length) {
+  tSize ret = -1;
+  if (!fs->libhdfsppRep) {
+    fprintf(stderr, "hdfsPread failed: no libhdfs++ file system");
+  } else if (!file->libhdfsppRep) {
+    fprintf(stderr, "hdfsPread failed: no libhdfs++ file");
+  } else {
+    ret = libhdfspp_hdfsPread(fs->libhdfsppRep, file->libhdfsppRep,
+        position, buffer, length);
+  }
+  return ret;
+}
+
+tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void* buffer,
+                tSize length) {
+  return libhdfs_hdfsWrite(fs->libhdfsRep, file->libhdfsRep, buffer, length);
+}
+
+int hdfsFlush(hdfsFS fs, hdfsFile file) {
+  return libhdfs_hdfsFlush(fs->libhdfsRep, file->libhdfsRep);
+}
+
+int hdfsHFlush(hdfsFS fs, hdfsFile file) {
+  return libhdfs_hdfsHFlush(fs->libhdfsRep, file->libhdfsRep);
+}
+
+int hdfsHSync(hdfsFS fs, hdfsFile file) {
+  return libhdfs_hdfsHSync(fs->libhdfsRep, file->libhdfsRep);
+}
+
+int hdfsAvailable(hdfsFS fs, hdfsFile file) {
+  return libhdfspp_hdfsAvailable(fs->libhdfsppRep, file->libhdfsppRep);
+}
+
+int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst) {
+  return libhdfs_hdfsCopy(srcFS->libhdfsRep, src, dstFS->libhdfsRep, dst);
+}
+
+int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst) {
+  return libhdfs_hdfsMove(srcFS->libhdfsRep, src, dstFS->libhdfsRep, dst);
+}
+
+int hdfsDelete(hdfsFS fs, const char* path, int recursive) {
+  return libhdfspp_hdfsDelete(fs->libhdfsppRep, path, recursive);
+}
+
+int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath) {
+  return libhdfspp_hdfsRename(fs->libhdfsppRep, oldPath, newPath);
+}
+
+char* hdfsGetWorkingDirectory(hdfsFS fs, char *buffer, size_t bufferSize) {
+  return libhdfspp_hdfsGetWorkingDirectory(fs->libhdfsppRep, buffer, bufferSize);
+}
+
+int hdfsSetWorkingDirectory(hdfsFS fs, const char* path) {
+  int ret1 = libhdfspp_hdfsSetWorkingDirectory(fs->libhdfsppRep, path);
+  int ret2 = libhdfs_hdfsSetWorkingDirectory(fs->libhdfsRep, path);
+  if (ret1) {
+    return ret1;
+  } else if (ret2) {
+    return ret2;
+  } else {
+    return 0;
+  }
+}
+
+int hdfsCreateDirectory(hdfsFS fs, const char* path) {
+  return libhdfspp_hdfsCreateDirectory(fs->libhdfsppRep, path);
+}
+
+int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication) {
+  return libhdfspp_hdfsSetReplication(fs->libhdfsppRep, path, replication);
+}
+
+hdfsFileInfo *hdfsListDirectory(hdfsFS fs, const char* path,
+                                int *numEntries) {
+  return (hdfsFileInfo *)libhdfspp_hdfsListDirectory(fs->libhdfsppRep, path, numEntries);
+}
+
+hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path) {
+  return (hdfsFileInfo *)libhdfspp_hdfsGetPathInfo(fs->libhdfsppRep, path);
+}
+
+void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries) {
+  return libhdfspp_hdfsFreeFileInfo
+      ((libhdfspp_hdfsFileInfo *) hdfsFileInfo, numEntries);
+}
+
+int hdfsFileIsEncrypted(hdfsFileInfo *hdfsFileInfo) {
+  return libhdfs_hdfsFileIsEncrypted
+      ((libhdfs_hdfsFileInfo *) hdfsFileInfo);
+}
+
+char*** hdfsGetHosts(hdfsFS fs, const char* path,
+        tOffset start, tOffset length) {
+  return libhdfspp_hdfsGetHosts(fs->libhdfsppRep, path, start, length);
+}
+
+void hdfsFreeHosts(char ***blockHosts) {
+  return libhdfspp_hdfsFreeHosts(blockHosts);
+}
+
+tOffset hdfsGetDefaultBlockSize(hdfsFS fs) {
+  return libhdfspp_hdfsGetDefaultBlockSize(fs->libhdfsppRep);
+}
+
+tOffset hdfsGetDefaultBlockSizeAtPath(hdfsFS fs, const char *path) {
+  return libhdfspp_hdfsGetDefaultBlockSizeAtPath(fs->libhdfsppRep, path);
+}
+
+tOffset hdfsGetCapacity(hdfsFS fs) {
+  return libhdfspp_hdfsGetCapacity(fs->libhdfsppRep);
+}
+
+tOffset hdfsGetUsed(hdfsFS fs) {
+  return libhdfspp_hdfsGetUsed(fs->libhdfsppRep);
+}
+
+int hdfsChown(hdfsFS fs, const char* path, const char *owner,
+              const char *group) {
+  return libhdfspp_hdfsChown(fs->libhdfsppRep, path, owner, group);
+}
+
+int hdfsChmod(hdfsFS fs, const char* path, short mode) {
+  return libhdfspp_hdfsChmod(fs->libhdfsppRep, path, mode);
+}
+
+int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime) {
+  return libhdfspp_hdfsUtime(fs->libhdfsppRep, path, mtime, atime);
+}
+
+struct hadoopRzOptions *hadoopRzOptionsAlloc(void) {
+  return libhdfs_hadoopRzOptionsAlloc();
+}
+
+int hadoopRzOptionsSetSkipChecksum(
+        struct hadoopRzOptions *opts, int skip) {
+  return libhdfs_hadoopRzOptionsSetSkipChecksum(opts, skip);
+}
+
+int hadoopRzOptionsSetByteBufferPool(
+        struct hadoopRzOptions *opts, const char *className) {
+  return libhdfs_hadoopRzOptionsSetByteBufferPool(opts, className);
+}
+
+void hadoopRzOptionsFree(struct hadoopRzOptions *opts) {
+  libhdfs_hadoopRzOptionsFree(opts);
+}
+
+struct hadoopRzBuffer* hadoopReadZero(hdfsFile file,
+        struct hadoopRzOptions *opts, int32_t maxLength) {
+  return libhdfs_hadoopReadZero(file->libhdfsRep, opts, maxLength);
+}
+
+int32_t hadoopRzBufferLength(const struct hadoopRzBuffer *buffer) {
+  return libhdfs_hadoopRzBufferLength(buffer);
+}
+
+const void *hadoopRzBufferGet(const struct hadoopRzBuffer *buffer) {
+  return libhdfs_hadoopRzBufferGet(buffer);
+}
+
+void hadoopRzBufferFree(hdfsFile file, struct hadoopRzBuffer *buffer) {
+  return libhdfs_hadoopRzBufferFree(file->libhdfsRep, buffer);
+}
+
+int hdfsGetHedgedReadMetrics(hdfsFS fs, struct hdfsHedgedReadMetrics **metrics) {
+  return libhdfs_hdfsGetHedgedReadMetrics(fs->libhdfsRep, (struct libhdfs_hdfsHedgedReadMetrics **) metrics);
+}
+
+void hdfsFreeHedgedReadMetrics(struct hdfsHedgedReadMetrics *metrics) {
+  return libhdfs_hdfsFreeHedgedReadMetrics((struct libhdfs_hdfsHedgedReadMetrics *) metrics);
+}
+
+/*************
+ * hdfs_ext functions
+ */
+
+int hdfsGetLastError(char *buf, int len) {
+  return libhdfspp_hdfsGetLastError(buf, len);
+}
+
+int hdfsCancel(hdfsFS fs, hdfsFile file) {
+  return libhdfspp_hdfsCancel(fs->libhdfsppRep, file->libhdfsppRep);
+}
+
+
+int hdfsGetBlockLocations(hdfsFS fs, const char *path, struct hdfsBlockLocations ** locations) {
+  return libhdfspp_hdfsGetBlockLocations(fs->libhdfsppRep, path, locations);
+}
+
+int hdfsFreeBlockLocations(struct hdfsBlockLocations * locations) {
+  return libhdfspp_hdfsFreeBlockLocations(locations);
+}
+
+hdfsFileInfo *hdfsFind(hdfsFS fs, const char* path, const char* name, uint32_t *numEntries) {
+  return (hdfsFileInfo *)libhdfspp_hdfsFind(fs->libhdfsppRep, path, name, numEntries);
+}
+
+int hdfsCreateSnapshot(hdfsFS fs, const char* path, const char* name) {
+  return libhdfspp_hdfsCreateSnapshot(fs->libhdfsppRep, path, name);
+}
+
+int hdfsDeleteSnapshot(hdfsFS fs, const char* path, const char* name) {
+  return libhdfspp_hdfsDeleteSnapshot(fs->libhdfsppRep, path, name);
+}
+
+int hdfsRenameSnapshot(hdfsFS fs, const char* path, const char* old_name, const char* new_name) {
+  return libhdfspp_hdfsRenameSnapshot(fs->libhdfsppRep, path, old_name, new_name);
+}
+
+int hdfsAllowSnapshot(hdfsFS fs, const char* path) {
+  return libhdfspp_hdfsAllowSnapshot(fs->libhdfsppRep, path);
+}
+
+int hdfsDisallowSnapshot(hdfsFS fs, const char* path) {
+  return libhdfspp_hdfsDisallowSnapshot(fs->libhdfsppRep, path);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_errors.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_errors.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_errors.cc
new file mode 100644
index 0000000..f49f5fa
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_errors.cc
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <hdfs/hdfs.h>
+#include <hdfspp/hdfs_ext.h>
+
+#include <google/protobuf/io/coded_stream.h>
+#include <gmock/gmock.h>
+
+#include <string.h>
+#include <string>
+
+using ::testing::_;
+using ::testing::InvokeArgument;
+using ::testing::Return;
+
+/* Don't need a real minidfs cluster since this just passes invalid params. */
+
+TEST(HdfsppErrors, NullFileSystem) {
+
+  char buf[4096];
+
+  hdfsFS fs = nullptr;
+  hdfsFile fd = reinterpret_cast<hdfsFile>(1);
+
+  tSize res = hdfsRead(fs, fd, buf, 4096);
+  ASSERT_EQ(res, -1);
+
+  hdfsGetLastError(buf, 4096);
+
+  ASSERT_EQ(std::string(buf), "Cannot perform FS operations with null FS 
handle.");
+}
+
+TEST(HdfsppErrors, NullFileHandle) {
+  char buf[4096];
+
+  hdfsFS fs = reinterpret_cast<hdfsFS>(1);
+  hdfsFile fd = nullptr;
+
+  tSize res = hdfsRead(fs, fd, buf, 4096);
+  ASSERT_EQ(res, -1);
+
+  hdfsGetLastError(buf, 4096);
+
+  ASSERT_EQ(std::string(buf), "Cannot perform FS operations with null File 
handle.");
+}
+
+TEST(HdfsppErrors, ZeroLength) {
+  char buf[1];
+  buf[0] = 0;
+
+  hdfsFS fs = reinterpret_cast<hdfsFS>(1);
+  hdfsFile fd = nullptr;
+
+  tSize res = hdfsRead(fs, fd, buf, 1);
+  ASSERT_EQ(res, -1);
+
+  hdfsGetLastError(buf, 0);
+
+  ASSERT_EQ(std::string(buf), "");
+}
+
+TEST(HdfsppErrors, NegativeLength) {
+  char buf[1];
+  buf[0] = 0;
+
+  hdfsFS fs = reinterpret_cast<hdfsFS>(1);
+  hdfsFile fd = nullptr;
+
+  tSize res = hdfsRead(fs, fd, buf, 1);
+  ASSERT_EQ(res, -1);
+
+  hdfsGetLastError(buf, -1);
+
+  ASSERT_EQ(std::string(buf), "");
+}
+
+TEST(HdfsppErrors, MessageTruncation) {
+  char buf[4096];
+
+  hdfsFS fs = reinterpret_cast<hdfsFS>(1);
+  hdfsFile fd = nullptr;
+
+  tSize res = hdfsRead(fs, fd, buf, 4096);
+  ASSERT_EQ(res, -1);
+
+  hdfsGetLastError(buf, 10);
+
+  ASSERT_EQ(std::string(buf), "Cannot pe");
+}
+
+int main(int argc, char *argv[]) {
+  // The following line must be executed to initialize Google Mock
+  // (and Google Test) before running the tests.
+  ::testing::InitGoogleMock(&argc, argv);
+  int exit_code = RUN_ALL_TESTS();
+  google::protobuf::ShutdownProtobufLibrary();
+
+  return exit_code;
+}
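
Note what MessageTruncation establishes about the contract: the len
argument to hdfsGetLastError() budgets the NUL terminator, so a length of
10 yields the 9-character prefix "Cannot pe". A typical retrieval pattern
looks like the sketch below, where fs, file, and buf stand in for an open
filesystem, file handle, and read buffer.

    char msg[256];
    if (hdfsRead(fs, file, buf, sizeof(buf)) < 0) {
      // hdfsGetLastError NUL-terminates within the given length.
      hdfsGetLastError(msg, sizeof(msg));
      fprintf(stderr, "read failed: %s\n", msg);
    }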

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h
new file mode 100644
index 0000000..aecced1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h
@@ -0,0 +1,194 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hdfs/hdfs.h"
+#include "hdfspp/hdfspp.h"
+#include <native_mini_dfs.h>
+
+#include <google/protobuf/io/coded_stream.h>
+#include <gmock/gmock.h>
+
+#include <string>
+#include <atomic>
+
+#define TO_STR_HELPER(X) #X
+#define TO_STR(X) TO_STR_HELPER(X)
+
+#define TEST_BLOCK_SIZE 134217728
+
+namespace hdfs {
+
+
+static std::atomic<int> dirnum;
+static std::atomic<int> filenum;
+
+
+class FSHandle {
+public:
+  FSHandle() : fs(nullptr) {}
+  FSHandle(FileSystem * fs_in) : fs(fs_in) {}
+
+
+  FileSystem * handle()       { return fs.get(); }
+  operator     FileSystem *() { return fs.get(); }
+protected:
+  std::shared_ptr<FileSystem> fs;
+};
+
+
+/**
+ * For tests going through the C API to libhdfs++
+ */
+
+class HdfsHandle {
+public:
+    HdfsHandle() : fs(nullptr) {
+    }
+
+    HdfsHandle(hdfsFS fs_in) : fs(fs_in) {
+    }
+
+    ~HdfsHandle () {
+      if (fs)  {
+        EXPECT_EQ(0, hdfsDisconnect(fs));
+      }
+    }
+
+  std::string newDir(const std::string & parent_dir = "/") {
+    int newDirNum = dirnum++;
+
+    std::string path = parent_dir;
+    if (path.back() != '/')
+      path += "/";
+    path += "dir" + std::to_string(newDirNum) + "/";
+
+    EXPECT_EQ(0, hdfsCreateDirectory(*this, path.c_str()));
+    return path;
+  }
+
+  std::string newFile(const std::string & dir = "/", size_t size = 1024) {
+    int newFileNum = filenum++;
+
+    std::string path = dir;
+    if (path.back() != '/')
+      path += "/";
+    path += "file" + std::to_string(newFileNum);
+
+    hdfsFile file = hdfsOpenFile(*this, path.c_str(), O_WRONLY, 0, 0, 0);
+    EXPECT_NE(nullptr, file);
+    void * buf = malloc(size);
+    bzero(buf, size);
+    EXPECT_EQ((tSize)size, hdfsWrite(*this, file, buf, size));
+    EXPECT_EQ(0, hdfsCloseFile(*this, file));
+    free(buf);
+
+    return path;
+  }
+
+  std::string newFile(size_t size) {
+    return newFile("/", size);
+  }
+
+  hdfsFS   handle() { return fs; }
+  operator hdfsFS() { return fs; }
+private:
+  hdfsFS fs;
+};
+
+
+class MiniCluster  {
+public:
+  MiniCluster() : io_service(IoService::MakeShared()) {
+    struct NativeMiniDfsConf conf = {
+        1, /* doFormat */
+        0, /* webhdfs */
+        -1, /* webhdfs port */
+        1  /* shortcircuit */
+    };
+    clusterInfo = nmdCreate(&conf);
+    EXPECT_NE(nullptr, clusterInfo);
+    EXPECT_EQ(0, nmdWaitClusterUp(clusterInfo));
+
+    //TODO: Write some files for tests to read/check
+  }
+
+  virtual ~MiniCluster() {
+    if (clusterInfo) {
+        EXPECT_EQ(0, nmdShutdown(clusterInfo));
+    }
+    nmdFree(clusterInfo);
+  }
+
+  // Connect via the C++ API
+  FSHandle connect(const std::string & username) {
+    Options options;
+
+    unsigned int worker_count = io_service->InitDefaultWorkers();
+    EXPECT_NE(0u, worker_count);
+
+    FileSystem * fs = FileSystem::New(io_service, username, options);
+    EXPECT_NE(nullptr, fs);
+    FSHandle result(fs);
+
+    tPort port = (tPort)nmdGetNameNodePort(clusterInfo);
+    EXPECT_NE(0, port);
+    Status status = fs->Connect("localhost", std::to_string(port));
+    EXPECT_TRUE(status.ok());
+    return result;
+  }
+
+  FSHandle connect() {
+    return connect("");
+  }
+
+  // Connect via the C API
+  HdfsHandle connect_c(const std::string & username) {
+    tPort port;
+    hdfsFS hdfs;
+    struct hdfsBuilder *bld;
+
+    port = (tPort)nmdGetNameNodePort(clusterInfo);
+    bld = hdfsNewBuilder();
+    EXPECT_NE(nullptr, bld);
+    hdfsBuilderSetForceNewInstance(bld);
+    hdfsBuilderSetNameNode(bld, "localhost");
+    hdfsBuilderSetNameNodePort(bld, port);
+    hdfsBuilderConfSetStr(bld, "dfs.block.size",
+                          TO_STR(TEST_BLOCK_SIZE));
+    hdfsBuilderConfSetStr(bld, "dfs.blocksize",
+                          TO_STR(TEST_BLOCK_SIZE));
+    if (!username.empty()) {
+        hdfsBuilderSetUserName(bld, username.c_str());
+    }
+    hdfs = hdfsBuilderConnect(bld);
+    EXPECT_NE(nullptr, hdfs);
+
+    return HdfsHandle(hdfs);
+  }
+
+  // Connect via the C API
+  HdfsHandle connect_c() {
+    return connect_c("");
+  }
+
+protected:
+  struct NativeMiniDfsCluster* clusterInfo;
+  std::shared_ptr<IoService> io_service;
+};
+
+} // namespace
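
A minimal sketch of how a test consumes this harness (the fixture name, test name, and file size here are illustrative only; the smoke test in the next file is the real in-tree example):

    class ExampleTest : public ::testing::Test {
    public:
      MiniCluster cluster;
    };

    TEST_F(ExampleTest, CreatesUniquePaths) {
      HdfsHandle connection = cluster.connect_c();
      ASSERT_NE(nullptr, connection.handle());
      std::string dir  = connection.newDir();           // e.g. "/dir0/"
      std::string file = connection.newFile(dir, 2048); // 2048 zero bytes
      EXPECT_EQ(0, hdfsExists(connection, file.c_str()));
    }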

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs_smoke.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs_smoke.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs_smoke.cc
new file mode 100644
index 0000000..aaa2903
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs_smoke.cc
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hdfspp_mini_dfs.h"
+
+namespace hdfs {
+
+class HdfsMiniDfsSmokeTest: public ::testing::Test {
+public:
+  MiniCluster cluster;
+};
+
+// Make sure we can set up a mini-cluster and connect to it
+TEST_F(HdfsMiniDfsSmokeTest, SmokeTest) {
+  FSHandle handle = cluster.connect();
+  EXPECT_NE(nullptr, handle.handle());
+
+  HdfsHandle connection = cluster.connect_c();
+  EXPECT_NE(nullptr, connection.handle());
+}
+
+
+} // namespace
+
+
+int main(int argc, char *argv[]) {
+  // The following line must be executed to initialize Google Mock
+  // (and Google Test) before running the tests.
+  ::testing::InitGoogleMock(&argc, argv);
+  int exit_code = RUN_ALL_TESTS();
+  google::protobuf::ShutdownProtobufLibrary();
+
+  return exit_code;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper.c
new file mode 100644
index 0000000..8a7124b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper.c
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Rename libhdfs structs and functions */
+#include "libhdfs_wrapper_defines.h"
+#include "libhdfs/hdfs.c"
+#include "libhdfs_wrapper_undefs.h"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper.h
new file mode 100644
index 0000000..e8e716c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper.h
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* "Original" symbols can be included elsewhere. */
+#undef LIBHDFS_HDFS_H
+
+/* Rename libhdfs structs and functions */
+#include "libhdfs_wrapper_defines.h"
+#include "hdfs/hdfs.h"
+#include "libhdfs_wrapper_undefs.h"
+
+/* "Original" symbols can be included elsewhere. */
+#undef LIBHDFS_HDFS_H
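
The defines/undefs pair above is the usual preprocessor trick for linking two implementations of one C API into a single test binary: every public name is remapped to a prefixed alias before the header (or implementation) is included, then unmapped so the plain names stay free for the other client. A self-contained sketch of the pattern (toy greet() function, not part of this patch):

    #include <stdio.h>

    /* First "implementation", compiled under a prefixed symbol. */
    #define greet impl_a_greet
    static void greet(void) { printf("implementation A\n"); }
    #undef greet

    /* Second implementation of the same interface, different prefix. */
    #define greet impl_b_greet
    static void greet(void) { printf("implementation B\n"); }
    #undef greet

    int main(void) {
      impl_a_greet();  /* both coexist because the preprocessor renamed them */
      impl_b_greet();
      return 0;
    }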

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_defines.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_defines.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_defines.h
new file mode 100644
index 0000000..b907768
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_defines.h
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define hdfsFileIsOpenForRead libhdfs_hdfsFileIsOpenForRead
+#define hdfsFileIsOpenForWrite libhdfs_hdfsFileIsOpenForWrite
+#define hdfsFileGetReadStatistics libhdfs_hdfsFileGetReadStatistics
+#define hdfsReadStatisticsGetRemoteBytesRead libhdfs_hdfsReadStatisticsGetRemoteBytesRead
+#define hdfsFileClearReadStatistics libhdfs_hdfsFileClearReadStatistics
+#define hdfsFileFreeReadStatistics libhdfs_hdfsFileFreeReadStatistics
+#define hdfsConnectAsUser libhdfs_hdfsConnectAsUser
+#define hdfsConnect libhdfs_hdfsConnect
+#define hdfsConnectAsUserNewInstance libhdfs_hdfsConnectAsUserNewInstance
+#define hdfsConnectNewInstance libhdfs_hdfsConnectNewInstance
+#define hdfsBuilderConnect libhdfs_hdfsBuilderConnect
+#define hdfsNewBuilder libhdfs_hdfsNewBuilder
+#define hdfsBuilderSetForceNewInstance libhdfs_hdfsBuilderSetForceNewInstance
+#define hdfsBuilderSetNameNode libhdfs_hdfsBuilderSetNameNode
+#define hdfsBuilderSetNameNodePort libhdfs_hdfsBuilderSetNameNodePort
+#define hdfsBuilderSetUserName libhdfs_hdfsBuilderSetUserName
+#define hdfsBuilderSetKerbTicketCachePath libhdfs_hdfsBuilderSetKerbTicketCachePath
+#define hdfsFreeBuilder libhdfs_hdfsFreeBuilder
+#define hdfsBuilderConfSetStr libhdfs_hdfsBuilderConfSetStr
+#define hdfsConfGetStr libhdfs_hdfsConfGetStr
+#define hdfsConfGetInt libhdfs_hdfsConfGetInt
+#define hdfsConfStrFree libhdfs_hdfsConfStrFree
+#define hdfsDisconnect libhdfs_hdfsDisconnect
+#define hdfsOpenFile libhdfs_hdfsOpenFile
+#define hdfsTruncateFile libhdfs_hdfsTruncateFile
+#define hdfsUnbufferFile libhdfs_hdfsUnbufferFile
+#define hdfsCloseFile libhdfs_hdfsCloseFile
+#define hdfsExists libhdfs_hdfsExists
+#define hdfsSeek libhdfs_hdfsSeek
+#define hdfsTell libhdfs_hdfsTell
+#define hdfsRead libhdfs_hdfsRead
+#define hdfsPread libhdfs_hdfsPread
+#define hdfsWrite libhdfs_hdfsWrite
+#define hdfsFlush libhdfs_hdfsFlush
+#define hdfsHFlush libhdfs_hdfsHFlush
+#define hdfsHSync libhdfs_hdfsHSync
+#define hdfsAvailable libhdfs_hdfsAvailable
+#define hdfsCopy libhdfs_hdfsCopy
+#define hdfsMove libhdfs_hdfsMove
+#define hdfsDelete libhdfs_hdfsDelete
+#define hdfsRename libhdfs_hdfsRename
+#define hdfsGetWorkingDirectory libhdfs_hdfsGetWorkingDirectory
+#define hdfsSetWorkingDirectory libhdfs_hdfsSetWorkingDirectory
+#define hdfsCreateDirectory libhdfs_hdfsCreateDirectory
+#define hdfsSetReplication libhdfs_hdfsSetReplication
+#define hdfsListDirectory libhdfs_hdfsListDirectory
+#define hdfsGetPathInfo libhdfs_hdfsGetPathInfo
+#define hdfsFreeFileInfo libhdfs_hdfsFreeFileInfo
+#define hdfsFileIsEncrypted libhdfs_hdfsFileIsEncrypted
+#define hdfsGetHosts libhdfs_hdfsGetHosts
+#define hdfsFreeHosts libhdfs_hdfsFreeHosts
+#define hdfsGetDefaultBlockSize libhdfs_hdfsGetDefaultBlockSize
+#define hdfsGetDefaultBlockSizeAtPath libhdfs_hdfsGetDefaultBlockSizeAtPath
+#define hdfsGetCapacity libhdfs_hdfsGetCapacity
+#define hdfsGetUsed libhdfs_hdfsGetUsed
+#define hdfsChown libhdfs_hdfsChown
+#define hdfsChmod libhdfs_hdfsChmod
+#define hdfsUtime libhdfs_hdfsUtime
+#define hadoopRzOptionsAlloc libhdfs_hadoopRzOptionsAlloc
+#define hadoopRzOptionsSetSkipChecksum libhdfs_hadoopRzOptionsSetSkipChecksum
+#define hadoopRzOptionsSetByteBufferPool libhdfs_hadoopRzOptionsSetByteBufferPool
+#define hadoopRzOptionsFree libhdfs_hadoopRzOptionsFree
+#define hadoopReadZero libhdfs_hadoopReadZero
+#define hadoopRzBufferLength libhdfs_hadoopRzBufferLength
+#define hadoopRzBufferGet libhdfs_hadoopRzBufferGet
+#define hadoopRzBufferFree libhdfs_hadoopRzBufferFree
+#define hdfs_internal libhdfs_hdfs_internal
+#define hdfsFS libhdfs_hdfsFS
+#define hdfsFile_internal libhdfs_hdfsFile_internal
+#define hdfsFile libhdfs_hdfsFile
+#define tObjectKind libhdfs_tObjectKind
+#define kObjectKindFile libhdfs_kObjectKindFile
+#define kObjectKindDirectory libhdfs_kObjectKindDirectory
+#define hdfsReadStatistics libhdfs_hdfsReadStatistics
+#define hdfsFileInfo libhdfs_hdfsFileInfo
+#define hdfsHedgedReadMetrics libhdfs_hdfsHedgedReadMetrics
+#define hdfsGetHedgedReadMetrics libhdfs_hdfsGetHedgedReadMetrics
+#define hdfsFreeHedgedReadMetrics libhdfs_hdfsFreeHedgedReadMetrics
+#define hdfsStreamBuilderAlloc libhdfs_hdfsStreamBuilderAlloc
+#define hdfsStreamBuilderBuild libhdfs_hdfsStreamBuilderBuild

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h
new file mode 100644
index 0000000..fce0e82
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#undef hdfsFileIsOpenForRead
+#undef hdfsFileIsOpenForWrite
+#undef hdfsFileGetReadStatistics
+#undef hdfsReadStatisticsGetRemoteBytesRead
+#undef hdfsFileClearReadStatistics
+#undef hdfsFileFreeReadStatistics
+#undef hdfsConnectAsUser
+#undef hdfsConnect
+#undef hdfsConnectAsUserNewInstance
+#undef hdfsConnectNewInstance
+#undef hdfsBuilderConnect
+#undef hdfsNewBuilder
+#undef hdfsBuilderSetForceNewInstance
+#undef hdfsBuilderSetNameNode
+#undef hdfsBuilderSetNameNodePort
+#undef hdfsBuilderSetUserName
+#undef hdfsBuilderSetKerbTicketCachePath
+#undef hdfsFreeBuilder
+#undef hdfsBuilderConfSetStr
+#undef hdfsConfGetStr
+#undef hdfsConfGetInt
+#undef hdfsConfStrFree
+#undef hdfsDisconnect
+#undef hdfsOpenFile
+#undef hdfsTruncateFile
+#undef hdfsUnbufferFile
+#undef hdfsCloseFile
+#undef hdfsExists
+#undef hdfsSeek
+#undef hdfsTell
+#undef hdfsRead
+#undef hdfsPread
+#undef hdfsWrite
+#undef hdfsFlush
+#undef hdfsHFlush
+#undef hdfsHSync
+#undef hdfsAvailable
+#undef hdfsCopy
+#undef hdfsMove
+#undef hdfsDelete
+#undef hdfsRename
+#undef hdfsGetWorkingDirectory
+#undef hdfsSetWorkingDirectory
+#undef hdfsCreateDirectory
+#undef hdfsSetReplication
+#undef hdfsListDirectory
+#undef hdfsGetPathInfo
+#undef hdfsFreeFileInfo
+#undef hdfsFileIsEncrypted
+#undef hdfsGetHosts
+#undef hdfsFreeHosts
+#undef hdfsGetDefaultBlockSize
+#undef hdfsGetDefaultBlockSizeAtPath
+#undef hdfsGetCapacity
+#undef hdfsGetUsed
+#undef hdfsChown
+#undef hdfsChmod
+#undef hdfsUtime
+#undef hadoopRzOptionsAlloc
+#undef hadoopRzOptionsSetSkipChecksum
+#undef hadoopRzOptionsSetByteBufferPool
+#undef hadoopRzOptionsFree
+#undef hadoopReadZero
+#undef hadoopRzBufferLength
+#undef hadoopRzBufferGet
+#undef hadoopRzBufferFree
+#undef hdfs_internal
+#undef hdfsFS
+#undef hdfsFile_internal
+#undef hdfsFile
+#undef tObjectKind
+#undef kObjectKindFile
+#undef kObjectKindDirectory
+#undef hdfsReadStatistics
+#undef hdfsFileInfo
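+/* hdfsGetLastError..hdfsDisallowSnapshot below are libhdfspp-only names that
+ * libhdfs_wrapper_defines.h never defines; #undef of an undefined name is a
+ * harmless no-op, so this single undef list serves both wrappers. */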
+#undef hdfsGetLastError
+#undef hdfsCancel
+#undef hdfsGetBlockLocations
+#undef hdfsFreeBlockLocations
+#undef hdfsFind
+#undef hdfsCreateSnapshot
+#undef hdfsDeleteSnapshot
+#undef hdfsRenameSnapshot
+#undef hdfsAllowSnapshot
+#undef hdfsDisallowSnapshot
+#undef hdfsHedgedReadMetrics
+#undef hdfsGetHedgedReadMetrics
+#undef hdfsFreeHedgedReadMetrics
+#undef hdfsStreamBuilderAlloc
+#undef hdfsStreamBuilderBuild

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper.cc
new file mode 100644
index 0000000..913bd6c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper.cc
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Rename libhdfspp structs and functions */
+#include "libhdfspp_wrapper_defines.h"
+#include "bindings/c/hdfs.cc"
+#include "libhdfs_wrapper_undefs.h"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper.h
new file mode 100644
index 0000000..8cd78d2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper.h
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* "Original" symbols can be included elsewhere. */
+#undef LIBHDFS_HDFS_H
+
+/* Rename libhdfspp structs and functions */
+#include "libhdfspp_wrapper_defines.h"
+#include "hdfs/hdfs.h"
+#include "hdfspp/hdfs_ext.h"
+#include "libhdfs_wrapper_undefs.h"
+
+/* "Original" symbols can be included elsewhere. */
+#undef LIBHDFS_HDFS_H

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h
new file mode 100644
index 0000000..d0411c2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define hdfsFileIsOpenForRead libhdfspp_hdfsFileIsOpenForRead
+#define hdfsFileIsOpenForWrite libhdfspp_hdfsFileIsOpenForWrite
+#define hdfsFileGetReadStatistics libhdfspp_hdfsFileGetReadStatistics
+#define hdfsReadStatisticsGetRemoteBytesRead libhdfspp_hdfsReadStatisticsGetRemoteBytesRead
+#define hdfsFileClearReadStatistics libhdfspp_hdfsFileClearReadStatistics
+#define hdfsFileFreeReadStatistics libhdfspp_hdfsFileFreeReadStatistics
+#define hdfsConnectAsUser libhdfspp_hdfsConnectAsUser
+#define hdfsConnect libhdfspp_hdfsConnect
+#define hdfsConnectAsUserNewInstance libhdfspp_hdfsConnectAsUserNewInstance
+#define hdfsConnectNewInstance libhdfspp_hdfsConnectNewInstance
+#define hdfsBuilderConnect libhdfspp_hdfsBuilderConnect
+#define hdfsNewBuilder libhdfspp_hdfsNewBuilder
+#define hdfsBuilderSetForceNewInstance libhdfspp_hdfsBuilderSetForceNewInstance
+#define hdfsBuilderSetNameNode libhdfspp_hdfsBuilderSetNameNode
+#define hdfsBuilderSetNameNodePort libhdfspp_hdfsBuilderSetNameNodePort
+#define hdfsBuilderSetUserName libhdfspp_hdfsBuilderSetUserName
+#define hdfsBuilderSetKerbTicketCachePath libhdfspp_hdfsBuilderSetKerbTicketCachePath
+#define hdfsFreeBuilder libhdfspp_hdfsFreeBuilder
+#define hdfsBuilderConfSetStr libhdfspp_hdfsBuilderConfSetStr
+#define hdfsConfGetStr libhdfspp_hdfsConfGetStr
+#define hdfsConfGetInt libhdfspp_hdfsConfGetInt
+#define hdfsConfStrFree libhdfspp_hdfsConfStrFree
+#define hdfsDisconnect libhdfspp_hdfsDisconnect
+#define hdfsOpenFile libhdfspp_hdfsOpenFile
+#define hdfsTruncateFile libhdfspp_hdfsTruncateFile
+#define hdfsUnbufferFile libhdfspp_hdfsUnbufferFile
+#define hdfsCloseFile libhdfspp_hdfsCloseFile
+#define hdfsExists libhdfspp_hdfsExists
+#define hdfsSeek libhdfspp_hdfsSeek
+#define hdfsTell libhdfspp_hdfsTell
+#define hdfsRead libhdfspp_hdfsRead
+#define hdfsPread libhdfspp_hdfsPread
+#define hdfsWrite libhdfspp_hdfsWrite
+#define hdfsFlush libhdfspp_hdfsFlush
+#define hdfsHFlush libhdfspp_hdfsHFlush
+#define hdfsHSync libhdfspp_hdfsHSync
+#define hdfsAvailable libhdfspp_hdfsAvailable
+#define hdfsCopy libhdfspp_hdfsCopy
+#define hdfsMove libhdfspp_hdfsMove
+#define hdfsDelete libhdfspp_hdfsDelete
+#define hdfsRename libhdfspp_hdfsRename
+#define hdfsGetWorkingDirectory libhdfspp_hdfsGetWorkingDirectory
+#define hdfsSetWorkingDirectory libhdfspp_hdfsSetWorkingDirectory
+#define hdfsCreateDirectory libhdfspp_hdfsCreateDirectory
+#define hdfsSetReplication libhdfspp_hdfsSetReplication
+#define hdfsListDirectory libhdfspp_hdfsListDirectory
+#define hdfsGetPathInfo libhdfspp_hdfsGetPathInfo
+#define hdfsFreeFileInfo libhdfspp_hdfsFreeFileInfo
+#define hdfsFileIsEncrypted libhdfspp_hdfsFileIsEncrypted
+#define hdfsGetHosts libhdfspp_hdfsGetHosts
+#define hdfsFreeHosts libhdfspp_hdfsFreeHosts
+#define hdfsGetDefaultBlockSize libhdfspp_hdfsGetDefaultBlockSize
+#define hdfsGetDefaultBlockSizeAtPath libhdfspp_hdfsGetDefaultBlockSizeAtPath
+#define hdfsGetCapacity libhdfspp_hdfsGetCapacity
+#define hdfsGetUsed libhdfspp_hdfsGetUsed
+#define hdfsChown libhdfspp_hdfsChown
+#define hdfsChmod libhdfspp_hdfsChmod
+#define hdfsUtime libhdfspp_hdfsUtime
+#define hadoopRzOptionsAlloc libhdfspp_hadoopRzOptionsAlloc
+#define hadoopRzOptionsSetSkipChecksum libhdfspp_hadoopRzOptionsSetSkipChecksum
+#define hadoopRzOptionsSetByteBufferPool libhdfspp_hadoopRzOptionsSetByteBufferPool
+#define hadoopRzOptionsFree libhdfspp_hadoopRzOptionsFree
+#define hadoopReadZero libhdfspp_hadoopReadZero
+#define hadoopRzBufferLength libhdfspp_hadoopRzBufferLength
+#define hadoopRzBufferGet libhdfspp_hadoopRzBufferGet
+#define hadoopRzBufferFree libhdfspp_hadoopRzBufferFree
+#define hdfs_internal libhdfspp_hdfs_internal
+#define hdfsFS libhdfspp_hdfsFS
+#define hdfsFile_internal libhdfspp_hdfsFile_internal
+#define hdfsFile libhdfspp_hdfsFile
+#define tObjectKind libhdfspp_tObjectKind
+#define kObjectKindFile libhdfspp_kObjectKindFile
+#define kObjectKindDirectory libhdfspp_kObjectKindDirectory
+#define hdfsReadStatistics libhdfspp_hdfsReadStatistics
+#define hdfsFileInfo libhdfspp_hdfsFileInfo
+#define hdfsGetLastError libhdfspp_hdfsGetLastError
+#define hdfsCancel libhdfspp_hdfsCancel
+#define hdfsGetBlockLocations libhdfspp_hdfsGetBlockLocations
+#define hdfsFreeBlockLocations libhdfspp_hdfsFreeBlockLocations
+#define hdfsFind libhdfspp_hdfsFind
+#define hdfsCreateSnapshot libhdfspp_hdfsCreateSnapshot
+#define hdfsDeleteSnapshot libhdfspp_hdfsDeleteSnapshot
+#define hdfsRenameSnapshot libhdfspp_hdfsRenameSnapshot
+#define hdfsAllowSnapshot libhdfspp_hdfsAllowSnapshot
+#define hdfsDisallowSnapshot libhdfspp_hdfsDisallowSnapshot
+#define hdfsHedgedReadMetrics libhdfspp_hdfsHedgedReadMetrics
+#define hdfsGetHedgedReadMetrics libhdfspp_hdfsGetHedgedReadMetrics
+#define hdfsFreeHedgedReadMetrics libhdfspp_hdfsFreeHedgedReadMetrics
+#define hdfsStreamBuilderAlloc libhdfspp_hdfsStreamBuilderAlloc
+#define hdfsStreamBuilderBuild libhdfspp_hdfsStreamBuilderBuild
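
With both define sets in place, a single binary can drive the JNI libhdfs client and the native libhdfspp client against the same mini-cluster and compare their answers. A hedged sketch (the prefixed names are exactly those produced by the two *_wrapper_defines.h files above; the port is a placeholder a real test would take from MiniCluster via nmdGetNameNodePort()):

    #include <gtest/gtest.h>
    #include "libhdfs_wrapper.h"
    #include "libhdfspp_wrapper.h"

    TEST(WrapperSketch, ClientsAgreeOnExists) {
      tPort port = 8020;  /* placeholder NameNode port */
      libhdfs_hdfsFS jni_fs  = libhdfs_hdfsConnect("localhost", port);
      libhdfspp_hdfsFS pp_fs = libhdfspp_hdfsConnect("localhost", port);
      ASSERT_NE(nullptr, jni_fs);
      ASSERT_NE(nullptr, pp_fs);
      /* Both clients look at the same namespace, so they must agree. */
      EXPECT_EQ(libhdfs_hdfsExists(jni_fs, "/"),
                libhdfspp_hdfsExists(pp_fs, "/"));
      EXPECT_EQ(0, libhdfs_hdfsDisconnect(jni_fs));
      EXPECT_EQ(0, libhdfspp_hdfsDisconnect(pp_fs));
    }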

