http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/cat/cat.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/cat/cat.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/cat/cat.c
new file mode 100644
index 0000000..bee5382
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/cat/cat.c
@@ -0,0 +1,121 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+/*
+  A stripped-down version of unix's "cat".
+  Doesn't deal with any flags for now, will just attempt to read the whole file.
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "hdfspp/hdfs_ext.h"
+#include "uriparser2/uriparser2.h"
+#include "common/util_c.h"
+
+#define SCHEME "hdfs"
+#define BUF_SIZE 1048576 //1 MB
+static char input_buffer[BUF_SIZE];
+
+int main(int argc, char** argv) {
+
+  char error_text[1024];
+  if (argc != 2) {
+    fprintf(stderr, "usage: cat 
[hdfs://[<hostname>:<port>]]/<path-to-file>\n");
+    return 1;
+  }
+
+  URI * uri = NULL;
+  const char * uri_path = argv[1];
+
+  //Separate check for scheme is required, otherwise uriparser2.h library causes memory issues under valgrind
+  const char * scheme_end = strstr(uri_path, "://");
+  if (scheme_end) {
+    if (strncmp(uri_path, SCHEME, strlen(SCHEME)) != 0) {
+      fprintf(stderr, "Scheme %.*s:// is not supported.\n", (int) (scheme_end 
- uri_path), uri_path);
+      return 1;
+    } else {
+      uri = uri_parse(uri_path);
+    }
+  }
+  if (!uri) {
+    fprintf(stderr, "Malformed URI: %s\n", uri_path);
+    return 1;
+  }
+
+  struct hdfsBuilder* builder = hdfsNewBuilder();
+  if (uri->host)
+    hdfsBuilderSetNameNode(builder, uri->host);
+  if (uri->port != 0)
+    hdfsBuilderSetNameNodePort(builder, uri->port);
+
+  hdfsFS fs = hdfsBuilderConnect(builder);
+  if (fs == NULL) {
+    hdfsGetLastError(error_text, sizeof(error_text));
+    const char * host = uri->host ? uri->host : "<default>";
+    int port = uri->port;
+    if (port == 0)
+      port = 8020;
+    fprintf(stderr, "Unable to connect to %s:%d, hdfsConnect returned 
null.\n%s\n",
+            host, port, error_text);
+    return 1;
+  }
+
+  hdfsFile file = hdfsOpenFile(fs, uri->path, 0, 0, 0, 0);
+  if (NULL == file) {
+    hdfsGetLastError(error_text, sizeof(error_text));
+    fprintf(stderr, "Unable to open file %s: %s\n", uri->path, error_text );
+    hdfsDisconnect(fs);
+    hdfsFreeBuilder(builder);
+    return 1;
+  }
+
+  ssize_t read_bytes_count = 0;
+  ssize_t last_read_bytes = 0;
+
+  while (0 < (last_read_bytes =
+                  hdfsPread(fs, file, read_bytes_count, input_buffer, sizeof(input_buffer)))) {
+    fwrite(input_buffer, last_read_bytes, 1, stdout);
+    read_bytes_count += last_read_bytes;
+  }
+
+  int res = 0;
+  res = hdfsCloseFile(fs, file);
+  if (0 != res) {
+    hdfsGetLastError(error_text, sizeof(error_text));
+    fprintf(stderr, "Error closing file: %s\n", error_text);
+    hdfsDisconnect(fs);
+    hdfsFreeBuilder(builder);
+    return 1;
+  }
+
+  res = hdfsDisconnect(fs);
+  if (0 != res) {
+    hdfsGetLastError(error_text, sizeof(error_text));
+    fprintf(stderr, "Error disconnecting filesystem: %s", error_text);
+    hdfsFreeBuilder(builder);
+    return 1;
+  }
+
+  hdfsFreeBuilder(builder);
+  free(uri);
+  // Clean up static data and prevent valgrind memory leaks
+  ShutdownProtobufLibrary_C();
+  return 0;
+}
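
Not part of the patch: the loop above exits on any non-positive hdfsPread return, so a read error is silently treated like end-of-file. A sketch of the same loop with the error case reported separately, reusing only APIs already used in cat.c (a negative return signaling an error is standard libhdfs hdfsPread behavior):

    static int cat_file(hdfsFS fs, hdfsFile file) {
      static char buf[BUF_SIZE];
      char err[1024];
      ssize_t offset = 0, n;
      while ((n = hdfsPread(fs, file, offset, buf, sizeof(buf))) > 0) {
        fwrite(buf, n, 1, stdout);
        offset += n;
      }
      if (n < 0) { /* negative return is a read error, not EOF */
        hdfsGetLastError(err, sizeof(err));
        fprintf(stderr, "Read error at offset %zd: %s\n", offset, err);
        return 1;
      }
      return 0;
    }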

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/CMakeLists.txt
new file mode 100644
index 0000000..6276467
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/CMakeLists.txt
@@ -0,0 +1,27 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Default LIBHDFSPP_DIR to the default install location.  You can override
+#    it by adding -DLIBHDFSPP_DIR=... to your cmake invocation
+set(LIBHDFSPP_DIR CACHE STRING ${CMAKE_INSTALL_PREFIX})
+
+include_directories( ${LIBHDFSPP_DIR}/include )
+link_directories( ${LIBHDFSPP_DIR}/lib )
+
+add_executable(connect_cancel_c connect_cancel.c)
+target_link_libraries(connect_cancel_c hdfspp_static uriparser2)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/connect_cancel.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/connect_cancel.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/connect_cancel.c
new file mode 100644
index 0000000..f6af6d1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/connect_cancel.c
@@ -0,0 +1,107 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+/*
+  Attempt to connect to a cluster and use Control-C to bail out if it takes a while.
+  Valid config must be in environment variable $HADOOP_CONF_DIR
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+#include <unistd.h>
+
+#include "hdfspp/hdfs_ext.h"
+#include "common/util_c.h"
+
+#define ERROR_BUFFER_SIZE 1024
+
+// Global so signal handler can get at it
+hdfsFS fs = NULL;
+
+const char *catch_enter  = "In signal handler, going to try and cancel.\n";
+const char *catch_cancel = "hdfsCancelPendingConnect has been canceled in the signal handler.\n";
+const char *catch_exit   = "Exiting the signal handler.\n";
+
+// Print to stdout without calling malloc or otherwise indirectly modifying userspace state.
+// Write calls to stdout may still interleave with stuff coming from elsewhere.
+static void sighandler_direct_stdout(const char *msg) {
+  if(!msg)
+    return;
+  ssize_t res = write(1 /*posix stdout fd*/, msg, strlen(msg));
+  (void)res;
+}
+
+static void sig_catch(int val) {
+  // Beware of calling things that aren't reentrant, e.g. malloc, while in a signal handler.
+  sighandler_direct_stdout(catch_enter);
+
+  if(fs) {
+    hdfsCancelPendingConnection(fs);
+    sighandler_direct_stdout(catch_cancel);
+  }
+  sighandler_direct_stdout(catch_exit);
+}
+
+
+int main(int argc, char** argv) {
+  hdfsSetLoggingLevel(HDFSPP_LOG_LEVEL_INFO);
+  signal(SIGINT, sig_catch);
+
+  char error_text[ERROR_BUFFER_SIZE];
+  if (argc != 1) {
+    fprintf(stderr, "usage: ./connect_cancel_c\n");
+    ShutdownProtobufLibrary_C();
+    exit(EXIT_FAILURE);
+  }
+
+  const char *hdfsconfdir = getenv("HADOOP_CONF_DIR");
+  if(!hdfsconfdir) {
+    fprintf(stderr, "$HADOOP_CONF_DIR must be set\n");
+    ShutdownProtobufLibrary_C();
+    exit(EXIT_FAILURE);
+  }
+
+  struct hdfsBuilder* builder = hdfsNewBuilderFromDirectory(hdfsconfdir);
+
+  fs = hdfsAllocateFileSystem(builder);
+  if (fs == NULL) {
+    hdfsGetLastError(error_text, ERROR_BUFFER_SIZE);
+    fprintf(stderr, "hdfsAllocateFileSystem returned null.\n%s\n", error_text);
+    hdfsFreeBuilder(builder);
+    ShutdownProtobufLibrary_C();
+    exit(EXIT_FAILURE);
+  }
+
+  int connected = hdfsConnectAllocated(fs, builder);
+  if (connected != 0) {
+    hdfsGetLastError(error_text, ERROR_BUFFER_SIZE);
+    fprintf(stderr, "hdfsConnectAllocated errored.\n%s\n", error_text);
+    hdfsFreeBuilder(builder);
+    ShutdownProtobufLibrary_C();
+    exit(EXIT_FAILURE);
+  }
+
+  hdfsDisconnect(fs);
+  hdfsFreeBuilder(builder);
+  // Clean up static data and prevent valgrind memory leaks
+  ShutdownProtobufLibrary_C();
+  return 0;
+}
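
Not part of the patch: a common alternative to calling into the library from the handler is to record the signal in a flag (assigning to a volatile sig_atomic_t is async-signal-safe) and act on it from normal code. Since hdfsConnectAllocated blocks the main thread here, the polling would have to happen on a separate thread; a sketch under that assumption:

    static volatile sig_atomic_t interrupted = 0;

    static void flag_only_handler(int val) {
      (void)val;
      interrupted = 1;  /* no library calls inside the handler */
    }

    /* hypothetical polling-thread body while the connect is pending:
         while (connect_pending) {
           if (interrupted) { hdfsCancelPendingConnection(fs); break; }
           usleep(10000);
         }
    */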

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/CMakeLists.txt
new file mode 100644
index 0000000..1849779
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/CMakeLists.txt
@@ -0,0 +1,24 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include_directories( ../../tools )
+
+add_subdirectory(cat)
+add_subdirectory(gendirs)
+add_subdirectory(find)
+add_subdirectory(connect_cancel)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/cat/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/cat/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/cat/CMakeLists.txt
new file mode 100644
index 0000000..a5bcf76
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/cat/CMakeLists.txt
@@ -0,0 +1,27 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Default LIBHDFSPP_DIR to the default install location.  You can override
+#    it by adding -DLIBHDFSPP_DIR=... to your cmake invocation
+set(LIBHDFSPP_DIR CACHE STRING ${CMAKE_INSTALL_PREFIX})
+
+include_directories( ${LIBHDFSPP_DIR}/include )
+link_directories( ${LIBHDFSPP_DIR}/lib )
+
+add_executable(cat cat.cc)
+target_link_libraries(cat tools_common hdfspp_static)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/cat/cat.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/cat/cat.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/cat/cat.cc
new file mode 100644
index 0000000..9d400e7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/cat/cat.cc
@@ -0,0 +1,89 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+/**
+   * Unix-like cat tool example.
+   *
+   * Reads the specified file from HDFS and outputs to stdout.
+   *
+   * Usage: cat /<path-to-file>
+   *
+   * Example: cat /dir/file
+   *
+   * @param path-to-file    Absolute path to the file to read.
+   *
+   **/
+
+#include "hdfspp/hdfspp.h"
+#include <google/protobuf/stubs/common.h>
+#include "tools_common.h"
+
+const std::size_t BUF_SIZE = 1048576; //1 MB
+static char input_buffer[BUF_SIZE];
+
+int main(int argc, char *argv[]) {
+  if (argc != 2) {
+    std::cerr << "usage: cat /<path-to-file>" << std::endl;
+    exit(EXIT_FAILURE);
+  }
+  std::string path = argv[1];
+
+  //Building a URI object from the given uri path
+  hdfs::URI uri = hdfs::parse_path_or_exit(path);
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri, false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::FileHandle *file_raw = nullptr;
+  hdfs::Status status = fs->Open(path, &file_raw);
+  if (!status.ok()) {
+    std::cerr << "Could not open file " << path << ". " << status.ToString() 
<< std::endl;
+    exit(EXIT_FAILURE);
+  }
+  //wrapping file_raw into a unique pointer to guarantee deletion
+  std::unique_ptr<hdfs::FileHandle> file(file_raw);
+
+  ssize_t total_bytes_read = 0;
+  size_t last_bytes_read = 0;
+
+  do{
+    //Reading file chunks
+    status = file->Read(input_buffer, sizeof(input_buffer), &last_bytes_read);
+    if(status.ok()) {
+      //Writing file chunks to stdout
+      fwrite(input_buffer, last_bytes_read, 1, stdout);
+      total_bytes_read += last_bytes_read;
+    } else {
+      if(status.is_invalid_offset()){
+        //Reached the end of the file
+        break;
+      } else {
+        std::cerr << "Error reading the file: " << status.ToString() << 
std::endl;
+        exit(EXIT_FAILURE);
+      }
+    }
+  } while (last_bytes_read > 0);
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
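
Not part of the patch: the read loop generalizes into a small helper. A sketch reusing the FileHandle::Read signature and the is_invalid_offset end-of-file convention shown above (the helper name is hypothetical; assumes "hdfspp/hdfspp.h" and <string> are included):

    hdfs::Status ReadAll(hdfs::FileHandle &file, std::string *out) {
      char buf[4096];
      size_t n = 0;
      do {
        hdfs::Status s = file.Read(buf, sizeof(buf), &n);
        if (!s.ok())
          return s.is_invalid_offset() ? hdfs::Status::OK()  // reached EOF
                                       : s;                  // real read error
        out->append(buf, n);
      } while (n > 0);
      return hdfs::Status::OK();
    }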

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/CMakeLists.txt
new file mode 100644
index 0000000..e3cc0b5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/CMakeLists.txt
@@ -0,0 +1,27 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Default LIBHDFSPP_DIR to the default install location.  You can override
+#    it by adding -DLIBHDFSPP_DIR=... to your cmake invocation
+set(LIBHDFSPP_DIR CACHE STRING ${CMAKE_INSTALL_PREFIX})
+
+include_directories( ${LIBHDFSPP_DIR}/include )
+link_directories( ${LIBHDFSPP_DIR}/lib )
+
+add_executable(connect_cancel connect_cancel.cc)
+target_link_libraries(connect_cancel hdfspp_static)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/connect_cancel.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/connect_cancel.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/connect_cancel.cc
new file mode 100644
index 0000000..46eef9d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/connect_cancel.cc
@@ -0,0 +1,154 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+
+#include "hdfspp/hdfspp.h"
+#include "common/hdfs_configuration.h"
+#include "common/configuration_loader.h"
+
+#include <google/protobuf/stubs/common.h>
+
+#include <signal.h>
+#include <unistd.h>
+
+#include <thread>
+#include <iostream>
+
+// Simple example of how to cancel an async connect call.
+// Here Control-C (SIGINT) is caught in order to invoke the FS level cancel and
+// properly tear down the process.  Valgrind should show no leaked memory on exit
+// when cancel has been called.  URI parsing code is omitted and defaultFs from
+// /etc/hadoop/conf or $HADOOP_CONF_DIR is always used.
+
+// Scoped globally to make it simple to reference from the signal handler.
+std::shared_ptr<hdfs::FileSystem> fs;
+
+const std::string catch_enter("In signal handler, going to try and cancel FileSystem::Connect.\n");
+const std::string catch_cancel("FileSystem::Cancel has been canceled in the signal handler.\n");
+const std::string catch_exit("Exiting the signal handler.\n");
+
+// Avoid IO reentrancy issues, see comments in signal handler below.
+// It's possible that the write interleaves with another write call,
+// but it won't corrupt the stack or heap.
+static void sighandler_direct_stdout(const std::string &msg) {
+  ssize_t res = ::write(1 /*posix stdout FD*/, msg.data(), msg.size());
+  // In production you'd want to check res, but error handling code will
+  // need to be fairly application specific if it's going to properly
+  // avoid reentrant calls to malloc.
+  (void)res;
+}
+
+// Signal handler to make a SIGINT call cancel rather than exit().
+static void sig_catch(int val) {
+  (void)val;
+  // This is avoiding the tricky bits of signal handling, notably that the
+  // underlying string manipulation and IO functions used by the logger
+  // are unlikely to be reentrant.
+  //
+  // Production code could mask out all logging on handler entry and enable
+  // it again on exit; here we just assume it's "good enough" and some
+  // (possibly broken) log messages are better than none.
+
+  sighandler_direct_stdout(catch_enter);
+  if(fs) {
+    // This will invoke the callback immediately with an OperationCanceled status
+    fs->CancelPendingConnect();
+    sighandler_direct_stdout(catch_cancel);
+  }
+  sighandler_direct_stdout(catch_exit);
+}
+
+
+int main(int arg_token_count, const char **args) {
+  (void)args;
+  if(arg_token_count != 1) {
+    std::cerr << "usage: ./connect_cancel";
+    google::protobuf::ShutdownProtobufLibrary();
+    exit(EXIT_FAILURE);
+  }
+
+  // Register a signal handler to asynchronously invoke cancel from outside the main thread.
+  signal(SIGINT, sig_catch);
+
+  // Generic setup/config code much like the other examples.
+  hdfs::Options options;
+  //Setting the config path to the default: "$HADOOP_CONF_DIR" or "/etc/hadoop/conf"
+  hdfs::ConfigurationLoader loader;
+  //Loading default config files core-site.xml and hdfs-site.xml from the config path
+  hdfs::optional<hdfs::HdfsConfiguration> config = loader.LoadDefaultResources<hdfs::HdfsConfiguration>();
+  //TODO: HDFS-9539 - after this is resolved, valid config will always be returned.
+  if(config){
+    //Loading options from the config
+    options = config->GetOptions();
+  }
+
+
+  // Start an IoService and some worker threads
+  std::shared_ptr<hdfs::IoService> service = hdfs::IoService::MakeShared();
+  if(nullptr == service) {
+    std::cerr << "Unable to create IoService" << std::endl;
+    fs.reset();
+    // Nasty hack to clean up for valgrind since we don't have the C++17 optional<T>::reset method
+    config = decltype(config)();
+    google::protobuf::ShutdownProtobufLibrary();
+    exit(EXIT_FAILURE);
+  }
+
+  unsigned int worker_count = service->InitDefaultWorkers();
+  if(worker_count < 1) {
+    std::cerr << "Unable to create IoService worker threads";
+    fs.reset();
+    service->Stop();
+    config = decltype(config)();
+    google::protobuf::ShutdownProtobufLibrary();
+    exit(EXIT_FAILURE);
+  }
+
+  // Set up and connect to the FileSystem
+  fs.reset(hdfs::FileSystem::New(service, "", options));
+  if(nullptr == fs) {
+    std::cerr << "Unable to create FileSystem" << std::endl;
+    fs.reset();
+    service->Stop();
+    config = decltype(config)();
+    google::protobuf::ShutdownProtobufLibrary();
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::Status status = fs->ConnectToDefaultFs();
+  if (!status.ok()) {
+    if(!options.defaultFS.get_host().empty()){
+      std::cerr << "Error connecting to " << options.defaultFS << ". " << 
status.ToString() << std::endl;
+    } else {
+      std::cerr << "Error connecting to the cluster: defaultFS is empty. " << 
status.ToString() << std::endl;
+    }
+    fs.reset();
+    service->Stop();
+    config = decltype(config)();
+    google::protobuf::ShutdownProtobufLibrary();
+    exit(EXIT_FAILURE);
+  }
+
+  fs.reset();
+  service->Stop();
+  config = decltype(config)();
+  google::protobuf::ShutdownProtobufLibrary();
+
+  return 0;
+}
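
Not part of the patch: every error path above repeats the same five-step teardown (reset the FileSystem, stop the IoService, clear the optional config, shut down protobuf, exit). A sketch of factoring that into one helper; the name is hypothetical and 'fs' is the global shared_ptr from this example:

    static void cleanup_and_fail(const std::string &msg,
                                 std::shared_ptr<hdfs::IoService> service,
                                 hdfs::optional<hdfs::HdfsConfiguration> &config) {
      std::cerr << msg << std::endl;
      fs.reset();
      if (service) service->Stop();
      config = hdfs::optional<hdfs::HdfsConfiguration>();  // same valgrind workaround as above
      google::protobuf::ShutdownProtobufLibrary();
      exit(EXIT_FAILURE);
    }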

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/find/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/find/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/find/CMakeLists.txt
new file mode 100644
index 0000000..c833676
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/find/CMakeLists.txt
@@ -0,0 +1,27 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Default LIBHDFSPP_DIR to the default install location.  You can override
+#    it by adding -DLIBHDFSPP_DIR=... to your cmake invocation
+set(LIBHDFSPP_DIR CACHE STRING ${CMAKE_INSTALL_PREFIX})
+
+include_directories( ${LIBHDFSPP_DIR}/include )
+link_directories( ${LIBHDFSPP_DIR}/lib )
+
+add_executable(find find.cc)
+target_link_libraries(find tools_common hdfspp_static)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/find/find.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/find/find.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/find/find.cc
new file mode 100644
index 0000000..5373890
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/find/find.cc
@@ -0,0 +1,140 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+/**
+   * A parallel find tool example.
+   *
+   * Finds all files matching the specified name recursively starting from the
+   * specified directory and prints their filepaths. Works either synchronously
+   * or asynchronously.
+   *
+   * Usage: find /<path-to-file> <file-name> <use_async>
+   *
+   * Example: find /dir?/tree* some?file*name 1
+   *
+   * @param path-to-file    Absolute path at which to begin search, can have wild
+   *                        cards and must be non-blank
+   * @param file-name       Name to find, can have wild cards and must be non-blank
+   * @param use_async       If set to 1 it prints out results asynchronously as
+   *                        they arrive. If set to 0 results are printed in one
+   *                        big chunk when it becomes available.
+   *
+   **/
+
+#include "hdfspp/hdfspp.h"
+#include <google/protobuf/stubs/common.h>
+#include <future>
+#include "tools_common.h"
+
+void SyncFind(std::shared_ptr<hdfs::FileSystem> fs, const std::string &path, const std::string &name){
+  std::vector<hdfs::StatInfo> results;
+  //Synchronous call to Find
+  hdfs::Status stat = fs->Find(path, name, hdfs::FileSystem::GetDefaultFindMaxDepth(), &results);
+
+  if (!stat.ok()) {
+    std::cerr << "Error: " << stat.ToString() << std::endl;
+  }
+
+  if(results.empty()){
+    std::cout << "Nothing Found" << std::endl;
+  } else {
+    //Printing out the results
+    for (hdfs::StatInfo const& si : results) {
+      std::cout << si.full_path << std::endl;
+    }
+  }
+}
+
+void AsyncFind(std::shared_ptr<hdfs::FileSystem> fs, const std::string &path, const std::string &name){
+  std::promise<void> promise;
+  std::future<void> future(promise.get_future());
+  bool something_found = false;
+  hdfs::Status status = hdfs::Status::OK();
+
+  /**
+    * Keep requesting more until we get the entire listing. Set the promise
+    * when we have the entire listing to stop.
+    *
+    * Find guarantees that the handler will only be called once at a time,
+    * so we do not need any locking here
+    */
+  auto handler = [&promise, &status, &something_found]
+                  (const hdfs::Status &s, const std::vector<hdfs::StatInfo> & si, bool has_more_results) -> bool {
+    //Print result chunks as they arrive
+    if(!si.empty()) {
+      something_found = true;
+      for (hdfs::StatInfo const& s : si) {
+        std::cout << s.full_path << std::endl;
+      }
+    }
+    if(!s.ok() && status.ok()){
+      //We make sure we set 'status' only on the first error.
+      status = s;
+    }
+    if (!has_more_results) {
+      promise.set_value();  //set promise
+      return false;         //request stop sending results
+    }
+    return true;  //request more results
+  };
+
+  //Asynchronous call to Find
+  fs->Find(path, name, hdfs::FileSystem::GetDefaultFindMaxDepth(), handler);
+
+  //block until promise is set
+  future.get();
+  if(!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+  }
+  if(!something_found){
+    std::cout << "Nothing Found" << std::endl;
+  }
+}
+
+int main(int argc, char *argv[]) {
+  if (argc != 4) {
+    std::cerr << "usage: find /<path-to-file> <file-name> <use_async>" << 
std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::string path = argv[1];
+  std::string name = argv[2];
+  bool use_async = (std::stoi(argv[3]) != 0);
+
+  //Building a URI object from the given uri path
+  hdfs::URI uri = hdfs::parse_path_or_exit(path);
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri, true);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  if (use_async){
+    //Example of Async find
+    AsyncFind(fs, path, name);
+  } else {
+    //Example of Sync find
+    SyncFind(fs, path, name);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
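
Not part of the patch: the promise/future bridge in AsyncFind is a general way to make a chunked callback API blocking. A self-contained sketch of the same pattern with the hdfs-specific types abstracted away (all names hypothetical, standard library only):

    #include <functional>
    #include <future>
    #include <vector>

    template <typename T>
    std::vector<T> CollectAll(
        const std::function<void(std::function<bool(const std::vector<T>&, bool)>)> &source) {
      std::vector<T> all;
      std::promise<void> done;
      source([&](const std::vector<T> &chunk, bool has_more) -> bool {
        all.insert(all.end(), chunk.begin(), chunk.end());
        if (!has_more) done.set_value();  // last chunk: unblock the waiter
        return has_more;                  // mirror Find's keep-going contract
      });
      done.get_future().get();            // block until the final chunk arrives
      return all;
    }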

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/gendirs/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/gendirs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/gendirs/CMakeLists.txt
new file mode 100644
index 0000000..9bd8631
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/gendirs/CMakeLists.txt
@@ -0,0 +1,27 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Default LIBHDFSPP_DIR to the default install location.  You can override
+#    it by adding -DLIBHDFSPP_DIR=... to your cmake invocation
+set(LIBHDFSPP_DIR CACHE STRING ${CMAKE_INSTALL_PREFIX})
+
+include_directories( ${LIBHDFSPP_DIR}/include )
+link_directories( ${LIBHDFSPP_DIR}/lib )
+
+add_executable(gendirs gendirs.cc)
+target_link_libraries(gendirs tools_common hdfspp_static)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/gendirs/gendirs.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/gendirs/gendirs.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/gendirs/gendirs.cc
new file mode 100644
index 0000000..35f7be9
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/gendirs/gendirs.cc
@@ -0,0 +1,122 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+/**
+   * A recursive directory generator tool.
+   *
+   * Generates a directory tree with specified depth and fanout starting from
+   * a given path. Generation is asynchronous.
+   *
+   * Usage:   gendirs /<path-to-dir> <depth> <fanout>
+   *
+   * Example: gendirs /dir0 3 10
+   *
+   * @param path-to-dir   Absolute path to the directory tree root where the
+   *                      directory tree will be generated
+   * @param depth         Depth of the directory tree (number of levels from
+   *                      root to leaves)
+   * @param fanout        Fanout of each directory (number of sub-directories to
+   *                      be created inside each directory except leaf directories)
+   *
+   **/
+
+#include "hdfspp/hdfspp.h"
+#include <google/protobuf/stubs/common.h>
+#include <future>
+#include "tools_common.h"
+
+#define DEFAULT_PERMISSIONS 0755
+
+void GenerateDirectories (std::shared_ptr<hdfs::FileSystem> fs, int depth, int level, int fanout, std::string path, std::vector<std::future<hdfs::Status>> & futures) {
+  //Level contains our current depth in the directory tree
+  if(level < depth) {
+    for(int i = 0; i < fanout; i++){
+      //Recursive calls to cover all possible paths from the root to the leaf nodes
+      GenerateDirectories(fs, depth, level+1, fanout, path + "dir" + std::to_string(i) + "/", futures);
+    }
+  } else {
+    //We have reached the leaf nodes and now start making calls to create directories
+    //We make a promise which will be set when the call finishes and executes our handler
+    auto callstate = std::make_shared<std::promise<hdfs::Status>>();
+    //Extract a future from this promise
+    std::future<hdfs::Status> future(callstate->get_future());
+    //Save this future to the vector of futures which will be used to wait on all promises
+    //after the whole recursion is done
+    futures.push_back(std::move(future));
+    //Create a handler that will be executed when Mkdirs is done
+    auto handler = [callstate](const hdfs::Status &s) {
+      callstate->set_value(s);
+    };
+    //Asynchronous call to create this directory along with all missing parent directories
+    fs->Mkdirs(path, DEFAULT_PERMISSIONS, true, handler);
+  }
+}
+
+int main(int argc, char *argv[]) {
+  if (argc != 4) {
+    std::cerr << "usage: gendirs /<path-to-dir> <depth> <fanout>" << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::string path = argv[1];
+  int depth = std::stoi(argv[2]);
+  int fanout = std::stoi(argv[3]);
+
+  //Building a URI object from the given uri path
+  hdfs::URI uri = hdfs::parse_path_or_exit(path);
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri, true);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  /**
+   * We do not want the recursion to block on anything, therefore we will be
+   * making asynchronous calls recursively, and then just waiting for all
+   * the calls to finish.
+   *
+   * This array of futures will be populated by the recursive function below.
+   * Each new asynchronous Mkdirs call will add a future to this vector, and will
+   * create a promise, which will only be set when the call was completed and
+   * processed. After the whole recursion is complete we will need to wait until
+   * all promises are set before we can exit.
+   **/
+  std::vector<std::future<hdfs::Status>> futures;
+
+  GenerateDirectories(fs, depth, 0, fanout, path + "/", futures);
+
+  /**
+   * We are waiting here until all promises are set, and checking whether
+   * the returned statuses contained any errors.
+   **/
+  for(std::future<hdfs::Status> &fs : futures){
+    hdfs::Status status = fs.get();
+    if (!status.ok()) {
+      std::cerr << "Error: " << status.ToString() << std::endl;
+      exit(EXIT_FAILURE);
+    }
+  }
+
+  std::cout << "All done!" << std::endl;
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
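
Not part of the patch: a rough cost model for the recursion above. The tree has fanout^depth leaf paths, so that many Mkdirs calls are issued, and since Mkdirs also creates missing parents the run produces fanout + fanout^2 + ... + fanout^depth directories in total; for the documented example (depth 3, fanout 10) that is 1000 calls and 1110 directories. A sketch (assumes <cstdint> is included):

    uint64_t MkdirsCalls(uint64_t fanout, int depth) {
      uint64_t leaves = 1;
      for (int i = 0; i < depth; i++) leaves *= fanout;  // fanout^depth
      return leaves;
    }

    uint64_t TotalDirs(uint64_t fanout, int depth) {
      uint64_t total = 0, level = 1;
      for (int i = 0; i < depth; i++) { level *= fanout; total += level; }
      return total;  // geometric sum; 1110 for (10, 3)
    }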

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/block_location.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/block_location.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/block_location.h
new file mode 100644
index 0000000..4d824d6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/block_location.h
@@ -0,0 +1,177 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HDFSPP_BLOCK_LOCATION_H
+#define HDFSPP_BLOCK_LOCATION_H
+
+namespace hdfs {
+
+class DNInfo {
+public:
+  DNInfo() : xfer_port_(-1), info_port_(-1), IPC_port_(-1), info_secure_port_(-1) {}
+
+  std::string getHostname() const {
+    return hostname_;
+  }
+
+  void setHostname(const std::string & hostname) {
+    this->hostname_ = hostname;
+  }
+
+  std::string getIPAddr() const {
+    return ip_addr_;
+  }
+
+  void setIPAddr(const std::string & ip_addr) {
+    this->ip_addr_ = ip_addr;
+  }
+
+  std::string getNetworkLocation() const {
+    return network_location_;
+  }
+
+  void setNetworkLocation(const std::string & location) {
+    this->network_location_ = location;
+  }
+
+  int getXferPort() const {
+    return xfer_port_;
+  }
+
+  void setXferPort(int xfer_port) {
+    this->xfer_port_ = xfer_port;
+  }
+
+  int getInfoPort() const {
+    return info_port_;
+  }
+
+  void setInfoPort(int info_port) {
+    this->info_port_ = info_port;
+  }
+
+  int getIPCPort() const {
+    return IPC_port_;
+  }
+
+  void setIPCPort(int IPC_port) {
+    this->IPC_port_ = IPC_port;
+  }
+
+  int getInfoSecurePort() const {
+    return info_secure_port_;
+  }
+
+  void setInfoSecurePort(int info_secure_port) {
+    this->info_secure_port_ = info_secure_port;
+  }
+private:
+  std::string hostname_;
+  std::string ip_addr_;
+  std::string network_location_;
+  int         xfer_port_;
+  int         info_port_;
+  int         IPC_port_;
+  int         info_secure_port_;
+};
+
+class BlockLocation {
+public:
+    bool isCorrupt() const {
+        return corrupt_;
+    }
+
+    void setCorrupt(bool corrupt) {
+        this->corrupt_ = corrupt;
+    }
+
+    int64_t getLength() const {
+        return length_;
+    }
+
+    void setLength(int64_t length) {
+        this->length_ = length;
+    }
+
+    int64_t getOffset() const {
+        return offset_;
+    }
+
+    void setOffset(int64_t offset) {
+        this->offset_ = offset;
+    }
+
+    const std::vector<DNInfo> & getDataNodes() const {
+        return dn_info_;
+    }
+
+    void setDataNodes(const std::vector<DNInfo> & dn_info) {
+        this->dn_info_ = dn_info;
+    }
+
+private:
+    bool corrupt_;
+    int64_t length_;
+    int64_t offset_;  // Offset of the block in the file
+    std::vector<DNInfo> dn_info_; // Info about who stores each block
+};
+
+class FileBlockLocation {
+public:
+  uint64_t getFileLength() {
+    return fileLength_;
+  }
+
+  void setFileLength(uint64_t fileLength) {
+    this->fileLength_ = fileLength;
+  }
+
+  bool isLastBlockComplete() const {
+    return this->lastBlockComplete_;
+  }
+
+  void setLastBlockComplete(bool lastBlockComplete) {
+    this->lastBlockComplete_ = lastBlockComplete;
+  }
+
+  bool isUnderConstruction() const {
+    return underConstruction_;
+  }
+
+  void setUnderConstruction(bool underConstruction) {
+    this->underConstruction_ = underConstruction;
+  }
+
+  const std::vector<BlockLocation> & getBlockLocations() const {
+    return blockLocations_;
+  }
+
+  void setBlockLocations(const std::vector<BlockLocation> & blockLocations) {
+    this->blockLocations_ = blockLocations;
+  }
+private:
+  uint64_t fileLength_;
+  bool     lastBlockComplete_;
+  bool     underConstruction_;
+  std::vector<BlockLocation> blockLocations_;
+};
+
+} // namespace hdfs
+
+
+#endif /* HDFSPP_BLOCK_LOCATION_H */
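
A short consumer-side sketch for the types above, grounded only in the getters declared in this header (assumes this header and <iostream> are included; in practice the FileBlockLocation would come from a block-location query):

    void PrintReplicas(const hdfs::FileBlockLocation &loc) {
      for (const hdfs::BlockLocation &block : loc.getBlockLocations()) {
        std::cout << "block @" << block.getOffset() << " len=" << block.getLength()
                  << (block.isCorrupt() ? " CORRUPT" : "") << "\n";
        for (const hdfs::DNInfo &dn : block.getDataNodes())
          std::cout << "  replica on " << dn.getHostname()
                    << " (" << dn.getIPAddr() << ":" << dn.getXferPort() << ")\n";
      }
    }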

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/config_parser.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/config_parser.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/config_parser.h
new file mode 100644
index 0000000..e30e2cf
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/config_parser.h
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef LIBHDFSPP_CONFIGPARSER_H_
+#define LIBHDFSPP_CONFIGPARSER_H_
+
+#include "hdfspp/options.h"
+#include "hdfspp/uri.h"
+#include "hdfspp/status.h"
+
+#include <string>
+#include <memory>
+#include <vector>
+
+namespace hdfs {
+
+class ConfigParser {
+ public:
+  ConfigParser();
+  ConfigParser(const std::string& path);
+  ConfigParser(const std::vector<std::string>& configDirectories);
+  ~ConfigParser();
+  ConfigParser(ConfigParser&&);
+  ConfigParser& operator=(ConfigParser&&);
+
+  bool LoadDefaultResources();
+  std::vector<std::pair<std::string, Status> > ValidateResources() const;
+
+  // Return false if value couldn't be found or cast to desired type
+  bool get_int(const std::string& key, int& outval) const;
+  int get_int_or(const std::string& key, const int defaultval) const;
+
+  bool get_string(const std::string& key, std::string& outval) const;
+  std::string get_string_or(const std::string& key, const std::string& defaultval) const;
+
+  bool get_bool(const std::string& key, bool& outval) const;
+  bool get_bool_or(const std::string& key, const bool defaultval) const;
+
+  bool get_double(const std::string& key, double& outval) const;
+  double get_double_or(const std::string& key, const double defaultval) const;
+
+  bool get_uri(const std::string& key, URI& outval) const;
+  URI get_uri_or(const std::string& key, const URI& defaultval) const;
+
+  bool get_options(Options& outval) const;
+  Options get_options_or(const Options& defaultval) const;
+
+ private:
+  class impl;
+  std::unique_ptr<impl> pImpl;
+};
+
+}
+#endif
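
A usage sketch for the ConfigParser declarations above; how LoadDefaultResources locates core-site.xml and hdfs-site.xml is not spelled out in this header, so the lookup behavior is an assumption here:

    #include "hdfspp/config_parser.h"
    #include <iostream>

    int ReadDefaultFs() {
      hdfs::ConfigParser parser;  // default-constructed: default search path
      if (!parser.LoadDefaultResources()) {
        std::cerr << "no config resources found\n";
        return 1;
      }
      for (const auto &resource : parser.ValidateResources())
        if (!resource.second.ok())
          std::cerr << resource.first << ": " << resource.second.ToString() << "\n";
      std::cout << "fs.defaultFS = "
                << parser.get_string_or("fs.defaultFS", "hdfs://localhost:8020") << "\n";
      return 0;
    }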

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/content_summary.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/content_summary.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/content_summary.h
new file mode 100644
index 0000000..6762a65
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/content_summary.h
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HDFSPP_CONTENT_SUMMARY_H_
+#define HDFSPP_CONTENT_SUMMARY_H_
+
+#include <string>
+
+namespace hdfs {
+
+/**
+ * Content summary is assumed to be unchanging for the duration of the operation
+ */
+struct ContentSummary {
+  uint64_t length;
+  uint64_t filecount;
+  uint64_t directorycount;
+  uint64_t quota;
+  uint64_t spaceconsumed;
+  uint64_t spacequota;
+  std::string path;
+
+  ContentSummary();
+
+  //Converts ContentSummary object to std::string (hdfs_count format)
+  std::string str(bool include_quota) const;
+
+  //Converts ContentSummary object to std::string (hdfs_du format)
+  std::string str_du() const;
+};
+
+}
+
+#endif

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/events.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/events.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/events.h
new file mode 100644
index 0000000..83c0deb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/events.h
@@ -0,0 +1,141 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HDFSPP_EVENTS
+#define HDFSPP_EVENTS
+
+#include "hdfspp/status.h"
+
+#include <functional>
+
+namespace hdfs {
+
+/*
+ * Supported event names.  These names will stay consistent in libhdfs callbacks.
+ *
+ * Other events not listed here may be seen, but they are not stable and
+ * should not be counted on.  May need to be broken up into more components
+ * as more events are added.
+ */
+
+static constexpr const char * FS_NN_CONNECT_EVENT = "NN::connect";
+static constexpr const char * FS_NN_READ_EVENT = "NN::read";
+static constexpr const char * FS_NN_WRITE_EVENT = "NN::write";
+
+static constexpr const char * FILE_DN_CONNECT_EVENT = "DN::connect";
+static constexpr const char * FILE_DN_READ_EVENT = "DN::read";
+static constexpr const char * FILE_DN_WRITE_EVENT = "DN::write";
+
+
+// NN failover event due to issues with the current NN; might be standby, might be dead.
+// Invokes the fs_event_callback using the nameservice name in the cluster string.
+// The uint64_t value argument holds an address that can be reinterpreted as a const char *
+// and provides the full URI of the node the failover will attempt to connect to next.
+static constexpr const char * FS_NN_FAILOVER_EVENT = "NN::failover";
+
+// Invoked when RpcConnection tries to use an empty set of endpoints to figure out
+// which NN in a HA cluster to connect to.
+static constexpr const char * FS_NN_EMPTY_ENDPOINTS_EVENT = "NN::bad_failover::no_endpoints";
+
+// Invoked prior to determining if failed NN rpc calls should be retried or discarded.
+static constexpr const char * FS_NN_PRE_RPC_RETRY_EVENT = "NN::rpc::get_retry_action";
+
+class event_response {
+public:
+  // Helper factories
+  // The default ok response; libhdfspp should continue normally
+  static event_response make_ok() {
+    return event_response(kOk);
+  }
+  static event_response make_caught_std_exception(const char *what) {
+    return event_response(kCaughtStdException, what);
+  }
+  static event_response make_caught_unknown_exception() {
+    return event_response(kCaughtUnknownException);
+  }
+
+  // High level classification of responses
+  enum event_response_type {
+    kOk = 0,
+    // User supplied callback threw.
+    // Std exceptions will copy the what() string
+    kCaughtStdException = 1,
+    kCaughtUnknownException = 2,
+
+    // Responses to be used in testing only
+    kTest_Error = 100
+  };
+
+  event_response_type response_type() { return response_type_; }
+
+private:
+  // Use factories to construct for now
+  event_response();
+  event_response(event_response_type type)
+            : response_type_(type)
+  {
+    if(type == kCaughtUnknownException) {
+      status_ = Status::Exception("c++ unknown exception", "");
+    }
+  }
+  event_response(event_response_type type, const char *what)
+            : response_type_(type),
+              exception_msg_(what==nullptr ? "" : what)
+  {
+    status_ = Status::Exception("c++ std::exception", exception_msg_.c_str());
+  }
+
+
+  event_response_type response_type_;
+
+  // use to hold what str if event handler threw
+  std::string exception_msg_;
+
+
+///////////////////////////////////////////////
+//
+//   Testing support
+//
+// The consumer can simulate errors
+// within libhdfspp by returning a Status from the callback.
+///////////////////////////////////////////////
+public:
+  static event_response test_err(const Status &status) {
+    return event_response(status);
+  }
+
+  Status status() { return status_; }
+
+private:
+  event_response(const Status & status) :
+    response_type_(event_response_type::kTest_Error), status_(status) {}
+
+  Status status_; // To be used with kTest_Error
+};
+
+/* callback signature */
+typedef std::function<event_response (const char * event,
+                                      const char * cluster,
+                                      int64_t value)> fs_event_callback;
+
+typedef std::function<event_response (const char * event,
+                                      const char * cluster,
+                                      const char * file,
+                                      int64_t value)>file_event_callback;
+}
+#endif
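
A sketch of a consumer callback matching the fs_event_callback typedef above; the try/catch mirrors how the factory methods expect exceptions to be reported back. How the callback gets registered with a FileSystem is outside this header, so that step is omitted (assumes this header, <exception>, and <iostream> are included):

    hdfs::event_response LogFsEvent(const char *event, const char *cluster,
                                    int64_t value) {
      try {
        std::cerr << "[" << cluster << "] " << event << " value=" << value << "\n";
        return hdfs::event_response::make_ok();
      } catch (const std::exception &e) {
        return hdfs::event_response::make_caught_std_exception(e.what());
      } catch (...) {
        return hdfs::event_response::make_caught_unknown_exception();
      }
    }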

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/fsinfo.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/fsinfo.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/fsinfo.h
new file mode 100644
index 0000000..103cf13
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/fsinfo.h
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HDFSPP_FSINFO_H_
+#define HDFSPP_FSINFO_H_
+
+#include <string>
+
+namespace hdfs {
+
+/**
+ * Information that is assumed to be unchanging about a file system for the duration of
+ * the operations.
+ */
+struct FsInfo {
+
+  uint64_t capacity;
+  uint64_t used;
+  uint64_t remaining;
+  uint64_t under_replicated;
+  uint64_t corrupt_blocks;
+  uint64_t missing_blocks;
+  uint64_t missing_repl_one_blocks;
+  uint64_t blocks_in_future;
+
+  FsInfo();
+
+  //Converts FsInfo object to std::string (hdfs_df format)
+  std::string str(const std::string fs_name) const;
+};
+
+}
+
+#endif
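
A minimal usage sketch (not part of the patch): filling an FsInfo by hand and rendering it with str(). How a populated FsInfo is normally obtained (e.g. from a filesystem stats call) is outside this header, so the values and the URI below are illustrative, and the default constructor is assumed to zero-initialize the counters.

    #include <iostream>
    #include "hdfspp/fsinfo.h"

    int main() {
      hdfs::FsInfo info;                              // assumed to zero-initialize
      info.capacity  = 100ULL * 1024 * 1024 * 1024;   // 100 GiB raw capacity
      info.used      =  25ULL * 1024 * 1024 * 1024;
      info.remaining = info.capacity - info.used;
      // str() renders the counters in the style of hdfs_df output.
      std::cout << info.str("hdfs://namenode:8020") << std::endl;
      return 0;
    }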

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b78c94f4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfs_ext.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfs_ext.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfs_ext.h
new file mode 100644
index 0000000..cdd3d4d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfs_ext.h
@@ -0,0 +1,394 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef LIBHDFSPP_HDFS_HDFSEXT
+#define LIBHDFSPP_HDFS_HDFSEXT
+
+#include <hdfspp/log.h>
+
+/* get typedefs and #defines from libhdfs' hdfs.h to stay consistent */
+#include <hdfs/hdfs.h>
+
+/**
+ *  Note: The #defines below are copied directly from libhdfs'
+ *  hdfs.h.  LIBHDFS_EXTERNAL gets explicitly #undefed at the
+ *  end of the file so it must be redefined here.
+ **/
+
+#ifdef WIN32
+    #ifdef LIBHDFS_DLL_EXPORT
+        #define LIBHDFS_EXTERNAL __declspec(dllexport)
+    #elif LIBHDFS_DLL_IMPORT
+        #define LIBHDFS_EXTERNAL __declspec(dllimport)
+    #else
+        #define LIBHDFS_EXTERNAL
+    #endif
+#else
+    #ifdef LIBHDFS_DLL_EXPORT
+        #define LIBHDFS_EXTERNAL __attribute__((visibility("default")))
+    #elif LIBHDFS_DLL_IMPORT
+        #define LIBHDFS_EXTERNAL __attribute__((visibility("default")))
+    #else
+        #define LIBHDFS_EXTERNAL
+    #endif
+#endif
+
+
+/**
+ * Keep C bindings that are libhdfs++ specific in here.
+ **/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ *  Reads the last error, if any, that happened in this thread
+ *  into the user supplied buffer.
+ *  @param buf  A chunk of memory with room for the error string.
+ *  @param len  Size of the buffer; if the message is longer than
+ *              len, len-1 bytes of the message will be copied.
+ *  @return     0 on successful read of the last error, -1 otherwise.
+ **/
+LIBHDFS_EXTERNAL
+int hdfsGetLastError(char *buf, int len);
+
+
+/**
+ *  Cancels operations being made by the FileHandle.
+ *  Note: Cancel cannot be reversed.  This is intended
+ *  to be used before hdfsClose to avoid waiting for
+ *  operations to complete.
+ **/
+LIBHDFS_EXTERNAL
+int hdfsCancel(hdfsFS fs, hdfsFile file);
+
+/**
+ * Create an HDFS builder, using the configuration XML files from the indicated
+ * directory.  If the directory does not exist, or contains no configuration
+ * XML files, a Builder using all default values will be returned.
+ *
+ * @return The HDFS builder, or NULL on error.
+ */
+LIBHDFS_EXTERNAL
+struct hdfsBuilder *hdfsNewBuilderFromDirectory(const char * configDirectory);
+
+
+/**
+ * Get a configuration string from the settings currently read into the builder.
+ *
+ * @param key      The key to find
+ * @param val      (out param) The value.  This will be set to NULL if the
+ *                 key isn't found.  You must free this string with
+ *                 hdfsConfStrFree.
+ *
+ * @return         0 on success; -1 otherwise.
+ *                 Failure to find the key is not an error.
+ */
+LIBHDFS_EXTERNAL
+int hdfsBuilderConfGetStr(struct hdfsBuilder *bld, const char *key,
+                          char **val);
+
+/**
+ * Get a configuration integer from the settings currently read into the builder.
+ *
+ * @param key      The key to find
+ * @param val      (out param) The value.  This will NOT be changed if the
+ *                 key isn't found.
+ *
+ * @return         0 on success; -1 otherwise.
+ *                 Failure to find the key is not an error.
+ */
+LIBHDFS_EXTERNAL
+int hdfsBuilderConfGetInt(struct hdfsBuilder *bld, const char *key, int32_t *val);
+
+
+/**
+ * Get a configuration long from the settings currently read into the builder.
+ *
+ * @param key      The key to find
+ * @param val      (out param) The value.  This will NOT be changed if the
+ *                 key isn't found.
+ *
+ * @return         0 on success; -1 otherwise.
+ *                 Failure to find the key is not an error.
+ */
+LIBHDFS_EXTERNAL
+int hdfsBuilderConfGetLong(struct hdfsBuilder *bld, const char *key, int64_t *val);
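
A minimal sketch (not part of the patch) of how these getters compose. The key names are illustrative, and hdfsConfStrFree is the libhdfs helper that, per the documentation above, must free strings returned by hdfsBuilderConfGetStr.

    #include <cstdio>
    #include "hdfspp/hdfs_ext.h"

    void dump_config(struct hdfsBuilder *bld) {
      char *addr = NULL;
      if (hdfsBuilderConfGetStr(bld, "dfs.namenode.rpc-address", &addr) == 0 && addr) {
        printf("rpc address: %s\n", addr);
        hdfsConfStrFree(addr);            // strings from ConfGetStr must be freed this way
      }

      int32_t replication = 3;            // keeps its value if the key is absent
      hdfsBuilderConfGetInt(bld, "dfs.replication", &replication);
      printf("replication: %d\n", replication);
    }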
+
+struct hdfsDNInfo {
+  const char *    ip_address;
+  const char *    hostname;
+  const char *    network_location;
+  int             xfer_port;
+  int             info_port;
+  int             IPC_port;
+  int             info_secure_port;
+};
+
+struct hdfsBlockInfo {
+    uint64_t            start_offset;
+    uint64_t            num_bytes;
+
+    size_t              num_locations;
+    struct hdfsDNInfo * locations;
+};
+
+struct hdfsBlockLocations
+{
+    uint64_t               fileLength;
+    int                    isLastBlockComplete;
+    int                    isUnderConstruction;
+
+    size_t                 num_blocks;
+    struct hdfsBlockInfo * blocks;
+};
+
+/**
+ * Returns the block information and data nodes associated with a particular file.
+ *
+ * The hdfsBlockLocations structure will have zero or more hdfsBlockInfo elements,
+ * which will have zero or more ip_addr elements indicating which datanodes have
+ * each block.
+ *
+ * @param fs         A connected hdfs instance
+ * @param path       Path of the file to query
+ * @param locations  The address of an output pointer to contain the block information.
+ *                   On success, this pointer must be later freed with hdfsFreeBlockLocations.
+ *
+ * @return         0 on success; -1 otherwise.
+ *                 If the file does not exist, -1 will be returned and errno will be set.
+ */
+LIBHDFS_EXTERNAL
+int hdfsGetBlockLocations(hdfsFS fs, const char *path, struct hdfsBlockLocations ** locations);
+
+/**
+ * Frees up an hdfsBlockLocations pointer allocated by hdfsGetBlockLocations.
+ *
+ * @param locations    The previously-populated pointer allocated by hdfsGetBlockLocations
+ * @return             0 on success, -1 on error
+ */
+LIBHDFS_EXTERNAL
+int hdfsFreeBlockLocations(struct hdfsBlockLocations * locations);
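
A minimal sketch (not part of the patch): walking the block and location structures declared above. Everything here is grounded in the struct definitions in this diff; only the printing is illustrative.

    #include <cstdio>
    #include "hdfspp/hdfs_ext.h"

    int print_block_map(hdfsFS fs, const char *path) {
      struct hdfsBlockLocations *locs = NULL;
      if (hdfsGetBlockLocations(fs, path, &locs) != 0)
        return -1;                               // errno set if the file does not exist

      printf("%s: %llu bytes in %zu block(s)\n", path,
             (unsigned long long)locs->fileLength, locs->num_blocks);
      for (size_t b = 0; b < locs->num_blocks; b++) {
        struct hdfsBlockInfo *blk = &locs->blocks[b];
        printf("  block @%llu (+%llu bytes):\n",
               (unsigned long long)blk->start_offset,
               (unsigned long long)blk->num_bytes);
        for (size_t d = 0; d < blk->num_locations; d++)
          printf("    dn %s (%s)\n", blk->locations[d].hostname,
                 blk->locations[d].ip_address);
      }
      return hdfsFreeBlockLocations(locs);       // caller owns the structure
    }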
+
+
+
+
+/**
+ *  Client can supply a C style function pointer to be invoked any time something
+ *  is logged.  Unlike the C++ logger this will not filter by level or component,
+ *  it is up to the consumer to throw away messages they don't want.
+ *
+ *  Note: The callback provided must be reentrant, the library does not guarantee
+ *  that there won't be concurrent calls.
+ *  Note: Callback does not own the LogData struct.  If the client would like to
+ *  keep one around use hdfsCopyLogData/hdfsFreeLogData.
+ **/
+LIBHDFS_EXTERNAL
+void hdfsSetLogFunction(void (*hook)(LogData*));
+
+/**
+ *  Create a copy of the LogData object passed in and return a pointer to it.
+ *  Returns null if it was unable to copy.
+ **/
+LIBHDFS_EXTERNAL
+LogData *hdfsCopyLogData(const LogData*);
+
+/**
+ *  Client must call this to dispose of the LogData created by hdfsCopyLogData.
+ **/
+LIBHDFS_EXTERNAL
+void hdfsFreeLogData(LogData*);
+
+/**
+ * Enable logging functionality for a component.
+ * Return -1 on failure, 0 otherwise.
+ **/
+LIBHDFS_EXTERNAL
+int hdfsEnableLoggingForComponent(int component);
+
+/**
+ * Disable logging functionality for a component.
+ * Return -1 on failure, 0 otherwise.
+ **/
+LIBHDFS_EXTERNAL
+int hdfsDisableLoggingForComponent(int component);
+
+/**
+ * Set the logging level; valid levels range from trace to error.
+ * Return -1 on failure, 0 otherwise.
+ **/
+LIBHDFS_EXTERNAL
+int hdfsSetLoggingLevel(int level);
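
A minimal sketch (not part of the patch): installing a log hook. The fields of LogData live in hdfspp/log.h and are not shown in this diff, so the hook below only uses the copy/free helpers declared above; the single-slot buffer is illustrative and deliberately not thread-safe.

    #include <cstddef>
    #include "hdfspp/hdfs_ext.h"

    static LogData *last_message = NULL;   // single-slot buffer, shown only to
                                           // illustrate ownership; not reentrant

    static void my_log_hook(LogData *data) {
      // The callback does not own *data; take a copy if it must outlive the call.
      LogData *copy = hdfsCopyLogData(data);
      if (!copy)
        return;
      if (last_message)
        hdfsFreeLogData(last_message);
      last_message = copy;
    }

    // ... at startup:
    //   hdfsSetLogFunction(my_log_hook);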
+
+/*
+ * Supported event names.  These names will stay consistent in libhdfs callbacks.
+ *
+ * Other events not listed here may be seen, but they are not stable and
+ * should not be counted on.
+ */
+extern const char * FS_NN_CONNECT_EVENT;
+extern const char * FS_NN_READ_EVENT;
+extern const char * FS_NN_WRITE_EVENT;
+
+extern const char * FILE_DN_CONNECT_EVENT;
+extern const char * FILE_DN_READ_EVENT;
+extern const char * FILE_DN_WRITE_EVENT;
+
+
+#define LIBHDFSPP_EVENT_OK (0)
+#define DEBUG_SIMULATE_ERROR (-1)
+
+typedef int (*libhdfspp_fs_event_callback)(const char * event, const char * cluster,
+                                           int64_t value, int64_t cookie);
+typedef int (*libhdfspp_file_event_callback)(const char * event,
+                                             const char * cluster,
+                                             const char * file,
+                                             int64_t value, int64_t cookie);
+
+/**
+ * Registers a callback for the next filesystem connect operation the current
+ * thread executes.
+ *
+ *  @param handler A function pointer.  Taken as a void* and internally
+ *                 cast into the appropriate type.
+ *  @param cookie  An opaque value that will be passed into the handler; can
+ *                 be used to correlate the handler with some object in the
+ *                 consumer's space.
+ **/
+LIBHDFS_EXTERNAL
+int hdfsPreAttachFSMonitor(libhdfspp_fs_event_callback handler, int64_t cookie);
+
+
+/**
+ * Registers a callback for the next file open operation the current thread
+ * executes.
+ *
+ *  @param handler A function pointer.  Taken as a void* and internally
+ *                 cast into the appropriate type.
+ *  @param cookie  An opaque value that will be passed into the handler; can
+ *                 be used to correlate the handler with some object in the
+ *                 consumer's space.
+ **/
+LIBHDFS_EXTERNAL
+int hdfsPreAttachFileMonitor(libhdfspp_file_event_callback handler, int64_t cookie);
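
A minimal sketch (not part of the patch): attaching a monitor to the next connect on the current thread. Returning LIBHDFSPP_EVENT_OK lets the operation proceed normally; DEBUG_SIMULATE_ERROR is for fault-injection tests. The counter and registration call order are illustrative.

    #include <cstdint>
    #include "hdfspp/hdfs_ext.h"

    static int64_t nn_event_count = 0;

    static int count_nn_events(const char *event, const char *cluster,
                               int64_t value, int64_t cookie) {
      (void)event; (void)cluster; (void)value; (void)cookie;
      nn_event_count++;
      return LIBHDFSPP_EVENT_OK;   // let libhdfs++ continue normally
    }

    // Before connecting on this thread:
    //   hdfsPreAttachFSMonitor(count_nn_events, 0);
    //   hdfsFS fs = hdfsBuilderConnect(builder);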
+
+
+/**
+ * Finds files and directories matching the given name under the given path.
+ * hdfsFreeFileInfo should be called to deallocate memory.
+ *
+ *  @param fs         The filesystem (required)
+ *  @param path       Path at which to begin search, can have wild cards (must be non-blank)
+ *  @param name       Name to find, can have wild cards                  (must be non-blank)
+ *  @param numEntries Set to the number of files/directories in the result.
+ *  @return           Returns a dynamically-allocated array of hdfsFileInfo
+ *                    objects; NULL on error or empty result.
+ *                    errno is set to non-zero on error or zero on success.
+ **/
+LIBHDFS_EXTERNAL
+hdfsFileInfo * hdfsFind(hdfsFS fs, const char* path, const char* name, uint32_t * numEntries);
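
A minimal sketch (not part of the patch): searching for files by name. The path and pattern are illustrative; hdfsFreeFileInfo and the mName/mSize fields come from libhdfs' hdfs.h, which this header includes.

    #include <cstdio>
    #include "hdfspp/hdfs_ext.h"

    void list_matches(hdfsFS fs) {
      uint32_t n = 0;
      hdfsFileInfo *matches = hdfsFind(fs, "/user/*", "*.log", &n);
      if (!matches)
        return;   // error or empty result; errno distinguishes the two
      for (uint32_t i = 0; i < n; i++)
        printf("%s (%lld bytes)\n", matches[i].mName, (long long)matches[i].mSize);
      hdfsFreeFileInfo(matches, (int)n);
    }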
+
+
+/*****************************************************************************
+ *                    HDFS SNAPSHOT FUNCTIONS
+ ****************************************************************************/
+
+/**
+ * Creates a snapshot of a snapshottable directory specified by path
+ *
+ *  @param fs      The filesystem (required)
+ *  @param path    Path to the directory to be snapshotted (must be non-blank)
+ *  @param name    Name to be given to the created snapshot (may be NULL)
+ *  @return        0 on success, corresponding errno on failure
+ **/
+LIBHDFS_EXTERNAL
+int hdfsCreateSnapshot(hdfsFS fs, const char* path, const char* name);
+
+/**
+ * Deletes the directory snapshot specified by path and name
+ *
+ *  @param fs      The filesystem (required)
+ *  @param path    Path to the snapshotted directory (must be non-blank)
+ *  @param name    Name of the snapshot to be deleted (must be non-blank)
+ *  @return        0 on success, corresponding errno on failure
+ **/
+LIBHDFS_EXTERNAL
+int hdfsDeleteSnapshot(hdfsFS fs, const char* path, const char* name);
+
+/**
+ * Renames the directory snapshot specified by path from old_name to new_name
+ *
+ *  @param fs         The filesystem (required)
+ *  @param path       Path to the snapshotted directory (must be non-blank)
+ *  @param old_name   Current name of the snapshot (must be non-blank)
+ *  @param new_name   New name of the snapshot (must be non-blank)
+ *  @return           0 on success, corresponding errno on failure
+ **/
+LIBHDFS_EXTERNAL
+int hdfsRenameSnapshot(hdfsFS fs, const char* path, const char* old_name, const char* new_name);
+
+/**
+ * Allows snapshots to be made on the specified directory
+ *
+ *  @param fs      The filesystem (required)
+ *  @param path    Path to the directory to be made snapshottable (must be non-blank)
+ *  @return        0 on success, corresponding errno on failure
+ **/
+LIBHDFS_EXTERNAL
+int hdfsAllowSnapshot(hdfsFS fs, const char* path);
+
+/**
+ * Disallows snapshots from being made on the specified directory
+ *
+ *  @param fs      The filesystem (required)
+ *  @param path    Path to the directory to be made non-snapshottable (must be non-blank)
+ *  @return        0 on success, corresponding errno on failure
+ **/
+LIBHDFS_EXTERNAL
+int hdfsDisallowSnapshot(hdfsFS fs, const char* path);
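
A minimal sketch (not part of the patch): a typical snapshot lifecycle using the functions above. The directory path and snapshot names are illustrative.

    #include "hdfspp/hdfs_ext.h"

    int snapshot_roundtrip(hdfsFS fs) {
      int err;
      if ((err = hdfsAllowSnapshot(fs, "/data")) != 0)
        return err;                                   // directory must allow snapshots first
      if ((err = hdfsCreateSnapshot(fs, "/data", "before-import")) != 0)
        return err;
      if ((err = hdfsRenameSnapshot(fs, "/data", "before-import", "keep")) != 0)
        return err;
      return hdfsDeleteSnapshot(fs, "/data", "keep"); // 0 on success, errno value on failure
    }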
+
+/**
+ * Create a FileSystem based on the builder but don't connect
+ * @param bld     Used to populate config options in the same manner as hdfsBuilderConnect.
+ *                Does not free builder.
+ **/
+LIBHDFS_EXTERNAL
+hdfsFS hdfsAllocateFileSystem(struct hdfsBuilder *bld);
+
+/**
+ * Connect a FileSystem created with hdfsAllocateFileSystem
+ * @param fs      A disconnected FS created with hdfsAllocateFileSystem
+ * @param bld     The same builder used for allocation, or an exact copy; a few fields are still needed.
+ *                Does not free builder.
+ * @return        0 on success, corresponding errno on failure
+ **/
+LIBHDFS_EXTERNAL
+int hdfsConnectAllocated(hdfsFS fs, struct hdfsBuilder *bld);
+
+/**
+ * Cancel a pending connection on a FileSystem
+ * @param fs      A filesystem in the process of connecting via hdfsConnectAllocated in another thread.
+ * @return        0 on success, corresponding errno on failure
+ **/
+LIBHDFS_EXTERNAL
+int hdfsCancelPendingConnection(hdfsFS fs);
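
A minimal sketch (not part of the patch): splitting connect into an allocate phase and a connect phase, so that another thread holding the handle could call hdfsCancelPendingConnection while the connect is in flight. Cleanup of the allocated handle on failure is elided, since no such API appears in this header.

    #include <cstddef>
    #include "hdfspp/hdfs_ext.h"

    hdfsFS connect_two_phase(struct hdfsBuilder *bld) {
      hdfsFS fs = hdfsAllocateFileSystem(bld);   // config is read here; no I/O yet
      if (!fs)
        return NULL;
      // A watchdog thread holding 'fs' could call hdfsCancelPendingConnection(fs)
      // to abort the step below.
      if (hdfsConnectAllocated(fs, bld) != 0)    // same builder; not freed by the call
        return NULL;
      return fs;
    }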
+
+
+#ifdef __cplusplus
+} /* end extern "C" */
+#endif
+
+#endif

