Repository: hadoop
Updated Branches:
  refs/heads/HDFS-8707 50817dc4c -> e117c6a0a


 HDFS-9737: libhdfs++: Create examples of consuming libhdfs++ (pt 2).  
Contributed by Bob Hansen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e117c6a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e117c6a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e117c6a0

Branch: refs/heads/HDFS-8707
Commit: e117c6a0a3da29ed8ca94ff8a7b88a90cb6e9185
Parents: 50817dc
Author: Bob Hansen <b...@hp.com>
Authored: Thu Feb 4 11:33:31 2016 -0500
Committer: Bob Hansen <b...@hp.com>
Committed: Thu Feb 4 11:33:31 2016 -0500

----------------------------------------------------------------------
 .../native/libhdfspp/examples/CMakeLists.txt    |  19 +++
 .../libhdfspp/examples/cat/CMakeLists.txt       |  19 +++
 .../libhdfspp/examples/cat/c/CMakeLists.txt     |  27 ++++
 .../main/native/libhdfspp/examples/cat/c/cat.c  | 160 +++++++++++++++++++
 4 files changed, 225 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e117c6a0/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/CMakeLists.txt
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/CMakeLists.txt
new file mode 100644
index 0000000..76880cd
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/CMakeLists.txt
@@ -0,0 +1,19 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Descend into each example; "cat" is currently the only one.
add_subdirectory(cat)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e117c6a0/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cat/CMakeLists.txt
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cat/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cat/CMakeLists.txt
new file mode 100644
index 0000000..93139ce
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cat/CMakeLists.txt
@@ -0,0 +1,19 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Descend into the per-language implementations of the cat example
# ("c" is currently the only one).
add_subdirectory(c)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e117c6a0/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cat/c/CMakeLists.txt
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cat/c/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cat/c/CMakeLists.txt
new file mode 100644
index 0000000..a2dc4a4
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cat/c/CMakeLists.txt
@@ -0,0 +1,27 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Default LIBHDFSPP_DIR to the default install location.  You can override
#    it by adding -DLIBHDFSPP_DIR=... to your cmake invocation.
# NOTE: set(<var> <value> CACHE <type> <docstring>) takes the value BEFORE
# the CACHE keyword; the previous argument order left the cached default
# empty and misused ${CMAKE_INSTALL_PREFIX} as the docstring.
set(LIBHDFSPP_DIR ${CMAKE_INSTALL_PREFIX} CACHE STRING "Path to the libhdfs++ install root")

include_directories( ${LIBHDFSPP_DIR}/include )
link_directories( ${LIBHDFSPP_DIR}/lib )

add_executable(cat cat.c)
target_link_libraries(cat hdfspp)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e117c6a0/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cat/c/cat.c
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cat/c/cat.c
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cat/c/cat.c
new file mode 100644
index 0000000..b5fe311
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cat/c/cat.c
@@ -0,0 +1,160 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+/*
+  A stripped down version of unix's "cat".
+  Doesn't deal with any flags for now, will just attempt to read the whole file.
+*/
+
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "hdfspp/hdfs_ext.h"
+
/* The only URI scheme this tool accepts. */
#define SCHEME "hdfs"
/* Fixed capacity (bytes, including NUL) of the host and path buffers. */
#define MAX_STRING 1024

/* Result of parse_uri(): a decomposed hdfs:// URI or bare absolute path. */
struct Uri {
  int valid;              /* 1 if parsing succeeded, 0 otherwise */
  char host[MAX_STRING];  /* namenode host; empty string when not given */
  int port;               /* namenode port; -1 when not given */
  char path[MAX_STRING];  /* path portion of the URI */
};
+
/* Return the smaller of two ints. */
int min(int a, int b) {
  if (a < b) {
    return a;
  }
  return b;
}
+
+void parse_uri(const char * uri_string, struct Uri * uri) {
+    uri->valid = 0;
+    uri->host[0] = 0;
+    uri->port = -1;
+    uri->path[0] = 0;
+
+    // most start with hdfs scheme
+    const char * remaining;
+    const char * scheme_end = strstr(uri_string, "://");
+    if (scheme_end != NULL) {
+      if (strncmp(uri_string, SCHEME, strlen(SCHEME)) != 0)
+        return;
+
+      remaining = scheme_end + 3;
+
+      // parse authority
+      const char * authority_end = strstr(remaining, "/");
+      if (authority_end != NULL) {
+        char authority[MAX_STRING];
+        strncpy(authority, remaining, min(authority_end - remaining, 
sizeof(authority)));
+        remaining = authority_end;
+
+        char * host_port_separator = strstr(authority, ":");
+        if (host_port_separator != NULL) {
+          uri->port = strtol(host_port_separator + 1, NULL, 10);
+          if (errno != 0)
+            return;
+
+          // Terminate authority at the new end of the host
+          *host_port_separator = 0;
+        }
+        strncpy(uri->host, authority, sizeof(uri->host));
+      }
+      strncpy(uri->path, remaining, sizeof(uri->path));
+    } else {
+      // Absolute path
+      strncpy(uri->path, uri_string, sizeof(uri->path));
+    }
+
+    uri->valid = 1;
+};
+
+int main(int argc, char** argv) {
+  char error_text[1024];
+  if (argc != 2) {
+    fprintf(stderr, "usage: cat 
[hdfs://[<hostname>:<port>]]/<path-to-file>\n");
+    return 1;
+  }
+
+  const char * uri_path = argv[1];
+  struct Uri uri;
+  parse_uri(uri_path, &uri);
+  if (!uri.valid) {
+    fprintf(stderr, "malformed URI: %s\n", uri_path);
+    return 1;
+  }
+
+  struct hdfsBuilder* builder = hdfsNewBuilder();
+  if (*uri.host != 0)
+    hdfsBuilderSetNameNode(builder, uri.host);
+  if (uri.port != -1)
+    hdfsBuilderSetNameNodePort(builder, uri.port);
+
+  hdfsFS fs = hdfsBuilderConnect(builder);
+  if (fs == NULL) {
+    hdfsGetLastError(error_text, sizeof(error_text));
+    const char * host = uri.host[0] ? uri.host : "<default>";
+    int port = uri.port;
+    if (-1 == port)
+      port = 8020;
+    fprintf(stderr, "Unable to connect to %s:%d, hdfsConnect returned 
null.\n%s\n",
+            host, port, error_text);
+    return 1;
+  }
+
+  hdfsFile file = hdfsOpenFile(fs, uri.path, 0, 0, 0, 0);
+  if (NULL == file) {
+    hdfsGetLastError(error_text, sizeof(error_text));
+    fprintf(stderr, "Unable to open file %s: %s\n", uri.path, error_text );
+    hdfsDisconnect(fs);
+    hdfsFreeBuilder(builder);
+    return 1;
+  }
+
+  char input_buffer[4096];
+
+  ssize_t read_bytes_count = 0;
+  ssize_t last_read_bytes = 0;
+
+  while (0 < (last_read_bytes =
+                  hdfsPread(fs, file, read_bytes_count, input_buffer, 
sizeof(input_buffer)))) {
+    fwrite(input_buffer, last_read_bytes, 1, stdout);
+    read_bytes_count += last_read_bytes;
+  }
+
+  int res = 0;
+  res = hdfsCloseFile(fs, file);
+  if (0 != res) {
+    hdfsGetLastError(error_text, sizeof(error_text));
+    fprintf(stderr, "Error closing file: %s\n", error_text);
+    hdfsDisconnect(fs);
+    hdfsFreeBuilder(builder);
+    return 1;
+  }
+
+  res = hdfsDisconnect(fs);
+  if (0 != res) {
+    hdfsGetLastError(error_text, sizeof(error_text));
+    fprintf(stderr, "Error disconnecting filesystem: %s", error_text);
+    hdfsFreeBuilder(builder);
+    return 1;
+  }
+
+  hdfsFreeBuilder(builder);
+  return 0;
+}

Reply via email to