http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_htable.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_htable.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_htable.c
deleted file mode 100644
index 0c3861b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_htable.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "common/htable.h"
-#include "expect.h"
-#include "hdfs_test.h"
-
-#include <errno.h>
-#include <inttypes.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-// Disable type cast and loss of precision warnings, because the test
-// manipulates void* values manually on purpose.
-#ifdef WIN32
-#pragma warning(disable: 4244 4306)
-#endif
-
-static uint32_t simple_hash(const void *key, uint32_t size)
-{
-    uintptr_t k = (uintptr_t)key;
-    return ((13 + k) * 6367) % size;
-}
-
-static int simple_compare(const void *a, const void *b)
-{
-    return a == b;
-}
-
-static void expect_102(void *f, void *k, void *v)
-{
-    int *found_102 = f;
-    uintptr_t key = (uintptr_t)k;
-    uintptr_t val = (uintptr_t)v;
-
-    if ((key == 2) && (val == 102)) {
-        *found_102 = 1;
-    } else {
-        abort();
-    }
-}
-
-static void *htable_pop_val(struct htable *ht, void *key)
-{
-    void *old_key, *old_val;
-
-    htable_pop(ht, key, &old_key, &old_val);
-    return old_val;
-}
-
-int main(void)
-{
-    struct htable *ht;
-    int found_102 = 0;
-
-    ht = htable_alloc(4, simple_hash, simple_compare);
-    EXPECT_INT_EQ(0, htable_used(ht));
-    EXPECT_INT_EQ(4, htable_capacity(ht));
-    EXPECT_NULL(htable_get(ht, (void*)123));
-    EXPECT_NULL(htable_pop_val(ht, (void*)123));
-    EXPECT_ZERO(htable_put(ht, (void*)123, (void*)456));
-    EXPECT_INT_EQ(456, (uintptr_t)htable_get(ht, (void*)123));
-    EXPECT_INT_EQ(456, (uintptr_t)htable_pop_val(ht, (void*)123));
-    EXPECT_NULL(htable_pop_val(ht, (void*)123));
-
-    // Enlarge the hash table
-    EXPECT_ZERO(htable_put(ht, (void*)1, (void*)101));
-    EXPECT_ZERO(htable_put(ht, (void*)2, (void*)102));
-    EXPECT_ZERO(htable_put(ht, (void*)3, (void*)103));
-    EXPECT_INT_EQ(3, htable_used(ht));
-    EXPECT_INT_EQ(8, htable_capacity(ht));
-    EXPECT_INT_EQ(102, (uintptr_t)htable_get(ht, (void*)2));
-    EXPECT_INT_EQ(101, (uintptr_t)htable_pop_val(ht, (void*)1));
-    EXPECT_INT_EQ(103, (uintptr_t)htable_pop_val(ht, (void*)3));
-    EXPECT_INT_EQ(1, htable_used(ht));
-    htable_visit(ht, expect_102, &found_102);
-    EXPECT_INT_EQ(1, found_102);
-    htable_free(ht);
-
-    fprintf(stderr, "SUCCESS.\n");
-    return EXIT_SUCCESS;
-}
-
-// vim: ts=4:sw=4:tw=79:et

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_ops.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_ops.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_ops.c
deleted file mode 100644
index f564de4..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_ops.c
+++ /dev/null
@@ -1,540 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "hdfs.h" 
-#include "hdfs_test.h" 
-#include "platform.h"
-
-#include <inttypes.h>
-#include <jni.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <time.h>
-#include <unistd.h>
-
-void permission_disp(short permissions, char *rtr) {
-  int i;
-  short permissionsId;
-  char* perm;
-  rtr[9] = '\0';
-  for(i=2;i>=0;i--)
-    {
-      permissionsId = permissions >> (i * 3) & (short)7;
-      switch(permissionsId) {
-      case 7:
-        perm = "rwx"; break;
-      case 6:
-        perm = "rw-"; break;
-      case 5:
-        perm = "r-x"; break;
-      case 4:
-        perm = "r--"; break;
-      case 3:
-        perm = "-wx"; break;
-      case 2:
-        perm = "-w-"; break;
-      case 1:
-        perm = "--x"; break;
-      case 0:
-        perm = "---"; break;
-      default:
-        perm = "???";
-      }
-      strncpy(rtr, perm, 3);
-      rtr+=3;
-    }
-} 
-
-int main(int argc, char **argv) {
-    const char *writePath = "/tmp/testfile.txt";
-    const char *fileContents = "Hello, World!";
-    const char *readPath = "/tmp/testfile.txt";
-    const char *srcPath = "/tmp/testfile.txt";
-    const char *dstPath = "/tmp/testfile2.txt";
-    const char *slashTmp = "/tmp";
-    const char *newDirectory = "/tmp/newdir";
-    const char *newOwner = "root";
-    const char *tuser = "nobody";
-    const char *appendPath = "/tmp/appends";
-    const char *userPath = "/tmp/usertestfile.txt";
-
-    char buffer[32], buffer2[256], rdbuffer[32];
-    tSize num_written_bytes, num_read_bytes;
-    hdfsFS fs, lfs;
-    hdfsFile writeFile, readFile, localFile, appendFile, userFile;
-    tOffset currentPos, seekPos;
-    int exists, totalResult, result, numEntries, i, j;
-    const char *resp;
-    hdfsFileInfo *fileInfo, *fileList, *finfo;
-    char *buffer3;
-    char permissions[10];
-    char ***hosts;
-    short newPerm = 0666;
-    tTime newMtime, newAtime;
-
-    fs = hdfsConnectNewInstance("default", 0);
-    if(!fs) {
-        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
-        exit(-1);
-    } 
- 
-    lfs = hdfsConnectNewInstance(NULL, 0);
-    if(!lfs) {
-        fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
-        exit(-1);
-    } 
-
-    {
-        //Write tests
-        
-        writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
-        if(!writeFile) {
-            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
-            exit(-1);
-        }
-        fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
-        num_written_bytes =
-          hdfsWrite(fs, writeFile, (void*)fileContents,
-            (tSize)(strlen(fileContents)+1));
-        if (num_written_bytes != strlen(fileContents) + 1) {
-          fprintf(stderr, "Failed to write correct number of bytes - expected 
%d, got %d\n",
-                  (int)(strlen(fileContents) + 1), (int)num_written_bytes);
-            exit(-1);
-        }
-        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
-
-        currentPos = -1;
-        if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
-            fprintf(stderr, 
-                    "Failed to get current file position correctly! Got %" 
PRId64 "!\n",
-                    currentPos);
-            exit(-1);
-        }
-        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
-
-        if (hdfsFlush(fs, writeFile)) {
-            fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
-            exit(-1);
-        }
-        fprintf(stderr, "Flushed %s successfully!\n", writePath); 
-
-        if (hdfsHFlush(fs, writeFile)) {
-            fprintf(stderr, "Failed to 'hflush' %s\n", writePath);
-            exit(-1);
-        }
-        fprintf(stderr, "HFlushed %s successfully!\n", writePath);
-
-        hdfsCloseFile(fs, writeFile);
-    }
-
-    {
-        //Read tests
-        
-        exists = hdfsExists(fs, readPath);
-
-        if (exists) {
-          fprintf(stderr, "Failed to validate existence of %s\n", readPath);
-          exit(-1);
-        }
-
-        readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
-        if (!readFile) {
-            fprintf(stderr, "Failed to open %s for reading!\n", readPath);
-            exit(-1);
-        }
-
-        if (!hdfsFileIsOpenForRead(readFile)) {
-            fprintf(stderr, "hdfsFileIsOpenForRead: we just opened a file "
-                    "with O_RDONLY, and it did not show up as 'open for "
-                    "read'\n");
-            exit(-1);
-        }
-
-        fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
-
-        seekPos = 1;
-        if(hdfsSeek(fs, readFile, seekPos)) {
-            fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
-            exit(-1);
-        }
-
-        currentPos = -1;
-        if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
-            fprintf(stderr, 
-                    "Failed to get current file position correctly! Got %" 
PRId64 "!\n",
-                    currentPos);
-            exit(-1);
-        }
-        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
-
-        if (!hdfsFileUsesDirectRead(readFile)) {
-          fprintf(stderr, "Direct read support incorrectly not detected "
-                  "for HDFS filesystem\n");
-          exit(-1);
-        }
-
-        fprintf(stderr, "Direct read support detected for HDFS\n");
-
-        // Test the direct read path
-        if(hdfsSeek(fs, readFile, 0)) {
-            fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
-            exit(-1);
-        }
-        memset(buffer, 0, sizeof(buffer));
-        num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
-                sizeof(buffer));
-        if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
-            fprintf(stderr, "Failed to read (direct). Expected %s but got %s 
(%d bytes)\n",
-                    fileContents, buffer, num_read_bytes);
-            exit(-1);
-        }
-        fprintf(stderr, "Read (direct) following %d bytes:\n%s\n",
-                num_read_bytes, buffer);
-        if (hdfsSeek(fs, readFile, 0L)) {
-            fprintf(stderr, "Failed to seek to file start!\n");
-            exit(-1);
-        }
-
-        // Disable the direct read path so that we really go through the slow
-        // read path
-        hdfsFileDisableDirectRead(readFile);
-
-        num_read_bytes = hdfsRead(fs, readFile, (void*)buffer, 
-                sizeof(buffer));
-        fprintf(stderr, "Read following %d bytes:\n%s\n", 
-                num_read_bytes, buffer);
-
-        memset(buffer, 0, strlen(fileContents + 1));
-
-        num_read_bytes = hdfsPread(fs, readFile, 0, (void*)buffer, 
-                sizeof(buffer));
-        fprintf(stderr, "Read following %d bytes:\n%s\n", 
-                num_read_bytes, buffer);
-
-        hdfsCloseFile(fs, readFile);
-
-        // Test correct behaviour for unsupported filesystems
-        localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
-        if(!localFile) {
-            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
-            exit(-1);
-        }
-
-        num_written_bytes = hdfsWrite(lfs, localFile, (void*)fileContents,
-                                      (tSize)(strlen(fileContents) + 1));
-
-        hdfsCloseFile(lfs, localFile);
-        localFile = hdfsOpenFile(lfs, writePath, O_RDONLY, 0, 0, 0);
-
-        if (hdfsFileUsesDirectRead(localFile)) {
-          fprintf(stderr, "Direct read support incorrectly detected for local "
-                  "filesystem\n");
-          exit(-1);
-        }
-
-        hdfsCloseFile(lfs, localFile);
-    }
-
-    totalResult = 0;
-    result = 0;
-    {
-        //Generic file-system operations
-
-        fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = 
hdfsCopy(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = 
hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-        fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = 
hdfsMove(lfs, srcPath, lfs, dstPath)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-        fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = 
hdfsMove(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-
-        fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, 
srcPath)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = 
hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-
-        fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = 
hdfsCreateDirectory(fs, newDirectory)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-
-        fprintf(stderr, "hdfsSetReplication: %s\n", ((result = 
hdfsSetReplication(fs, srcPath, 2)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-
-        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = 
hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : 
"Failed!"));
-        totalResult += (resp ? 0 : 1);
-        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = 
hdfsSetWorkingDirectory(fs, slashTmp)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = 
hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : 
"Failed!"));
-        totalResult += (resp ? 0 : 1);
-
-        fprintf(stderr, "hdfsGetDefaultBlockSize: %" PRId64 "\n", 
hdfsGetDefaultBlockSize(fs));
-        fprintf(stderr, "hdfsGetCapacity: %" PRId64 "\n", hdfsGetCapacity(fs));
-        fprintf(stderr, "hdfsGetUsed: %" PRId64 "\n", hdfsGetUsed(fs));
-
-        fileInfo = NULL;
-        if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
-            fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
-            fprintf(stderr, "Name: %s, ", fileInfo->mName);
-            fprintf(stderr, "Type: %c, ", (char)(fileInfo->mKind));
-            fprintf(stderr, "Replication: %d, ", fileInfo->mReplication);
-            fprintf(stderr, "BlockSize: %" PRId64 ", ", fileInfo->mBlockSize);
-            fprintf(stderr, "Size: %" PRId64 ", ", fileInfo->mSize);
-            fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod)); 
-            fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
-            fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
-            permission_disp(fileInfo->mPermissions, permissions);
-            fprintf(stderr, "Permissions: %d (%s)\n", fileInfo->mPermissions, 
permissions);
-            hdfsFreeFileInfo(fileInfo, 1);
-        } else {
-            totalResult++;
-            fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", 
slashTmp);
-        }
-
-        fileList = 0;
-        fileList = hdfsListDirectory(fs, newDirectory, &numEntries);
-        if (!(fileList == NULL && numEntries == 0 && !errno)) {
-            fprintf(stderr, "waah! hdfsListDirectory for empty %s - 
FAILED!\n", newDirectory);
-            totalResult++;
-        } else {
-            fprintf(stderr, "hdfsListDirectory for empty %s - SUCCESS!\n", 
newDirectory);
-        }
-
-        fileList = 0;
-        if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
-            for(i=0; i < numEntries; ++i) {
-                fprintf(stderr, "Name: %s, ", fileList[i].mName);
-                fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
-                fprintf(stderr, "Replication: %d, ", fileList[i].mReplication);
-                fprintf(stderr, "BlockSize: %" PRId64 ", ", 
fileList[i].mBlockSize);
-                fprintf(stderr, "Size: %" PRId64 ", ", fileList[i].mSize);
-                fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
-                fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
-                fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
-                permission_disp(fileList[i].mPermissions, permissions);
-                fprintf(stderr, "Permissions: %d (%s)\n", 
fileList[i].mPermissions, permissions);
-            }
-            hdfsFreeFileInfo(fileList, numEntries);
-        } else {
-            if (errno) {
-                totalResult++;
-                fprintf(stderr, "waah! hdfsListDirectory - FAILED!\n");
-            } else {
-                fprintf(stderr, "Empty directory!\n");
-            }
-        }
-
-        hosts = hdfsGetHosts(fs, srcPath, 0, 1);
-        if(hosts) {
-            fprintf(stderr, "hdfsGetHosts - SUCCESS! ... \n");
-            i=0; 
-            while(hosts[i]) {
-                j = 0;
-                while(hosts[i][j]) {
-                    fprintf(stderr, 
-                            "\thosts[%d][%d] - %s\n", i, j, hosts[i][j]);
-                    ++j;
-                }
-                ++i;
-            }
-        } else {
-            totalResult++;
-            fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
-        }
-       
-        // setting tmp dir to 777 so later when connectAsUser nobody, we can write to it
-
-        // chown write
-        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-        // chmod write
-        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-
-
-
-        sleep(2);
-        newMtime = time(NULL);
-        newAtime = time(NULL);
-
-        // utime write
-        fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, 
newMtime, newAtime)) != 0 ? "Failed!" : "Success!"));
-
-        totalResult += result;
-
-        // chown/chmod/utime read
-        finfo = hdfsGetPathInfo(fs, writePath);
-
-        fprintf(stderr, "hdfsChown read: %s\n", ((result = 
(strcmp(finfo->mOwner, newOwner))) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-
-        fprintf(stderr, "hdfsChmod read: %s\n", ((result = 
(finfo->mPermissions != newPerm)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-
-        // will later use /tmp/ as a different user so enable it
-        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 
0777)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-
-        fprintf(stderr,"newMTime=%ld\n",newMtime);
-        fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);
-
-
-        fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = 
(finfo->mLastMod != newMtime)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-
-        // No easy way to turn on access times from hdfs_test right now
-        //        fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = 
(finfo->mLastAccess != newAtime)) != 0 ? "Failed!" : "Success!"));
-        //        totalResult += result;
-
-        hdfsFreeFileInfo(finfo, 1);
-
-        // Clean up
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, 
newDirectory, 1)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 
1)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, 
srcPath, 1)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, 
dstPath, 1)) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-        fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, 
newDirectory)) != 0 ? "Success!" : "Failed!"));
-        totalResult += (result ? 0 : 1);
-    }
-
-    {
-      // TEST APPENDS
-
-      // CREATE
-      appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
-      if(!appendFile) {
-        fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
-        exit(-1);
-      }
-      fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
-
-      buffer3 = "Hello,";
-      num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
-        (tSize)strlen(buffer3));
-      fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
-
-      if (hdfsFlush(fs, appendFile)) {
-        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
-        exit(-1);
-        }
-      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 
-
-      hdfsCloseFile(fs, appendFile);
-
-      // RE-OPEN
-      appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY|O_APPEND, 0, 0, 0);
-      if(!appendFile) {
-        fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
-        exit(-1);
-      }
-      fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
-
-      buffer3 = " World";
-      num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
-        (tSize)(strlen(buffer3) + 1));
-      fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
-
-      if (hdfsFlush(fs, appendFile)) {
-        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
-        exit(-1);
-      }
-      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 
-
-      hdfsCloseFile(fs, appendFile);
-
-      // CHECK size
-      finfo = hdfsGetPathInfo(fs, appendPath);
-      fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = 
(finfo->mSize == (tOffset)(strlen("Hello, World") + 1))) == 1 ? "Success!" : 
"Failed!"));
-      totalResult += (result ? 0 : 1);
-
-      // READ and check data
-      readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
-      if (!readFile) {
-        fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
-        exit(-1);
-      }
-
-      num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
-      fprintf(stderr, "Read following %d bytes:\n%s\n", 
-              num_read_bytes, rdbuffer);
-
-      fprintf(stderr, "read == Hello, World %s\n", ((result = 
(strcmp(rdbuffer, "Hello, World"))) == 0 ? "Success!" : "Failed!"));
-
-      hdfsCloseFile(fs, readFile);
-
-      // DONE test appends
-    }
-      
-      
-    totalResult += (hdfsDisconnect(fs) != 0);
-
-    {
-      //
-      // Now test as connecting as a specific user
-      // This is only meant to test that we connected as that user, not to test
-      // the actual fs user capabilities. Thus just create a file and read
-      // the owner is correct.
-
-      fs = hdfsConnectAsUserNewInstance("default", 0, tuser);
-      if(!fs) {
-        fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
-        exit(-1);
-      } 
-
-        userFile = hdfsOpenFile(fs, userPath, O_WRONLY|O_CREAT, 0, 0, 0);
-        if(!userFile) {
-            fprintf(stderr, "Failed to open %s for writing!\n", userPath);
-            exit(-1);
-        }
-        fprintf(stderr, "Opened %s for writing successfully...\n", userPath);
-
-        num_written_bytes = hdfsWrite(fs, userFile, (void*)fileContents,
-          (tSize)(strlen(fileContents)+1));
-        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
-
-        if (hdfsFlush(fs, userFile)) {
-            fprintf(stderr, "Failed to 'flush' %s\n", userPath); 
-            exit(-1);
-        }
-        fprintf(stderr, "Flushed %s successfully!\n", userPath); 
-
-        hdfsCloseFile(fs, userFile);
-
-        finfo = hdfsGetPathInfo(fs, userPath);
-        fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = 
(strcmp(finfo->mOwner, tuser))) != 0 ? "Failed!" : "Success!"));
-        totalResult += result;
-    }
-    
-    totalResult += (hdfsDisconnect(fs) != 0);
-
-    if (totalResult != 0) {
-        return -1;
-    } else {
-        return 0;
-    }
-}
-
-/**
- * vim: ts=4: sw=4: et:
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_read.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_read.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_read.c
deleted file mode 100644
index 6e44741..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_read.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "hdfs.h" 
-
-#include <stdio.h>
-#include <stdlib.h>
-
-int main(int argc, char **argv) {
-    hdfsFS fs;
-    const char *rfile = argv[1];
-    tSize bufferSize = strtoul(argv[3], NULL, 10);
-    hdfsFile readFile;
-    char* buffer;
-    tSize curSize;
-
-    if (argc != 4) {
-        fprintf(stderr, "Usage: hdfs_read <filename> <filesize> 
<buffersize>\n");
-        exit(-1);
-    }
-    
-    fs = hdfsConnect("default", 0);
-    if (!fs) {
-        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
-        exit(-1);
-    } 
-
-    readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
-    if (!readFile) {
-        fprintf(stderr, "Failed to open %s for writing!\n", rfile);
-        exit(-2);
-    }
-
-    // data to be written to the file
-    buffer = malloc(sizeof(char) * bufferSize);
-    if(buffer == NULL) {
-        return -2;
-    }
-    
-    // read from the file
-    curSize = bufferSize;
-    for (; curSize == bufferSize;) {
-        curSize = hdfsRead(fs, readFile, (void*)buffer, curSize);
-    }
-    
-
-    free(buffer);
-    hdfsCloseFile(fs, readFile);
-    hdfsDisconnect(fs);
-
-    return 0;
-}
-
-/**
- * vim: ts=4: sw=4: et:
- */
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_write.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_write.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_write.c
deleted file mode 100644
index 42b3df7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_write.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "hdfs.h" 
-
-#include <limits.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/types.h>
-
-int main(int argc, char **argv) {
-    hdfsFS fs;
-    const char *writeFileName = argv[1];
-    off_t fileTotalSize = strtoul(argv[2], NULL, 10);
-    long long tmpBufferSize = strtoul(argv[3], NULL, 10);
-    tSize bufferSize;
-    hdfsFile writeFile;
-    char* buffer;
-    int i;
-    off_t nrRemaining;
-    tSize curSize;
-    tSize written;
-
-    if (argc != 4) {
-        fprintf(stderr, "Usage: hdfs_write <filename> <filesize> 
<buffersize>\n");
-        exit(-1);
-    }
-    
-    fs = hdfsConnect("default", 0);
-    if (!fs) {
-        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
-        exit(-1);
-    } 
-
-    // sanity check
-    if(fileTotalSize == ULONG_MAX && errno == ERANGE) {
-      fprintf(stderr, "invalid file size %s - must be <= %lu\n", argv[2], 
ULONG_MAX);
-      exit(-3);
-    }
-
-    // currently libhdfs writes are of tSize which is int32
-    if(tmpBufferSize > INT_MAX) {
-      fprintf(stderr, "invalid buffer size libhdfs API write chunks must be <= 
%d\n",INT_MAX);
-      exit(-3);
-    }
-
-    bufferSize = (tSize)tmpBufferSize;
-
-    writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 0, 0);
-    if (!writeFile) {
-        fprintf(stderr, "Failed to open %s for writing!\n", writeFileName);
-        exit(-2);
-    }
-
-    // data to be written to the file
-    buffer = malloc(sizeof(char) * bufferSize);
-    if(buffer == NULL) {
-        fprintf(stderr, "Could not allocate buffer of size %d\n", bufferSize);
-        return -2;
-    }
-    for (i=0; i < bufferSize; ++i) {
-        buffer[i] = 'a' + (i%26);
-    }
-
-    // write to the file
-    for (nrRemaining = fileTotalSize; nrRemaining > 0; nrRemaining -= bufferSize ) {
-      curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining;
-      if ((written = hdfsWrite(fs, writeFile, (void*)buffer, curSize)) != curSize) {
-        fprintf(stderr, "ERROR: hdfsWrite returned an error on write: %d\n", written);
-        exit(-3);
-      }
-    }
-
-    free(buffer);
-    hdfsCloseFile(fs, writeFile);
-    hdfsDisconnect(fs);
-
-    return 0;
-}
-
-/**
- * vim: ts=4: sw=4: et:
- */
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
deleted file mode 100644
index 92941cf..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "expect.h"
-#include "hdfs.h"
-#include "native_mini_dfs.h"
-#include "platform.h"
-
-#include <errno.h>
-#include <inttypes.h>
-#include <unistd.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-
-#define TO_STR_HELPER(X) #X
-#define TO_STR(X) TO_STR_HELPER(X)
-
-#define TEST_FILE_NAME_LENGTH 128
-#define TEST_ZEROCOPY_FULL_BLOCK_SIZE 4096
-#define TEST_ZEROCOPY_LAST_BLOCK_SIZE 3215
-#define TEST_ZEROCOPY_NUM_BLOCKS 6
-#define SMALL_READ_LEN 16
-#define TEST_ZEROCOPY_FILE_LEN \
-  (((TEST_ZEROCOPY_NUM_BLOCKS - 1) * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + \
-    TEST_ZEROCOPY_LAST_BLOCK_SIZE)
-
-#define ZC_BUF_LEN 32768
-
-static uint8_t *getZeroCopyBlockData(int blockIdx)
-{
-    uint8_t *buf = malloc(TEST_ZEROCOPY_FULL_BLOCK_SIZE);
-    int i;
-    if (!buf) {
-        fprintf(stderr, "malloc(%d) failed\n", TEST_ZEROCOPY_FULL_BLOCK_SIZE);
-        exit(1);
-    }
-    for (i = 0; i < TEST_ZEROCOPY_FULL_BLOCK_SIZE; i++) {
-      buf[i] = (uint8_t)(blockIdx + (i % 17));
-    }
-    return buf;
-}
-
-static int getZeroCopyBlockLen(int blockIdx)
-{
-    if (blockIdx >= TEST_ZEROCOPY_NUM_BLOCKS) {
-        return 0;
-    } else if (blockIdx == (TEST_ZEROCOPY_NUM_BLOCKS - 1)) {
-        return TEST_ZEROCOPY_LAST_BLOCK_SIZE;
-    } else {
-        return TEST_ZEROCOPY_FULL_BLOCK_SIZE;
-    }
-}
-
-static int doTestZeroCopyReads(hdfsFS fs, const char *fileName)
-{
-    hdfsFile file = NULL;
-    struct hadoopRzOptions *opts = NULL;
-    struct hadoopRzBuffer *buffer = NULL;
-    uint8_t *block;
-
-    file = hdfsOpenFile(fs, fileName, O_RDONLY, 0, 0, 0);
-    EXPECT_NONNULL(file);
-    opts = hadoopRzOptionsAlloc();
-    EXPECT_NONNULL(opts);
-    EXPECT_ZERO(hadoopRzOptionsSetSkipChecksum(opts, 1));
-    /* haven't read anything yet */
-    EXPECT_ZERO(expectFileStats(file, 0LL, 0LL, 0LL, 0LL));
-    block = getZeroCopyBlockData(0);
-    EXPECT_NONNULL(block);
-    /* first read is half of a block. */
-    buffer = hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2);
-    EXPECT_NONNULL(buffer);
-    EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2,
-          hadoopRzBufferLength(buffer));
-    EXPECT_ZERO(memcmp(hadoopRzBufferGet(buffer), block,
-          TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2));
-    hadoopRzBufferFree(file, buffer);
-    /* read the next half of the block */
-    buffer = hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2);
-    EXPECT_NONNULL(buffer);
-    EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2,
-          hadoopRzBufferLength(buffer));
-    EXPECT_ZERO(memcmp(hadoopRzBufferGet(buffer),
-          block + (TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2),
-          TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2));
-    hadoopRzBufferFree(file, buffer);
-    free(block);
-    EXPECT_ZERO(expectFileStats(file, TEST_ZEROCOPY_FULL_BLOCK_SIZE, 
-              TEST_ZEROCOPY_FULL_BLOCK_SIZE,
-              TEST_ZEROCOPY_FULL_BLOCK_SIZE,
-              TEST_ZEROCOPY_FULL_BLOCK_SIZE));
-    /* Now let's read just a few bytes. */
-    buffer = hadoopReadZero(file, opts, SMALL_READ_LEN);
-    EXPECT_NONNULL(buffer);
-    EXPECT_INT_EQ(SMALL_READ_LEN, hadoopRzBufferLength(buffer));
-    block = getZeroCopyBlockData(1);
-    EXPECT_NONNULL(block);
-    EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer), SMALL_READ_LEN));
-    hadoopRzBufferFree(file, buffer);
-    EXPECT_INT64_EQ(
-          (int64_t)TEST_ZEROCOPY_FULL_BLOCK_SIZE + (int64_t)SMALL_READ_LEN,
-          hdfsTell(fs, file));
-    EXPECT_ZERO(expectFileStats(file,
-          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
-          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
-          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
-          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN));
-
-    /* Clear 'skip checksums' and test that we can't do zero-copy reads any
-     * more.  Since there is no ByteBufferPool set, we should fail with
-     * EPROTONOSUPPORT.
-     */
-    EXPECT_ZERO(hadoopRzOptionsSetSkipChecksum(opts, 0));
-    EXPECT_NULL(hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE));
-    EXPECT_INT_EQ(EPROTONOSUPPORT, errno);
-
-    /* Verify that setting a NULL ByteBufferPool class works. */
-    EXPECT_ZERO(hadoopRzOptionsSetByteBufferPool(opts, NULL));
-    EXPECT_ZERO(hadoopRzOptionsSetSkipChecksum(opts, 0));
-    EXPECT_NULL(hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE));
-    EXPECT_INT_EQ(EPROTONOSUPPORT, errno);
-
-    /* Now set a ByteBufferPool and try again.  It should succeed this time. */
-    EXPECT_ZERO(hadoopRzOptionsSetByteBufferPool(opts,
-          ELASTIC_BYTE_BUFFER_POOL_CLASS));
-    buffer = hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE);
-    EXPECT_NONNULL(buffer);
-    EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE, hadoopRzBufferLength(buffer));
-    EXPECT_ZERO(expectFileStats(file,
-          (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN,
-          (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN,
-          (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN,
-          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN));
-    EXPECT_ZERO(memcmp(block + SMALL_READ_LEN, hadoopRzBufferGet(buffer),
-        TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN));
-    free(block);
-    block = getZeroCopyBlockData(2);
-    EXPECT_NONNULL(block);
-    EXPECT_ZERO(memcmp(block, (uint8_t*)hadoopRzBufferGet(buffer) +
-        (TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN), SMALL_READ_LEN));
-    hadoopRzBufferFree(file, buffer);
-
-    /* Check the result of a zero-length read. */
-    buffer = hadoopReadZero(file, opts, 0);
-    EXPECT_NONNULL(buffer);
-    EXPECT_NONNULL(hadoopRzBufferGet(buffer));
-    EXPECT_INT_EQ(0, hadoopRzBufferLength(buffer));
-    hadoopRzBufferFree(file, buffer);
-
-    /* Check the result of reading past EOF */
-    EXPECT_INT_EQ(0, hdfsSeek(fs, file, TEST_ZEROCOPY_FILE_LEN));
-    buffer = hadoopReadZero(file, opts, 1);
-    EXPECT_NONNULL(buffer);
-    EXPECT_NULL(hadoopRzBufferGet(buffer));
-    hadoopRzBufferFree(file, buffer);
-
-    /* Cleanup */
-    free(block);
-    hadoopRzOptionsFree(opts);
-    EXPECT_ZERO(hdfsCloseFile(fs, file));
-    return 0;
-}
-
-static int createZeroCopyTestFile(hdfsFS fs, char *testFileName,
-                                  size_t testFileNameLen)
-{
-    int blockIdx, blockLen;
-    hdfsFile file;
-    uint8_t *data;
-
-    snprintf(testFileName, testFileNameLen, "/zeroCopyTestFile.%d.%d",
-             getpid(), rand());
-    file = hdfsOpenFile(fs, testFileName, O_WRONLY, 0, 1,
-                        TEST_ZEROCOPY_FULL_BLOCK_SIZE);
-    EXPECT_NONNULL(file);
-    for (blockIdx = 0; blockIdx < TEST_ZEROCOPY_NUM_BLOCKS; blockIdx++) {
-        blockLen = getZeroCopyBlockLen(blockIdx);
-        data = getZeroCopyBlockData(blockIdx);
-        EXPECT_NONNULL(data);
-        EXPECT_INT_EQ(blockLen, hdfsWrite(fs, file, data, blockLen));
-    }
-    EXPECT_ZERO(hdfsCloseFile(fs, file));
-    return 0;
-}
-
-static int nmdConfigureHdfsBuilder(struct NativeMiniDfsCluster *cl,
-                            struct hdfsBuilder *bld) {
-    int ret;
-    tPort port;
-    const char *domainSocket;
-
-    hdfsBuilderSetNameNode(bld, "localhost");
-    port = (tPort) nmdGetNameNodePort(cl);
-    if (port < 0) {
-      fprintf(stderr, "nmdGetNameNodePort failed with error %d\n", -port);
-      return EIO;
-    }
-    hdfsBuilderSetNameNodePort(bld, port);
-
-    domainSocket = hdfsGetDomainSocketPath(cl);
-
-    if (domainSocket) {
-      ret = hdfsBuilderConfSetStr(bld, "dfs.client.read.shortcircuit", "true");
-      if (ret) {
-        return ret;
-      }
-      ret = hdfsBuilderConfSetStr(bld, "dfs.domain.socket.path",
-                                  domainSocket);
-      if (ret) {
-        return ret;
-      }
-    }
-    return 0;
-}
-
-
-/**
- * Test that we can write a file with libhdfs and then read it back
- */
-int main(void)
-{
-    int port;
-    struct NativeMiniDfsConf conf = {
-        1, /* doFormat */
-        0, /* webhdfsEnabled */
-        0, /* namenodeHttpPort */
-        1, /* configureShortCircuit */
-    };
-    char testFileName[TEST_FILE_NAME_LENGTH];
-    hdfsFS fs;
-    struct NativeMiniDfsCluster* cl;
-    struct hdfsBuilder *bld;
-
-    cl = nmdCreate(&conf);
-    EXPECT_NONNULL(cl);
-    EXPECT_ZERO(nmdWaitClusterUp(cl));
-    port = nmdGetNameNodePort(cl);
-    if (port < 0) {
-        fprintf(stderr, "TEST_ERROR: test_zerocopy: "
-                "nmdGetNameNodePort returned error %d\n", port);
-        return EXIT_FAILURE;
-    }
-    bld = hdfsNewBuilder();
-    EXPECT_NONNULL(bld);
-    EXPECT_ZERO(nmdConfigureHdfsBuilder(cl, bld));
-    hdfsBuilderSetForceNewInstance(bld);
-    hdfsBuilderConfSetStr(bld, "dfs.block.size",
-                          TO_STR(TEST_ZEROCOPY_FULL_BLOCK_SIZE));
-    /* ensure that we'll always get our mmaps */
-    hdfsBuilderConfSetStr(bld, "dfs.client.read.shortcircuit.skip.checksum",
-                          "true");
-    fs = hdfsBuilderConnect(bld);
-    EXPECT_NONNULL(fs);
-    EXPECT_ZERO(createZeroCopyTestFile(fs, testFileName,
-          TEST_FILE_NAME_LENGTH));
-    EXPECT_ZERO(doTestZeroCopyReads(fs, testFileName));
-    EXPECT_ZERO(hdfsDisconnect(fs));
-    EXPECT_ZERO(nmdShutdown(cl));
-    nmdFree(cl);
-    fprintf(stderr, "TEST_SUCCESS\n"); 
-    return EXIT_SUCCESS;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/vecsum.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/vecsum.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/vecsum.c
deleted file mode 100644
index 80a64b4..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/vecsum.c
+++ /dev/null
@@ -1,825 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <errno.h>
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/resource.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <time.h>
-#include <unistd.h>
-
-#ifdef __MACH__ // OS X does not have clock_gettime
-#include <mach/clock.h>
-#include <mach/mach.h>
-#include <mach/mach_time.h>
-#endif
-
-#include "config.h"
-#include "hdfs.h"
-
-#define VECSUM_CHUNK_SIZE (8 * 1024 * 1024)
-#define ZCR_READ_CHUNK_SIZE (1024 * 1024 * 8)
-#define NORMAL_READ_CHUNK_SIZE (8 * 1024 * 1024)
-#define DOUBLES_PER_LOOP_ITER 16
-
-static double timespec_to_double(const struct timespec *ts)
-{
-    double sec = ts->tv_sec;
-    double nsec = ts->tv_nsec;
-    return sec + (nsec / 1000000000L);
-}
-
-struct stopwatch {
-    struct timespec start;
-    struct timespec stop;
-};
-
-
-#ifdef __MACH__
-static int clock_gettime_mono(struct timespec * ts) {
-    static mach_timebase_info_data_t tb;
-    static uint64_t timestart = 0;
-    uint64_t t = 0;
-    if (timestart == 0) {
-        mach_timebase_info(&tb);
-        timestart = mach_absolute_time();
-    }
-    t = mach_absolute_time() - timestart;
-    t *= tb.numer;
-    t /= tb.denom;
-    ts->tv_sec = t / 1000000000ULL;
-    ts->tv_nsec = t - (ts->tv_sec * 1000000000ULL);
-    return 0;
-}
-#else
-static int clock_gettime_mono(struct timespec * ts) {
-    return clock_gettime(CLOCK_MONOTONIC, ts);
-}
-#endif
-
-static struct stopwatch *stopwatch_create(void)
-{
-    struct stopwatch *watch;
-
-    watch = calloc(1, sizeof(struct stopwatch));
-    if (!watch) {
-        fprintf(stderr, "failed to allocate memory for stopwatch\n");
-        goto error;
-    }
-    if (clock_gettime_mono(&watch->start)) {
-        int err = errno;
-        fprintf(stderr, "clock_gettime(CLOCK_MONOTONIC) failed with "
-            "error %d (%s)\n", err, strerror(err));
-        goto error;
-    }
-    return watch;
-
-error:
-    free(watch);
-    return NULL;
-}
-
-static void stopwatch_stop(struct stopwatch *watch,
-        long long bytes_read)
-{
-    double elapsed, rate;
-
-    if (clock_gettime_mono(&watch->stop)) {
-        int err = errno;
-        fprintf(stderr, "clock_gettime(CLOCK_MONOTONIC) failed with "
-            "error %d (%s)\n", err, strerror(err));
-        goto done;
-    }
-    elapsed = timespec_to_double(&watch->stop) -
-        timespec_to_double(&watch->start);
-    rate = (bytes_read / elapsed) / (1024 * 1024 * 1024);
-    printf("stopwatch: took %.5g seconds to read %lld bytes, "
-        "for %.5g GB/s\n", elapsed, bytes_read, rate);
-    printf("stopwatch:  %.5g seconds\n", elapsed);
-done:
-    free(watch);
-}
-
-enum vecsum_type {
-    VECSUM_LOCAL = 0,
-    VECSUM_LIBHDFS,
-    VECSUM_ZCR,
-};
-
-#define VECSUM_TYPE_VALID_VALUES "libhdfs, zcr, or local"
-
-int parse_vecsum_type(const char *str)
-{
-    if (strcasecmp(str, "local") == 0)
-        return VECSUM_LOCAL;
-    else if (strcasecmp(str, "libhdfs") == 0)
-        return VECSUM_LIBHDFS;
-    else if (strcasecmp(str, "zcr") == 0)
-        return VECSUM_ZCR;
-    else
-        return -1;
-}
-
-struct options {
-    // The path to read.
-    const char *path;
-
-    // Length of the file.
-    long long length;
-
-    // The number of times to read the path.
-    int passes;
-
-    // Type of vecsum to do
-    enum vecsum_type ty;
-
-    // RPC address to use for HDFS
-    const char *rpc_address;
-};
-
-static struct options *options_create(void)
-{
-    struct options *opts = NULL;
-    const char *pass_str;
-    const char *ty_str;
-    const char *length_str;
-    int ty;
-
-    opts = calloc(1, sizeof(struct options));
-    if (!opts) {
-        fprintf(stderr, "failed to calloc options\n");
-        goto error;
-    }
-    opts->path = getenv("VECSUM_PATH");
-    if (!opts->path) {
-        fprintf(stderr, "You must set the VECSUM_PATH environment "
-            "variable to the path of the file to read.\n");
-        goto error;
-    }
-    length_str = getenv("VECSUM_LENGTH");
-    if (!length_str) {
-        length_str = "2147483648";
-    }
-    opts->length = atoll(length_str);
-    if (!opts->length) {
-        fprintf(stderr, "Can't parse VECSUM_LENGTH of '%s'.\n",
-                length_str);
-        goto error;
-    }
-    if (opts->length % VECSUM_CHUNK_SIZE) {
-        fprintf(stderr, "VECSUM_LENGTH must be a multiple of '%lld'.  The "
-                "currently specified length of '%lld' is not.\n",
-                (long long)VECSUM_CHUNK_SIZE, (long long)opts->length);
-        goto error;
-    }
-    pass_str = getenv("VECSUM_PASSES");
-    if (!pass_str) {
-        fprintf(stderr, "You must set the VECSUM_PASSES environment "
-            "variable to the number of passes to make.\n");
-        goto error;
-    }
-    opts->passes = atoi(pass_str);
-    if (opts->passes <= 0) {
-        fprintf(stderr, "Invalid value for the VECSUM_PASSES "
-            "environment variable.  You must set this to a "
-            "number greater than 0.\n");
-        goto error;
-    }
-    ty_str = getenv("VECSUM_TYPE");
-    if (!ty_str) {
-        fprintf(stderr, "You must set the VECSUM_TYPE environment "
-            "variable to " VECSUM_TYPE_VALID_VALUES "\n");
-        goto error;
-    }
-    ty = parse_vecsum_type(ty_str);
-    if (ty < 0) {
-        fprintf(stderr, "Invalid VECSUM_TYPE environment variable.  "
-            "Valid values are " VECSUM_TYPE_VALID_VALUES "\n");
-        goto error;
-    }
-    opts->ty = ty;
-    opts->rpc_address = getenv("VECSUM_RPC_ADDRESS");
-    if (!opts->rpc_address) {
-        opts->rpc_address = "default";
-    }
-    return opts;
-error:
-    free(opts);
-    return NULL;
-}
-
-static int test_file_chunk_setup(double **chunk)
-{
-    int i;
-    double *c, val;
-
-    c = malloc(VECSUM_CHUNK_SIZE);
-    if (!c) {
-        fprintf(stderr, "test_file_create: failed to malloc "
-                "a buffer of size '%lld'\n",
-                (long long) VECSUM_CHUNK_SIZE);
-        return EIO;
-    }
-    val = 0.0;
-    for (i = 0; i < VECSUM_CHUNK_SIZE / sizeof(double); i++) {
-        c[i] = val;
-        val += 0.5;
-    }
-    *chunk = c;
-    return 0;
-}
-
-static void options_free(struct options *opts)
-{
-    free(opts);
-}
-
-struct local_data {
-    int fd;
-    double *mmap;
-    long long length;
-};
-
-static int local_data_create_file(struct local_data *cdata,
-                                  const struct options *opts)
-{
-    int ret = EIO;
-    int dup_fd = -1;
-    FILE *fp = NULL;
-    double *chunk = NULL;
-    long long offset = 0;
-
-    dup_fd = dup(cdata->fd);
-    if (dup_fd < 0) {
-        ret = errno;
-        fprintf(stderr, "local_data_create_file: dup failed: %s (%d)\n",
-                strerror(ret), ret);
-        goto done;
-    }
-    fp = fdopen(dup_fd, "w");
-    if (!fp) {
-        ret = errno;
-        fprintf(stderr, "local_data_create_file: fdopen failed: %s (%d)\n",
-                strerror(ret), ret);
-        goto done;
-    }
-    ret = test_file_chunk_setup(&chunk);
-    if (ret)
-        goto done;
-    while (offset < opts->length) {
-        if (fwrite(chunk, VECSUM_CHUNK_SIZE, 1, fp) != 1) {
-            fprintf(stderr, "local_data_create_file: failed to write to "
-                    "the local file '%s' at offset %lld\n",
-                    opts->path, offset);
-            ret = EIO;
-            goto done;
-        }
-        offset += VECSUM_CHUNK_SIZE;
-    }
-    fprintf(stderr, "local_data_create_file: successfully re-wrote %s as "
-            "a file of length %lld\n", opts->path, opts->length);
-    ret = 0;
-
-done:
-    if (dup_fd >= 0) {
-        close(dup_fd);
-    }
-    if (fp) {
-        fclose(fp);
-    }
-    free(chunk);
-    return ret;
-}
-
-static struct local_data *local_data_create(const struct options *opts)
-{
-    struct local_data *cdata = NULL;
-    struct stat st_buf;
-
-    cdata = malloc(sizeof(*cdata));
-    if (!cdata) {
-        fprintf(stderr, "Failed to allocate local test data.\n");
-        goto error;
-    }
-    cdata->fd = -1;
-    cdata->mmap = MAP_FAILED;
-    cdata->length = opts->length;
-
-    cdata->fd = open(opts->path, O_RDWR | O_CREAT, 0777);
-    if (cdata->fd < 0) {
-        int err = errno;
-        fprintf(stderr, "local_data_create: failed to open %s "
-            "for read/write: error %d (%s)\n", opts->path, err, strerror(err));
-        goto error;
-    }
-    if (fstat(cdata->fd, &st_buf)) {
-        int err = errno;
-        fprintf(stderr, "local_data_create: fstat(%s) failed: "
-            "error %d (%s)\n", opts->path, err, strerror(err));
-        goto error;
-    }
-    if (st_buf.st_size != opts->length) {
-        int err;
-        fprintf(stderr, "local_data_create: current size of %s is %lld, but "
-                "we want %lld.  Re-writing the file.\n",
-                opts->path, (long long)st_buf.st_size,
-                (long long)opts->length);
-        err = local_data_create_file(cdata, opts);
-        if (err)
-            goto error;
-    }
-    cdata->mmap = mmap(NULL, cdata->length, PROT_READ,
-                       MAP_PRIVATE, cdata->fd, 0);
-    if (cdata->mmap == MAP_FAILED) {
-        int err = errno;
-        fprintf(stderr, "local_data_create: mmap(%s) failed: "
-            "error %d (%s)\n", opts->path, err, strerror(err));
-        goto error;
-    }
-    return cdata;
-
-error:
-    if (cdata) {
-        if (cdata->fd >= 0) {
-            close(cdata->fd);
-        }
-        free(cdata);
-    }
-    return NULL;
-}
-
-static void local_data_free(struct local_data *cdata)
-{
-    close(cdata->fd);
-    munmap(cdata->mmap, cdata->length);
-}
-
-struct libhdfs_data {
-    hdfsFS fs;
-    hdfsFile file;
-    long long length;
-    double *buf;
-};
-
-static void libhdfs_data_free(struct libhdfs_data *ldata)
-{
-    if (ldata->fs) {
-        free(ldata->buf);
-        if (ldata->file) {
-            hdfsCloseFile(ldata->fs, ldata->file);
-        }
-        hdfsDisconnect(ldata->fs);
-    }
-    free(ldata);
-}
-
-static int libhdfs_data_create_file(struct libhdfs_data *ldata,
-                                    const struct options *opts)
-{
-    int ret;
-    double *chunk = NULL;
-    long long offset = 0;
-
-    ldata->file = hdfsOpenFile(ldata->fs, opts->path, O_WRONLY, 0, 1, 0);
-    if (!ldata->file) {
-        ret = errno;
-        fprintf(stderr, "libhdfs_data_create_file: hdfsOpenFile(%s, "
-            "O_WRONLY) failed: error %d (%s)\n", opts->path, ret,
-            strerror(ret));
-        goto done;
-    }
-    ret = test_file_chunk_setup(&chunk);
-    if (ret)
-        goto done;
-    while (offset < opts->length) {
-        ret = hdfsWrite(ldata->fs, ldata->file, chunk, VECSUM_CHUNK_SIZE);
-        if (ret < 0) {
-            ret = errno;
-            fprintf(stderr, "libhdfs_data_create_file: got error %d (%s) at "
-                    "offset %lld of %s\n", ret, strerror(ret),
-                    offset, opts->path);
-            goto done;
-        } else if (ret < VECSUM_CHUNK_SIZE) {
-            fprintf(stderr, "libhdfs_data_create_file: got short write "
-                    "of %d at offset %lld of %s\n", ret, offset, opts->path);
-            goto done;
-        }
-        offset += VECSUM_CHUNK_SIZE;
-    }
-    ret = 0;
-done:
-    free(chunk);
-    if (ldata->file) {
-        if (hdfsCloseFile(ldata->fs, ldata->file)) {
-            fprintf(stderr, "libhdfs_data_create_file: hdfsCloseFile error.");
-            ret = EIO;
-        }
-        ldata->file = NULL;
-    }
-    return ret;
-}
-
-static struct libhdfs_data *libhdfs_data_create(const struct options *opts)
-{
-    struct libhdfs_data *ldata = NULL;
-    struct hdfsBuilder *builder = NULL;
-    hdfsFileInfo *pinfo = NULL;
-
-    ldata = calloc(1, sizeof(struct libhdfs_data));
-    if (!ldata) {
-        fprintf(stderr, "Failed to allocate libhdfs test data.\n");
-        goto error;
-    }
-    builder = hdfsNewBuilder();
-    if (!builder) {
-        fprintf(stderr, "Failed to create builder.\n");
-        goto error;
-    }
-    hdfsBuilderSetNameNode(builder, opts->rpc_address);
-    hdfsBuilderConfSetStr(builder,
-        "dfs.client.read.shortcircuit.skip.checksum", "true");
-    ldata->fs = hdfsBuilderConnect(builder);
-    if (!ldata->fs) {
-        fprintf(stderr, "Could not connect to default namenode!\n");
-        goto error;
-    }
-    pinfo = hdfsGetPathInfo(ldata->fs, opts->path);
-    if (!pinfo) {
-        int err = errno;
-        fprintf(stderr, "hdfsGetPathInfo(%s) failed: error %d (%s).  "
-                "Attempting to re-create file.\n",
-            opts->path, err, strerror(err));
-        if (libhdfs_data_create_file(ldata, opts))
-            goto error;
-    } else if (pinfo->mSize != opts->length) {
-        fprintf(stderr, "hdfsGetPathInfo(%s) failed: length was %lld, "
-                "but we want length %lld.  Attempting to re-create file.\n",
-                opts->path, (long long)pinfo->mSize, (long long)opts->length);
-        if (libhdfs_data_create_file(ldata, opts))
-            goto error;
-    }
-    ldata->file = hdfsOpenFile(ldata->fs, opts->path, O_RDONLY, 0, 0, 0);
-    if (!ldata->file) {
-        int err = errno;
-        fprintf(stderr, "hdfsOpenFile(%s) failed: error %d (%s)\n",
-            opts->path, err, strerror(err));
-        goto error;
-    }
-    ldata->length = opts->length;
-    return ldata;
-
-error:
-    if (pinfo)
-        hdfsFreeFileInfo(pinfo, 1);
-    if (ldata)
-        libhdfs_data_free(ldata);
-    return NULL;
-}
-
-static int check_byte_size(int byte_size, const char *const str)
-{
-    if (byte_size % sizeof(double)) {
-        fprintf(stderr, "%s is not a multiple "
-            "of sizeof(double)\n", str);
-        return EINVAL;
-    }
-    if ((byte_size / sizeof(double)) % DOUBLES_PER_LOOP_ITER) {
-        fprintf(stderr, "The number of doubles contained in "
-            "%s is not a multiple of DOUBLES_PER_LOOP_ITER\n",
-            str);
-        return EINVAL;
-    }
-    return 0;
-}
-
-#ifdef HAVE_INTEL_SSE_INTRINSICS
-
-#include <emmintrin.h>
-
-static double vecsum(const double *buf, int num_doubles)
-{
-    int i;
-    double hi, lo;
-    __m128d x0, x1, x2, x3, x4, x5, x6, x7;
-    __m128d sum0 = _mm_set_pd(0.0,0.0);
-    __m128d sum1 = _mm_set_pd(0.0,0.0);
-    __m128d sum2 = _mm_set_pd(0.0,0.0);
-    __m128d sum3 = _mm_set_pd(0.0,0.0);
-    __m128d sum4 = _mm_set_pd(0.0,0.0);
-    __m128d sum5 = _mm_set_pd(0.0,0.0);
-    __m128d sum6 = _mm_set_pd(0.0,0.0);
-    __m128d sum7 = _mm_set_pd(0.0,0.0);
-    for (i = 0; i < num_doubles; i+=DOUBLES_PER_LOOP_ITER) {
-        x0 = _mm_load_pd(buf + i + 0);
-        x1 = _mm_load_pd(buf + i + 2);
-        x2 = _mm_load_pd(buf + i + 4);
-        x3 = _mm_load_pd(buf + i + 6);
-        x4 = _mm_load_pd(buf + i + 8);
-        x5 = _mm_load_pd(buf + i + 10);
-        x6 = _mm_load_pd(buf + i + 12);
-        x7 = _mm_load_pd(buf + i + 14);
-        sum0 = _mm_add_pd(sum0, x0);
-        sum1 = _mm_add_pd(sum1, x1);
-        sum2 = _mm_add_pd(sum2, x2);
-        sum3 = _mm_add_pd(sum3, x3);
-        sum4 = _mm_add_pd(sum4, x4);
-        sum5 = _mm_add_pd(sum5, x5);
-        sum6 = _mm_add_pd(sum6, x6);
-        sum7 = _mm_add_pd(sum7, x7);
-    }
-    x0 = _mm_add_pd(sum0, sum1);
-    x1 = _mm_add_pd(sum2, sum3);
-    x2 = _mm_add_pd(sum4, sum5);
-    x3 = _mm_add_pd(sum6, sum7);
-    x4 = _mm_add_pd(x0, x1);
-    x5 = _mm_add_pd(x2, x3);
-    x6 = _mm_add_pd(x4, x5);
-    _mm_storeh_pd(&hi, x6);
-    _mm_storel_pd(&lo, x6);
-    return hi + lo;
-}
-
-#else
-
-static double vecsum(const double *buf, int num_doubles)
-{
-    int i;
-    double sum = 0.0;
-    for (i = 0; i < num_doubles; i++) {
-        sum += buf[i];
-    }
-    return sum;
-}
-
-#endif
-
-static int vecsum_zcr_loop(int pass, struct libhdfs_data *ldata,
-        struct hadoopRzOptions *zopts,
-        const struct options *opts)
-{
-    int32_t len;
-    double sum = 0.0;
-    const double *buf;
-    struct hadoopRzBuffer *rzbuf = NULL;
-    int ret;
-
-    while (1) {
-        rzbuf = hadoopReadZero(ldata->file, zopts, ZCR_READ_CHUNK_SIZE);
-        if (!rzbuf) {
-            ret = errno;
-            fprintf(stderr, "hadoopReadZero failed with error "
-                "code %d (%s)\n", ret, strerror(ret));
-            goto done;
-        }
-        buf = hadoopRzBufferGet(rzbuf);
-        if (!buf) break;
-        len = hadoopRzBufferLength(rzbuf);
-        if (len < ZCR_READ_CHUNK_SIZE) {
-            fprintf(stderr, "hadoopReadZero got a partial read "
-                "of length %d\n", len);
-            ret = EINVAL;
-            goto done;
-        }
-        sum += vecsum(buf,
-            ZCR_READ_CHUNK_SIZE / sizeof(double));
-        hadoopRzBufferFree(ldata->file, rzbuf);
-    }
-    printf("finished zcr pass %d.  sum = %g\n", pass, sum);
-    ret = 0;
-
-done:
-    if (rzbuf)
-        hadoopRzBufferFree(ldata->file, rzbuf);
-    return ret;
-}
-
-static int vecsum_zcr(struct libhdfs_data *ldata,
-        const struct options *opts)
-{
-    int ret, pass;
-    struct hadoopRzOptions *zopts = NULL;
-
-    zopts = hadoopRzOptionsAlloc();
-    if (!zopts) {
-        fprintf(stderr, "hadoopRzOptionsAlloc failed.\n");
-        ret = ENOMEM;
-        goto done;
-    }
-    if (hadoopRzOptionsSetSkipChecksum(zopts, 1)) {
-        ret = errno;
-        perror("hadoopRzOptionsSetSkipChecksum failed: ");
-        goto done;
-    }
-    if (hadoopRzOptionsSetByteBufferPool(zopts, NULL)) {
-        ret = errno;
-        perror("hadoopRzOptionsSetByteBufferPool failed: ");
-        goto done;
-    }
-    for (pass = 0; pass < opts->passes; ++pass) {
-        ret = vecsum_zcr_loop(pass, ldata, zopts, opts);
-        if (ret) {
-            fprintf(stderr, "vecsum_zcr_loop pass %d failed "
-                "with error %d\n", pass, ret);
-            goto done;
-        }
-        hdfsSeek(ldata->fs, ldata->file, 0);
-    }
-    ret = 0;
-done:
-    if (zopts)
-        hadoopRzOptionsFree(zopts);
-    return ret;
-}
-
-tSize hdfsReadFully(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
-{
-    uint8_t *buf = buffer;
-    tSize ret, nread = 0;
-
-    while (length > 0) {
-        ret = hdfsRead(fs, f, buf, length);
-        if (ret < 0) {
-            if (errno != EINTR) {
-                return -1;
-            }
-        }
-        if (ret == 0) {
-            break;
-        }
-        nread += ret;
-        length -= ret;
-        buf += ret;
-    }
-    return nread;
-}
-
-static int vecsum_normal_loop(int pass, const struct libhdfs_data *ldata,
-            const struct options *opts)
-{
-    double sum = 0.0;
-
-    while (1) {
-        int res = hdfsReadFully(ldata->fs, ldata->file, ldata->buf,
-                NORMAL_READ_CHUNK_SIZE);
-        if (res == 0) // EOF
-            break;
-        if (res < 0) {
-            int err = errno;
-            fprintf(stderr, "hdfsRead failed with error %d (%s)\n",
-                err, strerror(err));
-            return err;
-        }
-        if (res < NORMAL_READ_CHUNK_SIZE) {
-            fprintf(stderr, "hdfsRead got a partial read of "
-                "length %d\n", res);
-            return EINVAL;
-        }
-        sum += vecsum(ldata->buf,
-                  NORMAL_READ_CHUNK_SIZE / sizeof(double));
-    }
-    printf("finished normal pass %d.  sum = %g\n", pass, sum);
-    return 0;
-}
-
-static int vecsum_libhdfs(struct libhdfs_data *ldata,
-            const struct options *opts)
-{
-    int pass;
-
-    ldata->buf = malloc(NORMAL_READ_CHUNK_SIZE);
-    if (!ldata->buf) {
-        fprintf(stderr, "failed to malloc buffer of size %d\n",
-            NORMAL_READ_CHUNK_SIZE);
-        return ENOMEM;
-    }
-    for (pass = 0; pass < opts->passes; ++pass) {
-        int ret = vecsum_normal_loop(pass, ldata, opts);
-        if (ret) {
-            fprintf(stderr, "vecsum_normal_loop pass %d failed "
-                "with error %d\n", pass, ret);
-            return ret;
-        }
-        hdfsSeek(ldata->fs, ldata->file, 0);
-    }
-    return 0;
-}
-
-static void vecsum_local(struct local_data *cdata, const struct options *opts)
-{
-    int pass;
-
-    for (pass = 0; pass < opts->passes; pass++) {
-        double sum = vecsum(cdata->mmap, cdata->length / sizeof(double));
-        printf("finished vecsum_local pass %d.  sum = %g\n", pass, sum);
-    }
-}
-
-static long long vecsum_length(const struct options *opts,
-                const struct libhdfs_data *ldata)
-{
-    if (opts->ty == VECSUM_LOCAL) {
-        struct stat st_buf = { 0 };
-        if (stat(opts->path, &st_buf)) {
-            int err = errno;
-            fprintf(stderr, "vecsum_length: stat(%s) failed: "
-                "error %d (%s)\n", opts->path, err, strerror(err));
-            return -EIO;
-        }
-        return st_buf.st_size;
-    } else {
-        return ldata->length;
-    }
-}
-
-/*
- * vecsum is a microbenchmark which measures the speed of various ways of
- * reading from HDFS.  It creates a file containing floating-point 'doubles',
- * and computes the sum of all the doubles several times.  For some CPUs,
- * assembly optimizations are used for the summation (SSE, etc).
- */
-int main(void)
-{
-    int ret = 1;
-    struct options *opts = NULL;
-    struct local_data *cdata = NULL;
-    struct libhdfs_data *ldata = NULL;
-    struct stopwatch *watch = NULL;
-
-    if (check_byte_size(VECSUM_CHUNK_SIZE, "VECSUM_CHUNK_SIZE") ||
-        check_byte_size(ZCR_READ_CHUNK_SIZE,
-                "ZCR_READ_CHUNK_SIZE") ||
-        check_byte_size(NORMAL_READ_CHUNK_SIZE,
-                "NORMAL_READ_CHUNK_SIZE")) {
-        goto done;
-    }
-    opts = options_create();
-    if (!opts)
-        goto done;
-    if (opts->ty == VECSUM_LOCAL) {
-        cdata = local_data_create(opts);
-        if (!cdata)
-            goto done;
-    } else {
-        ldata = libhdfs_data_create(opts);
-        if (!ldata)
-            goto done;
-    }
-    watch = stopwatch_create();
-    if (!watch)
-        goto done;
-    switch (opts->ty) {
-    case VECSUM_LOCAL:
-        vecsum_local(cdata, opts);
-        ret = 0;
-        break;
-    case VECSUM_LIBHDFS:
-        ret = vecsum_libhdfs(ldata, opts);
-        break;
-    case VECSUM_ZCR:
-        ret = vecsum_zcr(ldata, opts);
-        break;
-    }
-    if (ret) {
-        fprintf(stderr, "vecsum failed with error %d\n", ret);
-        goto done;
-    }
-    ret = 0;
-done:
-    fprintf(stderr, "cleaning up...\n");
-    if (watch && (ret == 0)) {
-        long long length = vecsum_length(opts, ldata);
-        if (length >= 0) {
-            stopwatch_stop(watch, length * opts->passes);
-        }
-    }
-    if (cdata)
-        local_data_free(cdata);
-    if (ldata)
-        libhdfs_data_free(ldata);
-    if (opts)
-        options_free(opts);
-    return ret;
-}
-
-// vim: ts=4:sw=4:tw=79:et

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test_libhdfs_threaded.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test_libhdfs_threaded.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test_libhdfs_threaded.c
deleted file mode 100644
index 702430c..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test_libhdfs_threaded.c
+++ /dev/null
@@ -1,360 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "expect.h"
-#include "hdfs.h"
-#include "native_mini_dfs.h"
-#include "os/thread.h"
-
-#include <errno.h>
-#include <inttypes.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#define TO_STR_HELPER(X) #X
-#define TO_STR(X) TO_STR_HELPER(X)
-
-#define TLH_MAX_THREADS 100
-
-#define TLH_DEFAULT_BLOCK_SIZE 134217728
-
-static struct NativeMiniDfsCluster* tlhCluster;
-
-struct tlhThreadInfo {
-    /** Thread index */
-    int threadIdx;
-    /** 0 = thread was successful; error code otherwise */
-    int success;
-    /** thread identifier */
-    thread theThread;
-};
-
-static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs,
-                                     const char *username)
-{
-    int ret;
-    tPort port;
-    hdfsFS hdfs;
-    struct hdfsBuilder *bld;
-    
-    port = (tPort)nmdGetNameNodePort(cl);
-    if (port < 0) {
-        fprintf(stderr, "hdfsSingleNameNodeConnect: nmdGetNameNodePort "
-                "returned error %d\n", port);
-        return port;
-    }
-    bld = hdfsNewBuilder();
-    if (!bld)
-        return -ENOMEM;
-    hdfsBuilderSetForceNewInstance(bld);
-    hdfsBuilderSetNameNode(bld, "localhost");
-    hdfsBuilderSetNameNodePort(bld, port);
-    hdfsBuilderConfSetStr(bld, "dfs.block.size",
-                          TO_STR(TLH_DEFAULT_BLOCK_SIZE));
-    hdfsBuilderConfSetStr(bld, "dfs.blocksize",
-                          TO_STR(TLH_DEFAULT_BLOCK_SIZE));
-    if (username) {
-        hdfsBuilderSetUserName(bld, username);
-    }
-    hdfs = hdfsBuilderConnect(bld);
-    if (!hdfs) {
-        ret = -errno;
-        return ret;
-    }
-    *fs = hdfs;
-    return 0;
-}
-
-static int doTestGetDefaultBlockSize(hdfsFS fs, const char *path)
-{
-    int64_t blockSize;
-    int ret;
-
-    blockSize = hdfsGetDefaultBlockSize(fs);
-    if (blockSize < 0) {
-        ret = errno;
-        fprintf(stderr, "hdfsGetDefaultBlockSize failed with error %d\n", ret);
-        return ret;
-    } else if (blockSize != TLH_DEFAULT_BLOCK_SIZE) {
-        fprintf(stderr, "hdfsGetDefaultBlockSize got %"PRId64", but we "
-                "expected %d\n", blockSize, TLH_DEFAULT_BLOCK_SIZE);
-        return EIO;
-    }
-
-    blockSize = hdfsGetDefaultBlockSizeAtPath(fs, path);
-    if (blockSize < 0) {
-        ret = errno;
-        fprintf(stderr, "hdfsGetDefaultBlockSizeAtPath(%s) failed with "
-                "error %d\n", path, ret);
-        return ret;
-    } else if (blockSize != TLH_DEFAULT_BLOCK_SIZE) {
-        fprintf(stderr, "hdfsGetDefaultBlockSizeAtPath(%s) got "
-                "%"PRId64", but we expected %d\n", 
-                path, blockSize, TLH_DEFAULT_BLOCK_SIZE);
-        return EIO;
-    }
-    return 0;
-}
-
-struct tlhPaths {
-    char prefix[256];
-    char file1[256];
-    char file2[256];
-};
-
-static int setupPaths(const struct tlhThreadInfo *ti, struct tlhPaths *paths)
-{
-    memset(paths, 0, sizeof(*paths));
-    if (snprintf(paths->prefix, sizeof(paths->prefix), "/tlhData%04d",
-                 ti->threadIdx) >= sizeof(paths->prefix)) {
-        return ENAMETOOLONG;
-    }
-    if (snprintf(paths->file1, sizeof(paths->file1), "%s/file1",
-                 paths->prefix) >= sizeof(paths->file1)) {
-        return ENAMETOOLONG;
-    }
-    if (snprintf(paths->file2, sizeof(paths->file2), "%s/file2",
-                 paths->prefix) >= sizeof(paths->file2)) {
-        return ENAMETOOLONG;
-    }
-    return 0;
-}
-
-static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
-                                const struct tlhPaths *paths)
-{
-    char tmp[4096];
-    hdfsFile file;
-    int ret, expected, numEntries;
-    hdfsFileInfo *fileInfo;
-    struct hdfsReadStatistics *readStats = NULL;
-
-    if (hdfsExists(fs, paths->prefix) == 0) {
-        EXPECT_ZERO(hdfsDelete(fs, paths->prefix, 1));
-    }
-    EXPECT_ZERO(hdfsCreateDirectory(fs, paths->prefix));
-
-    EXPECT_ZERO(doTestGetDefaultBlockSize(fs, paths->prefix));
-
-    /* There should be no entry in the directory. */
-    errno = EACCES; // see if errno is set to 0 on success
-    EXPECT_NULL_WITH_ERRNO(hdfsListDirectory(fs, paths->prefix, &numEntries), 0);
-    if (numEntries != 0) {
-        fprintf(stderr, "hdfsListDirectory set numEntries to "
-                "%d on empty directory.", numEntries);
-    }
-
-    /* There should not be any file to open for reading. */
-    EXPECT_NULL(hdfsOpenFile(fs, paths->file1, O_RDONLY, 0, 0, 0));
-
-    /* hdfsOpenFile should not accept mode = 3 */
-    EXPECT_NULL(hdfsOpenFile(fs, paths->file1, 3, 0, 0, 0));
-
-    file = hdfsOpenFile(fs, paths->file1, O_WRONLY, 0, 0, 0);
-    EXPECT_NONNULL(file);
-
-    /* TODO: implement writeFully and use it here */
-    expected = (int)strlen(paths->prefix);
-    ret = hdfsWrite(fs, file, paths->prefix, expected);
-    if (ret < 0) {
-        ret = errno;
-        fprintf(stderr, "hdfsWrite failed and set errno %d\n", ret);
-        return ret;
-    }
-    if (ret != expected) {
-        fprintf(stderr, "hdfsWrite was supposed to write %d bytes, but "
-                "it wrote %d\n", ret, expected);
-        return EIO;
-    }
-    EXPECT_ZERO(hdfsFlush(fs, file));
-    EXPECT_ZERO(hdfsHSync(fs, file));
-    EXPECT_ZERO(hdfsCloseFile(fs, file));
-
-    /* There should be 1 entry in the directory. */
-    EXPECT_NONNULL(hdfsListDirectory(fs, paths->prefix, &numEntries));
-    if (numEntries != 1) {
-        fprintf(stderr, "hdfsListDirectory set numEntries to "
-                "%d on directory containing 1 file.", numEntries);
-    }
-
-    /* Let's re-open the file for reading */
-    file = hdfsOpenFile(fs, paths->file1, O_RDONLY, 0, 0, 0);
-    EXPECT_NONNULL(file);
-
-    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
-    errno = 0;
-    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalBytesRead);
-    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalLocalBytesRead);
-    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalShortCircuitBytesRead);
-    hdfsFileFreeReadStatistics(readStats);
-    /* TODO: implement readFully and use it here */
-    ret = hdfsRead(fs, file, tmp, sizeof(tmp));
-    if (ret < 0) {
-        ret = errno;
-        fprintf(stderr, "hdfsRead failed and set errno %d\n", ret);
-        return ret;
-    }
-    if (ret != expected) {
-        fprintf(stderr, "hdfsRead was supposed to read %d bytes, but "
-                "it read %d\n", ret, expected);
-        return EIO;
-    }
-    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
-    errno = 0;
-    EXPECT_UINT64_EQ((uint64_t)expected, readStats->totalBytesRead);
-    hdfsFileFreeReadStatistics(readStats);
-    EXPECT_ZERO(hdfsFileClearReadStatistics(file));
-    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
-    EXPECT_UINT64_EQ((uint64_t)0, readStats->totalBytesRead);
-    hdfsFileFreeReadStatistics(readStats);
-    EXPECT_ZERO(memcmp(paths->prefix, tmp, expected));
-    EXPECT_ZERO(hdfsCloseFile(fs, file));
-
-    // TODO: Non-recursive delete should fail?
-    //EXPECT_NONZERO(hdfsDelete(fs, prefix, 0));
-    EXPECT_ZERO(hdfsCopy(fs, paths->file1, fs, paths->file2));
-
-    EXPECT_ZERO(hdfsChown(fs, paths->file2, NULL, NULL));
-    EXPECT_ZERO(hdfsChown(fs, paths->file2, NULL, "doop"));
-    fileInfo = hdfsGetPathInfo(fs, paths->file2);
-    EXPECT_NONNULL(fileInfo);
-    EXPECT_ZERO(strcmp("doop", fileInfo->mGroup));
-    EXPECT_ZERO(hdfsFileIsEncrypted(fileInfo));
-    hdfsFreeFileInfo(fileInfo, 1);
-
-    EXPECT_ZERO(hdfsChown(fs, paths->file2, "ha", "doop2"));
-    fileInfo = hdfsGetPathInfo(fs, paths->file2);
-    EXPECT_NONNULL(fileInfo);
-    EXPECT_ZERO(strcmp("ha", fileInfo->mOwner));
-    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
-    hdfsFreeFileInfo(fileInfo, 1);
-
-    EXPECT_ZERO(hdfsChown(fs, paths->file2, "ha2", NULL));
-    fileInfo = hdfsGetPathInfo(fs, paths->file2);
-    EXPECT_NONNULL(fileInfo);
-    EXPECT_ZERO(strcmp("ha2", fileInfo->mOwner));
-    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
-    hdfsFreeFileInfo(fileInfo, 1);
-
-    snprintf(tmp, sizeof(tmp), "%s/nonexistent-file-name", paths->prefix);
-    EXPECT_NEGATIVE_ONE_WITH_ERRNO(hdfsChown(fs, tmp, "ha3", NULL), ENOENT);
-    return 0;
-}
-
-static int testHdfsOperationsImpl(struct tlhThreadInfo *ti)
-{
-    hdfsFS fs = NULL;
-    struct tlhPaths paths;
-
-    fprintf(stderr, "testHdfsOperations(threadIdx=%d): starting\n",
-        ti->threadIdx);
-    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, NULL));
-    EXPECT_ZERO(setupPaths(ti, &paths));
-    // test some operations
-    EXPECT_ZERO(doTestHdfsOperations(ti, fs, &paths));
-    EXPECT_ZERO(hdfsDisconnect(fs));
-    // reconnect as user "foo" and verify that we get permission errors
-    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, "foo"));
-    EXPECT_NEGATIVE_ONE_WITH_ERRNO(hdfsChown(fs, paths.file1, "ha3", NULL), EACCES);
-    EXPECT_ZERO(hdfsDisconnect(fs));
-    // reconnect to do the final delete.
-    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, NULL));
-    EXPECT_ZERO(hdfsDelete(fs, paths.prefix, 1));
-    EXPECT_ZERO(hdfsDisconnect(fs));
-    return 0;
-}
-
-static void testHdfsOperations(void *v)
-{
-    struct tlhThreadInfo *ti = (struct tlhThreadInfo*)v;
-    int ret = testHdfsOperationsImpl(ti);
-    ti->success = ret;
-}
-
-static int checkFailures(struct tlhThreadInfo *ti, int tlhNumThreads)
-{
-    int i, threadsFailed = 0;
-    const char *sep = "";
-
-    for (i = 0; i < tlhNumThreads; i++) {
-        if (ti[i].success != 0) {
-            threadsFailed = 1;
-        }
-    }
-    if (!threadsFailed) {
-        fprintf(stderr, "testLibHdfs: all threads succeeded.  SUCCESS.\n");
-        return EXIT_SUCCESS;
-    }
-    fprintf(stderr, "testLibHdfs: some threads failed: [");
-    for (i = 0; i < tlhNumThreads; i++) {
-        if (ti[i].success != 0) {
-            fprintf(stderr, "%s%d", sep, i);
-            sep = ", "; 
-        }
-    }
-    fprintf(stderr, "].  FAILURE.\n");
-    return EXIT_FAILURE;
-}
-
-/**
- * Test that we can write a file with libhdfs and then read it back
- */
-int main(void)
-{
-    int i, tlhNumThreads;
-    const char *tlhNumThreadsStr;
-    struct tlhThreadInfo ti[TLH_MAX_THREADS];
-    struct NativeMiniDfsConf conf = {
-        1, /* doFormat */
-    };
-
-    tlhNumThreadsStr = getenv("TLH_NUM_THREADS");
-    if (!tlhNumThreadsStr) {
-        tlhNumThreadsStr = "3";
-    }
-    tlhNumThreads = atoi(tlhNumThreadsStr);
-    if ((tlhNumThreads <= 0) || (tlhNumThreads > TLH_MAX_THREADS)) {
-        fprintf(stderr, "testLibHdfs: must have a number of threads "
-                "between 1 and %d inclusive, not %d\n",
-                TLH_MAX_THREADS, tlhNumThreads);
-        return EXIT_FAILURE;
-    }
-    memset(&ti[0], 0, sizeof(ti));
-    for (i = 0; i < tlhNumThreads; i++) {
-        ti[i].threadIdx = i;
-    }
-
-    tlhCluster = nmdCreate(&conf);
-    EXPECT_NONNULL(tlhCluster);
-    EXPECT_ZERO(nmdWaitClusterUp(tlhCluster));
-
-    for (i = 0; i < tlhNumThreads; i++) {
-        ti[i].theThread.start = testHdfsOperations;
-        ti[i].theThread.arg = &ti[i];
-        EXPECT_ZERO(threadCreate(&ti[i].theThread));
-    }
-    for (i = 0; i < tlhNumThreads; i++) {
-        EXPECT_ZERO(threadJoin(&ti[i].theThread));
-    }
-
-    EXPECT_ZERO(nmdShutdown(tlhCluster));
-    nmdFree(tlhCluster);
-    return checkFailures(ti, tlhNumThreads);
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test_native_mini_dfs.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test_native_mini_dfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test_native_mini_dfs.c
deleted file mode 100644
index 850b0fc..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test_native_mini_dfs.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "expect.h"
-#include "native_mini_dfs.h"
-
-#include <errno.h>
-
-static struct NativeMiniDfsConf conf = {
-    1, /* doFormat */
-};
-
-/**
- * Test that we can create a MiniDFSCluster and shut it down.
- */
-int main(void) {
-    struct NativeMiniDfsCluster* cl;
-    
-    cl = nmdCreate(&conf);
-    EXPECT_NONNULL(cl);
-    EXPECT_ZERO(nmdWaitClusterUp(cl));
-    EXPECT_ZERO(nmdShutdown(cl));
-    nmdFree(cl);
-
-    return 0;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 858325f..f6cee76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1568,6 +1568,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-9167. Update pom.xml in other modules to depend on hdfs-client instead
     of hdfs. (Mingliang Liu via wheat9)
 
+    HDFS-9253. Refactor tests of libhdfs into a directory. (wheat9)
+
   BUG FIXES
 
     HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
