Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c?rev=1616817&r1=1616816&r2=1616817&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c Fri Aug  8 16:29:07 2014
@@ -18,6 +18,7 @@
 
 #include "hdfs.h"
 #include "hdfs_test.h"
+#include "platform.h"
 
 #include <inttypes.h>
 #include <jni.h>
@@ -28,12 +29,13 @@
 #include <unistd.h>
 
 void permission_disp(short permissions, char *rtr) {
-  rtr[9] = '\0';
   int i;
+  short permissionsId;
+  char* perm;
+  rtr[9] = '\0';
   for(i=2;i>=0;i--)
     {
-      short permissionsId = permissions >> (i * 3) & (short)7;
-      char* perm;
+      permissionsId = permissions >> (i * 3) & (short)7;
       switch(permissionsId) {
       case 7:
        perm = "rwx"; break;
@@ -60,35 +62,56 @@ void permission_disp(short permissions, 
 }
 
 int main(int argc, char **argv) {
-    char buffer[32];
-    tSize num_written_bytes;
+    const char *writePath = "/tmp/testfile.txt";
+    const char *fileContents = "Hello, World!";
+    const char *readPath = "/tmp/testfile.txt";
+    const char *srcPath = "/tmp/testfile.txt";
+    const char *dstPath = "/tmp/testfile2.txt";
+    const char *slashTmp = "/tmp";
+    const char *newDirectory = "/tmp/newdir";
+    const char *newOwner = "root";
+    const char *tuser = "nobody";
+    const char *appendPath = "/tmp/appends";
+    const char *userPath = "/tmp/usertestfile.txt";
+
+    char buffer[32], buffer2[256], rdbuffer[32];
+    tSize num_written_bytes, num_read_bytes;
+    hdfsFS fs, lfs;
+    hdfsFile writeFile, readFile, localFile, appendFile, userFile;
+    tOffset currentPos, seekPos;
+    int exists, totalResult, result, numEntries, i, j;
+    const char *resp;
+    hdfsFileInfo *fileInfo, *fileList, *finfo;
+    char *buffer3;
+    char permissions[10];
+    char ***hosts;
+    short newPerm = 0666;
+    tTime newMtime, newAtime;
 
-    hdfsFS fs = hdfsConnectNewInstance("default", 0);
+    fs = hdfsConnectNewInstance("default", 0);
     if(!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
         exit(-1);
     } 
 
-    hdfsFS lfs = hdfsConnectNewInstance(NULL, 0);
+    lfs = hdfsConnectNewInstance(NULL, 0);
     if(!lfs) {
         fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
         exit(-1);
     } 
 
-    const char* writePath = "/tmp/testfile.txt";
-    const char* fileContents = "Hello, World!";
-
     {
         //Write tests
         
-        hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+        writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
         if(!writeFile) {
             fprintf(stderr, "Failed to open %s for writing!\n", writePath);
             exit(-1);
         }
         fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
         num_written_bytes =
-          hdfsWrite(fs, writeFile, (void*)fileContents, strlen(fileContents)+1);
+          hdfsWrite(fs, writeFile, (void*)fileContents,
+            (tSize)(strlen(fileContents)+1));
         if (num_written_bytes != strlen(fileContents) + 1) {
           fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
                   (int)(strlen(fileContents) + 1), (int)num_written_bytes);
@@ -96,7 +119,7 @@ int main(int argc, char **argv) {
         }
         fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
-        tOffset currentPos = -1;
+        currentPos = -1;
         if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
             fprintf(stderr, 
                     "Failed to get current file position correctly! Got %ld!\n",
@@ -123,15 +146,14 @@ int main(int argc, char **argv) {
 
     {
         //Read tests
-        
-        const char* readPath = "/tmp/testfile.txt";
-        int exists = hdfsExists(fs, readPath);
+
+        exists = hdfsExists(fs, readPath);
 
         if (exists) {
           fprintf(stderr, "Failed to validate existence of %s\n", readPath);
           exit(-1);
         }
 
-        hdfsFile readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
+        readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
         if (!readFile) {
             fprintf(stderr, "Failed to open %s for reading!\n", readPath);
             exit(-1);
@@ -146,13 +168,13 @@ int main(int argc, char **argv) {
 
         fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
 
-        tOffset seekPos = 1;
+        seekPos = 1;
         if(hdfsSeek(fs, readFile, seekPos)) {
             fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
             exit(-1);
         }
 
-        tOffset currentPos = -1;
+        currentPos = -1;
         if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
             fprintf(stderr, 
                     "Failed to get current file position correctly! Got %ld!\n",
@@ -175,7 +197,7 @@ int main(int argc, char **argv) {
             exit(-1);
         }
         memset(buffer, 0, sizeof(buffer));
-        tSize num_read_bytes = hdfsRead(fs, readFile, (void*)buffer, 
+        num_read_bytes = hdfsRead(fs, readFile, (void*)buffer, 
                 sizeof(buffer));
         if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
             fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n",
@@ -208,14 +230,14 @@ int main(int argc, char **argv) {
         hdfsCloseFile(fs, readFile);
 
         // Test correct behaviour for unsupported filesystems
-        hdfsFile localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+        localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
         if(!localFile) {
             fprintf(stderr, "Failed to open %s for writing!\n", writePath);
             exit(-1);
         }
 
         num_written_bytes = hdfsWrite(lfs, localFile, (void*)fileContents,
-                                      strlen(fileContents) + 1);
+                                      (tSize)(strlen(fileContents) + 1));
 
         hdfsCloseFile(lfs, localFile);
         localFile = hdfsOpenFile(lfs, writePath, O_RDONLY, 0, 0, 0);
@@ -229,50 +251,43 @@ int main(int argc, char **argv) {
         hdfsCloseFile(lfs, localFile);
     }
 
-    int totalResult = 0;
-    int result = 0;
+    totalResult = 0;
+    result = 0;
     {
         //Generic file-system operations
 
-        const char* srcPath = "/tmp/testfile.txt";
-        const char* dstPath = "/tmp/testfile2.txt";
-
-        fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
-        fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
-        const char* slashTmp = "/tmp";
-        const char* newDirectory = "/tmp/newdir";
-        fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
-        fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
-        char buffer[256];
-        const char *resp;
-        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
         totalResult += (resp ? 0 : 1);
-        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
         totalResult += (resp ? 0 : 1);
 
         fprintf(stderr, "hdfsGetDefaultBlockSize: %ld\n", hdfsGetDefaultBlockSize(fs));
         fprintf(stderr, "hdfsGetCapacity: %ld\n", hdfsGetCapacity(fs));
         fprintf(stderr, "hdfsGetUsed: %ld\n", hdfsGetUsed(fs));
 
-        hdfsFileInfo *fileInfo = NULL;
+        fileInfo = NULL;
         if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
             fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
             fprintf(stderr, "Name: %s, ", fileInfo->mName);
@@ -283,7 +298,6 @@ int main(int argc, char **argv) {
             fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod)); 
             fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
             fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
-            char permissions[10];
             permission_disp(fileInfo->mPermissions, permissions);
             fprintf(stderr, "Permissions: %d (%s)\n", fileInfo->mPermissions, permissions);
             hdfsFreeFileInfo(fileInfo, 1);
@@ -292,10 +306,8 @@ int main(int argc, char **argv) {
             fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
         }
 
-        hdfsFileInfo *fileList = 0;
-        int numEntries = 0;
+        fileList = 0;
         if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
-            int i = 0;
             for(i=0; i < numEntries; ++i) {
                 fprintf(stderr, "Name: %s, ", fileList[i].mName);
                 fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
@@ -305,7 +317,6 @@ int main(int argc, char **argv) {
                 fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
                 fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
                 fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
-                char permissions[10];
                 permission_disp(fileList[i].mPermissions, permissions);
                 fprintf(stderr, "Permissions: %d (%s)\n", fileList[i].mPermissions, permissions);
             }
@@ -319,12 +330,12 @@ int main(int argc, char **argv) {
             }
         }
 
-        char*** hosts = hdfsGetHosts(fs, srcPath, 0, 1);
+        hosts = hdfsGetHosts(fs, srcPath, 0, 1);
         if(hosts) {
             fprintf(stderr, "hdfsGetHosts - SUCCESS! ... \n");
-            int i=0; 
+            i=0; 
             while(hosts[i]) {
-                int j = 0;
+                j = 0;
                 while(hosts[i][j]) {
                     fprintf(stderr, 
                             "\thosts[%d][%d] - %s\n", i, j, hosts[i][j]);
@@ -337,131 +348,129 @@ int main(int argc, char **argv) {
             fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
         }
        
-        char *newOwner = "root";
         // setting tmp dir to 777 so later when connectAsUser nobody, we can write to it
-        short newPerm = 0666;
 
         // chown write
-        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
         // chmod write
-        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
         sleep(2);
-        tTime newMtime = time(NULL);
-        tTime newAtime = time(NULL);
+        newMtime = time(NULL);
+        newAtime = time(NULL);
 
         // utime write
-        fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) != 0 ? "Failed!" : "Success!"));
 
         totalResult += result;
 
         // chown/chmod/utime read
-        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
+        finfo = hdfsGetPathInfo(fs, writePath);
 
-        fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner) != 0)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner))) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
-        fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
         // will later use /tmp/ as a different user so enable it
-        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
         fprintf(stderr,"newMTime=%ld\n",newMtime);
         fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);
 
-        fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
 
         // No easy way to turn on access times from hdfs_test right now
-        //        fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) ? "Failed!" : "Success!"));
+        //        fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) != 0 ? "Failed!" : "Success!"));
         //        totalResult += result;
 
         hdfsFreeFileInfo(finfo, 1);
 
         // Clean up
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath, 1)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath, 1)) ? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath, 1)) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) ? "Success!" : "Failed!"));
+        fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) != 0 ? "Success!" : "Failed!"));
         totalResult += (result ? 0 : 1);
     }
 
     {
       // TEST APPENDS
-      const char *writePath = "/tmp/appends";
 
       // CREATE
-      hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY, 0, 0, 0);
-      if(!writeFile) {
-        fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+      appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
+      if(!appendFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
        exit(-1);
      }
-      fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+      fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
 
-      char* buffer = "Hello,";
-      tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer));
+      buffer3 = "Hello,";
+      num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
+        (tSize)strlen(buffer3));
      fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
-      if (hdfsFlush(fs, writeFile)) {
-        fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
+      if (hdfsFlush(fs, appendFile)) {
+        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
        exit(-1);
      }
-      fprintf(stderr, "Flushed %s successfully!\n", writePath); 
+      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 
 
-      hdfsCloseFile(fs, writeFile);
+      hdfsCloseFile(fs, appendFile);
 
      // RE-OPEN
-      writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_APPEND, 0, 0, 0);
-      if(!writeFile) {
-        fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+      appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY|O_APPEND, 0, 0, 0);
+      if(!appendFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
        exit(-1);
      }
-      fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+      fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
 
-      buffer = " World";
-      num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer) + 1);
+      buffer3 = " World";
+      num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
+        (tSize)(strlen(buffer3) + 1));
      fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
-      if (hdfsFlush(fs, writeFile)) {
-        fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
+      if (hdfsFlush(fs, appendFile)) {
+        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
        exit(-1);
      }
-      fprintf(stderr, "Flushed %s successfully!\n", writePath); 
+      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 
 
-      hdfsCloseFile(fs, writeFile);
+      hdfsCloseFile(fs, appendFile);
 
      // CHECK size
-      hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
-      fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == strlen("Hello, World") + 1)) ? "Success!" : "Failed!"));
+      finfo = hdfsGetPathInfo(fs, appendPath);
+      fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == (tOffset)(strlen("Hello, World") + 1))) == 1 ? "Success!" : "Failed!"));
      totalResult += (result ? 0 : 1);
 
      // READ and check data
-      hdfsFile readFile = hdfsOpenFile(fs, writePath, O_RDONLY, 0, 0, 0);
+      readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
      if (!readFile) {
-        fprintf(stderr, "Failed to open %s for reading!\n", writePath);
+        fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
        exit(-1);
      }
 
-      char rdbuffer[32];
-      tSize num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
+      num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
      fprintf(stderr, "Read following %d bytes:\n%s\n", 
              num_read_bytes, rdbuffer);
 
-      fprintf(stderr, "read == Hello, World %s\n", (result = (strcmp(rdbuffer, "Hello, World") == 0)) ? "Success!" : "Failed!");
+      fprintf(stderr, "read == Hello, World %s\n", ((result = (strcmp(rdbuffer, "Hello, World"))) == 0 ? "Success!" : "Failed!"));
 
      hdfsCloseFile(fs, readFile);
 
@@ -478,36 +487,33 @@ int main(int argc, char **argv) {
     // the actual fs user capabilities. Thus just create a file and read
     // the owner is correct.
 
-    const char *tuser = "nobody";
-    const char* writePath = "/tmp/usertestfile.txt";
-
     fs = hdfsConnectAsUserNewInstance("default", 0, tuser);
     if(!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
         exit(-1);
     } 
 
-        hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
-        if(!writeFile) {
-            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+        userFile = hdfsOpenFile(fs, userPath, O_WRONLY|O_CREAT, 0, 0, 0);
+        if(!userFile) {
+            fprintf(stderr, "Failed to open %s for writing!\n", userPath);
             exit(-1);
         }
-        fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+        fprintf(stderr, "Opened %s for writing successfully...\n", userPath);
 
-        char* buffer = "Hello, World!";
-        tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
+        num_written_bytes = hdfsWrite(fs, userFile, (void*)fileContents,
+          (tSize)(strlen(fileContents)+1));
         fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
 
-        if (hdfsFlush(fs, writeFile)) {
-            fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
+        if (hdfsFlush(fs, userFile)) {
+            fprintf(stderr, "Failed to 'flush' %s\n", userPath); 
             exit(-1);
         }
-        fprintf(stderr, "Flushed %s successfully!\n", writePath); 
+        fprintf(stderr, "Flushed %s successfully!\n", userPath); 
 
-        hdfsCloseFile(fs, writeFile);
+        hdfsCloseFile(fs, userFile);
 
-        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
-        fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser) != 0)) ? "Failed!" : "Success!"));
+        finfo = hdfsGetPathInfo(fs, userPath);
+        fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser))) != 0 ? "Failed!" : "Success!"));
         totalResult += result;
     }
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c?rev=1616817&r1=1616816&r2=1616817&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c Fri Aug  8 16:29:07 2014
@@ -22,35 +22,38 @@
 #include <stdlib.h>
 
 int main(int argc, char **argv) {
+    hdfsFS fs;
+    const char *rfile = argv[1];
+    tSize bufferSize = strtoul(argv[3], NULL, 10);
+    hdfsFile readFile;
+    char* buffer;
+    tSize curSize;
 
     if (argc != 4) {
         fprintf(stderr, "Usage: hdfs_read <filename> <filesize> <buffersize>\n");
         exit(-1);
     }
     
-    hdfsFS fs = hdfsConnect("default", 0);
+    fs = hdfsConnect("default", 0);
     if (!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
         exit(-1);
     } 
-
-    const char* rfile = argv[1];
-    tSize bufferSize = strtoul(argv[3], NULL, 10);
-   
-    hdfsFile readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
+ 
+    readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
     if (!readFile) {
         fprintf(stderr, "Failed to open %s for writing!\n", rfile);
         exit(-2);
     }
 
     // data to be written to the file
-    char* buffer = malloc(sizeof(char) * bufferSize);
+    buffer = malloc(sizeof(char) * bufferSize);
     if(buffer == NULL) {
         return -2;
     }
     
     // read from the file
-    tSize curSize = bufferSize;
+    curSize = bufferSize;
     for (; curSize == bufferSize;) {
         curSize = hdfsRead(fs, readFile, (void*)buffer, curSize);
     }

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c?rev=1616817&r1=1616816&r2=1616817&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c Fri Aug  8 16:29:07 2014
@@ -21,23 +21,31 @@
 #include <limits.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <sys/types.h>
 
 int main(int argc, char **argv) {
+    hdfsFS fs;
+    const char *writeFileName = argv[1];
+    off_t fileTotalSize = strtoul(argv[2], NULL, 10);
+    long long tmpBufferSize = strtoul(argv[3], NULL, 10);
+    tSize bufferSize;
+    hdfsFile writeFile;
+    char* buffer;
+    int i;
+    off_t nrRemaining;
+    tSize curSize;
+    tSize written;
 
     if (argc != 4) {
         fprintf(stderr, "Usage: hdfs_write <filename> <filesize> <buffersize>\n");
         exit(-1);
     }
     
-    hdfsFS fs = hdfsConnect("default", 0);
+    fs = hdfsConnect("default", 0);
     if (!fs) {
         fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
         exit(-1);
     } 
 
-    const char* writeFileName = argv[1];
-    off_t fileTotalSize = strtoul(argv[2], NULL, 10);
-    long long tmpBufferSize = strtoul(argv[3], NULL, 10);
-
     // sanity check
     if(fileTotalSize == ULONG_MAX && errno == ERANGE) {
@@ -51,30 +59,27 @@ int main(int argc, char **argv) {
       exit(-3);
     }
 
-    tSize bufferSize = tmpBufferSize;
+    bufferSize = (tSize)tmpBufferSize;
 
-    hdfsFile writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 0, 0);
+    writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 0, 0);
     if (!writeFile) {
         fprintf(stderr, "Failed to open %s for writing!\n", writeFileName);
         exit(-2);
     }
 
     // data to be written to the file
-    char* buffer = malloc(sizeof(char) * bufferSize);
+    buffer = malloc(sizeof(char) * bufferSize);
     if(buffer == NULL) {
         fprintf(stderr, "Could not allocate buffer of size %d\n", bufferSize);
         return -2;
     }
-    int i = 0;
     for (i=0; i < bufferSize; ++i) {
         buffer[i] = 'a' + (i%26);
     }
 
     // write to the file
-    off_t nrRemaining;
     for (nrRemaining = fileTotalSize; nrRemaining > 0; nrRemaining -= bufferSize ) {
-      tSize curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining; 
-      tSize written;
+      curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining; 
      if ((written = hdfsWrite(fs, writeFile, (void*)buffer, curSize)) != curSize) {
        fprintf(stderr, "ERROR: hdfsWrite returned an error on write: %d\n", written);
        exit(-3);

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c?rev=1616817&r1=1616816&r2=1616817&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c Fri Aug  8 16:29:07 2014
@@ -19,12 +19,12 @@
 #include "expect.h"
 #include "hdfs.h"
 #include "native_mini_dfs.h"
+#include "platform.h"
 
 #include <errno.h>
 #include <inttypes.h>
-#include <semaphore.h>
-#include <pthread.h>
 #include <unistd.h>
+#include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -53,7 +53,7 @@ static uint8_t *getZeroCopyBlockData(int
         exit(1);
     }
     for (i = 0; i < TEST_ZEROCOPY_FULL_BLOCK_SIZE; i++) {
-      buf[i] = blockIdx + (i % 17);
+      buf[i] = (uint8_t)(blockIdx + (i % 17));
     }
     return buf;
 }
@@ -69,18 +69,6 @@ static int getZeroCopyBlockLen(int block
     }
 }
 
-static void printBuf(const uint8_t *buf, size_t len) __attribute__((unused));
-
-static void printBuf(const uint8_t *buf, size_t len) 
-{
-  size_t i;
-
-  for (i = 0; i < len; i++) {
-    fprintf(stderr, "%02x", buf[i]);
-  }
-  fprintf(stderr, "\n");
-}
-
 static int doTestZeroCopyReads(hdfsFS fs, const char *fileName)
 {
     hdfsFile file = NULL;
@@ -127,8 +115,9 @@ static int doTestZeroCopyReads(hdfsFS fs
     EXPECT_NONNULL(block);
     EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer), SMALL_READ_LEN));
     hadoopRzBufferFree(file, buffer);
-    EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
-                  hdfsTell(fs, file));
+    EXPECT_INT64_EQ(
+          (int64_t)TEST_ZEROCOPY_FULL_BLOCK_SIZE + (int64_t)SMALL_READ_LEN,
+          hdfsTell(fs, file));
     EXPECT_ZERO(expectFileStats(file,
           TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
           TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
@@ -165,7 +154,7 @@ static int doTestZeroCopyReads(hdfsFS fs
     free(block);
     block = getZeroCopyBlockData(2);
     EXPECT_NONNULL(block);
-    EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer) +
+    EXPECT_ZERO(memcmp(block, (uint8_t*)hadoopRzBufferGet(buffer) +
           (TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN), SMALL_READ_LEN));
     hadoopRzBufferFree(file, buffer);
@@ -219,8 +208,10 @@ int main(void)
 {
     int port;
     struct NativeMiniDfsConf conf = {
-        .doFormat = 1,
-        .configureShortCircuit = 1,
+        1, /* doFormat */
+        0, /* webhdfsEnabled */
+        0, /* namenodeHttpPort */
+        1, /* configureShortCircuit */
     };
     char testFileName[TEST_FILE_NAME_LENGTH];
     hdfsFS fs;

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c?rev=1616817&r1=1616816&r2=1616817&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c Fri Aug  8 16:29:07 2014
@@ -19,11 +19,11 @@
 #include "expect.h"
 #include "hdfs.h"
 #include "native_mini_dfs.h"
+#include "os/thread.h"
 
 #include <errno.h>
 #include <inttypes.h>
-#include <semaphore.h>
-#include <pthread.h>
+#include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -35,8 +35,6 @@
 
 #define TLH_DEFAULT_BLOCK_SIZE 134217728
 
-static sem_t tlhSem;
-
 static struct NativeMiniDfsCluster* tlhCluster;
 
 struct tlhThreadInfo {
@@ -44,18 +42,19 @@ struct tlhThreadInfo {
     int threadIdx;
     /** 0 = thread was successful; error code otherwise */
     int success;
-    /** pthread identifier */
-    pthread_t thread;
+    /** thread identifier */
+    thread theThread;
 };
 
 static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs,
                                      const char *username)
 {
-    int ret, port;
+    int ret;
+    tPort port;
     hdfsFS hdfs;
     struct hdfsBuilder *bld;
     
-    port = nmdGetNameNodePort(cl);
+    port = (tPort)nmdGetNameNodePort(cl);
     if (port < 0) {
         fprintf(stderr, "hdfsSingleNameNodeConnect: nmdGetNameNodePort "
                 "returned error %d\n", port);
@@ -164,7 +163,7 @@ static int doTestHdfsOperations(struct t
     EXPECT_NONNULL(file);
 
     /* TODO: implement writeFully and use it here */
-    expected = strlen(paths->prefix);
+    expected = (int)strlen(paths->prefix);
     ret = hdfsWrite(fs, file, paths->prefix, expected);
     if (ret < 0) {
         ret = errno;
@@ -186,9 +185,9 @@ static int doTestHdfsOperations(struct t
 
     EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
     errno = 0;
-    EXPECT_ZERO(readStats->totalBytesRead);
-    EXPECT_ZERO(readStats->totalLocalBytesRead);
-    EXPECT_ZERO(readStats->totalShortCircuitBytesRead);
+    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalBytesRead);
+    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalLocalBytesRead);
+    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalShortCircuitBytesRead);
     hdfsFileFreeReadStatistics(readStats);
     /* TODO: implement readFully and use it here */
     ret = hdfsRead(fs, file, tmp, sizeof(tmp));
@@ -204,7 +203,7 @@ static int doTestHdfsOperations(struct t
     }
     EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
     errno = 0;
-    EXPECT_INT_EQ(expected, readStats->totalBytesRead);
+    EXPECT_UINT64_EQ((uint64_t)expected, readStats->totalBytesRead);
     hdfsFileFreeReadStatistics(readStats);
     EXPECT_ZERO(memcmp(paths->prefix, tmp, expected));
     EXPECT_ZERO(hdfsCloseFile(fs, file));
@@ -262,12 +261,11 @@ static int testHdfsOperationsImpl(struct
     return 0;
 }
 
-static void *testHdfsOperations(void *v)
+static void testHdfsOperations(void *v)
 {
     struct tlhThreadInfo *ti = (struct tlhThreadInfo*)v;
     int ret = testHdfsOperationsImpl(ti);
     ti->success = ret;
-    return NULL;
 }
 
 static int checkFailures(struct tlhThreadInfo *ti, int tlhNumThreads)
@@ -304,7 +302,7 @@ int main(void)
     const char *tlhNumThreadsStr;
     struct tlhThreadInfo ti[TLH_MAX_THREADS];
     struct NativeMiniDfsConf conf = {
-        .doFormat = 1,
+        1, /* doFormat */
     };
 
     tlhNumThreadsStr = getenv("TLH_NUM_THREADS");
@@ -323,21 +321,20 @@ int main(void)
         ti[i].threadIdx = i;
     }
 
-    EXPECT_ZERO(sem_init(&tlhSem, 0, tlhNumThreads));
     tlhCluster = nmdCreate(&conf);
     EXPECT_NONNULL(tlhCluster);
     EXPECT_ZERO(nmdWaitClusterUp(tlhCluster));
 
     for (i = 0; i < tlhNumThreads; i++) {
-        EXPECT_ZERO(pthread_create(&ti[i].thread, NULL,
-            testHdfsOperations, &ti[i]));
+        ti[i].theThread.start = testHdfsOperations;
+        ti[i].theThread.arg = &ti[i];
+        EXPECT_ZERO(threadCreate(&ti[i].theThread));
     }
     for (i = 0; i < tlhNumThreads; i++) {
-        EXPECT_ZERO(pthread_join(ti[i].thread, NULL));
+        EXPECT_ZERO(threadJoin(&ti[i].theThread));
     }
 
     EXPECT_ZERO(nmdShutdown(tlhCluster));
     nmdFree(tlhCluster);
-    EXPECT_ZERO(sem_destroy(&tlhSem));
     return checkFailures(ti, tlhNumThreads);
 }

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_native_mini_dfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_native_mini_dfs.c?rev=1616817&r1=1616816&r2=1616817&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_native_mini_dfs.c (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_native_mini_dfs.c Fri Aug  8 16:29:07 2014
@@ -22,7 +22,7 @@
 #include <errno.h>
 
 static struct NativeMiniDfsConf conf = {
-    .doFormat = 1,
+    1, /* doFormat */
 };
 
 /**
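[Editorial note, not part of the commit: two portability moves recur in the last three files. C99 designated initializers (.doFormat = 1) become positional initializers with comments, since MSVC's C compiler of that era rejects designated initializers, and the pthread/semaphore calls become the os/thread.h wrapper so the same test drives both POSIX and Windows threads. That header is not shown in this commit, so the sketch below is only a plausible POSIX-side implementation inferred from the call sites above (thread, threadCreate, threadJoin); the struct members and the trampoline are assumptions:

    /* Hypothetical POSIX backend for the os/thread.h API used above;
     * inferred from the call sites, not copied from the commit. */
    #include <pthread.h>

    typedef void (*threadProcedure)(void *arg);

    typedef struct thread {
        pthread_t id;            /* a Windows backend would store a HANDLE */
        threadProcedure start;   /* set by the caller before threadCreate */
        void *arg;               /* handed to start() on the new thread */
    } thread;

    /* pthreads wants a void *(*)(void *); this adapter is why
     * testHdfsOperations could drop its dummy NULL return above. */
    static void *threadTrampoline(void *v)
    {
        thread *t = (thread *)v;
        t->start(t->arg);
        return NULL;
    }

    int threadCreate(thread *t)
    {
        return pthread_create(&t->id, NULL, threadTrampoline, t);
    }

    int threadJoin(const thread *t)
    {
        return pthread_join(t->id, NULL);
    }
]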