Github user linwen commented on a diff in the pull request:

    https://github.com/apache/incubator-hawq/pull/1273#discussion_r130778902
  
    --- Diff: depends/libhdfs3/test/function/TestCInterface.cpp ---
    @@ -369,29 +448,154 @@ TEST(TestCInterfaceTDE, 
TestAppendWithTDELargeFiles_Success) {
             if (NULL == (out = hdfsOpenFile(fs, tdefile, O_WRONLY | O_APPEND, 
0, 0, 1024))) {
                 break;
             }
    -        Hdfs::FillBuffer(&buffer[0], buffer.size(), 1024);
    -        buffer.push_back(0);
    +        Hdfs::FillBuffer(&buffer[0], 128 * 3, 1024);
             while (todo > 0) {
    -            if (0 > (rc = hdfsWrite(fs, out, &buffer[offset], todo))) {
    +            if (0 > (rc = hdfsWrite(fs, out, &buffer[offset], 128))) {
                     break;
                 }
                 todo -= rc;
                 offset += rc;
             }
             rc = hdfsCloseFile(fs, out);
         } while (0);
    +
    +    //Read buffer from tdefile with hadoop API.
    +    FILE *file = popen("hadoop fs -cat /TDEAppend3/testfile", "r");
    +    char bufGets[128];
    +    while (fgets(bufGets, sizeof(bufGets), file)) {
    +    }
    +    pclose(file);
    +    //Check the buffer's md5 value is eaqual to the tdefile's md5 value.
         system("rm -rf ./testfile");
    -    system("hadoop fs -get /TDE/testfile ./");
    -    diff_file2buffer("testfile", &buffer[0]);
    +    system("hadoop fs -get /TDEAppend3/testfile ./");
    +    char resultFile[33] = { 0 };
    --- End diff --
    
    Since 33 is the size of an MD5 hash (32 characters) plus 1 for the null 
terminator, I suggest defining a const variable or macro for it, so that we can 
avoid using such a magic number throughout this source file. 


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---

Reply via email to