http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/CMakeLists.txt b/depends/libhdfs3/mock/CMakeLists.txt
new file mode 100644
index 0000000..2d01f84
--- /dev/null
+++ b/depends/libhdfs3/mock/CMakeLists.txt
@@ -0,0 +1,7 @@
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+
+AUTO_SOURCES(files "*.cpp" "RECURSE" "${CMAKE_CURRENT_SOURCE_DIR}")
+LIST(APPEND libhdfs3_MOCK_SOURCES ${files})
+
+SET(libhdfs3_MOCK_SOURCES ${libhdfs3_MOCK_SOURCES} PARENT_SCOPE)
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/MockBufferedSocketReader.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/MockBufferedSocketReader.h b/depends/libhdfs3/mock/MockBufferedSocketReader.h
new file mode 100644
index 0000000..eca29d9
--- /dev/null
+++ b/depends/libhdfs3/mock/MockBufferedSocketReader.h
@@ -0,0 +1,49 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 - 
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_MOCKBUFFEREDSOCKETREADER_H_
+#define _HDFS_LIBHDFS3_MOCK_MOCKBUFFEREDSOCKETREADER_H_
+
+#include "gmock/gmock.h"
+#include "network/BufferedSocketReader.h"
+
+namespace Hdfs {
+namespace Mock {
+
+class MockBufferedSocketReader: public Hdfs::Internal::BufferedSocketReader {
+public:
+       MOCK_METHOD2(read, int32_t(char * b, int32_t s));
+       MOCK_METHOD3(readFully, void(char * b, int32_t s, int timeout));
+       MOCK_METHOD1(readBigEndianInt32, int32_t(int timeout));
+       MOCK_METHOD1(readVarint32, int32_t(int timeout));
+       MOCK_METHOD1(poll, bool(int timeout));
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_MOCK_MOCKBUFFEREDSOCKETREADER_H_ */
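
For context, a Google Mock class declared with MOCK_METHODn like the one above is normally driven from a Google Test case. The sketch below is not part of this commit; the test name and the canned values are illustrative only.

    #include "gtest/gtest.h"
    #include "gmock/gmock.h"
    #include "MockBufferedSocketReader.h"

    using namespace testing;
    using Hdfs::Mock::MockBufferedSocketReader;

    TEST(MockBufferedSocketReaderSketch, ReturnsCannedData) {
        MockBufferedSocketReader reader;
        // Pretend data is ready and that a single read() delivers 4 bytes.
        EXPECT_CALL(reader, poll(_)).WillOnce(Return(true));
        EXPECT_CALL(reader, read(_, _)).WillOnce(Return(4));

        char buf[16];
        ASSERT_TRUE(reader.poll(500));
        ASSERT_EQ(4, reader.read(buf, sizeof(buf)));
    }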

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/MockDatanode.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/MockDatanode.h b/depends/libhdfs3/mock/MockDatanode.h
new file mode 100644
index 0000000..4acbefb
--- /dev/null
+++ b/depends/libhdfs3/mock/MockDatanode.h
@@ -0,0 +1,50 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 - 
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_MOCKDATANODE_H_
+#define _HDFS_LIBHDFS3_MOCK_MOCKDATANODE_H_
+
+#include "gmock/gmock.h"
+#include "server/Datanode.h"
+
+using namespace Hdfs::Internal;
+namespace Hdfs {
+
+namespace Mock {
+
+class MockDatanode: public Datanode {
+public:
+       MOCK_METHOD1(getReplicaVisibleLength, int64_t (const Hdfs::Internal::ExtendedBlock & b));
+       MOCK_METHOD3(getBlockLocalPathInfo, void (const Hdfs::Internal::ExtendedBlock & block,
+                        const Hdfs::Internal::Token & token, Hdfs::Internal::BlockLocalPathInfo & info));
+
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_MOCK_MOCKDATANODE_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/MockFileSystemInter.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/MockFileSystemInter.h b/depends/libhdfs3/mock/MockFileSystemInter.h
new file mode 100644
index 0000000..b9543b7
--- /dev/null
+++ b/depends/libhdfs3/mock/MockFileSystemInter.h
@@ -0,0 +1,112 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 - 
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_MOCKFILESYSTEMINTER_H_
+#define _HDFS_LIBHDFS3_MOCK_MOCKFILESYSTEMINTER_H_
+
+#include "gmock/gmock.h"
+
+#include "client/DirectoryIterator.h"
+#include "client/FileStatus.h"
+#include "client/FileSystem.h"
+#include "client/FileSystemInter.h"
+#include "client/FileSystemKey.h"
+#include "client/FileSystemStats.h"
+#include "client/Permission.h"
+#include "client/UserInfo.h"
+#include "Memory.h"
+#include "server/DatanodeInfo.h"
+#include "server/ExtendedBlock.h"
+#include "server/LocatedBlock.h"
+#include "server/LocatedBlocks.h"
+#include "SessionConfig.h"
+
+#include <string>
+#include <vector>
+
+class MockFileSystemInter: public Hdfs::Internal::FileSystemInter {
+public:
+  MOCK_METHOD0(connect, void());
+  MOCK_METHOD0(disconnect, void());
+  MOCK_METHOD1(getStandardPath, const std::string(const char * path));
+  MOCK_METHOD0(getClientName, const char *());
+  MOCK_CONST_METHOD0(getDefaultReplication, int());
+  MOCK_CONST_METHOD0(getDefaultBlockSize, int64_t());
+  MOCK_CONST_METHOD0(getHomeDirectory, std::string());
+  MOCK_METHOD2(deletePath, bool(const char * path, bool recursive));
+  MOCK_METHOD2(mkdir, bool(const char * path, const Hdfs::Permission & permission));
+  MOCK_METHOD2(mkdirs, bool(const char * path, const Hdfs::Permission & permission));
+  MOCK_METHOD1(getFileStatus, Hdfs::FileStatus(const char * path));
+  MOCK_METHOD3(setOwner, void(const char * path, const char * username, const char * groupname));
+  MOCK_METHOD3(setTimes, void(const char * path, int64_t mtime, int64_t atime));
+  MOCK_METHOD2(setPermission, void(const char * path, const Hdfs::Permission &));
+  MOCK_METHOD2(setReplication, bool(const char * path, short replication));
+  MOCK_METHOD2(rename, bool(const char * src, const char * dst));
+  MOCK_METHOD1(setWorkingDirectory, void(const char * path));
+  MOCK_CONST_METHOD0(getWorkingDirectory, std::string());
+  MOCK_METHOD1(exist, bool(const char * path));
+  MOCK_METHOD0(getFsStats, Hdfs::FileSystemStats());
+  MOCK_METHOD2(truncate, bool(const char * src, int64_t size));
+  MOCK_METHOD1(getDelegationToken, std::string(const char * renewer));
+  MOCK_METHOD0(getDelegationToken, std::string());
+  MOCK_METHOD1(renewDelegationToken, int64_t(const std::string & token));
+  MOCK_METHOD1(cancelDelegationToken, void(const std::string & token));
+  MOCK_METHOD6(create, void(const std::string & src, const Hdfs::Permission & masked, int flag, bool createParent, short replication, int64_t blockSize));
+  MOCK_METHOD1(append, std::pair<Hdfs::Internal::shared_ptr<Hdfs::Internal::LocatedBlock>,
+               Hdfs::Internal::shared_ptr<Hdfs::FileStatus> >(const std::string & src));
+  MOCK_METHOD2(abandonBlock, void(const Hdfs::Internal::ExtendedBlock & b, const std::string & srcr));
+  MOCK_METHOD3(addBlock, Hdfs::Internal::shared_ptr<Hdfs::Internal::LocatedBlock>(const std::string & src,
+          const Hdfs::Internal::ExtendedBlock * previous,
+          const std::vector<Hdfs::Internal::DatanodeInfo> & excludeNodes));
+  MOCK_METHOD6(getAdditionalDatanode, Hdfs::Internal::shared_ptr<Hdfs::Internal::LocatedBlock> (const std::string & src,
+          const Hdfs::Internal::ExtendedBlock & blk,
+          const std::vector<Hdfs::Internal::DatanodeInfo> & existings,
+          const std::vector<std::string> & storageIDs,
+          const std::vector<Hdfs::Internal::DatanodeInfo> & excludes, int numAdditionalNodes));
+  MOCK_METHOD2(complete, bool(const std::string & src,
+          const Hdfs::Internal::ExtendedBlock * last));
+  MOCK_METHOD1(reportBadBlocks, void(const std::vector<Hdfs::Internal::LocatedBlock> & blocks));
+  MOCK_METHOD1(fsync, void(const std::string & src));
+  MOCK_METHOD1(updateBlockForPipeline, Hdfs::Internal::shared_ptr<Hdfs::Internal::LocatedBlock>(const Hdfs::Internal::ExtendedBlock & block));
+  MOCK_METHOD4(updatePipeline, void(const Hdfs::Internal::ExtendedBlock & oldBlock,
+          const Hdfs::Internal::ExtendedBlock & newBlock,
+          const std::vector<Hdfs::Internal::DatanodeInfo> & newNodes,
+          const std::vector<std::string> & storageIDs));
+  MOCK_CONST_METHOD0(getConf, const Hdfs::Internal::SessionConfig &());
+  MOCK_CONST_METHOD0(getUserInfo, const Hdfs::Internal::UserInfo &());
+  MOCK_METHOD4(getBlockLocations, void(const std::string & src, int64_t offset, int64_t length, Hdfs::Internal::LocatedBlocks & lbs));
+  MOCK_METHOD4(getListing, bool(const std::string & src, const std::string & , bool needLocation, std::vector<Hdfs::FileStatus> &));
+  MOCK_METHOD2(listDirectory, Hdfs::DirectoryIterator(const char *, bool));
+  MOCK_METHOD0(renewLease, bool());
+  MOCK_METHOD0(registerOpenedOutputStream, void());
+  MOCK_METHOD0(unregisterOpenedOutputStream, bool());
+  MOCK_METHOD3(getFileBlockLocations, std::vector<Hdfs::BlockLocation> (const char * path, int64_t start, int64_t len));
+  MOCK_METHOD2(listAllDirectoryItems, std::vector<Hdfs::FileStatus> (const char * path, bool needLocation));
+  MOCK_METHOD0(getPeerCache, Hdfs::Internal::PeerCache &());
+};
+
+#endif /* _HDFS_LIBHDFS3_MOCK_MOCKFILESYSTEMINTER_H_ */
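
As with the other mocks in this directory, tests can give this interface canned behaviour instead of talking to a real namenode. A minimal sketch (not part of this commit; the test name is illustrative, and NiceMock merely silences warnings about calls without expectations):

    #include "gtest/gtest.h"
    #include "gmock/gmock.h"
    #include "MockFileSystemInter.h"

    using namespace testing;

    TEST(MockFileSystemInterSketch, ExistIsStubbed) {
        NiceMock<MockFileSystemInter> fs;
        // Any path is reported as existing without contacting a namenode.
        ON_CALL(fs, exist(_)).WillByDefault(Return(true));
        EXPECT_TRUE(fs.exist("/any/path"));
    }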

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/MockLeaseRenewer.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/MockLeaseRenewer.h b/depends/libhdfs3/mock/MockLeaseRenewer.h
new file mode 100644
index 0000000..c6669e8
--- /dev/null
+++ b/depends/libhdfs3/mock/MockLeaseRenewer.h
@@ -0,0 +1,62 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 - 
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_MOCKLEASERENEWER_H_
+#define _HDFS_LIBHDFS3_MOCK_MOCKLEASERENEWER_H_
+
+#include "client/LeaseRenewer.h"
+#include "gmock/gmock.h"
+
+namespace Hdfs {
+namespace Mock {
+
+class MockLeaseRenewer: public Hdfs::Internal::LeaseRenewer {
+public:
+       MOCK_METHOD1(StartRenew, void(shared_ptr<FileSystemInter>));
+       MOCK_METHOD1(StopRenew, void(shared_ptr<FileSystemInter>));
+};
+
+static inline shared_ptr<LeaseRenewer> MakeMockLeaseRenewer() {
+       Hdfs::Internal::LeaseRenewer::GetLeaseRenewer();
+       shared_ptr<LeaseRenewer> old = Hdfs::Internal::LeaseRenewer::renewer;
+       Hdfs::Internal::LeaseRenewer::renewer = shared_ptr<LeaseRenewer>(new MockLeaseRenewer);
+       return old;
+}
+
+static inline MockLeaseRenewer & GetMockLeaseRenewer(){
+       assert(Hdfs::Internal::LeaseRenewer::renewer);
+       return static_cast<MockLeaseRenewer &> (*Hdfs::Internal::LeaseRenewer::renewer);
+}
+
+static inline void ResetMockLeaseRenewer(shared_ptr<LeaseRenewer> old){
+       Hdfs::Internal::LeaseRenewer::renewer = old;
+}
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_MOCK_MOCKLEASERENEWER_H_ */
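
The three helpers above swap the process-wide lease renewer for a mock and later restore it. A typical test would bracket the code under test like the following sketch (illustrative only; the expectation shown assumes the code under test should not start lease renewal):

    #include "gtest/gtest.h"
    #include "gmock/gmock.h"
    #include "MockLeaseRenewer.h"

    using namespace testing;
    using namespace Hdfs::Mock;

    TEST(MockLeaseRenewerSketch, SwapAndRestore) {
        // Install the mock and remember the real renewer.
        auto old = MakeMockLeaseRenewer();
        EXPECT_CALL(GetMockLeaseRenewer(), StartRenew(_)).Times(0);

        // ... exercise code that must not register a lease here ...

        // Put the real renewer back so later tests are unaffected.
        ResetMockLeaseRenewer(old);
    }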

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/MockNamenode.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/MockNamenode.h b/depends/libhdfs3/mock/MockNamenode.h
new file mode 100644
index 0000000..94cfece
--- /dev/null
+++ b/depends/libhdfs3/mock/MockNamenode.h
@@ -0,0 +1,116 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 - 
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_MOCKNAMENODE_H_
+#define _HDFS_LIBHDFS3_MOCK_MOCKNAMENODE_H_
+
+#include "gmock/gmock.h"
+#include "server/Namenode.h"
+#include "server/LocatedBlocks.h"
+#include "server/LocatedBlock.h"
+#include "server/ExtendedBlock.h"
+#include "server/DatanodeInfo.h"
+#include "client/FileStatus.h"
+
+using namespace Hdfs::Internal;
+namespace Hdfs {
+
+namespace Mock {
+
+class MockNamenode: public Namenode {
+public:
+    MOCK_METHOD4(getBlockLocations, void(const std::string & src, int64_t offset,
+                            int64_t length, LocatedBlocks & lbs));
+    MOCK_METHOD7(create, void(const std::string & src, const Permission & masked,
+          const std::string & clientName, int flag, bool createParent,
+          short replication, int64_t blockSize));
+    MOCK_METHOD2(append, std::pair<shared_ptr<LocatedBlock>,
+                 shared_ptr<FileStatus> >(const std::string & clientName));
+    MOCK_METHOD2(setReplication, bool(const std::string & src, short replication));
+    MOCK_METHOD2(setPermission, void(const std::string & src,
+          const Permission & permission));
+    MOCK_METHOD3(setOwner, void(const std::string & src, const std::string & username, const std::string & groupname));
+    MOCK_METHOD3(abandonBlock, void(const ExtendedBlock & b, const std::string & src,
+          const std::string & holder));
+    MOCK_METHOD4(addBlock, shared_ptr<LocatedBlock>(const std::string & src,
+          const std::string & clientName, const ExtendedBlock * previous,
+          const std::vector<DatanodeInfo> & excludeNodes));
+    MOCK_METHOD7(getAdditionalDatanode, shared_ptr<LocatedBlock>(const std::string & src,
+             const ExtendedBlock & blk,
+             const std::vector<DatanodeInfo> & existings,
+             const std::vector<std::string> & storageIDs,
+             const std::vector<DatanodeInfo> & excludes, int numAdditionalNodes,
+             const std::string & clientName));
+    MOCK_METHOD3(complete, bool(const std::string & src,
+                        const std::string & clientName, const ExtendedBlock * last));
+    MOCK_METHOD1(reportBadBlocks, void(const std::vector<LocatedBlock> & blocks));
+    MOCK_METHOD2(concat, void(const std::string & trg,
+                       const std::vector<std::string> & srcs));
+    MOCK_METHOD3(truncate, bool(const std::string & src, int64_t size,
+                        const std::string & clientName));
+    MOCK_METHOD2(getLease, void(const std::string & src,
+                        const std::string & clientName)) ;
+    MOCK_METHOD2(releaseLease, void(const std::string & src,
+                          const std::string & clientName));
+    MOCK_METHOD2(deleteFile, bool(const std::string & src, bool recursive));
+    MOCK_METHOD3(mkdirs, bool(const std::string & src, const Permission & masked,
+                     bool createParent)) ;
+    MOCK_METHOD1(renewLease, void(const std::string & clientName));
+    MOCK_METHOD2(recoverLease, bool(const std::string & src,
+                           const std::string & clientName));
+    MOCK_METHOD0(getFsStats, std::vector<int64_t>());
+    MOCK_METHOD1(metaSave, void(
+          const std::string & filename));
+    MOCK_METHOD2(getFileInfo, FileStatus(const std::string & src, bool *exist));
+    MOCK_METHOD1(getFileLinkInfo, FileStatus(const std::string & src));
+    MOCK_METHOD3(setQuota, void(const std::string & path, int64_t namespaceQuota,
+                        int64_t diskspaceQuota));
+    MOCK_METHOD2(fsync, void(const std::string & src, const std::string & client));
+    MOCK_METHOD3(setTimes, void(const std::string & src, int64_t mtime, int64_t atime));
+    MOCK_METHOD4(createSymlink, void(const std::string & target,
+                             const std::string & link, const Permission & dirPerm,
+                             bool createParent));
+    MOCK_METHOD1(getLinkTarget, std::string(const std::string & path));
+    MOCK_METHOD2(updateBlockForPipeline, shared_ptr<LocatedBlock>(const ExtendedBlock & block,
+              const std::string & clientName));
+    MOCK_METHOD5(updatePipeline, void(const std::string & clientName,
+                              const ExtendedBlock & oldBlock, const ExtendedBlock & newBlock,
+                              const std::vector<DatanodeInfo> & newNodes,
+                              const std::vector<std::string> & storageIDs));
+    MOCK_METHOD4(getListing, bool(const std::string & src,
+                           const std::string & startAfter, bool needLocation,
+                           std::vector<FileStatus> & dl));
+    MOCK_METHOD2(rename, bool(const std::string & src, const std::string & dst));
+    MOCK_METHOD1(getDelegationToken, Token(const std::string & renewer) );
+    MOCK_METHOD1(renewDelegationToken, int64_t(const Token & token));
+    MOCK_METHOD1(cancelDelegationToken, void(const Token & token));
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_MOCK_MOCKNAMENODE_H_ */
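
A short sketch of how such a mock namenode is typically exercised (not part of this commit; the method chosen and the client name are arbitrary):

    #include "gtest/gtest.h"
    #include "gmock/gmock.h"
    #include "MockNamenode.h"

    using namespace testing;

    TEST(MockNamenodeSketch, RenewLeaseIsInvokedOnce) {
        Hdfs::Mock::MockNamenode nn;
        // The test fails if renewLease() is not called exactly once
        // with this client name.
        EXPECT_CALL(nn, renewLease(StrEq("test-client"))).Times(1);
        nn.renewLease("test-client");
    }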

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/MockOperationCanceledCallback.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/MockOperationCanceledCallback.h b/depends/libhdfs3/mock/MockOperationCanceledCallback.h
new file mode 100644
index 0000000..b3bdef4
--- /dev/null
+++ b/depends/libhdfs3/mock/MockOperationCanceledCallback.h
@@ -0,0 +1,54 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 - 
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_MOCKOPERATIONCANCELEDCALLBACK_H_
+#define _HDFS_LIBHDFS3_MOCK_MOCKOPERATIONCANCELEDCALLBACK_H_
+
+#include "gmock/gmock.h"
+
+namespace Hdfs {
+namespace Mock {
+
+class MockCancelObject {
+public:
+       bool canceled() {
+               return check();
+       }
+       virtual bool check() = 0;
+       virtual ~MockCancelObject() {
+       }
+};
+
+class MockOperationCanceledCallback: public MockCancelObject {
+public:
+       MOCK_METHOD0(check, bool());
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_MOCK_MOCKOPERATIONCANCELEDCALLBACK_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/MockPipeline.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/MockPipeline.h b/depends/libhdfs3/mock/MockPipeline.h
new file mode 100644
index 0000000..ccd31c5
--- /dev/null
+++ b/depends/libhdfs3/mock/MockPipeline.h
@@ -0,0 +1,53 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 - 
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_MOCKPIPELINE_H_
+#define _HDFS_LIBHDFS3_MOCK_MOCKPIPELINE_H_
+
+#include "gmock/gmock.h"
+#include "client/Packet.h"
+#include "server/ExtendedBlock.h"
+#include "client/FileSystem.h"
+
+using namespace Hdfs::Internal;
+namespace Hdfs {
+
+namespace Mock {
+
+class MockPipeline: public Pipeline {
+public:
+    MOCK_METHOD0(flush, void());
+    MOCK_METHOD1(close, shared_ptr<LocatedBlock> (shared_ptr<Packet> lastPacket));
+    MOCK_METHOD1(send, void (shared_ptr<Packet> packet));
+    MOCK_METHOD1(setFilesystem, void (FileSystemInter * fs));
+
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_MOCK_MOCKPIPELINE_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/MockRpcChannel.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/MockRpcChannel.h b/depends/libhdfs3/mock/MockRpcChannel.h
new file mode 100644
index 0000000..26eb567
--- /dev/null
+++ b/depends/libhdfs3/mock/MockRpcChannel.h
@@ -0,0 +1,49 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 - 
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_MOCKRPCCHANNEL_H_
+#define _HDFS_LIBHDFS3_MOCK_MOCKRPCCHANNEL_H_
+
+#include "gmock/gmock.h"
+#include "rpc/RpcChannel.h"
+
+namespace Hdfs {
+namespace Mock {
+
+class MockRpcChannel: public Hdfs::Internal::RpcChannel {
+public:
+       MOCK_METHOD0(close, void());
+       MOCK_METHOD1(invoke, void(const Hdfs::Internal::RpcCall &));
+       MOCK_METHOD0(checkIdle, bool());
+       MOCK_METHOD0(waitForExit, void());
+       MOCK_METHOD0(addRef, void());
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_MOCK_MOCKRPCCHANNEL_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/MockRpcClient.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/MockRpcClient.h b/depends/libhdfs3/mock/MockRpcClient.h
new file mode 100644
index 0000000..adbd834
--- /dev/null
+++ b/depends/libhdfs3/mock/MockRpcClient.h
@@ -0,0 +1,54 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 - 
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_MOCKRPCCLIENT_H_
+#define _HDFS_LIBHDFS3_MOCK_MOCKRPCCLIENT_H_
+
+#include "gmock/gmock.h"
+#include "rpc/RpcClient.h"
+
+using namespace Hdfs::Internal;
+namespace Hdfs {
+namespace Mock {
+
+class MockRpcClient: public RpcClient {
+public:
+       MOCK_METHOD0(isRunning, bool());
+
+       MOCK_METHOD4(getChannel, RpcChannel & (const RpcAuth &,
+                                       const RpcProtocolInfo &, const RpcServerInfo &,
+                                       const RpcConfig &));
+
+       MOCK_CONST_METHOD0(getClientId, std::string());
+
+       MOCK_METHOD0(getCallId, int32_t());
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_MOCK_MOCKRPCCLIENT_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/MockRpcRemoteCall.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/MockRpcRemoteCall.h b/depends/libhdfs3/mock/MockRpcRemoteCall.h
new file mode 100644
index 0000000..7d75272
--- /dev/null
+++ b/depends/libhdfs3/mock/MockRpcRemoteCall.h
@@ -0,0 +1,52 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 - 
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_MOCKRPCREMOTECALL_H_
+#define _HDFS_LIBHDFS3_MOCK_MOCKRPCREMOTECALL_H_
+
+#include "gmock/gmock.h"
+#include "rpc/RpcRemoteCall.h"
+
+using namespace Hdfs::Internal;
+namespace Hdfs {
+namespace Mock {
+
+class MockRpcRemoteCall: public RpcRemoteCall {
+public:
+       MockRpcRemoteCall(const RpcCall & c, int32_t id, const std::string & clientId) :
+                       RpcRemoteCall(c, id, clientId) {
+       }
+
+       MOCK_METHOD2(serialize, void(const RpcProtocolInfo&, WriteBuffer &));
+       MOCK_METHOD1(cancel, void(exception_ptr ));
+
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_MOCK_MOCKRPCREMOTECALL_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/MockSockCall.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/MockSockCall.h b/depends/libhdfs3/mock/MockSockCall.h
new file mode 100644
index 0000000..514fe16
--- /dev/null
+++ b/depends/libhdfs3/mock/MockSockCall.h
@@ -0,0 +1,65 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_MOCKSOCKCALL_H_
+#define _HDFS_LIBHDFS3_MOCK_MOCKSOCKCALL_H_
+
+#include "gmock/gmock.h"
+
+#include "MockSystem.h"
+
+namespace Hdfs {
+namespace Mock {
+
+class MockSockSysCall: public MockSockSysCallInterface {
+public:
+    MOCK_METHOD4(recv , ssize_t (int sock, void * buffer, size_t size, int flag));
+    MOCK_METHOD4(send , ssize_t (int sock, const void * buffer, size_t size,
+                    int flag));
+    MOCK_METHOD3(recvmsg , ssize_t (int socket, struct msghdr *message, int flags));
+    MOCK_METHOD4(getaddrinfo , int (const char * __restrict host,
+                    const char * __restrict port,
+                    const struct addrinfo * __restrict hint,
+                    struct addrinfo ** __restrict addr));
+    MOCK_METHOD1(freeaddrinfo , void (struct addrinfo * addr));
+    MOCK_METHOD3(socket , int (int family, int type, int protocol));
+    MOCK_METHOD3(connect , int (int sock, const struct sockaddr * addr,
+                    socklen_t len));
+    MOCK_METHOD3(getpeername , int (int sock, struct sockaddr * __restrict peer,
+                    socklen_t * __restrict len));
+    MOCK_METHOD3(fcntl , int (int sock, int flag, int value));
+    MOCK_METHOD5(setsockopt , int (int sock, int level, int optname, const void *optval,
+                    socklen_t optlen));
+    MOCK_METHOD3(poll , int (struct pollfd * pfd, nfds_t size, int timeout));
+    MOCK_METHOD2(shutdown , int (int sock, int how));
+    MOCK_METHOD1(close , int (int sock));
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_MOCK_MOCKSOCKCALL_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/MockSocket.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/MockSocket.h b/depends/libhdfs3/mock/MockSocket.h
new file mode 100644
index 0000000..59ef54e
--- /dev/null
+++ b/depends/libhdfs3/mock/MockSocket.h
@@ -0,0 +1,66 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 - 
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_MOCKSOCKET_H_
+#define _HDFS_LIBHDFS3_MOCK_MOCKSOCKET_H_
+
+#include "gmock/gmock.h"
+
+#include "network/Socket.h"
+
+class MockSocket: public Hdfs::Internal::Socket {
+public:
+
+       MOCK_METHOD2(read, int32_t(char * buffer, int32_t size));
+
+       MOCK_METHOD3(readFully, void(char * buffer, int32_t size, int timeout));
+
+       MOCK_METHOD2(write, int32_t(const char * buffer, int32_t size));
+
+       MOCK_METHOD3(writeFully, void(const char * buffer, int32_t size, int timeout));
+
+       MOCK_METHOD3(connect, void(const char * host, int port, int timeout));
+
+       MOCK_METHOD3(connect, void(const char * host, const char * port, int timeout));
+
+       MOCK_METHOD4(connect, void(struct addrinfo * paddr, const char * host, const char * port,
+                                       int timeout));
+
+       MOCK_METHOD3(poll, bool(bool read, bool write, int timeout));
+
+       MOCK_METHOD1(setBlockMode, void(bool enable));
+
+       MOCK_METHOD1(setNoDelay, void(bool enable));
+
+       MOCK_METHOD1(setLingerTimeout, void(int timeout));
+
+       MOCK_METHOD0(disableSigPipe, void());
+
+       MOCK_METHOD0(close, void());
+};
+
+#endif /* _HDFS_LIBHDFS3_MOCK_MOCKSOCKET_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/MockSystem.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/MockSystem.cpp b/depends/libhdfs3/mock/MockSystem.cpp
new file mode 100644
index 0000000..80a2594
--- /dev/null
+++ b/depends/libhdfs3/mock/MockSystem.cpp
@@ -0,0 +1,148 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 - 
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <cassert>
+#include <climits>
+#include <errno.h>
+#include <poll.h>
+
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <netdb.h>
+
+#include <string.h>
+
+#include "MockSystem.h"
+
+namespace MockSystem {
+
+Hdfs::Mock::MockSockSysCallInterface * MockSockSysCallObj = NULL;
+
+ssize_t recv(int sock, void * buffer, size_t size, int flag) {
+    if (MockSockSysCallObj) {
+        return MockSockSysCallObj->recv(sock, buffer, size, flag);
+    }
+    return ::recv(sock, buffer, size, flag);
+}
+
+ssize_t send(int sock, const void * buffer, size_t size, int flag) {
+    if (MockSockSysCallObj) {
+        return MockSockSysCallObj->send(sock, buffer, size, flag);
+    }
+    return ::send(sock, buffer, size, flag);
+}
+
+ssize_t recvmsg(int socket, struct msghdr *message, int flags) {
+    if (MockSockSysCallObj) {
+        return MockSockSysCallObj->recvmsg(socket, message, flags);
+    }
+    return ::recvmsg(socket, message, flags);
+}
+
+int getaddrinfo(const char * __restrict host, const char * __restrict port,
+        const struct addrinfo * __restrict hint,
+        struct addrinfo ** __restrict addr) {
+    if (MockSockSysCallObj) {
+        return MockSockSysCallObj->getaddrinfo(host, port, hint, addr);
+    }
+    return ::getaddrinfo(host, port, hint, addr);
+}
+
+void freeaddrinfo(struct addrinfo * addr) {
+    if (MockSockSysCallObj) {
+        MockSockSysCallObj->freeaddrinfo(addr);
+    } else {
+        ::freeaddrinfo(addr);
+    }
+}
+
+int socket(int family, int type, int protocol) {
+    if (MockSockSysCallObj) {
+        return MockSockSysCallObj->socket(family, type, protocol);
+    }
+    return ::socket(family, type, protocol);
+}
+
+int connect(int sock, const struct sockaddr * addr, socklen_t len) {
+    if (MockSockSysCallObj) {
+        return MockSockSysCallObj->connect(sock, addr, len);
+    }
+    return ::connect(sock, addr, len);
+}
+
+int getpeername(int sock, struct sockaddr * __restrict peer,
+        socklen_t * __restrict len) {
+    if (MockSockSysCallObj) {
+        return MockSockSysCallObj->getpeername(sock, peer, len);
+    }
+    return ::getpeername(sock, peer, len);
+}
+
+int fcntl(int sock, int flag, int value) {
+    if (MockSockSysCallObj) {
+        return MockSockSysCallObj->fcntl(sock, flag, value);
+    }
+    return ::fcntl(sock, flag, value);
+}
+
+int setsockopt(int sock, int level, int optname, const void *optval,
+        socklen_t optlen) {
+    if (MockSockSysCallObj) {
+        return MockSockSysCallObj->setsockopt(sock, level, optname, optval,
+                optlen);
+    }
+    return ::setsockopt(sock, level, optname, optval, optlen);
+}
+
+int poll(struct pollfd * pfd, nfds_t size, int timeout) {
+    if (MockSockSysCallObj) {
+        return MockSockSysCallObj->poll(pfd, size, timeout);
+    }
+    return ::poll(pfd, size, timeout);
+}
+
+int shutdown(int sock, int how) {
+    if (MockSockSysCallObj) {
+        return MockSockSysCallObj->shutdown(sock, how);
+    }
+    return ::shutdown(sock, how);
+}
+
+int close(int sock) {
+    if (MockSockSysCallObj) {
+        return MockSockSysCallObj->close(sock);
+    }
+    return ::close(sock);
+}
+
+}
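
These wrappers fall through to the real system calls unless a test installs a mock via MockSockSysCallObj. A minimal sketch of that pattern (not part of this commit; the test name and the forced connect failure are illustrative):

    #include "gtest/gtest.h"
    #include "gmock/gmock.h"
    #include "MockSockCall.h"

    using namespace testing;

    TEST(MockSystemSketch, ConnectCanBeForcedToFail) {
        Hdfs::Mock::MockSockSysCall sys;
        // Route the wrapped syscalls through the mock for this test only.
        MockSystem::MockSockSysCallObj = &sys;
        EXPECT_CALL(sys, connect(_, _, _)).WillOnce(Return(-1));

        EXPECT_EQ(-1, MockSystem::connect(3, NULL, 0));

        // Restore pass-through behaviour for subsequent tests.
        MockSystem::MockSockSysCallObj = NULL;
    }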

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/MockSystem.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/MockSystem.h b/depends/libhdfs3/mock/MockSystem.h
new file mode 100644
index 0000000..ce550f9
--- /dev/null
+++ b/depends/libhdfs3/mock/MockSystem.h
@@ -0,0 +1,112 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_MOCKSYSTEM_H_
+#define _HDFS_LIBHDFS3_MOCK_MOCKSYSTEM_H_
+
+#include <poll.h>
+
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <netdb.h>
+
+namespace Hdfs {
+namespace Mock {
+
+class MockSockSysCallInterface {
+public:
+    virtual ~MockSockSysCallInterface() {
+    }
+
+    virtual ssize_t recv(int sock, void * buffer, size_t size, int flag) = 0;
+    virtual ssize_t send(int sock, const void * buffer, size_t size,
+            int flag) = 0;
+    virtual ssize_t recvmsg(int socket, struct msghdr *message, int flags) = 0;
+    virtual int getaddrinfo(const char * __restrict host,
+            const char * __restrict port,
+            const struct addrinfo * __restrict hint,
+            struct addrinfo ** __restrict addr) = 0;
+    virtual void freeaddrinfo(struct addrinfo * addr) = 0;
+    virtual int socket(int family, int type, int protocol) = 0;
+    virtual int connect(int sock, const struct sockaddr * addr,
+            socklen_t len) = 0;
+    virtual int getpeername(int sock, struct sockaddr * __restrict peer,
+            socklen_t * __restrict len) = 0;
+    virtual int fcntl(int sock, int flag, int value) = 0;
+    virtual int setsockopt(int sock, int level, int optname, const void *optval,
+            socklen_t optlen) = 0;
+    virtual int poll(struct pollfd * pfd, nfds_t size, int timeout) = 0;
+    virtual int shutdown(int sock, int how) = 0;
+    virtual int close(int sock) = 0;
+};
+
+}
+}
+
+namespace MockSystem {
+
+extern Hdfs::Mock::MockSockSysCallInterface * MockSockSysCallObj;
+
+ssize_t recv(int sock, void * buffer, size_t size, int flag);
+
+ssize_t send(int sock, const void * buffer, size_t size, int flag);
+
+ssize_t recvmsg(int socket, struct msghdr *message, int flags);
+
+int getaddrinfo(const char * __restrict host, const char * __restrict port,
+        const struct addrinfo * __restrict hint,
+        struct addrinfo ** __restrict addr);
+
+void freeaddrinfo(struct addrinfo * addr);
+
+int socket(int family, int type, int protocol);
+
+int connect(int sock, const struct sockaddr * addr, socklen_t len);
+
+int getpeername(int sock, struct sockaddr * __restrict peer,
+        socklen_t * __restrict len);
+
+int fcntl(int sock, int flag, int value);
+
+int setsockopt(int sock, int level, int optname, const void *optval,
+        socklen_t optlen);
+
+int poll(struct pollfd * pfd, nfds_t size, int timeout);
+
+int shutdown(int sock, int how);
+
+int close(int sock);
+
+}
+
+#endif /* _HDFS_LIBHDFS3_MOCK_MOCKSYSTEM_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/NamenodeStub.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/NamenodeStub.h b/depends/libhdfs3/mock/NamenodeStub.h
new file mode 100644
index 0000000..50cdde4
--- /dev/null
+++ b/depends/libhdfs3/mock/NamenodeStub.h
@@ -0,0 +1,55 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 - 
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_NAMENODESTUB_H_
+#define _HDFS_LIBHDFS3_MOCK_NAMENODESTUB_H_
+
+#include "MockNamenode.h"
+
+#include <memory>
+
+using namespace Hdfs;
+using namespace Internal;
+
+namespace Hdfs {
+
+namespace Mock {
+
+class NamenodeStub {
+public:
+
+    virtual ~NamenodeStub() {
+    }
+
+    virtual MockNamenode  * getNamenode() = 0;
+
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_MOCK_NAMENODESTUB_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/PipelineStub.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/PipelineStub.h b/depends/libhdfs3/mock/PipelineStub.h
new file mode 100644
index 0000000..2648eeb
--- /dev/null
+++ b/depends/libhdfs3/mock/PipelineStub.h
@@ -0,0 +1,55 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 - 
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_PIPELINESTUB_H_
+#define _HDFS_LIBHDFS3_MOCK_PIPELINESTUB_H_
+
+#include "MockPipeline.h"
+
+#include <memory>
+
+using namespace Hdfs;
+using namespace Internal;
+
+namespace Hdfs {
+
+namespace Mock {
+
+class PipelineStub {
+public:
+
+    virtual ~PipelineStub() {
+    }
+
+    virtual shared_ptr<MockPipeline>  getPipeline() = 0;
+
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_MOCK_PIPELINESTUB_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/TestDatanodeStub.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/TestDatanodeStub.h b/depends/libhdfs3/mock/TestDatanodeStub.h
new file mode 100644
index 0000000..88d75c1
--- /dev/null
+++ b/depends/libhdfs3/mock/TestDatanodeStub.h
@@ -0,0 +1,48 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 - 
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_TESTDATANODESTUB_H_
+#define _HDFS_LIBHDFS3_MOCK_TESTDATANODESTUB_H_
+
+#include "MockDatanode.h"
+
+namespace Hdfs {
+
+namespace Mock {
+
+class TestDatanodeStub {
+public:
+    virtual ~TestDatanodeStub() {
+    }
+
+    virtual shared_ptr<MockDatanode> getDatanode() = 0;
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_MOCK_TESTDATANODESTUB_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/TestRpcChannelStub.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/TestRpcChannelStub.h b/depends/libhdfs3/mock/TestRpcChannelStub.h
new file mode 100644
index 0000000..bf1fc6e
--- /dev/null
+++ b/depends/libhdfs3/mock/TestRpcChannelStub.h
@@ -0,0 +1,56 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 - 
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_TESTRPCCHANNELSTUB_H_
+#define _HDFS_LIBHDFS3_MOCK_TESTRPCCHANNELSTUB_H_
+
+#include "rpc/RpcChannel.h"
+
+namespace Hdfs {
+namespace Internal {
+
+class RpcClient;
+
+}
+
+namespace Mock {
+
+class TestRpcChannelStub {
+public:
+       virtual ~TestRpcChannelStub() {
+       }
+
+       virtual Hdfs::Internal::RpcChannel* getChannel(
+                       Hdfs::Internal::RpcChannelKey key,
+                       Hdfs::Internal::RpcClient & c) = 0;
+
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_MOCK_TESTRPCCHANNELSTUB_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/mock/TestUtil.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/TestUtil.h b/depends/libhdfs3/mock/TestUtil.h
new file mode 100644
index 0000000..4372847
--- /dev/null
+++ b/depends/libhdfs3/mock/TestUtil.h
@@ -0,0 +1,102 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 - 
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_MOCK_TESTUTIL_H_
+#define _HDFS_LIBHDFS3_MOCK_TESTUTIL_H_
+
+#include <cstddef>
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+
+#include "Logger.h"
+
+namespace Hdfs {
+
+/**
+ * Fill the buffer with "012345678\n".
+ * @param buffer The buffer to be filled.
+ * @param size The size of the buffer.
+ * @param offset Start offset of the content to be filled.
+ */
+static inline void FillBuffer(char * buffer, size_t size, size_t offset) {
+       int64_t todo = size;
+
+       char c;
+       while (todo-- > 0) {
+               c = offset++ % 10;
+               c = c < 9 ? c + '0' : '\n';
+               *buffer++ = c;
+       }
+}
+
+/**
+ * Check whether the buffer is filled with the expected pattern "012345678\n".
+ * @param buffer The buffer to be checked.
+ * @param size The size of the buffer.
+ * @param offset Start offset of the expected pattern.
+ * @return true if the buffer content matches the expected pattern.
+ */
+static inline bool CheckBuffer(const char * buffer, size_t size,
+               size_t offset) {
+       int64_t todo = size;
+
+       char c;
+       while (todo-- > 0) {
+               c = offset++ % 10;
+               c = c < 9 ? c + '0' : '\n';
+               if (*buffer++ != c) {
+                       return false;
+               }
+       }
+
+       return true;
+}
+
+static inline const char * GetEnv(const char * key, const char * defaultValue) {
+       const char * retval = getenv(key);
+       if (retval && strlen(retval) > 0) {
+               return retval;
+       }
+       return defaultValue;
+}
+
+}
+
+#define DebugException(function) \
+    try { \
+        function ; \
+    } catch (const Hdfs::HdfsException & e) { \
+        std::string buffer; \
+        LOG(LOG_ERROR, "DEBUG:\n%s", Hdfs::Internal::GetExceptionDetail(e, buffer)); \
+        throw; \
+    } catch (const std::exception & e) { \
+        LOG(LOG_ERROR, "DEBUG:\n%s", e.what()); \
+        throw; \
+    }
+
+#endif
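
For reference, a short usage sketch (not part of this commit) showing how FillBuffer and CheckBuffer pair up in a unit test; the test name is hypothetical. Filling at an offset and checking at the same offset succeeds, while checking at a shifted offset fails as soon as the pattern stops lining up.

// Hypothetical usage of the TestUtil.h helpers; not part of this commit.
#include <vector>

#include "gtest/gtest.h"
#include "TestUtil.h"

TEST(TestUtilSketch, FillAndCheckRoundTrip) {
    std::vector<char> buffer(1024);

    // Fill with the repeating pattern "012345678\n" starting at offset 3.
    Hdfs::FillBuffer(&buffer[0], buffer.size(), 3);

    // Checking with the same offset matches the pattern.
    EXPECT_TRUE(Hdfs::CheckBuffer(&buffer[0], buffer.size(), 3));

    // Checking with a shifted offset does not.
    EXPECT_FALSE(Hdfs::CheckBuffer(&buffer[0], buffer.size(), 4));
}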

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/rpms/.gitignore
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/rpms/.gitignore b/depends/libhdfs3/rpms/.gitignore
new file mode 100644
index 0000000..3561700
--- /dev/null
+++ b/depends/libhdfs3/rpms/.gitignore
@@ -0,0 +1,7 @@
+BUILD
+BUILDROOT
+RPMS
+SOURCES
+SPECS
+SRPMS
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/rpms/build.sh
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/rpms/build.sh b/depends/libhdfs3/rpms/build.sh
new file mode 100755
index 0000000..4dd2faa
--- /dev/null
+++ b/depends/libhdfs3/rpms/build.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+this_dir=`cd "\`dirname \"$0\"\`";pwd`
+top_dir=${this_dir}/../
+
+die() {
+    echo "$@" 1>&2 ; popd 2>/dev/null; exit 1
+}
+
+install_depends() {
+    yum install -y epel-release || die "cannot install epel"
+    yum install -y \
+        which make rpmdevtools gcc-c++ cmake boost-devel libxml2-devel libuuid-devel krb5-devel libgsasl-devel \
+        protobuf-devel || die "cannot install dependencies"
+}
+
+build_with_boost() {
+    pushd ${top_dir}
+    rm -rf build && mkdir -p build && cd build || die "cannot create build directory"
+    ../bootstrap --enable-boost || die "bootstrap failed"
+    make -j2 unittest || die "failed to run unit tests"
+    popd
+}
+
+build_with_debug() {
+    pushd ${top_dir}
+    rm -rf build && mkdir -p build && cd build || die "cannot create build directory"
+    ../bootstrap --enable-debug || die "bootstrap failed"
+    make -j2 unittest || die "failed to run unit tests"
+    popd
+}
+
+create_package() {
+    pushd ${top_dir}
+    rm -rf build && mkdir -p build && cd build || die "cannot create build directory"
+    ../bootstrap || die "bootstrap failed"
+    make rpm-package || die "failed to create rpm package"
+    popd
+}
+
+deploy() {
+    pushd ${top_dir}
+
+    version=$(cat ${top_dir}/rpms/BUILD/version)
+
+    if [ -z "${version}" ]; then
+        die "cannot get version"
+    fi
+    
+    if [ -z "${BINTRAY_KEY}" ]; then
+        die "bintray api key not set"
+    fi
+    
+    message=`curl -H "X-Bintray-Publish: 1" -H "X-Bintray-Override: 1" -T ${top_dir}/rpms/RPMS/x86_64/libhdfs3-${version}-1.el7.centos.x86_64.rpm -uwangzw:${BINTRAY_KEY} \
+      https://api.bintray.com/content/wangzw/rpm/libhdfs3/${version}/centos7/x86_64/libhdfs3-${version}-1.el7.centos.x86_64.rpm`
+    
+    if [ -z `echo ${message} | grep "success"` ]; then
+        echo ${message}
+        die "failed to upload libhdfs3-${version}-1.el7.centos.x86_64.rpm"
+    fi
+    
+    message=`curl -H "X-Bintray-Publish: 1" -H "X-Bintray-Override: 1" -T ${top_dir}/rpms/RPMS/x86_64/libhdfs3-devel-${version}-1.el7.centos.x86_64.rpm -uwangzw:${BINTRAY_KEY} \
+      https://api.bintray.com/content/wangzw/rpm/libhdfs3/${version}/centos7/x86_64/libhdfs3-devel-${version}-1.el7.centos.x86_64.rpm`
+    
+    if [ -z `echo ${message} | grep "success"` ]; then
+        echo ${message}
+        die "failed to upload 
libhdfs3-devel-${version}-1.el7.centos.x86_64.rpm"
+    fi
+
+    popd
+}
+
+run() {
+    install_depends || die "failed to install dependencies"
+    build_with_boost || die "build failed with boost"
+    build_with_debug || die "build failed with debug mode"
+    create_package || die "failed to create rpm package"
+    
+    version=$(cat ${top_dir}/rpms/BUILD/version)
+    echo "version ${version}"
+
+    if [ -z "${BRANCH}" ]; then
+        echo "skip deploy since environment variable BRANCH is not set"
+        return
+    fi
+
+    if [ "${BRANCH}" = "v${version}" ]; then
+        echo "deploy libhdfs3 version ${version}"
+        deploy || die "failed to deploy libhdfs3 rpms"
+    else
+        echo "skip deploy for branch ${BRANCH}"
+    fi
+}
+
+"$@"

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/rpms/libhdfs3.spec
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/rpms/libhdfs3.spec 
b/depends/libhdfs3/rpms/libhdfs3.spec
new file mode 100644
index 0000000..c4ccd8e
--- /dev/null
+++ b/depends/libhdfs3/rpms/libhdfs3.spec
@@ -0,0 +1,77 @@
+%define name libhdfs3
+%define release 1%{?dist}
+
+Name: %{name}
+Version: %{version}
+Release: %{release}
+Summary: Native C/C++ HDFS Client.
+Group: Development/Libraries
+Source0: libhdfs3-%{version}.tar.gz
+
+License: Apache-2.0
+BuildRoot:  %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
+
+BuildRequires: gcc-c++
+BuildRequires: make
+BuildRequires: cmake
+BuildRequires: libuuid-devel
+BuildRequires: libxml2-devel
+BuildRequires: krb5-devel
+BuildRequires: libgsasl-devel
+BuildRequires: protobuf-devel
+
+%description
+Libhdfs3, designed as an alternative implementation of libhdfs,
+is implemented directly on top of the native Hadoop RPC protocol and
+the HDFS data transfer protocol.
+It avoids the drawbacks of JNI and has a lightweight code base
+with a small memory footprint. In addition, it is easy to use and deploy.
+
+Libhdfs3 is developed by Pivotal and used in HAWQ, a massively parallel
+database engine in the Pivotal Hadoop Distribution.
+
+%package devel
+Summary: Native C/C++ HDFS Client - development files
+Requires: %{name} = %{version}-%{release}
+Group: Development/Libraries
+Requires: libhdfs3 = %{version}-%{release}
+Requires: libuuid-devel libxml2-devel krb5-devel libgsasl-devel protobuf-devel pkgconfig
+
+%description devel
+Libhdfs3, designed as an alternative implementation of libhdfs,
+is implemented directly on top of the native Hadoop RPC protocol and
+the HDFS data transfer protocol.
+It avoids the drawbacks of JNI and has a lightweight code base
+with a small memory footprint. In addition, it is easy to use and deploy.
+
+%build
+%{_sourcedir}/../../bootstrap --prefix=${RPM_BUILD_ROOT}/usr
+%{__make}
+
+%install
+%{__rm} -rf $RPM_BUILD_ROOT
+%{__make} install
+
+%clean
+%{__rm} -rf $RPM_BUILD_ROOT
+
+%files
+%defattr(-,root,root,-)
+%{_prefix}/lib/lib*.so.*
+
+%files devel
+%defattr(-,root,root,-)
+%{_prefix}/lib/lib*.so
+%{_prefix}/lib/*.a
+%{_prefix}/lib/pkgconfig/*
+%{_prefix}/include/*
+
+%post
+/sbin/ldconfig
+
+%postun
+/sbin/ldconfig
+
+%changelog
+* Sun Oct 04 2015 Zhanwei Wang <wan...@wangzw.org> - 2.2.30-1
+- Initial RPM release

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/.gitignore
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/.gitignore b/depends/libhdfs3/src/.gitignore
new file mode 100644
index 0000000..fd8a241
--- /dev/null
+++ b/depends/libhdfs3/src/.gitignore
@@ -0,0 +1,2 @@
+libhdfs3.pc
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/CMakeLists.txt 
b/depends/libhdfs3/src/CMakeLists.txt
new file mode 100644
index 0000000..bc20d08
--- /dev/null
+++ b/depends/libhdfs3/src/CMakeLists.txt
@@ -0,0 +1,127 @@
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+
+SET(libhdfs3_VERSION_MAJOR 2)
+SET(libhdfs3_VERSION_MINOR 2)
+SET(libhdfs3_VERSION_PATCH 31)
+SET(libhdfs3_VERSION_STRING "${libhdfs3_VERSION_MAJOR}.${libhdfs3_VERSION_MINOR}.${libhdfs3_VERSION_PATCH}")
+SET(libhdfs3_VERSION_API 1)
+SET(libhdfs3_ROOT_SOURCES_DIR ${CMAKE_SOURCE_DIR}/src)
+SET(libhdfs3_COMMON_SOURCES_DIR ${libhdfs3_ROOT_SOURCES_DIR}/common)
+
+IF(ENABLE_DEBUG)
+    SET(libhdfs3_VERSION_STRING "${libhdfs3_VERSION_STRING}d")
+ENDIF(ENABLE_DEBUG)
+
+CONFIGURE_FILE(platform.h.in platform.h)
+CONFIGURE_FILE(doxyfile.in doxyfile)
+
+AUTO_SOURCES(files "*.cpp" "RECURSE" "${CMAKE_CURRENT_SOURCE_DIR}")
+LIST(APPEND libhdfs3_SOURCES ${files})
+
+AUTO_SOURCES(files "*.cc" "RECURSE" "${CMAKE_CURRENT_SOURCE_DIR}")
+LIST(APPEND libhdfs3_SOURCES ${files})
+
+AUTO_SOURCES(files "*.c" "RECURSE" "${CMAKE_CURRENT_SOURCE_DIR}")
+LIST(APPEND libhdfs3_SOURCES ${files})
+
+AUTO_SOURCES(files "*.h" "RECURSE" "${CMAKE_CURRENT_SOURCE_DIR}")
+LIST(APPEND libhdfs3_SOURCES ${files})
+
+AUTO_SOURCES(libhdfs3_PROTO_FILES "proto/*.proto" "RECURSE" "${CMAKE_CURRENT_SOURCE_DIR}")
+SET(libhdfs3_PROTO_FILES ${libhdfs3_PROTO_FILES} PARENT_SCOPE)
+
+PROTOBUF_GENERATE_CPP(libhdfs3_PROTO_SOURCES libhdfs3_PROTO_HEADERS ${libhdfs3_PROTO_FILES})
+
+SET(HEADER 
+    client/BlockLocation.h
+    client/DirectoryIterator.h
+    client/FileStatus.h
+    client/FileSystem.h
+    client/FileSystemStats.h
+    client/hdfs.h
+    client/InputStream.h
+    client/OutputStream.h
+    client/Permission.h
+    common/Exception.h
+    common/XmlConfig.h)
+
+ADD_LIBRARY(libhdfs3-static STATIC ${libhdfs3_SOURCES} ${libhdfs3_PROTO_SOURCES} ${libhdfs3_PROTO_HEADERS})
+ADD_LIBRARY(libhdfs3-shared SHARED ${libhdfs3_SOURCES} ${libhdfs3_PROTO_SOURCES} ${libhdfs3_PROTO_HEADERS})
+
+ADD_CUSTOM_COMMAND(
+    TARGET libhdfs3-shared libhdfs3-static
+    PRE_BUILD
+    COMMAND echo ${libhdfs3_VERSION_STRING} > version
+    WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+)
+
+TARGET_LINK_LIBRARIES(libhdfs3-static pthread)
+TARGET_LINK_LIBRARIES(libhdfs3-shared pthread)
+       
+IF(NEED_BOOST)
+    INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIR})
+    TARGET_LINK_LIBRARIES(libhdfs3-static boost_thread)
+    TARGET_LINK_LIBRARIES(libhdfs3-static boost_chrono)
+    TARGET_LINK_LIBRARIES(libhdfs3-static boost_system)
+    TARGET_LINK_LIBRARIES(libhdfs3-static boost_atomic)
+    TARGET_LINK_LIBRARIES(libhdfs3-static boost_iostreams)
+    TARGET_LINK_LIBRARIES(libhdfs3-shared boost_thread)
+    TARGET_LINK_LIBRARIES(libhdfs3-shared boost_chrono)
+    TARGET_LINK_LIBRARIES(libhdfs3-shared boost_system)
+    TARGET_LINK_LIBRARIES(libhdfs3-shared boost_atomic)
+    TARGET_LINK_LIBRARIES(libhdfs3-shared boost_iostreams)
+ENDIF(NEED_BOOST)
+
+IF(NEED_GCCEH)
+    TARGET_LINK_LIBRARIES(libhdfs3-static gcc_eh)
+    TARGET_LINK_LIBRARIES(libhdfs3-shared gcc_eh)
+ENDIF(NEED_GCCEH)
+
+IF(OS_LINUX)
+    TARGET_LINK_LIBRARIES(libhdfs3-static ${LIBUUID_LIBRARIES})
+    TARGET_LINK_LIBRARIES(libhdfs3-shared ${LIBUUID_LIBRARIES})
+    INCLUDE_DIRECTORIES(${LIBUUID_INCLUDE_DIRS})
+ENDIF(OS_LINUX)
+
+INCLUDE_DIRECTORIES(${libhdfs3_ROOT_SOURCES_DIR})
+INCLUDE_DIRECTORIES(${libhdfs3_COMMON_SOURCES_DIR})
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR})
+INCLUDE_DIRECTORIES(${PROTOBUF_INCLUDE_DIRS})
+INCLUDE_DIRECTORIES(${LIBXML2_INCLUDE_DIR})
+INCLUDE_DIRECTORIES(${KERBEROS_INCLUDE_DIRS})
+INCLUDE_DIRECTORIES(${GSASL_INCLUDE_DIR})
+INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/mock)
+
+TARGET_LINK_LIBRARIES(libhdfs3-static ${PROTOBUF_LIBRARIES})
+TARGET_LINK_LIBRARIES(libhdfs3-static ${LIBXML2_LIBRARIES})
+TARGET_LINK_LIBRARIES(libhdfs3-static ${KERBEROS_LIBRARIES})
+TARGET_LINK_LIBRARIES(libhdfs3-static ${GSASL_LIBRARIES})
+
+TARGET_LINK_LIBRARIES(libhdfs3-shared ${PROTOBUF_LIBRARIES})
+TARGET_LINK_LIBRARIES(libhdfs3-shared ${LIBXML2_LIBRARIES})
+TARGET_LINK_LIBRARIES(libhdfs3-shared ${KERBEROS_LIBRARIES})
+TARGET_LINK_LIBRARIES(libhdfs3-shared ${GSASL_LIBRARIES})
+
+SET_TARGET_PROPERTIES(libhdfs3-static PROPERTIES OUTPUT_NAME "hdfs3")
+SET_TARGET_PROPERTIES(libhdfs3-shared PROPERTIES OUTPUT_NAME "hdfs3")
+
+IF(NEED_BOOST)
+    SET_TARGET_PROPERTIES(libhdfs3-shared libhdfs3-static PROPERTIES LINK_FLAGS "-L${Boost_LIBRARY_DIRS}")
+ENDIF(NEED_BOOST)
+
+SET_TARGET_PROPERTIES(libhdfs3-shared PROPERTIES
+    VERSION ${libhdfs3_VERSION_MAJOR}.${libhdfs3_VERSION_MINOR}.${libhdfs3_VERSION_PATCH}
+    SOVERSION ${libhdfs3_VERSION_API})
+
+INSTALL(TARGETS libhdfs3-static libhdfs3-shared
+        RUNTIME DESTINATION bin
+        LIBRARY DESTINATION lib
+        ARCHIVE DESTINATION lib)
+INSTALL(FILES ${HEADER} DESTINATION include/hdfs)
+INSTALL(FILES libhdfs3.pc DESTINATION lib/pkgconfig)
+            
+SET(libhdfs3_SOURCES ${libhdfs3_SOURCES} PARENT_SCOPE)
+SET(libhdfs3_PLATFORM_HEADER_DIR ${CMAKE_CURRENT_BINARY_DIR} PARENT_SCOPE)
+SET(libhdfs3_ROOT_SOURCES_DIR ${libhdfs3_ROOT_SOURCES_DIR} PARENT_SCOPE)
+SET(libhdfs3_COMMON_SOURCES_DIR ${libhdfs3_COMMON_SOURCES_DIR} PARENT_SCOPE)
+SET(libhdfs3_VERSION_STRING ${libhdfs3_VERSION_STRING} PARENT_SCOPE)

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/client/BlockLocation.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/BlockLocation.h 
b/depends/libhdfs3/src/client/BlockLocation.h
new file mode 100644
index 0000000..e699d83
--- /dev/null
+++ b/depends/libhdfs3/src/client/BlockLocation.h
@@ -0,0 +1,97 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_CLIENT_BLOCKLOCATION_H_
+#define _HDFS_LIBHDFS3_CLIENT_BLOCKLOCATION_H_
+
+#include <string>
+#include <vector>
+
+namespace Hdfs {
+
+class BlockLocation {
+public:
+    bool isCorrupt() const {
+        return corrupt;
+    }
+
+    void setCorrupt(bool corrupt) {
+        this->corrupt = corrupt;
+    }
+
+    const std::vector<std::string> & getHosts() const {
+        return hosts;
+    }
+
+    void setHosts(const std::vector<std::string> & hosts) {
+        this->hosts = hosts;
+    }
+
+    int64_t getLength() const {
+        return length;
+    }
+
+    void setLength(int64_t length) {
+        this->length = length;
+    }
+
+    const std::vector<std::string> & getNames() const {
+        return names;
+    }
+
+    void setNames(const std::vector<std::string> & names) {
+        this->names = names;
+    }
+
+    int64_t getOffset() const {
+        return offset;
+    }
+
+    void setOffset(int64_t offset) {
+        this->offset = offset;
+    }
+
+    const std::vector<std::string> & getTopologyPaths() const {
+        return topologyPaths;
+    }
+
+    void setTopologyPaths(const std::vector<std::string> & topologyPaths) {
+        this->topologyPaths = topologyPaths;
+    }
+
+private:
+    bool corrupt;
+    int64_t length;
+    int64_t offset;  // Offset of the block in the file
+    std::vector<std::string> hosts; // Datanode hostnames
+    std::vector<std::string> names; // Datanode IP:xferPort for accessing the block
+    std::vector<std::string> topologyPaths; // Full path name in network topology
+};
+
+}
+
+#endif /* _HDFS_LIBHDFS3_CLIENT_BLOCKLOCATION_H_ */
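
For reference, a small illustrative snippet (not part of this commit) showing how a caller might populate a BlockLocation; all values are made up. Since the class has no constructor, every field the caller relies on should be set explicitly.

// Hypothetical BlockLocation usage; not part of this commit, values are examples.
#include <string>
#include <vector>

#include "client/BlockLocation.h"

Hdfs::BlockLocation MakeExampleLocation() {
    Hdfs::BlockLocation location;
    location.setOffset(0);                  // block starts at file offset 0
    location.setLength(134217728);          // 128 MB block, a made-up size
    location.setCorrupt(false);

    std::vector<std::string> hosts;
    hosts.push_back("dn1.example.com");     // datanode hostname
    location.setHosts(hosts);

    std::vector<std::string> names;
    names.push_back("192.168.0.1:50010");   // datanode IP:xferPort
    location.setNames(names);

    std::vector<std::string> topologyPaths;
    topologyPaths.push_back("/default-rack/192.168.0.1:50010");
    location.setTopologyPaths(topologyPaths);

    return location;
}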

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/client/BlockReader.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/BlockReader.h 
b/depends/libhdfs3/src/client/BlockReader.h
new file mode 100644
index 0000000..101cb4a
--- /dev/null
+++ b/depends/libhdfs3/src/client/BlockReader.h
@@ -0,0 +1,66 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_CLIENT_BLOCKREADER_H_
+#define _HDFS_LIBHDFS3_CLIENT_BLOCKREADER_H_
+
+#include <stdint.h>
+
+namespace Hdfs {
+namespace Internal {
+
+class BlockReader {
+public:
+    virtual ~BlockReader() {
+    }
+
+    /**
+     * Get how many bytes can be read without blocking.
+     * @return The number of bytes that can be read without blocking.
+     */
+    virtual int64_t available() = 0;
+
+    /**
+     * Read data from the block.
+     * @param buf the buffer to fill.
+     * @param size the number of bytes to read.
+     * @return the number of bytes placed in the buffer; it may be less
+     *  than size. Returns 0 when the end of the block is reached.
+     */
+    virtual int32_t read(char * buf, int32_t size) = 0;
+
+    /**
+     * Move the cursor forward len bytes.
+     * @param len The number of bytes to skip.
+     */
+    virtual void skip(int64_t len) = 0;
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_CLIENT_BLOCKREADER_H_ */
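
For reference, a sketch (not part of this commit) of the read loop a caller of this interface typically needs, since read() may return fewer bytes than requested; the helper name is made up.

// Hypothetical helper built on the BlockReader interface; not part of this commit.
#include "client/BlockReader.h"

// Read exactly size bytes unless the block ends first.
// Returns the number of bytes actually copied into buf.
static int32_t ReadFullyFromBlock(Hdfs::Internal::BlockReader & reader,
                                  char * buf, int32_t size) {
    int32_t done = 0;

    while (done < size) {
        int32_t got = reader.read(buf + done, size - done);

        if (got == 0) {
            break;  // reached the end of the block
        }

        done += got;
    }

    return done;
}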

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/client/DataTransferProtocol.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/DataTransferProtocol.h 
b/depends/libhdfs3/src/client/DataTransferProtocol.h
new file mode 100644
index 0000000..24359f0
--- /dev/null
+++ b/depends/libhdfs3/src/client/DataTransferProtocol.h
@@ -0,0 +1,123 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS_3_CLIENT_DATATRANSFERPROTOCOL_H_
+#define _HDFS_LIBHDFS_3_CLIENT_DATATRANSFERPROTOCOL_H_
+
+#include "client/Token.h"
+#include "server/DatanodeInfo.h"
+#include "server/ExtendedBlock.h"
+
+#include <vector>
+
+namespace Hdfs {
+namespace Internal {
+
+/**
+ * Transfer data to/from datanode using a streaming protocol.
+ */
+class DataTransferProtocol {
+public:
+    virtual ~DataTransferProtocol() {
+    }
+    /**
+     * Read a block.
+     *
+     * @param blk the block being read.
+     * @param blockToken security token for accessing the block.
+     * @param clientName client's name.
+     * @param blockOffset offset within the block at which to start reading.
+     * @param length maximum number of bytes for this read.
+     */
+    virtual void readBlock(const ExtendedBlock & blk,
+                           const Token & blockToken, const char * clientName,
+                           int64_t blockOffset, int64_t length) = 0;
+
+    /**
+     * Write a block to a datanode pipeline.
+     *
+     * @param blk the block being written.
+     * @param blockToken security token for accessing the block.
+     * @param clientName client's name.
+     * @param targets target datanodes in the pipeline.
+     * @param stage pipeline stage.
+     * @param pipelineSize the size of the pipeline.
+     * @param minBytesRcvd minimum number of bytes received.
+     * @param maxBytesRcvd maximum number of bytes received.
+     * @param latestGenerationStamp the latest generation stamp of the block.
+     * @param checksumType the checksum algorithm to use for the block.
+     * @param bytesPerChecksum the number of bytes covered by each checksum.
+     */
+    virtual void writeBlock(const ExtendedBlock & blk,
+                            const Token & blockToken, const char * clientName,
+                            const std::vector<DatanodeInfo> & targets, int stage,
+                            int pipelineSize, int64_t minBytesRcvd, int64_t maxBytesRcvd,
+                            int64_t latestGenerationStamp, int checksumType,
+                            int bytesPerChecksum) = 0;
+
+    /**
+     * Transfer a block to another datanode.
+     * The block stage must be
+     * either {@link BlockConstructionStage#TRANSFER_RBW}
+     * or {@link BlockConstructionStage#TRANSFER_FINALIZED}.
+     *
+     * @param blk the block being transferred.
+     * @param blockToken security token for accessing the block.
+     * @param clientName client's name.
+     * @param targets target datanodes.
+     */
+    virtual void transferBlock(const ExtendedBlock & blk,
+                               const Token & blockToken, const char * clientName,
+                               const std::vector<DatanodeInfo> & targets) = 0;
+
+    /**
+     * Get block checksum (MD5 of CRC32).
+     *
+     * @param blk a block.
+     * @param blockToken security token for accessing the block.
+     * @throw HdfsIOException
+     */
+    virtual void blockChecksum(const ExtendedBlock & blk,
+                               const Token & blockToken) = 0;
+
+    /**
+     * Request short circuit access file descriptors from a DataNode.
+     *
+     * @param blk             The block to get file descriptors for.
+     * @param blockToken      Security token for accessing the block.
+     * @param maxVersion      Maximum version of the block data the client
+     *                          can understand.
+     */
+    virtual void requestShortCircuitFds(const ExtendedBlock blk,
+                                        const Token& blockToken,
+                                        uint32_t maxVersion) = 0;
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS_3_CLIENT_DATATRANSFERPROTOCOL_H_ */
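
For reference, a sketch (not part of this commit) of a GoogleMock stand-in for DataTransferProtocol, in the same spirit as the mock headers added above. It assumes GoogleMock 1.10+ for the variadic MOCK_METHOD macro, since writeBlock takes more parameters than the legacy MOCK_METHODn macros support, and it assumes Token lives in Hdfs::Internal.

// Hypothetical mock of DataTransferProtocol for unit tests; not part of this commit.
#include <stdint.h>

#include "gmock/gmock.h"
#include "client/DataTransferProtocol.h"

namespace Hdfs {
namespace Mock {

class MockDataTransferProtocol: public Hdfs::Internal::DataTransferProtocol {
public:
    MOCK_METHOD(void, readBlock,
                (const Internal::ExtendedBlock &, const Internal::Token &,
                 const char *, int64_t, int64_t),
                (override));
    MOCK_METHOD(void, writeBlock,
                (const Internal::ExtendedBlock &, const Internal::Token &,
                 const char *, const std::vector<Internal::DatanodeInfo> &,
                 int, int, int64_t, int64_t, int64_t, int, int),
                (override));
    MOCK_METHOD(void, transferBlock,
                (const Internal::ExtendedBlock &, const Internal::Token &,
                 const char *, const std::vector<Internal::DatanodeInfo> &),
                (override));
    MOCK_METHOD(void, blockChecksum,
                (const Internal::ExtendedBlock &, const Internal::Token &),
                (override));
    MOCK_METHOD(void, requestShortCircuitFds,
                (const Internal::ExtendedBlock, const Internal::Token &, uint32_t),
                (override));
};

}
}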
