[2/4] hadoop git commit: HADOOP-15533. Make WASB listStatus messages consistent. Contributed by Esfandiar Manii

2018-06-18 Thread cdouglas
HADOOP-15533. Make WASB listStatus messages consistent. Contributed by 
Esfandiar Manii

(cherry picked from commit f34744603ee93e082e7ba148df1400af5ac7c30c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e655b64
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e655b64
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e655b64

Branch: refs/heads/branch-3.1
Commit: 7e655b642d0ed5b85e0e8c212e004d067318ed4c
Parents: 40652e7
Author: Chris Douglas 
Authored: Sun Jun 17 23:12:18 2018 -0700
Committer: Chris Douglas 
Committed: Sun Jun 17 23:12:59 2018 -0700

--
 .../java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e655b64/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index e05327e..dfc881a 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -2886,7 +2886,7 @@ public class NativeAzureFileSystem extends FileSystem {
   // There is no metadata found for the path.
   LOG.debug("Did not find any metadata for path: {}", key);
 
-  throw new FileNotFoundException("File" + f + " does not exist.");
+  throw new FileNotFoundException(f + " is not found");
 }
 
 return status.toArray(new FileStatus[0]);
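The visible effect of the one-line change above: listStatus() on a missing WASB path still raises FileNotFoundException, but the message now ends in "is not found" instead of "does not exist.". A minimal caller-side sketch; the wasb:// URI below is a placeholder, not something taken from the patch:

import java.io.FileNotFoundException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListMissingWasbPath {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder container/account; running this needs hadoop-azure and valid credentials.
    Path missing = new Path("wasb://container@account.blob.core.windows.net/no/such/dir");
    try (FileSystem fs = FileSystem.get(missing.toUri(), conf)) {
      FileStatus[] entries = fs.listStatus(missing);
      System.out.println("Entries: " + entries.length);
    } catch (FileNotFoundException e) {
      // After this patch the WASB message reads "<path> is not found".
      System.out.println("Missing path reported as: " + e.getMessage());
    }
  }
}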





[1/4] hadoop git commit: HADOOP-15533. Make WASB listStatus messages consistent. Contributed by Esfandiar Manii

2018-06-18 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 59686179a -> 4789b8e9c
  refs/heads/branch-3.0 34f89f5af -> d1dcc3922
  refs/heads/branch-3.1 40652e77e -> 7e655b642
  refs/heads/trunk 980031bb0 -> f34744603


HADOOP-15533. Make WASB listStatus messages consistent. Contributed by 
Esfandiar Manii


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3474460
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3474460
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3474460

Branch: refs/heads/trunk
Commit: f34744603ee93e082e7ba148df1400af5ac7c30c
Parents: 980031b
Author: Chris Douglas 
Authored: Sun Jun 17 23:12:18 2018 -0700
Committer: Chris Douglas 
Committed: Sun Jun 17 23:12:18 2018 -0700

--
 .../java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3474460/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index e05327e..dfc881a 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -2886,7 +2886,7 @@ public class NativeAzureFileSystem extends FileSystem {
   // There is no metadata found for the path.
   LOG.debug("Did not find any metadata for path: {}", key);
 
-  throw new FileNotFoundException("File" + f + " does not exist.");
+  throw new FileNotFoundException(f + " is not found");
 }
 
 return status.toArray(new FileStatus[0]);





[3/4] hadoop git commit: HADOOP-15533. Make WASB listStatus messages consistent. Contributed by Esfandiar Manii

2018-06-18 Thread cdouglas
HADOOP-15533. Make WASB listStatus messages consistent. Contributed by 
Esfandiar Manii

(cherry picked from commit f34744603ee93e082e7ba148df1400af5ac7c30c)
(cherry picked from commit 7e655b642d0ed5b85e0e8c212e004d067318ed4c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1dcc392
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1dcc392
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1dcc392

Branch: refs/heads/branch-3.0
Commit: d1dcc39222b6d1d8ba10f38a3f2fb69e4d6548b3
Parents: 34f89f5
Author: Chris Douglas 
Authored: Sun Jun 17 23:12:18 2018 -0700
Committer: Chris Douglas 
Committed: Sun Jun 17 23:13:25 2018 -0700

--
 .../java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1dcc392/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index bda39e7..f1f7d4d 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -2364,7 +2364,7 @@ public class NativeAzureFileSystem extends FileSystem {
   // There is no metadata found for the path.
   LOG.debug("Did not find any metadata for path: {}", key);
 
-  throw new FileNotFoundException("File" + f + " does not exist.");
+  throw new FileNotFoundException(f + " is not found");
 }
 
 return status.toArray(new FileStatus[0]);





[4/4] hadoop git commit: HADOOP-15533. Make WASB listStatus messages consistent. Contributed by Esfandiar Manii

2018-06-18 Thread cdouglas
HADOOP-15533. Make WASB listStatus messages consistent. Contributed by 
Esfandiar Manii

(cherry picked from commit f34744603ee93e082e7ba148df1400af5ac7c30c)
(cherry picked from commit 7e655b642d0ed5b85e0e8c212e004d067318ed4c)
(cherry picked from commit d1dcc39222b6d1d8ba10f38a3f2fb69e4d6548b3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4789b8e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4789b8e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4789b8e9

Branch: refs/heads/branch-2
Commit: 4789b8e9c95a29e2d8277e83e98dfcde7ece333f
Parents: 5968617
Author: Chris Douglas 
Authored: Sun Jun 17 23:12:18 2018 -0700
Committer: Chris Douglas 
Committed: Sun Jun 17 23:14:12 2018 -0700

--
 .../java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4789b8e9/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 2cda43c..08da8f1 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -2894,7 +2894,7 @@ public class NativeAzureFileSystem extends FileSystem {
   // There is no metadata found for the path.
   LOG.debug("Did not find any metadata for path: {}", key);
 
-  throw new FileNotFoundException("File" + f + " does not exist.");
+  throw new FileNotFoundException(f + " is not found");
 }
 
 return status.toArray(new FileStatus[0]);





hadoop git commit: HADOOP-13186. Multipart Uploader API. Contributed by Ewan Higgs

2018-06-17 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3905fdb79 -> 980031bb0


HADOOP-13186. Multipart Uploader API. Contributed by Ewan Higgs


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/980031bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/980031bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/980031bb

Branch: refs/heads/trunk
Commit: 980031bb043dd026a6bf42b0e71d304ac89294a5
Parents: 3905fdb
Author: Chris Douglas 
Authored: Sun Jun 17 11:54:26 2018 -0700
Committer: Chris Douglas 
Committed: Sun Jun 17 11:54:26 2018 -0700

--
 .../java/org/apache/hadoop/fs/BBPartHandle.java |  58 +++
 .../org/apache/hadoop/fs/BBUploadHandle.java|  57 +++
 .../hadoop/fs/FileSystemMultipartUploader.java  | 132 
 .../hadoop/fs/LocalFileSystemPathHandle.java| 100 +
 .../org/apache/hadoop/fs/MultipartUploader.java |  90 +++
 .../hadoop/fs/MultipartUploaderFactory.java |  65 
 .../java/org/apache/hadoop/fs/PartHandle.java   |  45 ++
 .../apache/hadoop/fs/RawLocalFileSystem.java|  61 +++-
 .../UnsupportedMultipartUploaderException.java  |  41 +
 .../java/org/apache/hadoop/fs/UploadHandle.java |  47 ++
 .../hadoop-common/src/main/proto/FSProtos.proto |   8 +
 ...rg.apache.hadoop.fs.MultipartUploaderFactory |  16 ++
 .../fs/AbstractSystemMultipartUploaderTest.java | 143 ++
 .../TestLocalFileSystemMultipartUploader.java   |  65 
 .../AbstractContractPathHandleTest.java |   6 +
 .../TestRawlocalContractPathHandle.java |  40 +
 .../src/test/resources/contract/rawlocal.xml|   5 +
 .../hdfs/DFSMultipartUploaderFactory.java   |  40 +
 ...rg.apache.hadoop.fs.MultipartUploaderFactory |  16 ++
 .../hadoop/fs/TestHDFSMultipartUploader.java|  76 ++
 .../hadoop/fs/s3a/S3AMultipartUploader.java | 150 +++
 ...rg.apache.hadoop.fs.MultipartUploaderFactory |  15 ++
 .../org.apache.hadoop.fs.MultipartUploader  |  16 ++
 23 files changed, 1290 insertions(+), 2 deletions(-)
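The .../org.apache.hadoop.fs.MultipartUploaderFactory entries above are java.util.ServiceLoader registration files. A short sketch of enumerating the registered factories through that mechanism; the class below is illustrative and assumes the factory implementations expose the no-argument constructors that service loading requires:

import java.util.ServiceLoader;

import org.apache.hadoop.fs.MultipartUploaderFactory;

public class ListUploaderFactories {
  public static void main(String[] args) {
    // Each filesystem module ships a META-INF/services file naming its factory,
    // so the JDK ServiceLoader can discover them from the classpath.
    ServiceLoader<MultipartUploaderFactory> factories =
        ServiceLoader.load(MultipartUploaderFactory.class);
    for (MultipartUploaderFactory factory : factories) {
      System.out.println("Registered uploader factory: " + factory.getClass().getName());
    }
  }
}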
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/980031bb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java
new file mode 100644
index 000..e1336b8
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBPartHandle.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ * Byte array backed part handle.
+ */
+public final class BBPartHandle implements PartHandle {
+
+  private static final long serialVersionUID = 0x23ce3eb1;
+
+  private final byte[] bytes;
+
+  private BBPartHandle(ByteBuffer byteBuffer){
+this.bytes = byteBuffer.array();
+  }
+
+  public static PartHandle from(ByteBuffer byteBuffer) {
+return new BBPartHandle(byteBuffer);
+  }
+
+  @Override
+  public ByteBuffer bytes() {
+return ByteBuffer.wrap(bytes);
+  }
+
+  @Override
+  public int hashCode() {
+return Arrays.hashCode(bytes);
+  }
+
+  @Override
+  public boolean equals(Object other) {
+if (!(other instanceof PartHandle)) {
+  return false;
+
+}
+PartHandle o = (PartHandle) other;
+return bytes().equals(o.bytes());
+  }
+}
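A small usage sketch for the handle class above, using only the from/bytes/equals methods shown; the identifier bytes are a placeholder:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.fs.BBPartHandle;
import org.apache.hadoop.fs.PartHandle;

public class BBPartHandleDemo {
  public static void main(String[] args) {
    byte[] id = "etag-or-part-token".getBytes(StandardCharsets.UTF_8);
    PartHandle a = BBPartHandle.from(ByteBuffer.wrap(id));
    PartHandle b = BBPartHandle.from(ByteBuffer.wrap(id.clone()));

    // equals/hashCode compare the wrapped bytes, so two handles built from the
    // same identifier behave as the same value.
    System.out.println(a.equals(b));            // true
    System.out.println(a.bytes().remaining());  // length of the identifier
  }
}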

http://git-wip-us.apache.org/repos/asf/hadoop/blob/980031bb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBUploadHandle.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBUploadHandle.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BBUploadHandle.java
new file mode 

[2/3] hadoop git commit: HADOOP-15506. Upgrade Azure Storage Sdk version to 7.0.0 and update corresponding code blocks. Contributed by Esfandiar Manii.

2018-06-11 Thread cdouglas
HADOOP-15506. Upgrade Azure Storage Sdk version to 7.0.0 and update 
corresponding code blocks.
Contributed by Esfandiar Manii.

(cherry picked from commit d901be679554eb6b323f3bc6e8de267d85dd2e06)
(cherry picked from commit baac7c2b285454d71d0371505fb7a3403a548176)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df338f2e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df338f2e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df338f2e

Branch: refs/heads/branch-3.0
Commit: df338f2e1a19ed9c0c5c13f7d4aad08f9836de9f
Parents: 8202c33
Author: Steve Loughran 
Authored: Wed Jun 6 18:28:14 2018 +0100
Committer: Chris Douglas 
Committed: Mon Jun 11 15:33:36 2018 -0700

--
 hadoop-project/pom.xml  | 2 +-
 .../apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java | 4 +++-
 .../org/apache/hadoop/fs/azure/ITestContainerChecks.java| 9 ++---
 3 files changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df338f2e/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 92a158a..5c2edf9 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1161,7 +1161,7 @@
       <dependency>
         <groupId>com.microsoft.azure</groupId>
         <artifactId>azure-storage</artifactId>
-        <version>5.4.0</version>
+        <version>7.0.0</version>
       </dependency>
 
       <dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df338f2e/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
index 754f343..e4ad70c 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
@@ -87,8 +87,10 @@ final class NativeAzureFileSystemHelper {
 if (errorCode != null
 && (errorCode.equals(StorageErrorCodeStrings.BLOB_NOT_FOUND)
 || errorCode.equals(StorageErrorCodeStrings.RESOURCE_NOT_FOUND)
+|| errorCode.equals(StorageErrorCodeStrings.CONTAINER_NOT_FOUND)
 || errorCode.equals(StorageErrorCode.BLOB_NOT_FOUND.toString())
-|| errorCode.equals(StorageErrorCode.RESOURCE_NOT_FOUND.toString()))) {
+|| errorCode.equals(StorageErrorCode.RESOURCE_NOT_FOUND.toString())
+|| errorCode.equals(StorageErrorCode.CONTAINER_NOT_FOUND.toString()))) {
 
   return true;
 }
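The same not-found test written out as a standalone predicate for readability; the class and method names here are illustrative, and the real logic stays in NativeAzureFileSystemHelper:

import com.microsoft.azure.storage.StorageErrorCode;
import com.microsoft.azure.storage.StorageErrorCodeStrings;

final class NotFoundCheck {
  private NotFoundCheck() {
  }

  /** True when an Azure Storage error code means the blob, resource or container is missing. */
  static boolean isNotFound(String errorCode) {
    return errorCode != null
        && (errorCode.equals(StorageErrorCodeStrings.BLOB_NOT_FOUND)
            || errorCode.equals(StorageErrorCodeStrings.RESOURCE_NOT_FOUND)
            || errorCode.equals(StorageErrorCodeStrings.CONTAINER_NOT_FOUND)
            || errorCode.equals(StorageErrorCode.BLOB_NOT_FOUND.toString())
            || errorCode.equals(StorageErrorCode.RESOURCE_NOT_FOUND.toString())
            || errorCode.equals(StorageErrorCode.CONTAINER_NOT_FOUND.toString()));
  }
}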

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df338f2e/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
index cc3baf5..456e4b1 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
@@ -75,7 +75,7 @@ public class ITestContainerChecks extends 
AbstractWasbTestWithTimeout {
   assertTrue("Should've thrown.", false);
 } catch (FileNotFoundException ex) {
   assertTrue("Unexpected exception: " + ex,
-  ex.getMessage().contains("does not exist."));
+  ex.getMessage().contains("is not found"));
 }
 assertFalse(container.exists());
 
@@ -115,7 +115,7 @@ public class ITestContainerChecks extends 
AbstractWasbTestWithTimeout {
   assertTrue("Should've thrown.", false);
 } catch (FileNotFoundException ex) {
   assertTrue("Unexpected exception: " + ex,
-  ex.getMessage().contains("does not exist."));
+  ex.getMessage().contains("is not found"));
 }
 assertFalse(container.exists());
 
@@ -143,7 +143,7 @@ public class ITestContainerChecks extends 
AbstractWasbTestWithTimeout {
   assertTrue("Should've thrown.", false);
 } catch (FileNotFoundException ex) {
   assertTrue("Unexpected exception: " + ex,
-  ex.getMessage().contains("does not exist."));
+  ex.getMessage().contains("is not found"));
 }
 assertFalse(container.exists());
 
@@ -165,6 +165,9 @@ public class ITestContainerChecks extends 
AbstractWasbTestWithTimeout {
 assertFalse(fs.rename(foo, bar));
 assertFalse(container.exists());
 
+// Create a container outside 

[1/3] hadoop git commit: HADOOP-15506. Upgrade Azure Storage Sdk version to 7.0.0 and update corresponding code blocks. Contributed by Esfandiar Manii.

2018-06-11 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 805939079 -> 0b5d0c374
  refs/heads/branch-3.0 8202c334f -> df338f2e1
  refs/heads/branch-3.1 425fe4e21 -> baac7c2b2


HADOOP-15506. Upgrade Azure Storage Sdk version to 7.0.0 and update 
corresponding code blocks.
Contributed by Esfandiar Manii.

(cherry picked from commit d901be679554eb6b323f3bc6e8de267d85dd2e06)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/baac7c2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/baac7c2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/baac7c2b

Branch: refs/heads/branch-3.1
Commit: baac7c2b285454d71d0371505fb7a3403a548176
Parents: 425fe4e
Author: Steve Loughran 
Authored: Wed Jun 6 18:28:14 2018 +0100
Committer: Chris Douglas 
Committed: Mon Jun 11 15:33:03 2018 -0700

--
 hadoop-project/pom.xml  | 2 +-
 .../apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java | 4 +++-
 .../org/apache/hadoop/fs/azure/ITestContainerChecks.java| 9 ++---
 3 files changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/baac7c2b/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 55a78e9..e674a82 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1182,7 +1182,7 @@
       <dependency>
         <groupId>com.microsoft.azure</groupId>
         <artifactId>azure-storage</artifactId>
-        <version>5.4.0</version>
+        <version>7.0.0</version>
       </dependency>
 
       <dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/baac7c2b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
index 754f343..e4ad70c 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
@@ -87,8 +87,10 @@ final class NativeAzureFileSystemHelper {
 if (errorCode != null
 && (errorCode.equals(StorageErrorCodeStrings.BLOB_NOT_FOUND)
 || errorCode.equals(StorageErrorCodeStrings.RESOURCE_NOT_FOUND)
+|| errorCode.equals(StorageErrorCodeStrings.CONTAINER_NOT_FOUND)
 || errorCode.equals(StorageErrorCode.BLOB_NOT_FOUND.toString())
-|| errorCode.equals(StorageErrorCode.RESOURCE_NOT_FOUND.toString()))) {
+|| errorCode.equals(StorageErrorCode.RESOURCE_NOT_FOUND.toString())
+|| errorCode.equals(StorageErrorCode.CONTAINER_NOT_FOUND.toString()))) {
 
   return true;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/baac7c2b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
index cc3baf5..456e4b1 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
@@ -75,7 +75,7 @@ public class ITestContainerChecks extends 
AbstractWasbTestWithTimeout {
   assertTrue("Should've thrown.", false);
 } catch (FileNotFoundException ex) {
   assertTrue("Unexpected exception: " + ex,
-  ex.getMessage().contains("does not exist."));
+  ex.getMessage().contains("is not found"));
 }
 assertFalse(container.exists());
 
@@ -115,7 +115,7 @@ public class ITestContainerChecks extends 
AbstractWasbTestWithTimeout {
   assertTrue("Should've thrown.", false);
 } catch (FileNotFoundException ex) {
   assertTrue("Unexpected exception: " + ex,
-  ex.getMessage().contains("does not exist."));
+  ex.getMessage().contains("is not found"));
 }
 assertFalse(container.exists());
 
@@ -143,7 +143,7 @@ public class ITestContainerChecks extends 
AbstractWasbTestWithTimeout {
   assertTrue("Should've thrown.", false);
 } catch (FileNotFoundException ex) {
   assertTrue("Unexpected exception: " + ex,
-  ex.getMessage().contains("does not exist."));
+  ex.getMessage().contains("is not found"));
 }
 assertFalse(container.exists());
 
@@ -165,6 +165,9 @@ public class ITestContainerChecks extends 
AbstractWasbTestWithTimeout {
 

[3/3] hadoop git commit: HADOOP-15506. Upgrade Azure Storage Sdk version to 7.0.0 and update corresponding code blocks. Contributed by Esfandiar Manii.

2018-06-11 Thread cdouglas
HADOOP-15506. Upgrade Azure Storage Sdk version to 7.0.0 and update 
corresponding code blocks.
Contributed by Esfandiar Manii.

(cherry picked from commit d901be679554eb6b323f3bc6e8de267d85dd2e06)
(cherry picked from commit baac7c2b285454d71d0371505fb7a3403a548176)
(cherry picked from commit df338f2e1a19ed9c0c5c13f7d4aad08f9836de9f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b5d0c37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b5d0c37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b5d0c37

Branch: refs/heads/branch-2
Commit: 0b5d0c3740c96a5ce439eadbd66e44aa017cdf30
Parents: 8059390
Author: Steve Loughran 
Authored: Wed Jun 6 18:28:14 2018 +0100
Committer: Chris Douglas 
Committed: Mon Jun 11 15:34:54 2018 -0700

--
 hadoop-project/pom.xml  | 6 ++
 .../apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java | 4 +++-
 .../org/apache/hadoop/fs/azure/ITestContainerChecks.java| 9 ++---
 3 files changed, 15 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b5d0c37/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 8812132..f1ab70d 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1101,6 +1101,12 @@
       </dependency>
 
       <dependency>
+        <groupId>com.microsoft.azure</groupId>
+        <artifactId>azure-storage</artifactId>
+        <version>7.0.0</version>
+      </dependency>
+
+      <dependency>
         <groupId>com.aliyun.oss</groupId>
         <artifactId>aliyun-sdk-oss</artifactId>
         <version>2.8.3</version>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b5d0c37/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
index 754f343..e4ad70c 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
@@ -87,8 +87,10 @@ final class NativeAzureFileSystemHelper {
 if (errorCode != null
 && (errorCode.equals(StorageErrorCodeStrings.BLOB_NOT_FOUND)
 || errorCode.equals(StorageErrorCodeStrings.RESOURCE_NOT_FOUND)
+|| errorCode.equals(StorageErrorCodeStrings.CONTAINER_NOT_FOUND)
 || errorCode.equals(StorageErrorCode.BLOB_NOT_FOUND.toString())
-|| errorCode.equals(StorageErrorCode.RESOURCE_NOT_FOUND.toString()))) {
+|| errorCode.equals(StorageErrorCode.RESOURCE_NOT_FOUND.toString())
+|| errorCode.equals(StorageErrorCode.CONTAINER_NOT_FOUND.toString()))) {
 
   return true;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b5d0c37/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
index 417ee0e..cb8e0c9 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
@@ -75,7 +75,7 @@ public class ITestContainerChecks extends 
AbstractWasbTestWithTimeout {
   assertTrue("Should've thrown.", false);
 } catch (FileNotFoundException ex) {
   assertTrue("Unexpected exception: " + ex,
-  ex.getMessage().contains("does not exist."));
+  ex.getMessage().contains("is not found"));
 }
 assertFalse(container.exists());
 
@@ -115,7 +115,7 @@ public class ITestContainerChecks extends 
AbstractWasbTestWithTimeout {
   assertTrue("Should've thrown.", false);
 } catch (FileNotFoundException ex) {
   assertTrue("Unexpected exception: " + ex,
-  ex.getMessage().contains("does not exist."));
+  ex.getMessage().contains("is not found"));
 }
 assertFalse(container.exists());
 
@@ -143,7 +143,7 @@ public class ITestContainerChecks extends 
AbstractWasbTestWithTimeout {
   assertTrue("Should've thrown.", false);
 } catch (FileNotFoundException ex) {
   assertTrue("Unexpected exception: " + ex,
-  ex.getMessage().contains("does not exist."));
+  ex.getMessage().contains("is not found"));
 }
 assertFalse(container.exists());
 
@@ -165,6 +165,9 @@ public class ITestContainerChecks extends 

[1/2] hadoop git commit: HDFS-13272. DataNodeHttpServer to have configurable HttpServer2 threads. Contributed by Erik Krogen

2018-05-10 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 67468651b -> b2acaa52d
  refs/heads/branch-2.9 1f32345cd -> 54ff29310


HDFS-13272. DataNodeHttpServer to have configurable HttpServer2 threads. 
Contributed by Erik Krogen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2acaa52
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2acaa52
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2acaa52

Branch: refs/heads/branch-2
Commit: b2acaa52d21edd4b38083bf0a749caf76dfb79fd
Parents: 6746865
Author: Chris Douglas 
Authored: Wed May 2 21:23:57 2018 -0700
Committer: Chris Douglas 
Committed: Thu May 10 14:53:39 2018 -0700

--
 .../hdfs/server/datanode/web/DatanodeHttpServer.java  | 10 +-
 .../test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java  |  4 
 2 files changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2acaa52/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index c44f7da..f95d696 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -43,6 +43,7 @@ import io.netty.handler.stream.ChunkedWriteHandler;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -91,6 +92,11 @@ public class DatanodeHttpServer implements Closeable {
   private InetSocketAddress httpsAddress;
   static final Log LOG = LogFactory.getLog(DatanodeHttpServer.class);
 
+  @InterfaceAudience.Private
+  public static final String DATANODE_HTTP_MAX_THREADS_KEY =
+  "dfs.datanode.http.max-threads";
+  private static final int DATANODE_HTTP_MAX_THREADS_DEFAULT = 10;
+
   public DatanodeHttpServer(final Configuration conf,
   final DataNode datanode,
   final ServerSocketChannel externalHttpChannel)
@@ -99,7 +105,9 @@ public class DatanodeHttpServer implements Closeable {
 this.conf = conf;
 
 Configuration confForInfoServer = new Configuration(conf);
-confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
+confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS,
+conf.getInt(DATANODE_HTTP_MAX_THREADS_KEY,
+DATANODE_HTTP_MAX_THREADS_DEFAULT));
 int proxyPort =
 confForInfoServer.getInt(DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT, 0);
 HttpServer2.Builder builder = new HttpServer2.Builder()
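A minimal sketch of the new knob in use; dfs.datanode.http.max-threads is the key introduced above, while the value 4 and the MiniDFSCluster usage are only an example:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class SmallDatanodeHttpPool {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Cap the DataNode's internal HttpServer2 thread pool; the default stays at 10 when unset.
    conf.setInt("dfs.datanode.http.max-threads", 4);

    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
      cluster.waitActive();
      System.out.println("DataNodes running: " + cluster.getDataNodes().size());
    }
  }
}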

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2acaa52/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 6dfa5b1..63348de 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -104,6 +104,7 @@ import 
org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
+import org.apache.hadoop.hdfs.server.datanode.web.DatanodeHttpServer;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -855,6 +856,9 @@ public class MiniDFSCluster implements AutoCloseable {
 conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
 StaticMapping.class, DNSToSwitchMapping.class);
   }
+  // Set to the minimum number of threads possible to avoid starting
+  // unnecessary threads in unit tests
+  conf.setInt(DatanodeHttpServer.DATANODE_HTTP_MAX_THREADS_KEY, 2);
 
   // In an HA cluster, in order for the StandbyNode to perform checkpoints,
  

[2/2] hadoop git commit: HDFS-13272. DataNodeHttpServer to have configurable HttpServer2 threads. Contributed by Erik Krogen

2018-05-10 Thread cdouglas
HDFS-13272. DataNodeHttpServer to have configurable HttpServer2 threads. 
Contributed by Erik Krogen

(cherry picked from commit b2acaa52d21edd4b38083bf0a749caf76dfb79fd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54ff2931
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54ff2931
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54ff2931

Branch: refs/heads/branch-2.9
Commit: 54ff293105ae0d28484a3942fc9da793250c5fda
Parents: 1f32345
Author: Chris Douglas 
Authored: Wed May 2 21:23:57 2018 -0700
Committer: Chris Douglas 
Committed: Thu May 10 15:20:30 2018 -0700

--
 .../hdfs/server/datanode/web/DatanodeHttpServer.java  | 10 +-
 .../test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java  |  4 
 2 files changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54ff2931/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index c44f7da..f95d696 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -43,6 +43,7 @@ import io.netty.handler.stream.ChunkedWriteHandler;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -91,6 +92,11 @@ public class DatanodeHttpServer implements Closeable {
   private InetSocketAddress httpsAddress;
   static final Log LOG = LogFactory.getLog(DatanodeHttpServer.class);
 
+  @InterfaceAudience.Private
+  public static final String DATANODE_HTTP_MAX_THREADS_KEY =
+  "dfs.datanode.http.max-threads";
+  private static final int DATANODE_HTTP_MAX_THREADS_DEFAULT = 10;
+
   public DatanodeHttpServer(final Configuration conf,
   final DataNode datanode,
   final ServerSocketChannel externalHttpChannel)
@@ -99,7 +105,9 @@ public class DatanodeHttpServer implements Closeable {
 this.conf = conf;
 
 Configuration confForInfoServer = new Configuration(conf);
-confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
+confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS,
+conf.getInt(DATANODE_HTTP_MAX_THREADS_KEY,
+DATANODE_HTTP_MAX_THREADS_DEFAULT));
 int proxyPort =
 confForInfoServer.getInt(DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT, 0);
 HttpServer2.Builder builder = new HttpServer2.Builder()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54ff2931/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 6dfa5b1..63348de 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -104,6 +104,7 @@ import 
org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
+import org.apache.hadoop.hdfs.server.datanode.web.DatanodeHttpServer;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -855,6 +856,9 @@ public class MiniDFSCluster implements AutoCloseable {
 conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
 StaticMapping.class, DNSToSwitchMapping.class);
   }
+  // Set to the minimum number of threads possible to avoid starting
+  // unnecessary threads in unit tests
+  conf.setInt(DatanodeHttpServer.DATANODE_HTTP_MAX_THREADS_KEY, 2);
 
   // In an HA cluster, in order for the StandbyNode to perform checkpoints,
   // it needs to know the HTTP port of the Active. So, 

[2/5] hadoop git commit: HDFS-13408. MiniDFSCluster to support being built on randomized base directory. Contributed by Xiao Liang

2018-04-23 Thread cdouglas
HDFS-13408. MiniDFSCluster to support being built on randomized base directory. 
Contributed by Xiao Liang

(cherry picked from commit f411de6a79a0a87f03c09366cfe7a7d0726ed932)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf272c51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf272c51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf272c51

Branch: refs/heads/branch-3.1
Commit: cf272c5179a9cb4b524016c0fca7c69c9eaa92f1
Parents: 1f486a0
Author: Chris Douglas 
Authored: Mon Apr 23 11:13:18 2018 -0700
Committer: Chris Douglas 
Committed: Mon Apr 23 11:13:52 2018 -0700

--
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  | 22 +++-
 .../apache/hadoop/hdfs/TestMiniDFSCluster.java  | 16 +++---
 2 files changed, 28 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf272c51/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 4c3aed7..acb720e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -202,8 +202,28 @@ public class MiniDFSCluster implements AutoCloseable {
   this.conf = conf;
   this.storagesPerDatanode =
   
FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs();
+  if (null == conf.get(HDFS_MINIDFS_BASEDIR)) {
+conf.set(HDFS_MINIDFS_BASEDIR,
+new File(getBaseDirectory()).getAbsolutePath());
+  }
 }
-
+
+public Builder(Configuration conf, File basedir) {
+  this.conf = conf;
+  this.storagesPerDatanode =
+  
FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs();
+  if (null == basedir) {
+throw new IllegalArgumentException(
+"MiniDFSCluster base directory cannot be null");
+  }
+  String cdir = conf.get(HDFS_MINIDFS_BASEDIR);
+  if (cdir != null) {
+throw new IllegalArgumentException(
+"MiniDFSCluster base directory already defined (" + cdir + ")");
+  }
+  conf.set(HDFS_MINIDFS_BASEDIR, basedir.getAbsolutePath());
+}
+
 /**
  * Default: 0
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf272c51/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index d631b68..afc977f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -201,9 +201,8 @@ public class TestMiniDFSCluster {
   public void testIsClusterUpAfterShutdown() throws Throwable {
 Configuration conf = new HdfsConfiguration();
 File testDataCluster4 = new File(testDataPath, CLUSTER_4);
-String c4Path = testDataCluster4.getAbsolutePath();
-conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c4Path);
-MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build();
+MiniDFSCluster cluster4 =
+new MiniDFSCluster.Builder(conf, testDataCluster4).build();
 try {
   DistributedFileSystem dfs = cluster4.getFileSystem();
   dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
@@ -222,12 +221,11 @@ public class TestMiniDFSCluster {
 Configuration conf = new HdfsConfiguration();
 conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "MYHOST");
 File testDataCluster5 = new File(testDataPath, CLUSTER_5);
-String c5Path = testDataCluster5.getAbsolutePath();
-conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c5Path);
-try (MiniDFSCluster cluster5 = new MiniDFSCluster.Builder(conf)
-.numDataNodes(1)
-.checkDataNodeHostConfig(true)
-.build()) {
+try (MiniDFSCluster cluster5 =
+new MiniDFSCluster.Builder(conf, testDataCluster5)
+  .numDataNodes(1)
+  .checkDataNodeHostConfig(true)
+  .build()) {
   assertEquals("DataNode hostname config not respected", "MYHOST",
   cluster5.getDataNodes().get(0).getDatanodeId().getHostName());
 }
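A short sketch of the new Builder(conf, basedir) constructor in use, mirroring the test changes above; the temporary directory name is a placeholder:

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class RandomizedBaseDirExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Each run gets its own base directory, so concurrent runs do not collide.
    File basedir = new File(System.getProperty("java.io.tmpdir"),
        "minidfs-" + System.nanoTime());

    // Builder(conf, basedir) sets HDFS_MINIDFS_BASEDIR from the File argument
    // and rejects a conf that already defines it.
    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, basedir)
        .numDataNodes(1)
        .build()) {
      cluster.waitActive();
      System.out.println("Cluster up under " + basedir.getAbsolutePath());
    }
  }
}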



[4/5] hadoop git commit: HDFS-13408. MiniDFSCluster to support being built on randomized base directory. Contributed by Xiao Liang

2018-04-23 Thread cdouglas
HDFS-13408. MiniDFSCluster to support being built on randomized base directory. 
Contributed by Xiao Liang

(cherry picked from commit f411de6a79a0a87f03c09366cfe7a7d0726ed932)
(cherry picked from commit cf272c5179a9cb4b524016c0fca7c69c9eaa92f1)
(cherry picked from commit 956ab12ede390e1eea0a66752e0e6711f47b4b94)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99e82e2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99e82e2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99e82e2c

Branch: refs/heads/branch-2
Commit: 99e82e2c2cf554fa5582da2ec9615fd8b698eecc
Parents: a975250
Author: Chris Douglas 
Authored: Mon Apr 23 11:13:18 2018 -0700
Committer: Chris Douglas 
Committed: Mon Apr 23 11:15:28 2018 -0700

--
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  | 22 +++-
 .../apache/hadoop/hdfs/TestMiniDFSCluster.java  | 16 +++---
 2 files changed, 28 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99e82e2c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 887c635..a643077 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -198,8 +198,28 @@ public class MiniDFSCluster implements AutoCloseable {
   this.conf = conf;
   this.storagesPerDatanode =
   
FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs();
+  if (null == conf.get(HDFS_MINIDFS_BASEDIR)) {
+conf.set(HDFS_MINIDFS_BASEDIR,
+new File(getBaseDirectory()).getAbsolutePath());
+  }
 }
-
+
+public Builder(Configuration conf, File basedir) {
+  this.conf = conf;
+  this.storagesPerDatanode =
+  
FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs();
+  if (null == basedir) {
+throw new IllegalArgumentException(
+"MiniDFSCluster base directory cannot be null");
+  }
+  String cdir = conf.get(HDFS_MINIDFS_BASEDIR);
+  if (cdir != null) {
+throw new IllegalArgumentException(
+"MiniDFSCluster base directory already defined (" + cdir + ")");
+  }
+  conf.set(HDFS_MINIDFS_BASEDIR, basedir.getAbsolutePath());
+}
+
 /**
  * Default: 0
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99e82e2c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index e1346e9..296ede3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -200,9 +200,8 @@ public class TestMiniDFSCluster {
   public void testIsClusterUpAfterShutdown() throws Throwable {
 Configuration conf = new HdfsConfiguration();
 File testDataCluster4 = new File(testDataPath, CLUSTER_4);
-String c4Path = testDataCluster4.getAbsolutePath();
-conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c4Path);
-MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build();
+MiniDFSCluster cluster4 =
+new MiniDFSCluster.Builder(conf, testDataCluster4).build();
 try {
   DistributedFileSystem dfs = cluster4.getFileSystem();
   dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
@@ -221,12 +220,11 @@ public class TestMiniDFSCluster {
 Configuration conf = new HdfsConfiguration();
 conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "MYHOST");
 File testDataCluster5 = new File(testDataPath, CLUSTER_5);
-String c5Path = testDataCluster5.getAbsolutePath();
-conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c5Path);
-try (MiniDFSCluster cluster5 = new MiniDFSCluster.Builder(conf)
-.numDataNodes(1)
-.checkDataNodeHostConfig(true)
-.build()) {
+try (MiniDFSCluster cluster5 =
+new MiniDFSCluster.Builder(conf, testDataCluster5)
+  .numDataNodes(1)
+  .checkDataNodeHostConfig(true)
+  .build()) {
   assertEquals("DataNode hostname config not respected", "MYHOST",
   

[1/5] hadoop git commit: HDFS-13408. MiniDFSCluster to support being built on randomized base directory. Contributed by Xiao Liang

2018-04-23 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a97525030 -> 99e82e2c2
  refs/heads/branch-2.9 b78d3c25c -> 0539b7234
  refs/heads/branch-3.0 e8f62357c -> 956ab12ed
  refs/heads/branch-3.1 1f486a064 -> cf272c517
  refs/heads/trunk c533c7704 -> f411de6a7


HDFS-13408. MiniDFSCluster to support being built on randomized base directory. 
Contributed by Xiao Liang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f411de6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f411de6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f411de6a

Branch: refs/heads/trunk
Commit: f411de6a79a0a87f03c09366cfe7a7d0726ed932
Parents: c533c77
Author: Chris Douglas 
Authored: Mon Apr 23 11:13:18 2018 -0700
Committer: Chris Douglas 
Committed: Mon Apr 23 11:13:18 2018 -0700

--
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  | 22 +++-
 .../apache/hadoop/hdfs/TestMiniDFSCluster.java  | 16 +++---
 2 files changed, 28 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f411de6a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 4c3aed7..acb720e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -202,8 +202,28 @@ public class MiniDFSCluster implements AutoCloseable {
   this.conf = conf;
   this.storagesPerDatanode =
   
FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs();
+  if (null == conf.get(HDFS_MINIDFS_BASEDIR)) {
+conf.set(HDFS_MINIDFS_BASEDIR,
+new File(getBaseDirectory()).getAbsolutePath());
+  }
 }
-
+
+public Builder(Configuration conf, File basedir) {
+  this.conf = conf;
+  this.storagesPerDatanode =
+  
FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs();
+  if (null == basedir) {
+throw new IllegalArgumentException(
+"MiniDFSCluster base directory cannot be null");
+  }
+  String cdir = conf.get(HDFS_MINIDFS_BASEDIR);
+  if (cdir != null) {
+throw new IllegalArgumentException(
+"MiniDFSCluster base directory already defined (" + cdir + ")");
+  }
+  conf.set(HDFS_MINIDFS_BASEDIR, basedir.getAbsolutePath());
+}
+
 /**
  * Default: 0
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f411de6a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index d631b68..afc977f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -201,9 +201,8 @@ public class TestMiniDFSCluster {
   public void testIsClusterUpAfterShutdown() throws Throwable {
 Configuration conf = new HdfsConfiguration();
 File testDataCluster4 = new File(testDataPath, CLUSTER_4);
-String c4Path = testDataCluster4.getAbsolutePath();
-conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c4Path);
-MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build();
+MiniDFSCluster cluster4 =
+new MiniDFSCluster.Builder(conf, testDataCluster4).build();
 try {
   DistributedFileSystem dfs = cluster4.getFileSystem();
   dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
@@ -222,12 +221,11 @@ public class TestMiniDFSCluster {
 Configuration conf = new HdfsConfiguration();
 conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "MYHOST");
 File testDataCluster5 = new File(testDataPath, CLUSTER_5);
-String c5Path = testDataCluster5.getAbsolutePath();
-conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c5Path);
-try (MiniDFSCluster cluster5 = new MiniDFSCluster.Builder(conf)
-.numDataNodes(1)
-.checkDataNodeHostConfig(true)
-.build()) {
+try (MiniDFSCluster cluster5 =
+new MiniDFSCluster.Builder(conf, testDataCluster5)
+  .numDataNodes(1)
+  .checkDataNodeHostConfig(true)
+  .build()) {
   

[3/5] hadoop git commit: HDFS-13408. MiniDFSCluster to support being built on randomized base directory. Contributed by Xiao Liang

2018-04-23 Thread cdouglas
HDFS-13408. MiniDFSCluster to support being built on randomized base directory. 
Contributed by Xiao Liang

(cherry picked from commit f411de6a79a0a87f03c09366cfe7a7d0726ed932)
(cherry picked from commit cf272c5179a9cb4b524016c0fca7c69c9eaa92f1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/956ab12e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/956ab12e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/956ab12e

Branch: refs/heads/branch-3.0
Commit: 956ab12ede390e1eea0a66752e0e6711f47b4b94
Parents: e8f6235
Author: Chris Douglas 
Authored: Mon Apr 23 11:13:18 2018 -0700
Committer: Chris Douglas 
Committed: Mon Apr 23 11:15:15 2018 -0700

--
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  | 22 +++-
 .../apache/hadoop/hdfs/TestMiniDFSCluster.java  | 16 +++---
 2 files changed, 28 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/956ab12e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 36a45d8..0edc13e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -199,8 +199,28 @@ public class MiniDFSCluster implements AutoCloseable {
   this.conf = conf;
   this.storagesPerDatanode =
   
FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs();
+  if (null == conf.get(HDFS_MINIDFS_BASEDIR)) {
+conf.set(HDFS_MINIDFS_BASEDIR,
+new File(getBaseDirectory()).getAbsolutePath());
+  }
 }
-
+
+public Builder(Configuration conf, File basedir) {
+  this.conf = conf;
+  this.storagesPerDatanode =
+  
FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs();
+  if (null == basedir) {
+throw new IllegalArgumentException(
+"MiniDFSCluster base directory cannot be null");
+  }
+  String cdir = conf.get(HDFS_MINIDFS_BASEDIR);
+  if (cdir != null) {
+throw new IllegalArgumentException(
+"MiniDFSCluster base directory already defined (" + cdir + ")");
+  }
+  conf.set(HDFS_MINIDFS_BASEDIR, basedir.getAbsolutePath());
+}
+
 /**
  * Default: 0
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/956ab12e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index d631b68..afc977f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -201,9 +201,8 @@ public class TestMiniDFSCluster {
   public void testIsClusterUpAfterShutdown() throws Throwable {
 Configuration conf = new HdfsConfiguration();
 File testDataCluster4 = new File(testDataPath, CLUSTER_4);
-String c4Path = testDataCluster4.getAbsolutePath();
-conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c4Path);
-MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build();
+MiniDFSCluster cluster4 =
+new MiniDFSCluster.Builder(conf, testDataCluster4).build();
 try {
   DistributedFileSystem dfs = cluster4.getFileSystem();
   dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
@@ -222,12 +221,11 @@ public class TestMiniDFSCluster {
 Configuration conf = new HdfsConfiguration();
 conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "MYHOST");
 File testDataCluster5 = new File(testDataPath, CLUSTER_5);
-String c5Path = testDataCluster5.getAbsolutePath();
-conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c5Path);
-try (MiniDFSCluster cluster5 = new MiniDFSCluster.Builder(conf)
-.numDataNodes(1)
-.checkDataNodeHostConfig(true)
-.build()) {
+try (MiniDFSCluster cluster5 =
+new MiniDFSCluster.Builder(conf, testDataCluster5)
+  .numDataNodes(1)
+  .checkDataNodeHostConfig(true)
+  .build()) {
   assertEquals("DataNode hostname config not respected", "MYHOST",
   cluster5.getDataNodes().get(0).getDatanodeId().getHostName());

[5/5] hadoop git commit: HDFS-13408. MiniDFSCluster to support being built on randomized base directory. Contributed by Xiao Liang

2018-04-23 Thread cdouglas
HDFS-13408. MiniDFSCluster to support being built on randomized base directory. 
Contributed by Xiao Liang

(cherry picked from commit f411de6a79a0a87f03c09366cfe7a7d0726ed932)
(cherry picked from commit cf272c5179a9cb4b524016c0fca7c69c9eaa92f1)
(cherry picked from commit 956ab12ede390e1eea0a66752e0e6711f47b4b94)
(cherry picked from commit 99e82e2c2cf554fa5582da2ec9615fd8b698eecc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0539b723
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0539b723
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0539b723

Branch: refs/heads/branch-2.9
Commit: 0539b72341e6cacd1e6fb9675182c35730bfaf38
Parents: b78d3c2
Author: Chris Douglas 
Authored: Mon Apr 23 11:13:18 2018 -0700
Committer: Chris Douglas 
Committed: Mon Apr 23 11:15:41 2018 -0700

--
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  | 22 +++-
 .../apache/hadoop/hdfs/TestMiniDFSCluster.java  | 16 +++---
 2 files changed, 28 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0539b723/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 887c635..a643077 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -198,8 +198,28 @@ public class MiniDFSCluster implements AutoCloseable {
   this.conf = conf;
   this.storagesPerDatanode =
   
FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs();
+  if (null == conf.get(HDFS_MINIDFS_BASEDIR)) {
+conf.set(HDFS_MINIDFS_BASEDIR,
+new File(getBaseDirectory()).getAbsolutePath());
+  }
 }
-
+
+public Builder(Configuration conf, File basedir) {
+  this.conf = conf;
+  this.storagesPerDatanode =
+  
FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs();
+  if (null == basedir) {
+throw new IllegalArgumentException(
+"MiniDFSCluster base directory cannot be null");
+  }
+  String cdir = conf.get(HDFS_MINIDFS_BASEDIR);
+  if (cdir != null) {
+throw new IllegalArgumentException(
+"MiniDFSCluster base directory already defined (" + cdir + ")");
+  }
+  conf.set(HDFS_MINIDFS_BASEDIR, basedir.getAbsolutePath());
+}
+
 /**
  * Default: 0
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0539b723/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index e1346e9..296ede3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -200,9 +200,8 @@ public class TestMiniDFSCluster {
   public void testIsClusterUpAfterShutdown() throws Throwable {
 Configuration conf = new HdfsConfiguration();
 File testDataCluster4 = new File(testDataPath, CLUSTER_4);
-String c4Path = testDataCluster4.getAbsolutePath();
-conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c4Path);
-MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build();
+MiniDFSCluster cluster4 =
+new MiniDFSCluster.Builder(conf, testDataCluster4).build();
 try {
   DistributedFileSystem dfs = cluster4.getFileSystem();
   dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
@@ -221,12 +220,11 @@ public class TestMiniDFSCluster {
 Configuration conf = new HdfsConfiguration();
 conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "MYHOST");
 File testDataCluster5 = new File(testDataPath, CLUSTER_5);
-String c5Path = testDataCluster5.getAbsolutePath();
-conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c5Path);
-try (MiniDFSCluster cluster5 = new MiniDFSCluster.Builder(conf)
-.numDataNodes(1)
-.checkDataNodeHostConfig(true)
-.build()) {
+try (MiniDFSCluster cluster5 =
+new MiniDFSCluster.Builder(conf, testDataCluster5)
+  .numDataNodes(1)
+  .checkDataNodeHostConfig(true)
+  .build()) {
   

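For reference, a minimal usage sketch of the new two-argument builder (not part of the patch): the base directory and class name below are illustrative, and the cluster calls mirror the test changes above.

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDFSClusterBaseDirSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Hypothetical per-test directory; with HDFS-13408 each test can pass its
    // own (possibly randomized) base directory instead of setting
    // MiniDFSCluster.HDFS_MINIDFS_BASEDIR on the conf by hand.
    File baseDir = new File("target/test/data", "example-cluster");

    // Builder(conf, basedir) sets HDFS_MINIDFS_BASEDIR from basedir and
    // rejects a conf that already defines it, per the constructor hunk above.
    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, baseDir)
        .numDataNodes(1)
        .build()) {
      DistributedFileSystem fs = cluster.getFileSystem();
      System.out.println("cluster up: " + cluster.isClusterUp()
          + ", fs uri: " + fs.getUri());
    }
  }
}
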
[1/4] hadoop git commit: HADOOP-15320. Remove customized getFileBlockLocations for hadoop-azure and hadoop-azure-datalake. Contributed by Shanyu Zhao

2018-03-28 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f5aa36e19 -> cc0a79179
  refs/heads/branch-2.9 fa04ea189 -> 13bf5c6eb
  refs/heads/branch-3.1 0db6b8c93 -> ac16e8f4d
  refs/heads/trunk 0b1c2b5fe -> 081c35018


HADOOP-15320. Remove customized getFileBlockLocations for hadoop-azure and 
hadoop-azure-datalake. Contributed by Shanyu Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/081c3501
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/081c3501
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/081c3501

Branch: refs/heads/trunk
Commit: 081c3501885c543bb1f159929d456d1ba2e3650c
Parents: 0b1c2b5
Author: Chris Douglas 
Authored: Wed Mar 28 11:58:59 2018 -0700
Committer: Chris Douglas 
Committed: Wed Mar 28 11:58:59 2018 -0700

--
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  40 --
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  46 --
 ...TestNativeAzureFileSystemBlockLocations.java | 141 ---
 3 files changed, 227 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/081c3501/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 9f54a36..aa6babe 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -46,7 +46,6 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.ContentSummary.Builder;
 import org.apache.hadoop.fs.CreateFlag;
@@ -910,45 +909,6 @@ public class AdlFileSystem extends FileSystem {
 return ADL_BLOCK_SIZE;
   }
 
-  @Override
-  public BlockLocation[] getFileBlockLocations(final FileStatus status,
-  final long offset, final long length) throws IOException {
-if (status == null) {
-  return null;
-}
-
-if ((offset < 0) || (length < 0)) {
-  throw new IllegalArgumentException("Invalid start or len parameter");
-}
-
-if (status.getLen() < offset) {
-  return new BlockLocation[0];
-}
-
-final String[] name = {"localhost"};
-final String[] host = {"localhost"};
-long blockSize = ADL_BLOCK_SIZE;
-int numberOfLocations =
-(int) (length / blockSize) + ((length % blockSize == 0) ? 0 : 1);
-BlockLocation[] locations = new BlockLocation[numberOfLocations];
-for (int i = 0; i < locations.length; i++) {
-  long currentOffset = offset + (i * blockSize);
-  long currentLength = Math.min(blockSize, offset + length - currentOffset);
-  locations[i] = new BlockLocation(name, host, currentOffset,
-  currentLength);
-}
-
-return locations;
-  }
-
-  @Override
-  public BlockLocation[] getFileBlockLocations(final Path p, final long offset,
-  final long length) throws IOException {
-// read ops incremented in getFileStatus
-FileStatus fileStatus = getFileStatus(p);
-return getFileBlockLocations(fileStatus, offset, length);
-  }
-
   /**
* Get replication.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/081c3501/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 3d44b20..e05327e 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -52,7 +52,6 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BufferedFSInputStream;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -726,10 +725,6 @@ public class NativeAzureFileSystem extends FileSystem {
 
   static final String 

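The removed AdlFileSystem override fabricated evenly sized "localhost" locations; with the overrides gone, both file systems inherit FileSystem's default getFileBlockLocations(). For reference, a standalone sketch of the arithmetic the deleted AdlFileSystem code used (the block size and range below are made-up values, not taken from the patch):

public class SyntheticBlockLocationSketch {
  public static void main(String[] args) {
    // Made-up values; the removed code used ADL_BLOCK_SIZE and the caller's
    // (offset, length) range.
    long blockSize = 256L * 1024 * 1024;
    long offset = 0L;
    long length = 600L * 1024 * 1024;

    // Same arithmetic as the deleted override: one synthetic "localhost"
    // location per blockSize-sized slice of the requested range.
    int numberOfLocations =
        (int) (length / blockSize) + ((length % blockSize == 0) ? 0 : 1);
    for (int i = 0; i < numberOfLocations; i++) {
      long currentOffset = offset + (i * blockSize);
      long currentLength = Math.min(blockSize, offset + length - currentOffset);
      System.out.println("slice " + i + ": offset=" + currentOffset
          + " length=" + currentLength + " host=localhost");
    }
  }
}
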
[2/4] hadoop git commit: HADOOP-15320. Remove customized getFileBlockLocations for hadoop-azure and hadoop-azure-datalake. Contributed by Shanyu Zhao

2018-03-28 Thread cdouglas
HADOOP-15320. Remove customized getFileBlockLocations for hadoop-azure and 
hadoop-azure-datalake. Contributed by Shanyu Zhao

(cherry picked from commit 081c3501885c543bb1f159929d456d1ba2e3650c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac16e8f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac16e8f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac16e8f4

Branch: refs/heads/branch-3.1
Commit: ac16e8f4d3957c64bc2afddb9e58bd2f511f6c6e
Parents: 0db6b8c
Author: Chris Douglas 
Authored: Wed Mar 28 11:58:59 2018 -0700
Committer: Chris Douglas 
Committed: Wed Mar 28 11:59:08 2018 -0700

--
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  40 --
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  46 --
 ...TestNativeAzureFileSystemBlockLocations.java | 141 ---
 3 files changed, 227 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac16e8f4/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 9f54a36..aa6babe 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -46,7 +46,6 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.ContentSummary.Builder;
 import org.apache.hadoop.fs.CreateFlag;
@@ -910,45 +909,6 @@ public class AdlFileSystem extends FileSystem {
 return ADL_BLOCK_SIZE;
   }
 
-  @Override
-  public BlockLocation[] getFileBlockLocations(final FileStatus status,
-  final long offset, final long length) throws IOException {
-if (status == null) {
-  return null;
-}
-
-if ((offset < 0) || (length < 0)) {
-  throw new IllegalArgumentException("Invalid start or len parameter");
-}
-
-if (status.getLen() < offset) {
-  return new BlockLocation[0];
-}
-
-final String[] name = {"localhost"};
-final String[] host = {"localhost"};
-long blockSize = ADL_BLOCK_SIZE;
-int numberOfLocations =
-(int) (length / blockSize) + ((length % blockSize == 0) ? 0 : 1);
-BlockLocation[] locations = new BlockLocation[numberOfLocations];
-for (int i = 0; i < locations.length; i++) {
-  long currentOffset = offset + (i * blockSize);
-  long currentLength = Math.min(blockSize, offset + length - currentOffset);
-  locations[i] = new BlockLocation(name, host, currentOffset,
-  currentLength);
-}
-
-return locations;
-  }
-
-  @Override
-  public BlockLocation[] getFileBlockLocations(final Path p, final long offset,
-  final long length) throws IOException {
-// read ops incremented in getFileStatus
-FileStatus fileStatus = getFileStatus(p);
-return getFileBlockLocations(fileStatus, offset, length);
-  }
-
   /**
* Get replication.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac16e8f4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 3d44b20..e05327e 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -52,7 +52,6 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BufferedFSInputStream;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -726,10 +725,6 @@ public class NativeAzureFileSystem extends FileSystem {
 
   static final String AZURE_CHMOD_USERLIST_PROPERTY_DEFAULT_VALUE = "*";
 
-  static final String AZURE_BLOCK_LOCATION_HOST_PROPERTY_NAME =
-  

[3/4] hadoop git commit: HADOOP-15320. Remove customized getFileBlockLocations for hadoop-azure and hadoop-azure-datalake. Contributed by Shanyu Zhao

2018-03-28 Thread cdouglas
HADOOP-15320. Remove customized getFileBlockLocations for hadoop-azure and 
hadoop-azure-datalake. Contributed by Shanyu Zhao

(cherry picked from commit 081c3501885c543bb1f159929d456d1ba2e3650c)
(cherry picked from commit ac16e8f4d3957c64bc2afddb9e58bd2f511f6c6e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc0a7917
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc0a7917
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc0a7917

Branch: refs/heads/branch-2
Commit: cc0a7917942aea5ef331c6a270a62c5540dfee2a
Parents: f5aa36e
Author: Chris Douglas 
Authored: Wed Mar 28 11:58:59 2018 -0700
Committer: Chris Douglas 
Committed: Wed Mar 28 12:04:20 2018 -0700

--
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  40 --
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  46 --
 ...TestNativeAzureFileSystemBlockLocations.java | 141 ---
 3 files changed, 227 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc0a7917/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 4488898..9637c59 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -47,7 +47,6 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.ContentSummary.Builder;
 import org.apache.hadoop.fs.CreateFlag;
@@ -929,45 +928,6 @@ public class AdlFileSystem extends FileSystem {
 return ADL_BLOCK_SIZE;
   }
 
-  @Override
-  public BlockLocation[] getFileBlockLocations(final FileStatus status,
-  final long offset, final long length) throws IOException {
-if (status == null) {
-  return null;
-}
-
-if ((offset < 0) || (length < 0)) {
-  throw new IllegalArgumentException("Invalid start or len parameter");
-}
-
-if (status.getLen() < offset) {
-  return new BlockLocation[0];
-}
-
-final String[] name = {"localhost"};
-final String[] host = {"localhost"};
-long blockSize = ADL_BLOCK_SIZE;
-int numberOfLocations =
-(int) (length / blockSize) + ((length % blockSize == 0) ? 0 : 1);
-BlockLocation[] locations = new BlockLocation[numberOfLocations];
-for (int i = 0; i < locations.length; i++) {
-  long currentOffset = offset + (i * blockSize);
-  long currentLength = Math.min(blockSize, offset + length - currentOffset);
-  locations[i] = new BlockLocation(name, host, currentOffset,
-  currentLength);
-}
-
-return locations;
-  }
-
-  @Override
-  public BlockLocation[] getFileBlockLocations(final Path p, final long offset,
-  final long length) throws IOException {
-// read ops incremented in getFileStatus
-FileStatus fileStatus = getFileStatus(p);
-return getFileBlockLocations(fileStatus, offset, length);
-  }
-
   /**
* Get replication.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc0a7917/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index af42849..2cda43c 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -48,7 +48,6 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BufferedFSInputStream;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -725,10 +724,6 @@ public class NativeAzureFileSystem extends FileSystem {
 
   static final String AZURE_CHMOD_USERLIST_PROPERTY_DEFAULT_VALUE = "*";
 
-  static final String 

[4/4] hadoop git commit: HADOOP-15320. Remove customized getFileBlockLocations for hadoop-azure and hadoop-azure-datalake. Contributed by Shanyu Zhao

2018-03-28 Thread cdouglas
HADOOP-15320. Remove customized getFileBlockLocations for hadoop-azure and 
hadoop-azure-datalake. Contributed by Shanyu Zhao

(cherry picked from commit 081c3501885c543bb1f159929d456d1ba2e3650c)
(cherry picked from commit ac16e8f4d3957c64bc2afddb9e58bd2f511f6c6e)
(cherry picked from commit cc0a7917942aea5ef331c6a270a62c5540dfee2a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13bf5c6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13bf5c6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13bf5c6e

Branch: refs/heads/branch-2.9
Commit: 13bf5c6eb8eddb3a974c37a2255dcb1c195c60a1
Parents: fa04ea1
Author: Chris Douglas 
Authored: Wed Mar 28 11:58:59 2018 -0700
Committer: Chris Douglas 
Committed: Wed Mar 28 12:04:33 2018 -0700

--
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  40 --
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  46 --
 ...TestNativeAzureFileSystemBlockLocations.java | 141 ---
 3 files changed, 227 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13bf5c6e/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 4488898..9637c59 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -47,7 +47,6 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.ContentSummary.Builder;
 import org.apache.hadoop.fs.CreateFlag;
@@ -929,45 +928,6 @@ public class AdlFileSystem extends FileSystem {
 return ADL_BLOCK_SIZE;
   }
 
-  @Override
-  public BlockLocation[] getFileBlockLocations(final FileStatus status,
-  final long offset, final long length) throws IOException {
-if (status == null) {
-  return null;
-}
-
-if ((offset < 0) || (length < 0)) {
-  throw new IllegalArgumentException("Invalid start or len parameter");
-}
-
-if (status.getLen() < offset) {
-  return new BlockLocation[0];
-}
-
-final String[] name = {"localhost"};
-final String[] host = {"localhost"};
-long blockSize = ADL_BLOCK_SIZE;
-int numberOfLocations =
-(int) (length / blockSize) + ((length % blockSize == 0) ? 0 : 1);
-BlockLocation[] locations = new BlockLocation[numberOfLocations];
-for (int i = 0; i < locations.length; i++) {
-  long currentOffset = offset + (i * blockSize);
-  long currentLength = Math.min(blockSize, offset + length - currentOffset);
-  locations[i] = new BlockLocation(name, host, currentOffset,
-  currentLength);
-}
-
-return locations;
-  }
-
-  @Override
-  public BlockLocation[] getFileBlockLocations(final Path p, final long offset,
-  final long length) throws IOException {
-// read ops incremented in getFileStatus
-FileStatus fileStatus = getFileStatus(p);
-return getFileBlockLocations(fileStatus, offset, length);
-  }
-
   /**
* Get replication.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13bf5c6e/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index af42849..2cda43c 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -48,7 +48,6 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BufferedFSInputStream;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -725,10 +724,6 @@ public class NativeAzureFileSystem extends FileSystem {
 
   static final String 

hadoop git commit: HADOOP-14667. Flexible Visual Studio support. Contributed by Allen Wittenauer

2018-03-19 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/trunk f480367af -> 3fc3fa971


HADOOP-14667. Flexible Visual Studio support. Contributed by Allen Wittenauer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fc3fa97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fc3fa97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fc3fa97

Branch: refs/heads/trunk
Commit: 3fc3fa9711d96677f6149e173df0f57cd06ee6b9
Parents: f480367
Author: Chris Douglas 
Authored: Mon Mar 19 16:05:55 2018 -0700
Committer: Chris Douglas 
Committed: Mon Mar 19 16:05:55 2018 -0700

--
 BUILDING.txt| 30 
 dev-support/bin/win-vs-upgrade.cmd  | 39 
 dev-support/win-paths-eg.cmd| 49 
 hadoop-common-project/hadoop-common/pom.xml | 28 +++
 .../src/main/native/native.vcxproj  |  2 +
 .../hadoop-hdfs-native-client/pom.xml   |  5 +-
 6 files changed, 128 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fc3fa97/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 6c266e5..6d752d4 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -348,7 +348,7 @@ Requirements:
 * Maven 3.0 or later
 * ProtocolBuffer 2.5.0
 * CMake 3.1 or newer
-* Windows SDK 7.1 or Visual Studio 2010 Professional
+* Visual Studio 2010 Professional or Higher
 * Windows SDK 8.1 (if building CPU rate control for the container executor)
 * zlib headers (if building native code bindings for zlib)
 * Internet connection for first build (to fetch all Maven and Hadoop dependencies)
@@ -359,18 +359,15 @@ Requirements:
 Unix command-line tools are also included with the Windows Git package which
 can be downloaded from http://git-scm.com/downloads
 
-If using Visual Studio, it must be Visual Studio 2010 Professional (not 2012).
+If using Visual Studio, it must be Professional level or higher.
 Do not use Visual Studio Express.  It does not support compiling for 64-bit,
-which is problematic if running a 64-bit system.  The Windows SDK 7.1 is free to
-download here:
-
-http://www.microsoft.com/en-us/download/details.aspx?id=8279
+which is problematic if running a 64-bit system.
 
 The Windows SDK 8.1 is available to download at:
 
 http://msdn.microsoft.com/en-us/windows/bg162891.aspx
 
-Cygwin is neither required nor supported.
+Cygwin is not required.
 
 
--
 Building:
@@ -378,21 +375,12 @@ Building:
 Keep the source code tree in a short path to avoid running into problems related
 to Windows maximum path length limitation (for example, C:\hdc).
 
-Run builds from a Windows SDK Command Prompt. (Start, All Programs,
-Microsoft Windows SDK v7.1, Windows SDK 7.1 Command Prompt).
-
-JAVA_HOME must be set, and the path must not contain spaces. If the full path
-would contain spaces, then use the Windows short path instead.
-
-You must set the Platform environment variable to either x64 or Win32 depending
-on whether you're running a 64-bit or 32-bit system. Note that this is
-case-sensitive. It must be "Platform", not "PLATFORM" or "platform".
-Environment variables on Windows are usually case-insensitive, but Maven treats
-them as case-sensitive. Failure to set this environment variable correctly will
-cause msbuild to fail while building the native code in hadoop-common.
+There is one support command file located in dev-support called win-paths-eg.cmd.
+It should be copied somewhere convenient and modified to fit your needs.
 
-set Platform=x64 (when building on a 64-bit system)
-set Platform=Win32 (when building on a 32-bit system)
+win-paths-eg.cmd sets up the environment for use. You will need to modify this
+file. It will put all of the required components in the command path,
+configure the bit-ness of the build, and set several optional components.
 
 Several tests require that the user must have the Create Symbolic Links
 privilege.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fc3fa97/dev-support/bin/win-vs-upgrade.cmd
--
diff --git a/dev-support/bin/win-vs-upgrade.cmd 
b/dev-support/bin/win-vs-upgrade.cmd
new file mode 100644
index 000..d8c9d73
--- /dev/null
+++ b/dev-support/bin/win-vs-upgrade.cmd
@@ -0,0 +1,39 @@
+@ECHO OFF
+@REM Licensed to the Apache Software Foundation (ASF) under one or more
+@REM contributor license agreements.  See the NOTICE file distributed with
+@REM this work for additional information regarding copyright ownership.
+@REM The ASF licenses this file to You under the Apache 

[3/3] hadoop git commit: HADOOP-15311. HttpServer2 needs a way to configure the acceptor/selector count. Contributed by Erik Krogen

2018-03-13 Thread cdouglas
HADOOP-15311. HttpServer2 needs a way to configure the acceptor/selector count. 
Contributed by Erik Krogen

(cherry picked from commit 9d6994da1964c1125a33b3a65e7a7747e2d0bc59)
(cherry picked from commit 80641508c721fe49c8ace4730b647efd3ce84fbd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9889e55e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9889e55e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9889e55e

Branch: refs/heads/branch-3.0
Commit: 9889e55e5a27506e92defc7910c36b3bee10a39d
Parents: fc6f3e2
Author: Chris Douglas 
Authored: Tue Mar 13 13:53:58 2018 -0700
Committer: Chris Douglas 
Committed: Tue Mar 13 14:20:44 2018 -0700

--
 .../org/apache/hadoop/http/HttpServer2.java | 12 +-
 .../org/apache/hadoop/http/TestHttpServer.java  | 23 +++-
 2 files changed, 33 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9889e55e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index fb49d2d..d36cdf4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -133,6 +133,14 @@ public final class HttpServer2 implements FilterContainer {
   "hadoop.http.socket.backlog.size";
   public static final int HTTP_SOCKET_BACKLOG_SIZE_DEFAULT = 128;
   public static final String HTTP_MAX_THREADS_KEY = "hadoop.http.max.threads";
+  public static final String HTTP_ACCEPTOR_COUNT_KEY =
+  "hadoop.http.acceptor.count";
+  // -1 to use default behavior of setting count based on CPU core count
+  public static final int HTTP_ACCEPTOR_COUNT_DEFAULT = -1;
+  public static final String HTTP_SELECTOR_COUNT_KEY =
+  "hadoop.http.selector.count";
+  // -1 to use default behavior of setting count based on CPU core count
+  public static final int HTTP_SELECTOR_COUNT_DEFAULT = -1;
   public static final String HTTP_TEMP_DIR_KEY = "hadoop.http.temp.dir";
 
   static final String FILTER_INITIALIZER_PROPERTY
@@ -464,7 +472,9 @@ public final class HttpServer2 implements FilterContainer {
 
 private ServerConnector createHttpChannelConnector(
 Server server, HttpConfiguration httpConfig) {
-  ServerConnector conn = new ServerConnector(server);
+  ServerConnector conn = new ServerConnector(server,
+  conf.getInt(HTTP_ACCEPTOR_COUNT_KEY, HTTP_ACCEPTOR_COUNT_DEFAULT),
+  conf.getInt(HTTP_SELECTOR_COUNT_KEY, HTTP_SELECTOR_COUNT_DEFAULT));
   ConnectionFactory connFactory = new HttpConnectionFactory(httpConfig);
   conn.addConnectionFactory(connFactory);
   configureChannelConnector(conn);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9889e55e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
index ca7e466..7350d09 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
@@ -147,7 +147,7 @@ public class TestHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass public static void setup() throws Exception {
 Configuration conf = new Configuration();
-conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
+conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, MAX_THREADS);
 server = createTestServer(conf);
 server.addServlet("echo", "/echo", EchoServlet.class);
 server.addServlet("echomap", "/echomap", EchoMapServlet.class);
@@ -195,6 +195,27 @@ public class TestHttpServer extends 
HttpServerFunctionalTest {
 ready.await();
 start.countDown();
   }
+
+  /**
+   * Test that the number of acceptors and selectors can be configured by
+   * trying to configure more of them than would be allowed based on the
+   * maximum thread count.
+   */
+  @Test
+  public void testAcceptorSelectorConfigurability() throws Exception {
+Configuration conf = new Configuration();
+conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, MAX_THREADS);
+conf.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY, MAX_THREADS - 2);
+

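For reference, a sketch of wiring the new keys into a server (not part of the patch): the key constants come from the hunk above, the thread counts are arbitrary, and the HttpServer2.Builder calls follow the usual pattern from Hadoop's HTTP tests rather than anything added by this change.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;

public class HttpServer2ThreadTuningSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 16);
    // New in HADOOP-15311: leave at -1 (the default) to size acceptors and
    // selectors from the CPU core count, or pin explicit counts. They draw
    // from the same pool, so keep them well under hadoop.http.max.threads.
    conf.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY, 1);
    conf.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY, 2);

    HttpServer2 server = new HttpServer2.Builder()
        .setName("example")
        .setConf(conf)
        .addEndpoint(new URI("http://localhost:0"))
        .build();
    server.start();
    System.out.println("listening on " + server.getConnectorAddress(0));
    server.stop();
  }
}
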
[1/3] hadoop git commit: HADOOP-15311. HttpServer2 needs a way to configure the acceptor/selector count. Contributed by Erik Krogen

2018-03-13 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 fc6f3e2a6 -> 9889e55e5
  refs/heads/branch-3.1 189c1cb6f -> 80641508c
  refs/heads/trunk a82d4a2e3 -> 9d6994da1


HADOOP-15311. HttpServer2 needs a way to configure the acceptor/selector count. 
Contributed by Erik Krogen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d6994da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d6994da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d6994da

Branch: refs/heads/trunk
Commit: 9d6994da1964c1125a33b3a65e7a7747e2d0bc59
Parents: a82d4a2
Author: Chris Douglas 
Authored: Tue Mar 13 13:53:58 2018 -0700
Committer: Chris Douglas 
Committed: Tue Mar 13 13:55:18 2018 -0700

--
 .../org/apache/hadoop/http/HttpServer2.java | 12 +-
 .../org/apache/hadoop/http/TestHttpServer.java  | 23 +++-
 2 files changed, 33 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d6994da/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 7e12640..8adb114 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -134,6 +134,14 @@ public final class HttpServer2 implements FilterContainer {
   "hadoop.http.socket.backlog.size";
   public static final int HTTP_SOCKET_BACKLOG_SIZE_DEFAULT = 128;
   public static final String HTTP_MAX_THREADS_KEY = "hadoop.http.max.threads";
+  public static final String HTTP_ACCEPTOR_COUNT_KEY =
+  "hadoop.http.acceptor.count";
+  // -1 to use default behavior of setting count based on CPU core count
+  public static final int HTTP_ACCEPTOR_COUNT_DEFAULT = -1;
+  public static final String HTTP_SELECTOR_COUNT_KEY =
+  "hadoop.http.selector.count";
+  // -1 to use default behavior of setting count based on CPU core count
+  public static final int HTTP_SELECTOR_COUNT_DEFAULT = -1;
   public static final String HTTP_TEMP_DIR_KEY = "hadoop.http.temp.dir";
 
   public static final String FILTER_INITIALIZER_PROPERTY
@@ -465,7 +473,9 @@ public final class HttpServer2 implements FilterContainer {
 
 private ServerConnector createHttpChannelConnector(
 Server server, HttpConfiguration httpConfig) {
-  ServerConnector conn = new ServerConnector(server);
+  ServerConnector conn = new ServerConnector(server,
+  conf.getInt(HTTP_ACCEPTOR_COUNT_KEY, HTTP_ACCEPTOR_COUNT_DEFAULT),
+  conf.getInt(HTTP_SELECTOR_COUNT_KEY, HTTP_SELECTOR_COUNT_DEFAULT));
   ConnectionFactory connFactory = new HttpConnectionFactory(httpConfig);
   conn.addConnectionFactory(connFactory);
   configureChannelConnector(conn);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d6994da/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
index ca7e466..7350d09 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
@@ -147,7 +147,7 @@ public class TestHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass public static void setup() throws Exception {
 Configuration conf = new Configuration();
-conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
+conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, MAX_THREADS);
 server = createTestServer(conf);
 server.addServlet("echo", "/echo", EchoServlet.class);
 server.addServlet("echomap", "/echomap", EchoMapServlet.class);
@@ -195,6 +195,27 @@ public class TestHttpServer extends 
HttpServerFunctionalTest {
 ready.await();
 start.countDown();
   }
+
+  /**
+   * Test that the number of acceptors and selectors can be configured by
+   * trying to configure more of them than would be allowed based on the
+   * maximum thread count.
+   */
+  @Test
+  public void testAcceptorSelectorConfigurability() throws Exception {
+Configuration conf = new Configuration();
+conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, MAX_THREADS);
+

[2/3] hadoop git commit: HADOOP-15311. HttpServer2 needs a way to configure the acceptor/selector count. Contributed by Erik Krogen

2018-03-13 Thread cdouglas
HADOOP-15311. HttpServer2 needs a way to configure the acceptor/selector count. 
Contributed by Erik Krogen

(cherry picked from commit 9d6994da1964c1125a33b3a65e7a7747e2d0bc59)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80641508
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80641508
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80641508

Branch: refs/heads/branch-3.1
Commit: 80641508c721fe49c8ace4730b647efd3ce84fbd
Parents: 189c1cb
Author: Chris Douglas 
Authored: Tue Mar 13 13:53:58 2018 -0700
Committer: Chris Douglas 
Committed: Tue Mar 13 14:20:28 2018 -0700

--
 .../org/apache/hadoop/http/HttpServer2.java | 12 +-
 .../org/apache/hadoop/http/TestHttpServer.java  | 23 +++-
 2 files changed, 33 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/80641508/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 7e12640..8adb114 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -134,6 +134,14 @@ public final class HttpServer2 implements FilterContainer {
   "hadoop.http.socket.backlog.size";
   public static final int HTTP_SOCKET_BACKLOG_SIZE_DEFAULT = 128;
   public static final String HTTP_MAX_THREADS_KEY = "hadoop.http.max.threads";
+  public static final String HTTP_ACCEPTOR_COUNT_KEY =
+  "hadoop.http.acceptor.count";
+  // -1 to use default behavior of setting count based on CPU core count
+  public static final int HTTP_ACCEPTOR_COUNT_DEFAULT = -1;
+  public static final String HTTP_SELECTOR_COUNT_KEY =
+  "hadoop.http.selector.count";
+  // -1 to use default behavior of setting count based on CPU core count
+  public static final int HTTP_SELECTOR_COUNT_DEFAULT = -1;
   public static final String HTTP_TEMP_DIR_KEY = "hadoop.http.temp.dir";
 
   public static final String FILTER_INITIALIZER_PROPERTY
@@ -465,7 +473,9 @@ public final class HttpServer2 implements FilterContainer {
 
 private ServerConnector createHttpChannelConnector(
 Server server, HttpConfiguration httpConfig) {
-  ServerConnector conn = new ServerConnector(server);
+  ServerConnector conn = new ServerConnector(server,
+  conf.getInt(HTTP_ACCEPTOR_COUNT_KEY, HTTP_ACCEPTOR_COUNT_DEFAULT),
+  conf.getInt(HTTP_SELECTOR_COUNT_KEY, HTTP_SELECTOR_COUNT_DEFAULT));
   ConnectionFactory connFactory = new HttpConnectionFactory(httpConfig);
   conn.addConnectionFactory(connFactory);
   configureChannelConnector(conn);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/80641508/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
index ca7e466..7350d09 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
@@ -147,7 +147,7 @@ public class TestHttpServer extends 
HttpServerFunctionalTest {
 
   @BeforeClass public static void setup() throws Exception {
 Configuration conf = new Configuration();
-conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
+conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, MAX_THREADS);
 server = createTestServer(conf);
 server.addServlet("echo", "/echo", EchoServlet.class);
 server.addServlet("echomap", "/echomap", EchoMapServlet.class);
@@ -195,6 +195,27 @@ public class TestHttpServer extends 
HttpServerFunctionalTest {
 ready.await();
 start.countDown();
   }
+
+  /**
+   * Test that the number of acceptors and selectors can be configured by
+   * trying to configure more of them than would be allowed based on the
+   * maximum thread count.
+   */
+  @Test
+  public void testAcceptorSelectorConfigurability() throws Exception {
+Configuration conf = new Configuration();
+conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, MAX_THREADS);
+conf.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY, MAX_THREADS - 2);
+conf.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY, MAX_THREADS - 2);
+

hadoop git commit: HDFS-12780. Fix spelling mistake in DistCpUtils.java. Contributed by Jianfei Jiang

2018-03-13 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8211a3d46 -> 45cccadd2


HDFS-12780. Fix spelling mistake in DistCpUtils.java. Contributed by Jianfei 
Jiang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45cccadd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45cccadd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45cccadd

Branch: refs/heads/trunk
Commit: 45cccadd2e84b99ec56f1cc0e2248dc8fc844f38
Parents: 8211a3d
Author: Chris Douglas 
Authored: Tue Mar 13 11:08:11 2018 -0700
Committer: Chris Douglas 
Committed: Tue Mar 13 11:08:11 2018 -0700

--
 .../src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/45cccadd/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
index 2b3b529..eba4bee 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
@@ -527,7 +527,7 @@ public class DistCpUtils {
   /**
* Utility to compare checksums for the paths specified.
*
-   * If checksums's can't be retrieved, it doesn't fail the test
+   * If checksums can't be retrieved, it doesn't fail the test
* Only time the comparison would fail is when checksums are
* available and they don't match
*





[1/2] hadoop git commit: HDFS-13271. WebHDFS: Add constructor in SnapshottableDirectoryStatus with HdfsFileStatus as argument. Contributed by Lokesh Jain

2018-03-13 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 509d6b1fe -> 032f05ea9
  refs/heads/trunk 0355ec20e -> b2b9ce585


HDFS-13271. WebHDFS: Add constructor in SnapshottableDirectoryStatus with 
HdfsFileStatus as argument. Contributed by Lokesh Jain


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2b9ce58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2b9ce58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2b9ce58

Branch: refs/heads/trunk
Commit: b2b9ce585984a1791a8af3e2287c75c75b95586f
Parents: 0355ec2
Author: Chris Douglas 
Authored: Tue Mar 13 09:43:22 2018 -0700
Committer: Chris Douglas 
Committed: Tue Mar 13 09:43:22 2018 -0700

--
 .../hadoop/hdfs/protocol/HdfsFileStatus.java| 27 +---
 .../protocol/SnapshottableDirectoryStatus.java  |  8 ++
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 10 +---
 3 files changed, 10 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2b9ce58/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index cb05c75..264e3f4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -50,32 +50,7 @@ public interface HdfsFileStatus
 HAS_ACL,
 HAS_CRYPT,
 HAS_EC,
-SNAPSHOT_ENABLED;
-
-/**
- * Generates an enum set of Flags from a set of attr flags.
- * @param attr Set of attr flags
- * @return EnumSet of Flags
- */
-public static EnumSet convert(Set attr) {
-  if (attr.isEmpty()) {
-return EnumSet.noneOf(Flags.class);
-  }
-  EnumSet flags = EnumSet.noneOf(Flags.class);
-  if (attr.contains(AttrFlags.HAS_ACL)) {
-flags.add(Flags.HAS_ACL);
-  }
-  if (attr.contains(AttrFlags.HAS_EC)) {
-flags.add(Flags.HAS_EC);
-  }
-  if (attr.contains(AttrFlags.HAS_CRYPT)) {
-flags.add(Flags.HAS_CRYPT);
-  }
-  if (attr.contains(AttrFlags.SNAPSHOT_ENABLED)) {
-flags.add(Flags.SNAPSHOT_ENABLED);
-  }
-  return flags;
-}
+SNAPSHOT_ENABLED
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2b9ce58/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index 6cdb2ee..0d35238 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -79,6 +79,14 @@ public class SnapshottableDirectoryStatus {
 this.parentFullPath = parentFullPath;
   }
 
+  public SnapshottableDirectoryStatus(HdfsFileStatus dirStatus,
+  int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
+this.dirStatus = dirStatus;
+this.snapshotNumber = snapshotNumber;
+this.snapshotQuota = snapshotQuota;
+this.parentFullPath = parentFullPath;
+  }
+
   /**
* @return Number of snapshots that have been taken for the directory
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2b9ce58/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index aa79dc4..13c5226 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -65,7 +65,6 @@ import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 
 class JsonUtilClient {
   static final DatanodeInfo[] 

[2/2] hadoop git commit: HDFS-13271. WebHDFS: Add constructor in SnapshottableDirectoryStatus with HdfsFileStatus as argument. Contributed by Lokesh Jain

2018-03-13 Thread cdouglas
HDFS-13271. WebHDFS: Add constructor in SnapshottableDirectoryStatus with 
HdfsFileStatus as argument. Contributed by Lokesh Jain

(cherry picked from commit b2b9ce585984a1791a8af3e2287c75c75b95586f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/032f05ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/032f05ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/032f05ea

Branch: refs/heads/branch-3.1
Commit: 032f05ea90a3f34af78db411b5a4f4e46df80740
Parents: 509d6b1
Author: Chris Douglas 
Authored: Tue Mar 13 09:43:22 2018 -0700
Committer: Chris Douglas 
Committed: Tue Mar 13 09:44:30 2018 -0700

--
 .../hadoop/hdfs/protocol/HdfsFileStatus.java| 27 +---
 .../protocol/SnapshottableDirectoryStatus.java  |  8 ++
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 10 +---
 3 files changed, 10 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/032f05ea/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index cb05c75..264e3f4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -50,32 +50,7 @@ public interface HdfsFileStatus
 HAS_ACL,
 HAS_CRYPT,
 HAS_EC,
-SNAPSHOT_ENABLED;
-
-/**
- * Generates an enum set of Flags from a set of attr flags.
- * @param attr Set of attr flags
- * @return EnumSet of Flags
- */
-public static EnumSet convert(Set attr) {
-  if (attr.isEmpty()) {
-return EnumSet.noneOf(Flags.class);
-  }
-  EnumSet flags = EnumSet.noneOf(Flags.class);
-  if (attr.contains(AttrFlags.HAS_ACL)) {
-flags.add(Flags.HAS_ACL);
-  }
-  if (attr.contains(AttrFlags.HAS_EC)) {
-flags.add(Flags.HAS_EC);
-  }
-  if (attr.contains(AttrFlags.HAS_CRYPT)) {
-flags.add(Flags.HAS_CRYPT);
-  }
-  if (attr.contains(AttrFlags.SNAPSHOT_ENABLED)) {
-flags.add(Flags.SNAPSHOT_ENABLED);
-  }
-  return flags;
-}
+SNAPSHOT_ENABLED
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/032f05ea/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index 6cdb2ee..0d35238 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -79,6 +79,14 @@ public class SnapshottableDirectoryStatus {
 this.parentFullPath = parentFullPath;
   }
 
+  public SnapshottableDirectoryStatus(HdfsFileStatus dirStatus,
+  int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
+this.dirStatus = dirStatus;
+this.snapshotNumber = snapshotNumber;
+this.snapshotQuota = snapshotQuota;
+this.parentFullPath = parentFullPath;
+  }
+
   /**
* @return Number of snapshots that have been taken for the directory
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/032f05ea/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index aa79dc4..13c5226 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -65,7 +65,6 @@ import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 
 class JsonUtilClient {
   static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
@@ -772,15 +771,8 @@ class 

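For reference, a small sketch of the added constructor in use (not part of the patch): the helper name and arguments are hypothetical, and the HdfsFileStatus is assumed to come from an earlier listing or a decoded WebHDFS response, which is the JsonUtilClient case this change targets.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;

public final class SnapshottableStatusSketch {
  private SnapshottableStatusSketch() {}

  // Wrap an already-resolved directory status using the constructor added by
  // HDFS-13271, instead of re-supplying every HdfsFileStatus field separately.
  public static SnapshottableDirectoryStatus toSnapshottable(
      HdfsFileStatus dirStatus, int snapshotNumber, int snapshotQuota,
      String parentFullPath) {
    return new SnapshottableDirectoryStatus(dirStatus, snapshotNumber,
        snapshotQuota, parentFullPath.getBytes(StandardCharsets.UTF_8));
  }
}
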
[3/6] hadoop git commit: HADOOP-14696. parallel tests don't work for Windows. Contributed by Allen Wittenauer

2018-03-12 Thread cdouglas
HADOOP-14696. parallel tests don't work for Windows. Contributed by Allen 
Wittenauer

(cherry picked from commit 45d1b0fdcc04a86be91a9b72073cdc30bec04d3b)
(cherry picked from commit 09940b1eb3b7ed764149f4a993c1857e9c6ad938)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a03c8ea6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a03c8ea6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a03c8ea6

Branch: refs/heads/branch-3.0
Commit: a03c8ea61f6e30a9d462571ace23858b6e0fd1c9
Parents: 430cdfe
Author: Chris Douglas 
Authored: Mon Mar 12 19:47:42 2018 -0700
Committer: Chris Douglas 
Committed: Mon Mar 12 20:06:22 2018 -0700

--
 hadoop-common-project/hadoop-common/pom.xml |  26 +
 .../apache/hadoop/test/GenericTestUtils.java|  68 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  26 +
 .../plugin/paralleltests/CreateDirsMojo.java| 100 +++
 hadoop-tools/hadoop-aws/pom.xml |  26 +
 5 files changed, 161 insertions(+), 85 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a03c8ea6/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 82cd558..b085e36 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -979,30 +979,13 @@
   
 
   
-maven-antrun-plugin
+org.apache.hadoop
+hadoop-maven-plugins
 
   
-create-parallel-tests-dirs
-test-compile
-
-  
-
-  
-
+parallel-tests-createdir
 
-  run
+  parallel-tests-createdir
 
   
 
@@ -1015,6 +998,7 @@
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
   
+${testsThreadCount}
 
${test.build.data}/${surefire.forkNumber}
 
${test.build.dir}/${surefire.forkNumber}
 
${hadoop.tmp.dir}/${surefire.forkNumber}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a03c8ea6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 72c8d41..b6f49da 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -286,7 +286,7 @@ public abstract class GenericTestUtils {
   public static void assertExists(File f) {
 Assert.assertTrue("File " + f + " should exist", f.exists());
   }
-
+
   /**
* List all of the files in 'dir' that match the regex 'pattern'.
* Then check that this list is identical to 'expectedMatches'.
@@ -294,7 +294,7 @@ public abstract class GenericTestUtils {
*/
   public static void assertGlobEquals(File dir, String pattern,
   String ... expectedMatches) throws IOException {
-
+
 Set found = Sets.newTreeSet();
 for (File f : FileUtil.listFiles(dir)) {
   if (f.getName().matches(pattern)) {
@@ -332,7 +332,7 @@ public abstract class GenericTestUtils {
   + StringUtils.stringifyException(t),
   t);
 }
-  }  
+  }
 
   /**
* Wait for the specified test to return true. The test will be performed
@@ -482,18 +482,18 @@ public abstract class GenericTestUtils {
*/
   public static class DelayAnswer implements Answer {
 private final Log LOG;
-
+
 private final CountDownLatch fireLatch = new CountDownLatch(1);
 private final CountDownLatch waitLatch = new 

[6/6] hadoop git commit: HADOOP-14696. parallel tests don't work for Windows. Contributed by Allen Wittenauer

2018-03-12 Thread cdouglas
HADOOP-14696. parallel tests don't work for Windows. Contributed by Allen 
Wittenauer

(cherry picked from commit 45d1b0fdcc04a86be91a9b72073cdc30bec04d3b)
(cherry picked from commit 09940b1eb3b7ed764149f4a993c1857e9c6ad938)
(cherry picked from commit a03c8ea61f6e30a9d462571ace23858b6e0fd1c9)
(cherry picked from commit 340cd5f1b137dccc033023ed92cc87b7e47f45c2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f12f540c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f12f540c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f12f540c

Branch: refs/heads/branch-2.9
Commit: f12f540c62b91552d25e9ca1e9a387992823f350
Parents: da27005
Author: Chris Douglas 
Authored: Mon Mar 12 19:47:42 2018 -0700
Committer: Chris Douglas 
Committed: Mon Mar 12 20:08:56 2018 -0700

--
 hadoop-common-project/hadoop-common/pom.xml |  26 +
 .../apache/hadoop/test/GenericTestUtils.java|  68 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  26 +
 .../plugin/paralleltests/CreateDirsMojo.java| 100 +++
 hadoop-tools/hadoop-aws/pom.xml |  26 +
 5 files changed, 161 insertions(+), 85 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f12f540c/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 06a9252..88c3b10 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -795,30 +795,13 @@
   
 
   
-maven-antrun-plugin
+org.apache.hadoop
+hadoop-maven-plugins
 
   
-create-parallel-tests-dirs
-test-compile
-
-  
-
-  
-
+parallel-tests-createdir
 
-  run
+  parallel-tests-createdir
 
   
 
@@ -831,6 +814,7 @@
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
   
+${testsThreadCount}
 
${test.build.data}/${surefire.forkNumber}
 
${test.build.dir}/${surefire.forkNumber}
 
${hadoop.tmp.dir}/${surefire.forkNumber}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f12f540c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index af47d29..2810213 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -287,7 +287,7 @@ public abstract class GenericTestUtils {
   public static void assertExists(File f) {
 Assert.assertTrue("File " + f + " should exist", f.exists());
   }
-
+
   /**
* List all of the files in 'dir' that match the regex 'pattern'.
* Then check that this list is identical to 'expectedMatches'.
@@ -295,7 +295,7 @@ public abstract class GenericTestUtils {
*/
   public static void assertGlobEquals(File dir, String pattern,
   String ... expectedMatches) throws IOException {
-
+
 Set found = Sets.newTreeSet();
 for (File f : FileUtil.listFiles(dir)) {
   if (f.getName().matches(pattern)) {
@@ -333,7 +333,7 @@ public abstract class GenericTestUtils {
   + StringUtils.stringifyException(t),
   t);
 }
-  }  
+  }
 
   /**
* Wait for the specified test to return true. The test will be performed
@@ -483,18 +483,18 @@ public abstract class GenericTestUtils {
*/
   public static class DelayAnswer implements Answer {
 private final Log LOG;
- 

[1/6] hadoop git commit: HADOOP-14696. parallel tests don't work for Windows. Contributed by Allen Wittenauer

2018-03-12 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 38c21ec65 -> 340cd5f1b
  refs/heads/branch-2.9 da270052a -> f12f540c6
  refs/heads/branch-3.0 430cdfefc -> a03c8ea61
  refs/heads/branch-3.0.1 eaf7b0382 -> 3b922cdb0
  refs/heads/branch-3.1 c96673174 -> 09940b1eb
  refs/heads/trunk 19292bc26 -> 45d1b0fdc


HADOOP-14696. parallel tests don't work for Windows. Contributed by Allen 
Wittenauer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45d1b0fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45d1b0fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45d1b0fd

Branch: refs/heads/trunk
Commit: 45d1b0fdcc04a86be91a9b72073cdc30bec04d3b
Parents: 19292bc
Author: Chris Douglas 
Authored: Mon Mar 12 19:47:42 2018 -0700
Committer: Chris Douglas 
Committed: Mon Mar 12 20:05:39 2018 -0700

--
 hadoop-common-project/hadoop-common/pom.xml |  26 +
 .../apache/hadoop/test/GenericTestUtils.java|  68 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  26 +
 .../plugin/paralleltests/CreateDirsMojo.java| 100 +++
 hadoop-tools/hadoop-aws/pom.xml |  26 +
 5 files changed, 161 insertions(+), 85 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/45d1b0fd/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 078a943..49d3575 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -979,30 +979,13 @@
   
 
   
-maven-antrun-plugin
+org.apache.hadoop
+hadoop-maven-plugins
 
   
-create-parallel-tests-dirs
-test-compile
-
-  
-
-  
-
+parallel-tests-createdir
 
-  run
+  parallel-tests-createdir
 
   
 
@@ -1015,6 +998,7 @@
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
   
+${testsThreadCount}
 
${test.build.data}/${surefire.forkNumber}
 
${test.build.dir}/${surefire.forkNumber}
 
${hadoop.tmp.dir}/${surefire.forkNumber}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45d1b0fd/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index cdde48c..61b0271 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -286,7 +286,7 @@ public abstract class GenericTestUtils {
   public static void assertExists(File f) {
 Assert.assertTrue("File " + f + " should exist", f.exists());
   }
-
+
   /**
* List all of the files in 'dir' that match the regex 'pattern'.
* Then check that this list is identical to 'expectedMatches'.
@@ -294,7 +294,7 @@ public abstract class GenericTestUtils {
*/
   public static void assertGlobEquals(File dir, String pattern,
   String ... expectedMatches) throws IOException {
-
+
 Set found = Sets.newTreeSet();
 for (File f : FileUtil.listFiles(dir)) {
   if (f.getName().matches(pattern)) {
@@ -349,7 +349,7 @@ public abstract class GenericTestUtils {
   StringUtils.stringifyException(t)),
   t);
 }
-  }  
+  }
 
   /**
* Wait for the specified test to return true. The test will be performed
@@ -499,18 +499,18 @@ public abstract class GenericTestUtils {
*/
   public static class DelayAnswer implements 

[5/6] hadoop git commit: HADOOP-14696. parallel tests don't work for Windows. Contributed by Allen Wittenauer

2018-03-12 Thread cdouglas
HADOOP-14696. parallel tests don't work for Windows. Contributed by Allen 
Wittenauer

(cherry picked from commit 45d1b0fdcc04a86be91a9b72073cdc30bec04d3b)
(cherry picked from commit 09940b1eb3b7ed764149f4a993c1857e9c6ad938)
(cherry picked from commit a03c8ea61f6e30a9d462571ace23858b6e0fd1c9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/340cd5f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/340cd5f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/340cd5f1

Branch: refs/heads/branch-2
Commit: 340cd5f1b137dccc033023ed92cc87b7e47f45c2
Parents: 38c21ec
Author: Chris Douglas 
Authored: Mon Mar 12 19:47:42 2018 -0700
Committer: Chris Douglas 
Committed: Mon Mar 12 20:08:46 2018 -0700

--
 hadoop-common-project/hadoop-common/pom.xml |  26 +
 .../apache/hadoop/test/GenericTestUtils.java|  68 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  26 +
 .../plugin/paralleltests/CreateDirsMojo.java| 100 +++
 hadoop-tools/hadoop-aws/pom.xml |  26 +
 5 files changed, 161 insertions(+), 85 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/340cd5f1/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 8c9dffc..85bb5d3 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -795,30 +795,13 @@
   
 
   
-maven-antrun-plugin
+org.apache.hadoop
+hadoop-maven-plugins
 
   
-create-parallel-tests-dirs
-test-compile
-
-  
-
-  
-
+parallel-tests-createdir
 
-  run
+  parallel-tests-createdir
 
   
 
@@ -831,6 +814,7 @@
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
   
+${testsThreadCount}
 
${test.build.data}/${surefire.forkNumber}
 
${test.build.dir}/${surefire.forkNumber}
 
${hadoop.tmp.dir}/${surefire.forkNumber}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/340cd5f1/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index af47d29..2810213 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -287,7 +287,7 @@ public abstract class GenericTestUtils {
   public static void assertExists(File f) {
 Assert.assertTrue("File " + f + " should exist", f.exists());
   }
-
+
   /**
* List all of the files in 'dir' that match the regex 'pattern'.
* Then check that this list is identical to 'expectedMatches'.
@@ -295,7 +295,7 @@ public abstract class GenericTestUtils {
*/
   public static void assertGlobEquals(File dir, String pattern,
   String ... expectedMatches) throws IOException {
-
+
 Set found = Sets.newTreeSet();
 for (File f : FileUtil.listFiles(dir)) {
   if (f.getName().matches(pattern)) {
@@ -333,7 +333,7 @@ public abstract class GenericTestUtils {
   + StringUtils.stringifyException(t),
   t);
 }
-  }  
+  }
 
   /**
* Wait for the specified test to return true. The test will be performed
@@ -483,18 +483,18 @@ public abstract class GenericTestUtils {
*/
   public static class DelayAnswer implements Answer {
 private final Log LOG;
-
+
 private final CountDownLatch fireLatch = new 

[2/6] hadoop git commit: HADOOP-14696. parallel tests don't work for Windows. Contributed by Allen Wittenauer

2018-03-12 Thread cdouglas
HADOOP-14696. parallel tests don't work for Windows. Contributed by Allen 
Wittenauer

(cherry picked from commit 45d1b0fdcc04a86be91a9b72073cdc30bec04d3b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09940b1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09940b1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09940b1e

Branch: refs/heads/branch-3.1
Commit: 09940b1eb3b7ed764149f4a993c1857e9c6ad938
Parents: c966731
Author: Chris Douglas 
Authored: Mon Mar 12 19:47:42 2018 -0700
Committer: Chris Douglas 
Committed: Mon Mar 12 20:06:04 2018 -0700

--
 hadoop-common-project/hadoop-common/pom.xml |  26 +
 .../apache/hadoop/test/GenericTestUtils.java|  68 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  26 +
 .../plugin/paralleltests/CreateDirsMojo.java| 100 +++
 hadoop-tools/hadoop-aws/pom.xml |  26 +
 5 files changed, 161 insertions(+), 85 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09940b1e/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index aae040a..70f08c8 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -979,30 +979,13 @@
   
 
   
-maven-antrun-plugin
+org.apache.hadoop
+hadoop-maven-plugins
 
   
-create-parallel-tests-dirs
-test-compile
-
-  
-
-  
-
+parallel-tests-createdir
 
-  run
+  parallel-tests-createdir
 
   
 
@@ -1015,6 +998,7 @@
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
   
+${testsThreadCount}
 
${test.build.data}/${surefire.forkNumber}
 
${test.build.dir}/${surefire.forkNumber}
 
${hadoop.tmp.dir}/${surefire.forkNumber}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09940b1e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index cdde48c..61b0271 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -286,7 +286,7 @@ public abstract class GenericTestUtils {
   public static void assertExists(File f) {
 Assert.assertTrue("File " + f + " should exist", f.exists());
   }
-
+
   /**
* List all of the files in 'dir' that match the regex 'pattern'.
* Then check that this list is identical to 'expectedMatches'.
@@ -294,7 +294,7 @@ public abstract class GenericTestUtils {
*/
   public static void assertGlobEquals(File dir, String pattern,
   String ... expectedMatches) throws IOException {
-
+
 Set found = Sets.newTreeSet();
 for (File f : FileUtil.listFiles(dir)) {
   if (f.getName().matches(pattern)) {
@@ -349,7 +349,7 @@ public abstract class GenericTestUtils {
   StringUtils.stringifyException(t)),
   t);
 }
-  }  
+  }
 
   /**
* Wait for the specified test to return true. The test will be performed
@@ -499,18 +499,18 @@ public abstract class GenericTestUtils {
*/
   public static class DelayAnswer implements Answer {
 private final Log LOG;
-
+
 private final CountDownLatch fireLatch = new CountDownLatch(1);
 private final CountDownLatch waitLatch = new CountDownLatch(1);
 private final CountDownLatch resultLatch = new 

[4/6] hadoop git commit: HADOOP-14696. parallel tests don't work for Windows. Contributed by Allen Wittenauer

2018-03-12 Thread cdouglas
HADOOP-14696. parallel tests don't work for Windows. Contributed by Allen 
Wittenauer

(cherry picked from commit 45d1b0fdcc04a86be91a9b72073cdc30bec04d3b)
(cherry picked from commit 09940b1eb3b7ed764149f4a993c1857e9c6ad938)
(cherry picked from commit a03c8ea61f6e30a9d462571ace23858b6e0fd1c9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b922cdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b922cdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b922cdb

Branch: refs/heads/branch-3.0.1
Commit: 3b922cdb04e27e09c1b4843edafc896ee916d5e6
Parents: eaf7b03
Author: Chris Douglas 
Authored: Mon Mar 12 19:47:42 2018 -0700
Committer: Chris Douglas 
Committed: Mon Mar 12 20:06:39 2018 -0700

--
 hadoop-common-project/hadoop-common/pom.xml |  26 +
 .../apache/hadoop/test/GenericTestUtils.java|  68 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  26 +
 .../plugin/paralleltests/CreateDirsMojo.java| 100 +++
 hadoop-tools/hadoop-aws/pom.xml |  26 +
 5 files changed, 161 insertions(+), 85 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b922cdb/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 296ceec..dfdb34a 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -979,30 +979,13 @@
   
 
   
-maven-antrun-plugin
+org.apache.hadoop
+hadoop-maven-plugins
 
   
-create-parallel-tests-dirs
-test-compile
-
-  
-
-  
-
+parallel-tests-createdir
 
-  run
+  parallel-tests-createdir
 
   
 
@@ -1015,6 +998,7 @@
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
   
+${testsThreadCount}
 
${test.build.data}/${surefire.forkNumber}
 
${test.build.dir}/${surefire.forkNumber}
 
${hadoop.tmp.dir}/${surefire.forkNumber}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b922cdb/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 72c8d41..b6f49da 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -286,7 +286,7 @@ public abstract class GenericTestUtils {
   public static void assertExists(File f) {
 Assert.assertTrue("File " + f + " should exist", f.exists());
   }
-
+
   /**
* List all of the files in 'dir' that match the regex 'pattern'.
* Then check that this list is identical to 'expectedMatches'.
@@ -294,7 +294,7 @@ public abstract class GenericTestUtils {
*/
   public static void assertGlobEquals(File dir, String pattern,
   String ... expectedMatches) throws IOException {
-
+
 Set found = Sets.newTreeSet();
 for (File f : FileUtil.listFiles(dir)) {
   if (f.getName().matches(pattern)) {
@@ -332,7 +332,7 @@ public abstract class GenericTestUtils {
   + StringUtils.stringifyException(t),
   t);
 }
-  }  
+  }
 
   /**
* Wait for the specified test to return true. The test will be performed
@@ -482,18 +482,18 @@ public abstract class GenericTestUtils {
*/
   public static class DelayAnswer implements Answer {
 private final Log LOG;
-
+
 private final CountDownLatch fireLatch = new 

[2/2] hadoop git commit: HDFS-12677. Extend TestReconstructStripedFile with a random EC policy. Contributed by Takanobu Asanuma

2018-03-12 Thread cdouglas
HDFS-12677. Extend TestReconstructStripedFile with a random EC policy. 
Contributed by Takanobu Asanuma

(cherry picked from commit 39a5fbae479ecee3a563e2f4eb937471fbf666f8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75d3699a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75d3699a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75d3699a

Branch: refs/heads/branch-3.1
Commit: 75d3699a00731f6096b3524d879d663552cf9de7
Parents: c9364b3
Author: Chris Douglas 
Authored: Mon Mar 12 14:29:44 2018 -0700
Committer: Chris Douglas 
Committed: Mon Mar 12 14:30:06 2018 -0700

--
 .../hadoop/hdfs/TestReconstructStripedFile.java | 48 +++
 ...econstructStripedFileWithRandomECPolicy.java | 49 
 2 files changed, 78 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75d3699a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index 7201e11..1e93a2d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -67,14 +68,13 @@ import org.junit.Test;
 public class TestReconstructStripedFile {
   public static final Log LOG = 
LogFactory.getLog(TestReconstructStripedFile.class);
 
-  private final ErasureCodingPolicy ecPolicy =
-  StripedFileTestUtil.getDefaultECPolicy();
-  private final int dataBlkNum = ecPolicy.getNumDataUnits();
-  private final int parityBlkNum = ecPolicy.getNumParityUnits();
-  private final int cellSize = ecPolicy.getCellSize();
-  private final int blockSize = cellSize * 3;
-  private final int groupSize = dataBlkNum + parityBlkNum;
-  private final int dnNum = groupSize + parityBlkNum;
+  private ErasureCodingPolicy ecPolicy;
+  private int dataBlkNum;
+  private int parityBlkNum;
+  private int cellSize;
+  private int blockSize;
+  private int groupSize;
+  private int dnNum;
 
   static {
 GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
@@ -95,8 +95,20 @@ public class TestReconstructStripedFile {
   private Map dnMap = new HashMap<>();
   private final Random random = new Random();
 
+  public ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
   @Before
   public void setup() throws IOException {
+ecPolicy = getEcPolicy();
+dataBlkNum = ecPolicy.getNumDataUnits();
+parityBlkNum = ecPolicy.getNumParityUnits();
+cellSize = ecPolicy.getCellSize();
+blockSize = cellSize * 3;
+groupSize = dataBlkNum + parityBlkNum;
+dnNum = groupSize + parityBlkNum;
+
 conf = new Configuration();
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
 conf.setInt(
@@ -114,10 +126,8 @@ public class TestReconstructStripedFile {
 cluster.waitActive();
 
 fs = cluster.getFileSystem();
-fs.enableErasureCodingPolicy(
-StripedFileTestUtil.getDefaultECPolicy().getName());
-fs.getClient().setErasureCodingPolicy("/",
-StripedFileTestUtil.getDefaultECPolicy().getName());
+fs.enableErasureCodingPolicy(ecPolicy.getName());
+fs.getClient().setErasureCodingPolicy("/", ecPolicy.getName());
 
 List datanodes = cluster.getDataNodes();
 for (int i = 0; i < dnNum; i++) {
@@ -432,7 +442,7 @@ public class TestReconstructStripedFile {
 
 BlockECReconstructionInfo invalidECInfo = new BlockECReconstructionInfo(
 new ExtendedBlock("bp-id", 123456), dataDNs, dnStorageInfo, 
liveIndices,
-StripedFileTestUtil.getDefaultECPolicy());
+ecPolicy);
 List ecTasks = new ArrayList<>();
 ecTasks.add(invalidECInfo);
 dataNode.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
@@ -461,7 +471,8 @@ public class TestReconstructStripedFile {
 .numDataNodes(numDataNodes).build();
 cluster.waitActive();
 fs = cluster.getFileSystem();
-ErasureCodingPolicy policy = StripedFileTestUtil.getDefaultECPolicy();
+ErasureCodingPolicy policy = ecPolicy;
+fs.enableErasureCodingPolicy(policy.getName());
 fs.getClient().setErasureCodingPolicy("/", 

[1/2] hadoop git commit: HDFS-12677. Extend TestReconstructStripedFile with a random EC policy. Contributed by Takanobu Asanuma

2018-03-12 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 c9364b3bc -> 75d3699a0
  refs/heads/trunk ddb67ca70 -> 39a5fbae4


HDFS-12677. Extend TestReconstructStripedFile with a random EC policy. 
Contributed by Takanobu Asanuma


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39a5fbae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39a5fbae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39a5fbae

Branch: refs/heads/trunk
Commit: 39a5fbae479ecee3a563e2f4eb937471fbf666f8
Parents: ddb67ca
Author: Chris Douglas 
Authored: Mon Mar 12 14:29:44 2018 -0700
Committer: Chris Douglas 
Committed: Mon Mar 12 14:29:44 2018 -0700

--
 .../hadoop/hdfs/TestReconstructStripedFile.java | 48 +++
 ...econstructStripedFileWithRandomECPolicy.java | 49 
 2 files changed, 78 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39a5fbae/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index 7201e11..1e93a2d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -67,14 +68,13 @@ import org.junit.Test;
 public class TestReconstructStripedFile {
   public static final Log LOG = 
LogFactory.getLog(TestReconstructStripedFile.class);
 
-  private final ErasureCodingPolicy ecPolicy =
-  StripedFileTestUtil.getDefaultECPolicy();
-  private final int dataBlkNum = ecPolicy.getNumDataUnits();
-  private final int parityBlkNum = ecPolicy.getNumParityUnits();
-  private final int cellSize = ecPolicy.getCellSize();
-  private final int blockSize = cellSize * 3;
-  private final int groupSize = dataBlkNum + parityBlkNum;
-  private final int dnNum = groupSize + parityBlkNum;
+  private ErasureCodingPolicy ecPolicy;
+  private int dataBlkNum;
+  private int parityBlkNum;
+  private int cellSize;
+  private int blockSize;
+  private int groupSize;
+  private int dnNum;
 
   static {
 GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
@@ -95,8 +95,20 @@ public class TestReconstructStripedFile {
   private Map dnMap = new HashMap<>();
   private final Random random = new Random();
 
+  public ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
   @Before
   public void setup() throws IOException {
+ecPolicy = getEcPolicy();
+dataBlkNum = ecPolicy.getNumDataUnits();
+parityBlkNum = ecPolicy.getNumParityUnits();
+cellSize = ecPolicy.getCellSize();
+blockSize = cellSize * 3;
+groupSize = dataBlkNum + parityBlkNum;
+dnNum = groupSize + parityBlkNum;
+
 conf = new Configuration();
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
 conf.setInt(
@@ -114,10 +126,8 @@ public class TestReconstructStripedFile {
 cluster.waitActive();
 
 fs = cluster.getFileSystem();
-fs.enableErasureCodingPolicy(
-StripedFileTestUtil.getDefaultECPolicy().getName());
-fs.getClient().setErasureCodingPolicy("/",
-StripedFileTestUtil.getDefaultECPolicy().getName());
+fs.enableErasureCodingPolicy(ecPolicy.getName());
+fs.getClient().setErasureCodingPolicy("/", ecPolicy.getName());
 
 List datanodes = cluster.getDataNodes();
 for (int i = 0; i < dnNum; i++) {
@@ -432,7 +442,7 @@ public class TestReconstructStripedFile {
 
 BlockECReconstructionInfo invalidECInfo = new BlockECReconstructionInfo(
 new ExtendedBlock("bp-id", 123456), dataDNs, dnStorageInfo, 
liveIndices,
-StripedFileTestUtil.getDefaultECPolicy());
+ecPolicy);
 List ecTasks = new ArrayList<>();
 ecTasks.add(invalidECInfo);
 dataNode.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
@@ -461,7 +471,8 @@ public class TestReconstructStripedFile {
 .numDataNodes(numDataNodes).build();
 cluster.waitActive();
 fs = cluster.getFileSystem();
-ErasureCodingPolicy policy = StripedFileTestUtil.getDefaultECPolicy();
+ErasureCodingPolicy policy = ecPolicy;
+fs.enableErasureCodingPolicy(policy.getName());
  

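The core of the change above is that TestReconstructStripedFile now derives
dataBlkNum, parityBlkNum, cellSize and dnNum in setup() from an overridable
getEcPolicy() hook instead of final fields, so a subclass can rerun every test
case against a different erasure coding policy. A minimal sketch of that
pattern, assuming only the public SystemErasureCodingPolicies API; the class
name and the random selection below are illustrative, not the committed
TestReconstructStripedFileWithRandomECPolicy code:

```java
package org.apache.hadoop.hdfs;

import java.util.List;
import java.util.Random;

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;

/** Illustrative subclass: rerun the parent's tests with a random policy. */
public class TestReconstructStripedFileWithSomePolicy
    extends TestReconstructStripedFile {

  private static final Random RANDOM = new Random();

  @Override
  public ErasureCodingPolicy getEcPolicy() {
    // setup() recomputes block, cell and datanode counts from whatever policy
    // is returned here, so overriding this single hook is sufficient.
    List<ErasureCodingPolicy> policies =
        SystemErasureCodingPolicies.getPolicies();
    return policies.get(RANDOM.nextInt(policies.size()));
  }
}
```
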
[2/3] hadoop git commit: HADOOP-14742. Document multi-URI replication Inode for ViewFS. Contributed by Gera Shegalov

2018-03-12 Thread cdouglas
HADOOP-14742. Document multi-URI replication Inode for ViewFS. Contributed by 
Gera Shegalov

(cherry picked from commit ddb67ca707de896cd0ba5cda3c0d1a2d9edca968)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9364b3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9364b3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9364b3b

Branch: refs/heads/branch-3.1
Commit: c9364b3bce30946a6c0154974d0adc6accb3bea3
Parents: 2bda1ff
Author: Chris Douglas 
Authored: Mon Mar 12 13:42:38 2018 -0700
Committer: Chris Douglas 
Committed: Mon Mar 12 13:47:01 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/ViewFs.md | 139 +++
 1 file changed, 139 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9364b3b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
index 1008583..f851ef6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
@@ -180,6 +180,145 @@ Recall that one cannot rename files or directories across 
namenodes or clusters
 
 This will NOT work in the new world if `/user` and `/data` are actually stored 
on different namenodes within a cluster.
 
+Multi-Filesystem I/O with Nfly Mount Points
+-
+
+HDFS and other distributed filesystems provide data resilience via some sort of
+redundancy such as block replication or more sophisticated distributed 
encoding.
+However, modern setups may be comprised of multiple Hadoop clusters, enterprise
+filers, hosted on and off premise. Nfly mount points make it possible for a
+single logical file to be synchronously replicated by multiple filesystems.
+It's designed for relatively small files up to a gigabyte. In general it's a
+function of a single core/single network link performance since the logic
+resides in a single client JVM using ViewFs such as FsShell or a
+MapReduce task.
+
+### Basic Configuration
+
+Consider the following example to understand the basic configuration of Nfly.
+Suppose we want to keep the directory `ads` replicated on three filesystems
+represented by URIs: `uri1`, `uri2` and `uri3`.
+
+```xml
+  
+fs.viewfs.mounttable.global.linkNfly../ads
+uri1,uri2,uri3
+  
+```
+Note 2 consecutive `..` in the property name. They arise because of empty
+settings for advanced tweaking of the mount point which we will show in
+subsequent sections. The property value is a comma-separated list of URIs.
+
+URIs may point to different clusters in different regions
+`hdfs://datacenter-east/ads`, `s3a://models-us-west/ads`, 
`hdfs://datacenter-west/ads`
+or in the simplest case to different directories under the same filesystem,
+e.g., `file:/tmp/ads1`, `file:/tmp/ads2`, `file:/tmp/ads3`
+
+All *modifications* performed under the global path `viewfs://global/ads` are
+propagated to all destination URIs if the underlying system is available.
+
+For instance if we create a file via hadoop shell
+```bash
+hadoop fs -touchz viewfs://global/ads/z1
+```
+
+We will find it via local filesystem in the latter configuration
+```bash
+ls -al /tmp/ads*/z1
+-rw-r--r--  1 user  wheel  0 Mar 11 12:17 /tmp/ads1/z1
+-rw-r--r--  1 user  wheel  0 Mar 11 12:17 /tmp/ads2/z1
+-rw-r--r--  1 user  wheel  0 Mar 11 12:17 /tmp/ads3/z1
+```
+
+A read from the global path is processed by the first filesystem that does not
+result in an exception. The order in which filesystems are accessed depends on
+whether they are available at this moment and whether a topological order
+exists.
+
+### Advanced Configuration
+
+Mount points `linkNfly` can be further configured using parameters passed as a
+comma-separated list of key=value pairs. The following parameters are currently
+supported.
+
+`minReplication=int` determines the minimum number of destinations that have to
+process a write modification without exceptions; if fewer succeed, the nfly
+write fails. It is a configuration error to have minReplication higher than the
+number of target URIs. The default is 2.
+
+If minReplication is lower than the number of target URIs, some target URIs may
+miss the latest writes. This can be compensated for by employing more expensive
+read operations, controlled by the following settings:
+
+`readMostRecent=boolean` if set to `true` causes the Nfly client to check the
+path under all target URIs instead of just the first one based on the topology
+order. Among the targets available at the moment, the one with the most recent
+modification time is processed.
+
+`repairOnRead=boolean` if set to `true` 

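To make the advanced options above concrete, here is a small client-side
sketch. It assumes the documented convention that the key=value parameter list
sits in the slot between `linkNfly.` and the mount path (the slot that is empty
in the `..` of the basic example); the mount table name `global`, the local
file: target URIs and the chosen parameter values are placeholders, not part of
this commit:

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NflyAdvancedConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Same mount as the basic example, but with tuning parameters filled into
    // the previously empty slot between the two dots.
    conf.set("fs.viewfs.mounttable.global."
            + "linkNfly.minReplication=2,readMostRecent=true./ads",
        "file:/tmp/ads1,file:/tmp/ads2,file:/tmp/ads3");
    FileSystem viewFs = FileSystem.get(URI.create("viewfs://global/"), conf);
    // A write must succeed on at least minReplication targets; reads prefer
    // the most recently modified copy among the available targets.
    viewFs.create(new Path("/ads/z1")).close();
  }
}
```
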
[1/3] hadoop git commit: HADOOP-14742. Document multi-URI replication Inode for ViewFS. Contributed by Gera Shegalov

2018-03-12 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 29cf29a85 -> 2210593da
  refs/heads/branch-3.1 2bda1ffe7 -> c9364b3bc
  refs/heads/trunk cceb68ffe -> ddb67ca70


HADOOP-14742. Document multi-URI replication Inode for ViewFS. Contributed by 
Gera Shegalov


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ddb67ca7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ddb67ca7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ddb67ca7

Branch: refs/heads/trunk
Commit: ddb67ca707de896cd0ba5cda3c0d1a2d9edca968
Parents: cceb68f
Author: Chris Douglas 
Authored: Mon Mar 12 13:42:38 2018 -0700
Committer: Chris Douglas 
Committed: Mon Mar 12 13:43:27 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/ViewFs.md | 139 +++
 1 file changed, 139 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddb67ca7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
index 1008583..f851ef6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
@@ -180,6 +180,145 @@ Recall that one cannot rename files or directories across 
namenodes or clusters
 
 This will NOT work in the new world if `/user` and `/data` are actually stored 
on different namenodes within a cluster.
 
+Multi-Filesystem I/O with Nfly Mount Points
+-
+
+HDFS and other distributed filesystems provide data resilience via some sort of
+redundancy such as block replication or more sophisticated distributed 
encoding.
+However, modern setups may be comprised of multiple Hadoop clusters, enterprise
+filers, hosted on and off premise. Nfly mount points make it possible for a
+single logical file to be synchronously replicated by multiple filesystems.
+It's designed for relatively small files up to a gigabyte. In general it's a
+function of a single core/single network link performance since the logic
+resides in a single client JVM using ViewFs such as FsShell or a
+MapReduce task.
+
+### Basic Configuration
+
+Consider the following example to understand the basic configuration of Nfly.
+Suppose we want to keep the directory `ads` replicated on three filesystems
+represented by URIs: `uri1`, `uri2` and `uri3`.
+
+```xml
+  
+fs.viewfs.mounttable.global.linkNfly../ads
+uri1,uri2,uri3
+  
+```
+Note 2 consecutive `..` in the property name. They arise because of empty
+settings for advanced tweaking of the mount point which we will show in
+subsequent sections. The property value is a comma-separated list of URIs.
+
+URIs may point to different clusters in different regions
+`hdfs://datacenter-east/ads`, `s3a://models-us-west/ads`, 
`hdfs://datacenter-west/ads`
+or in the simplest case to different directories under the same filesystem,
+e.g., `file:/tmp/ads1`, `file:/tmp/ads2`, `file:/tmp/ads3`
+
+All *modifications* performed under the global path `viewfs://global/ads` are
+propagated to all destination URIs if the underlying system is available.
+
+For instance if we create a file via hadoop shell
+```bash
+hadoop fs -touchz viewfs://global/ads/z1
+```
+
+We will find it via local filesystem in the latter configuration
+```bash
+ls -al /tmp/ads*/z1
+-rw-r--r--  1 user  wheel  0 Mar 11 12:17 /tmp/ads1/z1
+-rw-r--r--  1 user  wheel  0 Mar 11 12:17 /tmp/ads2/z1
+-rw-r--r--  1 user  wheel  0 Mar 11 12:17 /tmp/ads3/z1
+```
+
+A read from the global path is processed by the first filesystem that does not
+result in an exception. The order in which filesystems are accessed depends on
+whether they are available at this moment and whether a topological order
+exists.
+
+### Advanced Configuration
+
+Mount points `linkNfly` can be further configured using parameters passed as a
+comma-separated list of key=value pairs. The following parameters are currently
+supported.
+
+`minReplication=int` determines the minimum number of destinations that have to
+process a write modification without exceptions; if fewer succeed, the nfly
+write fails. It is a configuration error to have minReplication higher than the
+number of target URIs. The default is 2.
+
+If minReplication is lower than the number of target URIs, some target URIs may
+miss the latest writes. This can be compensated for by employing more expensive
+read operations, controlled by the following settings:
+
+`readMostRecent=boolean` if set to `true` causes the Nfly client to check the
+path under all target URIs instead of just the first one based on the topology
+order.
+Among all available at the moment the one 

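A read-side sketch of the behavior described above ("processed by the first
filesystem that does not result in an exception"), assuming the file was
already created under the mount, for example with the `-touchz` command shown
earlier; the mount table name and the local file: URIs are placeholders:

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NflyReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.viewfs.mounttable.global.linkNfly../ads",
        "file:/tmp/ads1,file:/tmp/ads2,file:/tmp/ads3");
    FileSystem viewFs = FileSystem.get(URI.create("viewfs://global/"), conf);
    // The read is served by the first target that does not throw; targets
    // that are currently unavailable are skipped.
    try (FSDataInputStream in = viewFs.open(new Path("/ads/z1"))) {
      System.out.println("first byte: " + in.read());
    }
  }
}
```
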
[3/3] hadoop git commit: HADOOP-14742. Document multi-URI replication Inode for ViewFS. Contributed by Gera Shegalov

2018-03-12 Thread cdouglas
HADOOP-14742. Document multi-URI replication Inode for ViewFS. Contributed by 
Gera Shegalov

(cherry picked from commit ddb67ca707de896cd0ba5cda3c0d1a2d9edca968)
(cherry picked from commit c9364b3bce30946a6c0154974d0adc6accb3bea3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2210593d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2210593d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2210593d

Branch: refs/heads/branch-3.0
Commit: 2210593dadedaba85107a2d79bcf73c581d39b67
Parents: 29cf29a
Author: Chris Douglas 
Authored: Mon Mar 12 13:42:38 2018 -0700
Committer: Chris Douglas 
Committed: Mon Mar 12 13:55:10 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/ViewFs.md | 139 +++
 1 file changed, 139 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2210593d/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
index e8b85f2..2a47887 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
@@ -184,6 +184,145 @@ Recall that one cannot rename files or directories across 
namenodes or clusters
 
 This will NOT work in the new world if `/user` and `/data` are actually stored 
on different namenodes within a cluster.
 
+Multi-Filesystem I/O with Nfly Mount Points
+-
+
+HDFS and other distributed filesystems provide data resilience via some sort of
+redundancy such as block replication or more sophisticated distributed 
encoding.
+However, modern setups may be comprised of multiple Hadoop clusters, enterprise
+filers, hosted on and off premise. Nfly mount points make it possible for a
+single logical file to be synchronously replicated by multiple filesystems.
+It's designed for relatively small files up to a gigabyte. In general it's a
+function of a single core/single network link performance since the logic
+resides in a single client JVM using ViewFs such as FsShell or a
+MapReduce task.
+
+### Basic Configuration
+
+Consider the following example to understand the basic configuration of Nfly.
+Suppose we want to keep the directory `ads` replicated on three filesystems
+represented by URIs: `uri1`, `uri2` and `uri3`.
+
+```xml
+  
+fs.viewfs.mounttable.global.linkNfly../ads
+uri1,uri2,uri3
+  
+```
+Note 2 consecutive `..` in the property name. They arise because of empty
+settings for advanced tweaking of the mount point which we will show in
+subsequent sections. The property value is a comma-separated list of URIs.
+
+URIs may point to different clusters in different regions
+`hdfs://datacenter-east/ads`, `s3a://models-us-west/ads`, 
`hdfs://datacenter-west/ads`
+or in the simplest case to different directories under the same filesystem,
+e.g., `file:/tmp/ads1`, `file:/tmp/ads2`, `file:/tmp/ads3`
+
+All *modifications* performed under the global path `viewfs://global/ads` are
+propagated to all destination URIs if the underlying system is available.
+
+For instance if we create a file via hadoop shell
+```bash
+hadoop fs -touchz viewfs://global/ads/z1
+```
+
+We will find it via local filesystem in the latter configuration
+```bash
+ls -al /tmp/ads*/z1
+-rw-r--r--  1 user  wheel  0 Mar 11 12:17 /tmp/ads1/z1
+-rw-r--r--  1 user  wheel  0 Mar 11 12:17 /tmp/ads2/z1
+-rw-r--r--  1 user  wheel  0 Mar 11 12:17 /tmp/ads3/z1
+```
+
+A read from the global path is processed by the first filesystem that does not
+result in an exception. The order in which filesystems are accessed depends on
+whether they are available at this moment and whether a topological order
+exists.
+
+### Advanced Configuration
+
+Mount points `linkNfly` can be further configured using parameters passed as a
+comma-separated list of key=value pairs. The following parameters are currently
+supported.
+
+`minReplication=int` determines the minimum number of destinations that have to
+process a write modification without exceptions; if fewer succeed, the nfly
+write fails. It is a configuration error to have minReplication higher than the
+number of target URIs. The default is 2.
+
+If minReplication is lower than the number of target URIs, some target URIs may
+miss the latest writes. This can be compensated for by employing more expensive
+read operations, controlled by the following settings:
+
+`readMostRecent=boolean` if set to `true` causes the Nfly client to check the
+path under all target URIs instead of just the first one based on the topology
+order.
+Among all available at the moment the one with the most recent 

[hadoop] Git Push Summary

2018-03-12 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/yarn-3409 [deleted] 84ebbfbfb




[1/2] hadoop git commit: HADOOP-13972. ADLS to support per-store configuration. Contributed by Sharad Sonker.

2018-02-28 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 4722cd9f3 -> 8e2e60731
  refs/heads/branch-2.9 0d7a3f48b -> 717295e46


HADOOP-13972. ADLS to support per-store configuration.
Contributed by Sharad Sonker.

(cherry picked from commit 050f5287b79324b7f6231b879c0bfc608203b980)
(cherry picked from commit 07c7df4b261f23e567d58936b78aee4ab73cb5fc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/717295e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/717295e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/717295e4

Branch: refs/heads/branch-2.9
Commit: 717295e464d0ccf21dde9584bfd3e1d67f3f4860
Parents: 0d7a3f4
Author: Steve Loughran 
Authored: Thu Feb 15 16:25:55 2018 +
Committer: Chris Douglas 
Committed: Wed Feb 28 14:54:17 2018 -0800

--
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |   5 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  81 +-
 .../src/site/markdown/index.md  |  37 +
 .../fs/adl/TestValidateConfiguration.java   | 152 +++
 .../hadoop/fs/adl/common/Parallelized.java  |   2 +-
 5 files changed, 239 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/717295e4/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 790902c..e3a4ad6 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -33,6 +33,11 @@ public final class AdlConfKeys {
   public static final String AZURE_AD_REFRESH_URL_KEY =
   "fs.adl.oauth2.refresh.url";
 
+  public static final String AZURE_AD_ACCOUNT_PREFIX =
+  "fs.adl.account.";
+  public static final String AZURE_AD_PREFIX =
+  "fs.adl.";
+
   // optional when provider type is refresh or client id.
   public static final String AZURE_AD_TOKEN_PROVIDER_CLASS_KEY =
   "fs.adl.oauth2.access.token.provider";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/717295e4/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 0e141e3..4488898 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -24,8 +24,10 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Map;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.microsoft.azure.datalake.store.ADLStoreClient;
 import com.microsoft.azure.datalake.store.ADLStoreOptions;
 import com.microsoft.azure.datalake.store.DirectoryEntry;
@@ -38,6 +40,8 @@ import 
com.microsoft.azure.datalake.store.oauth2.ClientCredsTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.DeviceCodeTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.MsiTokenProvider;
 import 
com.microsoft.azure.datalake.store.oauth2.RefreshTokenBasedTokenProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -75,6 +79,8 @@ import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class AdlFileSystem extends FileSystem {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(AdlFileSystem.class);
   public static final String SCHEME = "adl";
   static final int DEFAULT_PORT = 443;
   private URI uri;
@@ -116,12 +122,19 @@ public class AdlFileSystem extends FileSystem {
   /**
* Called after a new FileSystem instance is constructed.
*
-   * @param storeUri a uri whose authority section names the host, port, etc.
-   * for this FileSystem
-   * @param conf the configuration
+   * @param storeUri  a uri whose authority section names the host, port,
+   *  etc. for this FileSystem
+   * @param originalConf  the configuration to use for the FS. The account-
+   *  

[2/2] hadoop git commit: HADOOP-13972. ADLS to support per-store configuration. Contributed by Sharad Sonker.

2018-02-28 Thread cdouglas
HADOOP-13972. ADLS to support per-store configuration.
Contributed by Sharad Sonker.

(cherry picked from commit 050f5287b79324b7f6231b879c0bfc608203b980)
(cherry picked from commit 07c7df4b261f23e567d58936b78aee4ab73cb5fc)
(cherry picked from commit 717295e464d0ccf21dde9584bfd3e1d67f3f4860)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e2e6073
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e2e6073
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e2e6073

Branch: refs/heads/branch-2.8
Commit: 8e2e6073198d223e221735751fa1c76508f2d02b
Parents: 4722cd9
Author: Steve Loughran 
Authored: Thu Feb 15 16:25:55 2018 +
Committer: Chris Douglas 
Committed: Wed Feb 28 14:54:57 2018 -0800

--
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |   5 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  81 +-
 .../src/site/markdown/index.md  |  37 +
 .../fs/adl/TestValidateConfiguration.java   | 152 +++
 .../hadoop/fs/adl/common/Parallelized.java  |   2 +-
 5 files changed, 239 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e2e6073/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 790902c..e3a4ad6 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -33,6 +33,11 @@ public final class AdlConfKeys {
   public static final String AZURE_AD_REFRESH_URL_KEY =
   "fs.adl.oauth2.refresh.url";
 
+  public static final String AZURE_AD_ACCOUNT_PREFIX =
+  "fs.adl.account.";
+  public static final String AZURE_AD_PREFIX =
+  "fs.adl.";
+
   // optional when provider type is refresh or client id.
   public static final String AZURE_AD_TOKEN_PROVIDER_CLASS_KEY =
   "fs.adl.oauth2.access.token.provider";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e2e6073/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 0e141e3..4488898 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -24,8 +24,10 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Map;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.microsoft.azure.datalake.store.ADLStoreClient;
 import com.microsoft.azure.datalake.store.ADLStoreOptions;
 import com.microsoft.azure.datalake.store.DirectoryEntry;
@@ -38,6 +40,8 @@ import 
com.microsoft.azure.datalake.store.oauth2.ClientCredsTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.DeviceCodeTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.MsiTokenProvider;
 import 
com.microsoft.azure.datalake.store.oauth2.RefreshTokenBasedTokenProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -75,6 +79,8 @@ import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class AdlFileSystem extends FileSystem {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(AdlFileSystem.class);
   public static final String SCHEME = "adl";
   static final int DEFAULT_PORT = 443;
   private URI uri;
@@ -116,12 +122,19 @@ public class AdlFileSystem extends FileSystem {
   /**
* Called after a new FileSystem instance is constructed.
*
-   * @param storeUri a uri whose authority section names the host, port, etc.
-   * for this FileSystem
-   * @param conf the configuration
+   * @param storeUri  a uri whose authority section names the host, port,
+   *  etc. for this FileSystem
+   * @param originalConf  the configuration to use for the FS. The account-
+   *  specific options are patched over the base ones

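The javadoc above summarizes the mechanism: options under the
`fs.adl.account.` prefix (AZURE_AD_ACCOUNT_PREFIX) are patched over the base
`fs.adl.*` options when the FileSystem for that store is initialized. A
minimal sketch of what a per-store override could look like, assuming a
hypothetical store named `prodstore` and that the account name in the prefix
is the store's short name; the option values are placeholders, and a real
setup would also need the remaining OAuth2 credentials:

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class AdlPerStoreConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Base settings shared by every ADLS store reached from this client.
    conf.set("fs.adl.oauth2.access.token.provider.type", "ClientCredential");
    conf.set("fs.adl.oauth2.client.id", "shared-client-id");
    // Account-specific override: patched over fs.adl.oauth2.client.id only
    // when initializing the "prodstore" store.
    conf.set("fs.adl.account.prodstore.oauth2.client.id",
        "prodstore-client-id");
    FileSystem adls = FileSystem.get(
        URI.create("adl://prodstore.azuredatalakestore.net/"), conf);
    System.out.println("initialized " + adls.getUri());
  }
}
```
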
[2/2] hadoop git commit: Backport HADOOP-13514 (surefire upgrade) to branch-2

2018-02-26 Thread cdouglas
Backport HADOOP-13514 (surefire upgrade) to branch-2

(cherry picked from commit 762125b864ab812512bad9a59344ca79af7f43ac)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6024b3a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6024b3a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6024b3a2

Branch: refs/heads/branch-2.9
Commit: 6024b3a2add1247b7b6e82d50f5267a8772e14e3
Parents: 39e1f96
Author: Chris Douglas 
Authored: Mon Feb 26 16:32:06 2018 -0800
Committer: Chris Douglas 
Committed: Mon Feb 26 16:32:19 2018 -0800

--
 BUILDING.txt  | 4 ++--
 hadoop-project/pom.xml| 5 +++--
 hadoop-tools/hadoop-aws/pom.xml   | 2 ++
 hadoop-tools/hadoop-azure/pom.xml | 3 +++
 4 files changed, 10 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6024b3a2/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 9c1fbd6..e7701a5 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -267,10 +267,10 @@ If the build process fails with an out of memory error, 
you should be able to fi
 it by increasing the memory used by maven which can be done via the environment
 variable MAVEN_OPTS.
 
-Here is an example setting to allocate between 256 and 512 MB of heap space to
+Here is an example setting to allocate between 256 MB and 1 GB of heap space to
 Maven
 
-export MAVEN_OPTS="-Xms256m -Xmx512m"
+export MAVEN_OPTS="-Xms256m -Xmx1g"
 
 
--
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6024b3a2/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 6441dc9..9f80749 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -111,7 +111,7 @@
 
 
 -Xmx2048m -XX:MaxPermSize=768m 
-XX:+HeapDumpOnOutOfMemoryError
-2.17
+2.20.1
 
${maven-surefire-plugin.version}
 
${maven-surefire-plugin.version}
 
@@ -1509,6 +1509,7 @@
 
${env.DYLD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib
 4
   
+  false
   
 
 ${project.build.directory}/log
@@ -1519,7 +1520,7 @@
 ${test.build.data}
 ${test.build.webapps}
 ${test.cache.data}
-${test.build.classes}
+
${project.build.directory}/test-classes
 
 true
 
${project.build.directory}/test-classes/krb5.conf

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6024b3a2/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index 9343af2..920f985 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -153,6 +153,7 @@
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
   
${fs.s3a.scale.test.timeout}
+  false
   
 
 true
@@ -209,6 +210,7 @@
 
 
   
${fs.s3a.scale.test.timeout}
+  false
   
 
 false

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6024b3a2/hadoop-tools/hadoop-azure/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure/pom.xml 
b/hadoop-tools/hadoop-azure/pom.xml
index 81ea1ff..8b8a584 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -326,6 +326,7 @@
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
   
${fs.azure.scale.test.timeout}
+  false
   
 
 true
@@ -381,6 +382,7 @@
 
 
   
${fs.azure.scale.test.timeout}
+  false
   
 false
 
${fs.azure.scale.test.enabled}
@@ -431,6 +433,7 @@
 
${fs.azure.scale.test.timeout}
   
   
${fs.azure.scale.test.timeout}
+  false
 
   
 



[1/2] hadoop git commit: Backport HADOOP-13514 (surefire upgrade) to branch-2

2018-02-26 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 54803ebe4 -> 762125b86
  refs/heads/branch-2.9 39e1f963b -> 6024b3a2a


Backport HADOOP-13514 (surefire upgrade) to branch-2


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/762125b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/762125b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/762125b8

Branch: refs/heads/branch-2
Commit: 762125b864ab812512bad9a59344ca79af7f43ac
Parents: 54803eb
Author: Chris Douglas 
Authored: Mon Feb 26 16:32:06 2018 -0800
Committer: Chris Douglas 
Committed: Mon Feb 26 16:32:06 2018 -0800

--
 BUILDING.txt  | 4 ++--
 hadoop-project/pom.xml| 5 +++--
 hadoop-tools/hadoop-aws/pom.xml   | 2 ++
 hadoop-tools/hadoop-azure/pom.xml | 3 +++
 4 files changed, 10 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/762125b8/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 9c1fbd6..e7701a5 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -267,10 +267,10 @@ If the build process fails with an out of memory error, 
you should be able to fi
 it by increasing the memory used by maven which can be done via the environment
 variable MAVEN_OPTS.
 
-Here is an example setting to allocate between 256 and 512 MB of heap space to
+Here is an example setting to allocate between 256 MB and 1 GB of heap space to
 Maven
 
-export MAVEN_OPTS="-Xms256m -Xmx512m"
+export MAVEN_OPTS="-Xms256m -Xmx1g"
 
 
--
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/762125b8/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 6ba1ced..f560088 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -111,7 +111,7 @@
 
 
 -Xmx2048m -XX:MaxPermSize=768m 
-XX:+HeapDumpOnOutOfMemoryError
-2.17
+2.20.1
 
${maven-surefire-plugin.version}
 
${maven-surefire-plugin.version}
 
@@ -1521,6 +1521,7 @@
 
${env.DYLD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib
 4
   
+  false
   
 
 ${project.build.directory}/log
@@ -1531,7 +1532,7 @@
 ${test.build.data}
 ${test.build.webapps}
 ${test.cache.data}
-${test.build.classes}
+
${project.build.directory}/test-classes
 
 true
 
${project.build.directory}/test-classes/krb5.conf

http://git-wip-us.apache.org/repos/asf/hadoop/blob/762125b8/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index d8b54c4..659af93 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -153,6 +153,7 @@
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
   
${fs.s3a.scale.test.timeout}
+  false
   
 
 true
@@ -209,6 +210,7 @@
 
 
   
${fs.s3a.scale.test.timeout}
+  false
   
 
 false

http://git-wip-us.apache.org/repos/asf/hadoop/blob/762125b8/hadoop-tools/hadoop-azure/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure/pom.xml 
b/hadoop-tools/hadoop-azure/pom.xml
index 244f432..c9325ff 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -326,6 +326,7 @@
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
   
${fs.azure.scale.test.timeout}
+  false
   
 
 true
@@ -381,6 +382,7 @@
 
 
   
${fs.azure.scale.test.timeout}
+  false
   
 false
 
${fs.azure.scale.test.enabled}
@@ -431,6 +433,7 @@
 
${fs.azure.scale.test.timeout}
   
   
${fs.azure.scale.test.timeout}
+  false
 
   
 



[hadoop] Git Push Summary

2018-02-26 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-3 [deleted] 692260754




hadoop git commit: HADOOP-13374. Add the L verification script. Contributed by Allen Wittenauer

2018-02-23 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/trunk 033f9c68e -> 329a4fdd0


HADOOP-13374. Add the L verification script. Contributed by Allen Wittenauer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/329a4fdd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/329a4fdd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/329a4fdd

Branch: refs/heads/trunk
Commit: 329a4fdd07ab007615f34c8e0e651360f988064d
Parents: 033f9c6
Author: Chris Douglas 
Authored: Fri Feb 23 17:07:22 2018 -0800
Committer: Chris Douglas 
Committed: Fri Feb 23 17:07:22 2018 -0800

--
 dev-support/bin/verify-license-files | 145 ++
 1 file changed, 145 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/329a4fdd/dev-support/bin/verify-license-files
--
diff --git a/dev-support/bin/verify-license-files 
b/dev-support/bin/verify-license-files
new file mode 100755
index 000..1fd70a6
--- /dev/null
+++ b/dev-support/bin/verify-license-files
@@ -0,0 +1,145 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+## @description  check a file
+## @audience private
+## @stabilityevolving
+## @replaceable  no
+## @paramfilename
+## @paramjarfile
+## @return   0 = destroy verify dir
+## @return   1 = keep verify dir
+function process_file
+{
+  declare check=$1
+  declare fqfn=$2
+  declare fn
+  declare keepdir
+  declare tf
+  declare count
+  declare valid
+
+  fn=$(basename "${fqfn}")
+  keepdir=false
+  valid=0
+  count=0
+
+  unzip -o -d "${WORK_DIR}/${fn}" "${fqfn}" '*'"${check}"'*' >/dev/null 2>&1
+
+  while read -r tf; do
+((count = count + 1))
+if diff -q "${DIST_DIR}/${check}.txt" "${tf}" >/dev/null 2>&1; then
+  ((valid = valid + 1))
+fi
+  done < <(find "${WORK_DIR}/${fn}" -name "${check}"'*')
+
+  if [[ "${count}" -eq 0 ]]; then
+hadoop_error "ERROR: ${fn}: Missing a ${check} file"
+  elif [[ "${count}" -gt 1 ]]; then
+hadoop_error "WARNING: ${fn}: Found ${count} ${check} files (${valid} were 
valid)"
+keepdir=true
+  fi
+
+  if [[ "${valid}" -eq 0 ]] && [[ "${count}" -gt 0 ]]; then
+  hadoop_error "ERROR: ${fn}: No valid ${check} found"
+  keepdir=true
+  fi
+
+  if [[ "${keepdir}" = "false" ]]; then
+return 0
+  else
+return 1
+  fi
+}
+
+
+## @description  check a jar
+## @audience private
+## @stabilityevolving
+## @replaceable  no
+## @paramjarfile
+## @return   0 - success
+## @return   1 - errors
+function process_jar
+{
+  declare fqfn=$1
+  declare fn
+  declare keepwork
+
+  fn=$(basename "${fqfn}")
+  keepwork=false
+
+  if [[ ! ${fn} =~ hadoop-.*-${PROJ_VERSION} ]]; then
+return
+  fi
+
+  mkdir -p "${WORK_DIR}/${fn}"
+
+  if ! process_file LICENSE "${fqfn}"; then
+keepwork=true
+  fi
+
+  if ! process_file NOTICE "${fqfn}"; then
+keepwork=true
+  fi
+
+  if [[ "${keepwork}" = "false" ]]; then
+rm -rf "${WORK_DIR:?}/${fn}"
+return 0
+  else
+hadoop_error ""
+return 1
+  fi
+}
+
+
+MYNAME="${BASH_SOURCE-$0}"
+#shellcheck disable=SC2034
+HADOOP_SHELL_EXECNAME="${MYNAME##*/}"
+BINDIR=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
+#shellcheck disable=SC2034
+HADOOP_LIBEXEC_DIR="${BINDIR}/../../hadoop-common-project/hadoop-common/src/main/bin"
+
+#shellcheck disable=SC1090
+. "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh"
+
+HADOOP_LIBEXEC_DIR=$(hadoop_abs "${HADOOP_LIBEXEC_DIR}")
+BINDIR=$(hadoop_abs "${BINDIR}")
+BASEDIR=$(hadoop_abs "${BINDIR}/../..")
+
+pushd "${BASEDIR}" >/dev/null
+#shellcheck disable=SC2016
+PROJ_VERSION=$(mvn -q -Dexec.executable="echo" 
-Dexec.args='${project.version}' --non-recursive exec:exec)
+popd >/dev/null
+
+DIST_DIR="${BASEDIR}/hadoop-dist/target/hadoop-${PROJ_VERSION}"
+WORK_DIR="${BASEDIR}/patchprocess/verify"
+
+rm -rf "${WORK_DIR:?}"
+mkdir -p "${WORK_DIR}"
+
+while read -r filename; do
+  process_jar "${filename}"
+  

[1/4] hadoop git commit: Revert "HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh may not be accessible. Contributed by Grigori Rybkine"

2018-02-12 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 d51e4affb -> 08053c4ea
  refs/heads/trunk 5b88cb339 -> 0c5d7d71a


Revert "HADOOP-15195. With SELinux enabled, directories mounted with 
start-build-env.sh may not be accessible. Contributed by Grigori Rybkine"

This reverts commit 5b88cb339898f82519223bcd07e1caedff02d051.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9cc6d1df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9cc6d1df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9cc6d1df

Branch: refs/heads/trunk
Commit: 9cc6d1dfb351f505aaa8f9f028068650b3b00d0d
Parents: 5b88cb3
Author: Chris Douglas 
Authored: Mon Feb 12 21:06:10 2018 -0800
Committer: Chris Douglas 
Committed: Mon Feb 12 21:06:10 2018 -0800

--
 .../src/test/scripts/start-build-env.bats   | 102 ---
 start-build-env.sh  |  32 +-
 2 files changed, 3 insertions(+), 131 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cc6d1df/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats 
b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
deleted file mode 100644
index 0c32bcf..000
--- a/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
+++ /dev/null
@@ -1,102 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-load hadoop-functions_test_helper
-
-# Mock docker command
-docker () {
-  if [ "$1" = "-v" ]; then
-shift
-echo Docker version ${DCKR_MOCK_VER:?}
-  elif [ "$1" = run ]; then
-shift
-until [ $# -eq 0 ]; do
-  if [ "$1" = -v ]; then
-shift
-echo "$1"|awk -F':' '{if (NF == 3 && $3 == "z")
-  printf "Mounted %s with %s option.\n", $1, $3
-  else if (NF == 2)
-  printf "Mounted %s without %s option.\n", $1, "z"}'
-  fi
-  shift
-done
-  fi
-}
-export -f docker
-export DCKR_MOCK_VER
-
-# Mock a SELinux enabled system
-enable_selinux () {
-  mkdir -p "${TMP}/bin"
-  echo true >"${TMP}/bin"/selinuxenabled
-  chmod a+x "${TMP}/bin"/selinuxenabled
-  if [ "${PATH#${TMP}/bin}" = "${PATH}" ]; then
-PATH="${TMP}/bin":"$PATH"
-  fi
-}
-
-setup_user () {
-  if [ -z "$(printenv USER)" ]; then
-if [ -z "$USER" ]; then
-  USER=${HOME##*/}
-fi
-export USER
-  fi
-}
-
-# Mock stat command as used in start-build-env.sh
-stat () {
-  if [ "$1" = --printf='%C' -a $# -eq 2 ]; then
-printf 'mock_u:mock_r:mock_t:s0'
-  else
-command stat "$@"
-  fi
-}
-export -f stat
-
-# Verify that host directories get mounted without z option
-# and INFO messages get printed out
-@test "start-build-env.sh (Docker without z mount option)" {
-  if [ "$(uname -s)" != "Linux" ]; then
-skip "Not on Linux platform"
-  fi
-  enable_selinux
-  setup_user
-  DCKR_MOCK_VER=1.4
-  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
-  [ "$status" -eq 0 ]
-  [[ ${lines[0]} == "INFO: SELinux policy is enforced." ]]
-  [[ ${lines[1]} =~ \
- "Mounted ".*" may not be accessible to the container." ]]
-  [[ ${lines[2]} == \
- "INFO: If so, on the host, run the following command:" ]]
-  [[ ${lines[3]} =~ "# chcon -Rt svirt_sandbox_file_t " ]]
-  [[ ${lines[-2]} =~ "Mounted ".*" without z option." ]]
-  [[ ${lines[-1]} =~ "Mounted ".*" without z option." ]]
-}
-
-# Verify that host directories get mounted with z option
-@test "start-build-env.sh (Docker with z mount option)" {
-  if [ "$(uname -s)" != "Linux" ]; then
-skip "Not on Linux platform"
-  fi
-  enable_selinux
-  setup_user
-  DCKR_MOCK_VER=1.7
-  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
-  [ "$status" -eq 0 ]
-  [[ ${lines[-2]} =~ "Mounted ".*" with z option." ]]
-  [[ ${lines[-1]} =~ "Mounted ".*" with z option." ]]
-}


[3/4] hadoop git commit: HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh may not be accessible. Contributed by Grigori Rybkine

2018-02-12 Thread cdouglas
HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh 
may not be accessible. Contributed by Grigori Rybkine


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c5d7d71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c5d7d71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c5d7d71

Branch: refs/heads/trunk
Commit: 0c5d7d71a80bccd4ad7eab269d0727b999606a7e
Parents: 9cc6d1d
Author: Chris Douglas 
Authored: Mon Feb 12 21:07:15 2018 -0800
Committer: Chris Douglas 
Committed: Mon Feb 12 21:07:15 2018 -0800

--
 .../src/test/scripts/start-build-env.bats   | 102 +++
 start-build-env.sh  |  32 +-
 2 files changed, 131 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c5d7d71/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats 
b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
new file mode 100644
index 000..dbb14ad
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
@@ -0,0 +1,102 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+# Mock docker command
+docker () {
+  if [ "$1" = "-v" ]; then
+shift
+echo Docker version ${DCKR_MOCK_VER:?}
+  elif [ "$1" = run ]; then
+shift
+until [ $# -eq 0 ]; do
+  if [ "$1" = -v ]; then
+shift
+echo "$1"|awk -F':' '{if (NF == 3 && $3 == "z")
+  printf "Mounted %s with z option.\n", $1
+  else if (NF == 2)
+  printf "Mounted %s without z option.\n", $1}'
+  fi
+  shift
+done
+  fi
+}
+export -f docker
+export DCKR_MOCK_VER
+
+# Mock a SELinux enabled system
+enable_selinux () {
+  mkdir -p "${TMP}/bin"
+  echo true >"${TMP}/bin"/selinuxenabled
+  chmod a+x "${TMP}/bin"/selinuxenabled
+  if [ "${PATH#${TMP}/bin}" = "${PATH}" ]; then
+PATH="${TMP}/bin":"$PATH"
+  fi
+}
+
+setup_user () {
+  if [ -z "$(printenv USER)" ]; then
+if [ -z "$USER" ]; then
+  USER=${HOME##*/}
+fi
+export USER
+  fi
+}
+
+# Mock stat command as used in start-build-env.sh
+stat () {
+  if [ "$1" = --printf='%C' -a $# -eq 2 ]; then
+printf 'mock_u:mock_r:mock_t:s0'
+  else
+command stat "$@"
+  fi
+}
+export -f stat
+
+# Verify that host directories get mounted without z option
+# and INFO messages get printed out
+@test "start-build-env.sh (Docker without z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.4
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[0]} == "INFO: SELinux is enabled." ]]
+  [[ ${lines[1]} =~ \
+ "Mounted ".*" may not be accessible to the container." ]]
+  [[ ${lines[2]} == \
+ "INFO: If so, on the host, run the following command:" ]]
+  [[ ${lines[3]} =~ "# chcon -Rt svirt_sandbox_file_t " ]]
+  [[ ${lines[-2]} =~ "Mounted ".*" without z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" without z option." ]]
+}
+
+# Verify that host directories get mounted with z option
+@test "start-build-env.sh (Docker with z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.7
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[-2]} =~ "Mounted ".*" with z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" with z option." ]]
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c5d7d71/start-build-env.sh
--
diff --git a/start-build-env.sh b/start-build-env.sh
index 5a18151..4da55af 100755
--- 

[2/4] hadoop git commit: Revert "HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh may not be accessible. Contributed by Grigori Rybkine"

2018-02-12 Thread cdouglas
Revert "HADOOP-15195. With SELinux enabled, directories mounted with 
start-build-env.sh may not be accessible. Contributed by Grigori Rybkine"

This reverts commit d51e4affbacad720793a42794aa16343a4e36a66.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f6b1894
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f6b1894
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f6b1894

Branch: refs/heads/branch-3.1
Commit: 1f6b1894fcfe2f83a01282a9f2d7a5b327db2182
Parents: d51e4af
Author: Chris Douglas 
Authored: Mon Feb 12 21:06:21 2018 -0800
Committer: Chris Douglas 
Committed: Mon Feb 12 21:06:21 2018 -0800

--
 .../src/test/scripts/start-build-env.bats   | 102 ---
 start-build-env.sh  |  32 +-
 2 files changed, 3 insertions(+), 131 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f6b1894/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats 
b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
deleted file mode 100644
index 0c32bcf..000
--- a/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
+++ /dev/null
@@ -1,102 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-load hadoop-functions_test_helper
-
-# Mock docker command
-docker () {
-  if [ "$1" = "-v" ]; then
-shift
-echo Docker version ${DCKR_MOCK_VER:?}
-  elif [ "$1" = run ]; then
-shift
-until [ $# -eq 0 ]; do
-  if [ "$1" = -v ]; then
-shift
-echo "$1"|awk -F':' '{if (NF == 3 && $3 == "z")
-  printf "Mounted %s with %s option.\n", $1, $3
-  else if (NF == 2)
-  printf "Mounted %s without %s option.\n", $1, "z"}'
-  fi
-  shift
-done
-  fi
-}
-export -f docker
-export DCKR_MOCK_VER
-
-# Mock a SELinux enabled system
-enable_selinux () {
-  mkdir -p "${TMP}/bin"
-  echo true >"${TMP}/bin"/selinuxenabled
-  chmod a+x "${TMP}/bin"/selinuxenabled
-  if [ "${PATH#${TMP}/bin}" = "${PATH}" ]; then
-PATH="${TMP}/bin":"$PATH"
-  fi
-}
-
-setup_user () {
-  if [ -z "$(printenv USER)" ]; then
-if [ -z "$USER" ]; then
-  USER=${HOME##*/}
-fi
-export USER
-  fi
-}
-
-# Mock stat command as used in start-build-env.sh
-stat () {
-  if [ "$1" = --printf='%C' -a $# -eq 2 ]; then
-printf 'mock_u:mock_r:mock_t:s0'
-  else
-command stat "$@"
-  fi
-}
-export -f stat
-
-# Verify that host directories get mounted without z option
-# and INFO messages get printed out
-@test "start-build-env.sh (Docker without z mount option)" {
-  if [ "$(uname -s)" != "Linux" ]; then
-skip "Not on Linux platform"
-  fi
-  enable_selinux
-  setup_user
-  DCKR_MOCK_VER=1.4
-  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
-  [ "$status" -eq 0 ]
-  [[ ${lines[0]} == "INFO: SELinux policy is enforced." ]]
-  [[ ${lines[1]} =~ \
- "Mounted ".*" may not be accessible to the container." ]]
-  [[ ${lines[2]} == \
- "INFO: If so, on the host, run the following command:" ]]
-  [[ ${lines[3]} =~ "# chcon -Rt svirt_sandbox_file_t " ]]
-  [[ ${lines[-2]} =~ "Mounted ".*" without z option." ]]
-  [[ ${lines[-1]} =~ "Mounted ".*" without z option." ]]
-}
-
-# Verify that host directories get mounted with z option
-@test "start-build-env.sh (Docker with z mount option)" {
-  if [ "$(uname -s)" != "Linux" ]; then
-skip "Not on Linux platform"
-  fi
-  enable_selinux
-  setup_user
-  DCKR_MOCK_VER=1.7
-  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
-  [ "$status" -eq 0 ]
-  [[ ${lines[-2]} =~ "Mounted ".*" with z option." ]]
-  [[ ${lines[-1]} =~ "Mounted ".*" with z option." ]]
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f6b1894/start-build-env.sh

[4/4] hadoop git commit: HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh may not be accessible. Contributed by Grigori Rybkine

2018-02-12 Thread cdouglas
HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh 
may not be accessible. Contributed by Grigori Rybkine

(cherry picked from commit 0c5d7d71a80bccd4ad7eab269d0727b999606a7e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08053c4e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08053c4e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08053c4e

Branch: refs/heads/branch-3.1
Commit: 08053c4ead2f6026a5eb937df560db251123633c
Parents: 1f6b189
Author: Chris Douglas 
Authored: Mon Feb 12 21:07:15 2018 -0800
Committer: Chris Douglas 
Committed: Mon Feb 12 21:07:25 2018 -0800

--
 .../src/test/scripts/start-build-env.bats   | 102 +++
 start-build-env.sh  |  32 +-
 2 files changed, 131 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08053c4e/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats 
b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
new file mode 100644
index 000..dbb14ad
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
@@ -0,0 +1,102 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+# Mock docker command
+docker () {
+  if [ "$1" = "-v" ]; then
+shift
+echo Docker version ${DCKR_MOCK_VER:?}
+  elif [ "$1" = run ]; then
+shift
+until [ $# -eq 0 ]; do
+  if [ "$1" = -v ]; then
+shift
+echo "$1"|awk -F':' '{if (NF == 3 && $3 == "z")
+  printf "Mounted %s with z option.\n", $1
+  else if (NF == 2)
+  printf "Mounted %s without z option.\n", $1}'
+  fi
+  shift
+done
+  fi
+}
+export -f docker
+export DCKR_MOCK_VER
+
+# Mock a SELinux enabled system
+enable_selinux () {
+  mkdir -p "${TMP}/bin"
+  echo true >"${TMP}/bin"/selinuxenabled
+  chmod a+x "${TMP}/bin"/selinuxenabled
+  if [ "${PATH#${TMP}/bin}" = "${PATH}" ]; then
+PATH="${TMP}/bin":"$PATH"
+  fi
+}
+
+setup_user () {
+  if [ -z "$(printenv USER)" ]; then
+if [ -z "$USER" ]; then
+  USER=${HOME##*/}
+fi
+export USER
+  fi
+}
+
+# Mock stat command as used in start-build-env.sh
+stat () {
+  if [ "$1" = --printf='%C' -a $# -eq 2 ]; then
+printf 'mock_u:mock_r:mock_t:s0'
+  else
+command stat "$@"
+  fi
+}
+export -f stat
+
+# Verify that host directories get mounted without z option
+# and INFO messages get printed out
+@test "start-build-env.sh (Docker without z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.4
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[0]} == "INFO: SELinux is enabled." ]]
+  [[ ${lines[1]} =~ \
+ "Mounted ".*" may not be accessible to the container." ]]
+  [[ ${lines[2]} == \
+ "INFO: If so, on the host, run the following command:" ]]
+  [[ ${lines[3]} =~ "# chcon -Rt svirt_sandbox_file_t " ]]
+  [[ ${lines[-2]} =~ "Mounted ".*" without z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" without z option." ]]
+}
+
+# Verify that host directories get mounted with z option
+@test "start-build-env.sh (Docker with z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.7
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[-2]} =~ "Mounted ".*" with z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" with z option." ]]
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08053c4e/start-build-env.sh
--
diff --git 

[1/2] hadoop git commit: HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh may not be accessible. Contributed by Grigori Rybkine

2018-02-12 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 5f7ef73b7 -> d51e4affb
  refs/heads/trunk 5a1db60ab -> 5b88cb339


HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh 
may not be accessible. Contributed by Grigori Rybkine


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b88cb33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b88cb33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b88cb33

Branch: refs/heads/trunk
Commit: 5b88cb339898f82519223bcd07e1caedff02d051
Parents: 5a1db60
Author: Chris Douglas 
Authored: Mon Feb 12 21:00:47 2018 -0800
Committer: Chris Douglas 
Committed: Mon Feb 12 21:00:47 2018 -0800

--
 .../src/test/scripts/start-build-env.bats   | 102 +++
 start-build-env.sh  |  32 +-
 2 files changed, 131 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b88cb33/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats 
b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
new file mode 100644
index 000..0c32bcf
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
@@ -0,0 +1,102 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+# Mock docker command
+docker () {
+  if [ "$1" = "-v" ]; then
+shift
+echo Docker version ${DCKR_MOCK_VER:?}
+  elif [ "$1" = run ]; then
+shift
+until [ $# -eq 0 ]; do
+  if [ "$1" = -v ]; then
+shift
+echo "$1"|awk -F':' '{if (NF == 3 && $3 == "z")
+  printf "Mounted %s with %s option.\n", $1, $3
+  else if (NF == 2)
+  printf "Mounted %s without %s option.\n", $1, "z"}'
+  fi
+  shift
+done
+  fi
+}
+export -f docker
+export DCKR_MOCK_VER
+
+# Mock a SELinux enabled system
+enable_selinux () {
+  mkdir -p "${TMP}/bin"
+  echo true >"${TMP}/bin"/selinuxenabled
+  chmod a+x "${TMP}/bin"/selinuxenabled
+  if [ "${PATH#${TMP}/bin}" = "${PATH}" ]; then
+PATH="${TMP}/bin":"$PATH"
+  fi
+}
+
+setup_user () {
+  if [ -z "$(printenv USER)" ]; then
+if [ -z "$USER" ]; then
+  USER=${HOME##*/}
+fi
+export USER
+  fi
+}
+
+# Mock stat command as used in start-build-env.sh
+stat () {
+  if [ "$1" = --printf='%C' -a $# -eq 2 ]; then
+printf 'mock_u:mock_r:mock_t:s0'
+  else
+command stat "$@"
+  fi
+}
+export -f stat
+
+# Verify that host directories get mounted without z option
+# and INFO messages get printed out
+@test "start-build-env.sh (Docker without z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.4
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[0]} == "INFO: SELinux policy is enforced." ]]
+  [[ ${lines[1]} =~ \
+ "Mounted ".*" may not be accessible to the container." ]]
+  [[ ${lines[2]} == \
+ "INFO: If so, on the host, run the following command:" ]]
+  [[ ${lines[3]} =~ "# chcon -Rt svirt_sandbox_file_t " ]]
+  [[ ${lines[-2]} =~ "Mounted ".*" without z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" without z option." ]]
+}
+
+# Verify that host directories get mounted with z option
+@test "start-build-env.sh (Docker with z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.7
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[-2]} =~ "Mounted ".*" with z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" with z option." ]]
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b88cb33/start-build-env.sh

[2/2] hadoop git commit: HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh may not be accessible. Contributed by Grigori Rybkine

2018-02-12 Thread cdouglas
HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh 
may not be accessible. Contributed by Grigori Rybkine

(cherry picked from commit 5b88cb339898f82519223bcd07e1caedff02d051)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d51e4aff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d51e4aff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d51e4aff

Branch: refs/heads/branch-3.1
Commit: d51e4affbacad720793a42794aa16343a4e36a66
Parents: 5f7ef73
Author: Chris Douglas 
Authored: Mon Feb 12 21:00:47 2018 -0800
Committer: Chris Douglas 
Committed: Mon Feb 12 21:03:00 2018 -0800

--
 .../src/test/scripts/start-build-env.bats   | 102 +++
 start-build-env.sh  |  32 +-
 2 files changed, 131 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d51e4aff/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats 
b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
new file mode 100644
index 000..0c32bcf
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
@@ -0,0 +1,102 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+# Mock docker command
+docker () {
+  if [ "$1" = "-v" ]; then
+shift
+echo Docker version ${DCKR_MOCK_VER:?}
+  elif [ "$1" = run ]; then
+shift
+until [ $# -eq 0 ]; do
+  if [ "$1" = -v ]; then
+shift
+echo "$1"|awk -F':' '{if (NF == 3 && $3 == "z")
+  printf "Mounted %s with %s option.\n", $1, $3
+  else if (NF == 2)
+  printf "Mounted %s without %s option.\n", $1, "z"}'
+  fi
+  shift
+done
+  fi
+}
+export -f docker
+export DCKR_MOCK_VER
+
+# Mock a SELinux enabled system
+enable_selinux () {
+  mkdir -p "${TMP}/bin"
+  echo true >"${TMP}/bin"/selinuxenabled
+  chmod a+x "${TMP}/bin"/selinuxenabled
+  if [ "${PATH#${TMP}/bin}" = "${PATH}" ]; then
+PATH="${TMP}/bin":"$PATH"
+  fi
+}
+
+setup_user () {
+  if [ -z "$(printenv USER)" ]; then
+if [ -z "$USER" ]; then
+  USER=${HOME##*/}
+fi
+export USER
+  fi
+}
+
+# Mock stat command as used in start-build-env.sh
+stat () {
+  if [ "$1" = --printf='%C' -a $# -eq 2 ]; then
+printf 'mock_u:mock_r:mock_t:s0'
+  else
+command stat "$@"
+  fi
+}
+export -f stat
+
+# Verify that host directories get mounted without z option
+# and INFO messages get printed out
+@test "start-build-env.sh (Docker without z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.4
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[0]} == "INFO: SELinux policy is enforced." ]]
+  [[ ${lines[1]} =~ \
+ "Mounted ".*" may not be accessible to the container." ]]
+  [[ ${lines[2]} == \
+ "INFO: If so, on the host, run the following command:" ]]
+  [[ ${lines[3]} =~ "# chcon -Rt svirt_sandbox_file_t " ]]
+  [[ ${lines[-2]} =~ "Mounted ".*" without z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" without z option." ]]
+}
+
+# Verify that host directories get mounted with z option
+@test "start-build-env.sh (Docker with z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.7
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[-2]} =~ "Mounted ".*" with z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" with z option." ]]
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d51e4aff/start-build-env.sh
--
diff 

[2/2] hadoop git commit: YARN-6868. Add test scope to certain entries in hadoop-yarn-server-resourcemanager pom.xml. (Ray Chiang via Haibo Chen)

2018-02-05 Thread cdouglas
YARN-6868. Add test scope to certain entries in 
hadoop-yarn-server-resourcemanager pom.xml. (Ray Chiang via Haibo Chen)

(cherry picked from commit a20e7105ea9d4e38d7f8f9fd48035e342bb22f1c)
(cherry picked from commit c25c082961a837b94016dcf0250bdc6d5f54e915)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e78a7016
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e78a7016
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e78a7016

Branch: refs/heads/branch-2.9
Commit: e78a701687f61e3eaad428899ad0b0f0d72b87dd
Parents: 1ed6ae7
Author: Haibo Chen 
Authored: Wed Aug 30 09:13:49 2017 -0700
Committer: Chris Douglas 
Committed: Mon Feb 5 10:51:39 2018 -0800

--
 .../hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e78a7016/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index 02d2a5f..b7fec1b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -196,6 +196,7 @@
     <dependency>
       <groupId>org.apache.curator</groupId>
       <artifactId>curator-test</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.zookeeper</groupId>
@@ -205,9 +206,11 @@
       <groupId>org.fusesource.leveldbjni</groupId>
       <artifactId>leveldbjni-all</artifactId>
     </dependency>
+
     <dependency>
       <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
+      <scope>test</scope>
       <type>test-jar</type>
     </dependency>
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/2] hadoop git commit: YARN-6868. Add test scope to certain entries in hadoop-yarn-server-resourcemanager pom.xml. (Ray Chiang via Haibo Chen)

2018-02-05 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6dfea0504 -> c25c08296
  refs/heads/branch-2.9 1ed6ae7b1 -> e78a70168


YARN-6868. Add test scope to certain entries in 
hadoop-yarn-server-resourcemanager pom.xml. (Ray Chiang via Haibo Chen)

(cherry picked from commit a20e7105ea9d4e38d7f8f9fd48035e342bb22f1c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c25c0829
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c25c0829
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c25c0829

Branch: refs/heads/branch-2
Commit: c25c082961a837b94016dcf0250bdc6d5f54e915
Parents: 6dfea05
Author: Haibo Chen 
Authored: Wed Aug 30 09:13:49 2017 -0700
Committer: Chris Douglas 
Committed: Mon Feb 5 10:51:03 2018 -0800

--
 .../hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c25c0829/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index ea71435..43a6295 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -196,6 +196,7 @@
     <dependency>
       <groupId>org.apache.curator</groupId>
       <artifactId>curator-test</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.zookeeper</groupId>
@@ -205,9 +206,11 @@
       <groupId>org.fusesource.leveldbjni</groupId>
       <artifactId>leveldbjni-all</artifactId>
     </dependency>
+
     <dependency>
       <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
+      <scope>test</scope>
       <type>test-jar</type>
     </dependency>
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MapFile.fix creates a wrong index file in case of block-compressed data file. Contributed by Grigori Rybkine

2018-01-26 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8b5b045bd -> 56872cff9


MapFile.fix creates a wrong index file in case of block-compressed data file. 
Contributed by Grigori Rybkine


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56872cff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56872cff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56872cff

Branch: refs/heads/trunk
Commit: 56872cff92f543bf77206a1324968559dceb7bc2
Parents: 8b5b045
Author: Chris Douglas 
Authored: Fri Jan 26 09:06:48 2018 -0800
Committer: Chris Douglas 
Committed: Fri Jan 26 09:18:30 2018 -0800

--
 .../main/java/org/apache/hadoop/io/MapFile.java | 35 ++--
 .../java/org/apache/hadoop/io/TestMapFile.java  | 59 +++-
 2 files changed, 88 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/56872cff/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
index d56822f..51db0b3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
@@ -811,15 +811,40 @@ public class MapFile {
 (LongWritable.class));
 }
 try {
-  long pos = 0L;
+  /** What's the position (in bytes) we wrote when we got the last index */
+  long lastIndexPos = -1;
+  /**
+   * What was size when we last wrote an index. Set to MIN_VALUE to ensure
+   * that we have an index at position zero - midKey will throw an 
exception
+   * if this is not the case
+   */
+  long lastIndexKeyCount = Long.MIN_VALUE;
+  long pos = dataReader.getPosition();
   LongWritable position = new LongWritable();
+  long nextBlock = pos;
+  boolean blockCompressed = dataReader.isBlockCompressed();
   while(dataReader.next(key, value)) {
-cnt++;
-if (cnt % indexInterval == 0) {
+if (blockCompressed) {
+  long curPos = dataReader.getPosition();
+  if (curPos > nextBlock) {
+pos = nextBlock;   // current block position
+nextBlock = curPos;
+  }
+}
+// Follow the same logic as in
+// {@link MapFile.Writer#append(WritableComparable, Writable)}
+if (cnt >= lastIndexKeyCount + indexInterval && pos > lastIndexPos) {
   position.set(pos);
-  if (!dryrun) indexWriter.append(key, position);
+  if (!dryrun) {
+indexWriter.append(key, position);
+  }
+  lastIndexPos = pos;
+  lastIndexKeyCount = cnt;
+}
+if (!blockCompressed) {
+  pos = dataReader.getPosition(); // next record position
 }
-pos = dataReader.getPosition();
+cnt++;
   }
 } catch(Throwable t) {
   // truncated data file. swallow it.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56872cff/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
index ff8df7c..7ec4227 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
@@ -485,6 +485,63 @@ public class TestMapFile {
   IOUtils.cleanup(null, writer);
 }
   }
+
+  /**
+   * test {@link MapFile#fix(FileSystem, Path, Class,
+   * Class, boolean, 
Configuration)}
+   * method in case of BLOCK compression
+   */
+  @Test
+  public void testFixBlockCompress() throws Exception {
+final String indexLessMapFile = "testFixBlockCompress.mapfile";
+final int compressBlocksize = 100;
+final int indexInterval = 4;
+final int noBlocks = 4;
+final String value = "value-";
+final int size = noBlocks * compressBlocksize / (4 + value.length());
+
+conf.setInt("io.seqfile.compress.blocksize", compressBlocksize);
+MapFile.Writer.setIndexInterval(conf, indexInterval);
+FileSystem fs = FileSystem.getLocal(conf);
+Path dir = new Path(TEST_DIR, indexLessMapFile);
+MapFile.Writer writer = null;
+MapFile.Reader reader = 

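For orientation, here is a minimal sketch (not part of the patch; the file path and key/value types are placeholders chosen for illustration) of how MapFile.fix is typically invoked to rebuild the index of an existing MapFile directory, which is the code path the change above corrects for block-compressed data files:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.Text;

public class RebuildMapFileIndex {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    // Hypothetical MapFile directory whose "index" file is missing or wrong.
    Path dir = new Path("/tmp/example.mapfile");
    // fix() scans the data file and rewrites the index; with the change above,
    // block-compressed data files get index entries pointing at block starts
    // rather than at positions inside a block.
    long entries = MapFile.fix(fs, dir, Text.class, Text.class, false, conf);
    System.out.println("Re-indexed " + entries + " entries");
  }
}
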
[1/3] hadoop git commit: MapFile.fix creates a wrong index file in case of block-compressed data file. Contributed by Grigori Rybkine

2018-01-26 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5f44bd3eb -> 9bd439e2c
  refs/heads/branch-2.9 98499bb09 -> e062e2b08
  refs/heads/branch-3.0 e6c66baba -> 7f3548778


MapFile.fix creates a wrong index file in case of block-compressed data file. 
Contributed by Grigori Rybkine

(cherry picked from commit 91db424c4360d7556660e8c57ac9a266e6688e01)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f354877
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f354877
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f354877

Branch: refs/heads/branch-3.0
Commit: 7f354877889b343878a8a09792d5cec8d2846a50
Parents: e6c66ba
Author: Chris Douglas 
Authored: Fri Jan 26 09:06:48 2018 -0800
Committer: Chris Douglas 
Committed: Fri Jan 26 09:15:03 2018 -0800

--
 .../main/java/org/apache/hadoop/io/MapFile.java | 35 ++--
 .../java/org/apache/hadoop/io/TestMapFile.java  | 59 +++-
 2 files changed, 88 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f354877/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
index d56822f..51db0b3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
@@ -811,15 +811,40 @@ public class MapFile {
 (LongWritable.class));
 }
 try {
-  long pos = 0L;
+  /** What's the position (in bytes) we wrote when we got the last index */
+  long lastIndexPos = -1;
+  /**
+   * What was size when we last wrote an index. Set to MIN_VALUE to ensure
+   * that we have an index at position zero - midKey will throw an 
exception
+   * if this is not the case
+   */
+  long lastIndexKeyCount = Long.MIN_VALUE;
+  long pos = dataReader.getPosition();
   LongWritable position = new LongWritable();
+  long nextBlock = pos;
+  boolean blockCompressed = dataReader.isBlockCompressed();
   while(dataReader.next(key, value)) {
-cnt++;
-if (cnt % indexInterval == 0) {
+if (blockCompressed) {
+  long curPos = dataReader.getPosition();
+  if (curPos > nextBlock) {
+pos = nextBlock;   // current block position
+nextBlock = curPos;
+  }
+}
+// Follow the same logic as in
+// {@link MapFile.Writer#append(WritableComparable, Writable)}
+if (cnt >= lastIndexKeyCount + indexInterval && pos > lastIndexPos) {
   position.set(pos);
-  if (!dryrun) indexWriter.append(key, position);
+  if (!dryrun) {
+indexWriter.append(key, position);
+  }
+  lastIndexPos = pos;
+  lastIndexKeyCount = cnt;
+}
+if (!blockCompressed) {
+  pos = dataReader.getPosition(); // next record position
 }
-pos = dataReader.getPosition();
+cnt++;
   }
 } catch(Throwable t) {
   // truncated data file. swallow it.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f354877/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
index ff8df7c..7ec4227 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
@@ -485,6 +485,63 @@ public class TestMapFile {
   IOUtils.cleanup(null, writer);
 }
   }
+
+  /**
+   * test {@link MapFile#fix(FileSystem, Path, Class,
+   * Class, boolean, 
Configuration)}
+   * method in case of BLOCK compression
+   */
+  @Test
+  public void testFixBlockCompress() throws Exception {
+final String indexLessMapFile = "testFixBlockCompress.mapfile";
+final int compressBlocksize = 100;
+final int indexInterval = 4;
+final int noBlocks = 4;
+final String value = "value-";
+final int size = noBlocks * compressBlocksize / (4 + value.length());
+
+conf.setInt("io.seqfile.compress.blocksize", compressBlocksize);
+MapFile.Writer.setIndexInterval(conf, 

[3/3] hadoop git commit: MapFile.fix creates a wrong index file in case of block-compressed data file. Contributed by Grigori Rybkine

2018-01-26 Thread cdouglas
MapFile.fix creates a wrong index file in case of block-compressed data file. 
Contributed by Grigori Rybkine

(cherry picked from commit 91db424c4360d7556660e8c57ac9a266e6688e01)
(cherry picked from commit 7f354877889b343878a8a09792d5cec8d2846a50)
(cherry picked from commit 9bd439e2c535b95ff0d2b5767b05a7ef43479298)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e062e2b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e062e2b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e062e2b0

Branch: refs/heads/branch-2.9
Commit: e062e2b08c56adb1fa7965a7de543810df6f5a91
Parents: 98499bb
Author: Chris Douglas 
Authored: Fri Jan 26 09:06:48 2018 -0800
Committer: Chris Douglas 
Committed: Fri Jan 26 09:16:08 2018 -0800

--
 .../main/java/org/apache/hadoop/io/MapFile.java | 35 ++--
 .../java/org/apache/hadoop/io/TestMapFile.java  | 59 +++-
 2 files changed, 88 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e062e2b0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
index f9e0145..8373e01 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
@@ -811,15 +811,40 @@ public class MapFile {
 (LongWritable.class));
 }
 try {
-  long pos = 0L;
+  /** What's the position (in bytes) we wrote when we got the last index */
+  long lastIndexPos = -1;
+  /**
+   * What was size when we last wrote an index. Set to MIN_VALUE to ensure
+   * that we have an index at position zero - midKey will throw an 
exception
+   * if this is not the case
+   */
+  long lastIndexKeyCount = Long.MIN_VALUE;
+  long pos = dataReader.getPosition();
   LongWritable position = new LongWritable();
+  long nextBlock = pos;
+  boolean blockCompressed = dataReader.isBlockCompressed();
   while(dataReader.next(key, value)) {
-cnt++;
-if (cnt % indexInterval == 0) {
+if (blockCompressed) {
+  long curPos = dataReader.getPosition();
+  if (curPos > nextBlock) {
+pos = nextBlock;   // current block position
+nextBlock = curPos;
+  }
+}
+// Follow the same logic as in
+// {@link MapFile.Writer#append(WritableComparable, Writable)}
+if (cnt >= lastIndexKeyCount + indexInterval && pos > lastIndexPos) {
   position.set(pos);
-  if (!dryrun) indexWriter.append(key, position);
+  if (!dryrun) {
+indexWriter.append(key, position);
+  }
+  lastIndexPos = pos;
+  lastIndexKeyCount = cnt;
+}
+if (!blockCompressed) {
+  pos = dataReader.getPosition(); // next record position
 }
-pos = dataReader.getPosition();
+cnt++;
   }
 } catch(Throwable t) {
   // truncated data file. swallow it.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e062e2b0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
index ff8df7c..7ec4227 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
@@ -485,6 +485,63 @@ public class TestMapFile {
   IOUtils.cleanup(null, writer);
 }
   }
+
+  /**
+   * test {@link MapFile#fix(FileSystem, Path, Class,
+   * Class, boolean, 
Configuration)}
+   * method in case of BLOCK compression
+   */
+  @Test
+  public void testFixBlockCompress() throws Exception {
+final String indexLessMapFile = "testFixBlockCompress.mapfile";
+final int compressBlocksize = 100;
+final int indexInterval = 4;
+final int noBlocks = 4;
+final String value = "value-";
+final int size = noBlocks * compressBlocksize / (4 + value.length());
+
+conf.setInt("io.seqfile.compress.blocksize", compressBlocksize);
+MapFile.Writer.setIndexInterval(conf, indexInterval);
+FileSystem fs = 

[2/3] hadoop git commit: MapFile.fix creates a wrong index file in case of block-compressed data file. Contributed by Grigori Rybkine

2018-01-26 Thread cdouglas
MapFile.fix creates a wrong index file in case of block-compressed data file. 
Contributed by Grigori Rybkine

(cherry picked from commit 91db424c4360d7556660e8c57ac9a266e6688e01)
(cherry picked from commit 7f354877889b343878a8a09792d5cec8d2846a50)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9bd439e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9bd439e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9bd439e2

Branch: refs/heads/branch-2
Commit: 9bd439e2c535b95ff0d2b5767b05a7ef43479298
Parents: 5f44bd3
Author: Chris Douglas 
Authored: Fri Jan 26 09:06:48 2018 -0800
Committer: Chris Douglas 
Committed: Fri Jan 26 09:15:53 2018 -0800

--
 .../main/java/org/apache/hadoop/io/MapFile.java | 35 ++--
 .../java/org/apache/hadoop/io/TestMapFile.java  | 59 +++-
 2 files changed, 88 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9bd439e2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
index f9e0145..8373e01 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
@@ -811,15 +811,40 @@ public class MapFile {
 (LongWritable.class));
 }
 try {
-  long pos = 0L;
+  /** What's the position (in bytes) we wrote when we got the last index */
+  long lastIndexPos = -1;
+  /**
+   * What was size when we last wrote an index. Set to MIN_VALUE to ensure
+   * that we have an index at position zero - midKey will throw an 
exception
+   * if this is not the case
+   */
+  long lastIndexKeyCount = Long.MIN_VALUE;
+  long pos = dataReader.getPosition();
   LongWritable position = new LongWritable();
+  long nextBlock = pos;
+  boolean blockCompressed = dataReader.isBlockCompressed();
   while(dataReader.next(key, value)) {
-cnt++;
-if (cnt % indexInterval == 0) {
+if (blockCompressed) {
+  long curPos = dataReader.getPosition();
+  if (curPos > nextBlock) {
+pos = nextBlock;   // current block position
+nextBlock = curPos;
+  }
+}
+// Follow the same logic as in
+// {@link MapFile.Writer#append(WritableComparable, Writable)}
+if (cnt >= lastIndexKeyCount + indexInterval && pos > lastIndexPos) {
   position.set(pos);
-  if (!dryrun) indexWriter.append(key, position);
+  if (!dryrun) {
+indexWriter.append(key, position);
+  }
+  lastIndexPos = pos;
+  lastIndexKeyCount = cnt;
+}
+if (!blockCompressed) {
+  pos = dataReader.getPosition(); // next record position
 }
-pos = dataReader.getPosition();
+cnt++;
   }
 } catch(Throwable t) {
   // truncated data file. swallow it.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9bd439e2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
index ff8df7c..7ec4227 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
@@ -485,6 +485,63 @@ public class TestMapFile {
   IOUtils.cleanup(null, writer);
 }
   }
+
+  /**
+   * test {@link MapFile#fix(FileSystem, Path, Class,
+   * Class, boolean, 
Configuration)}
+   * method in case of BLOCK compression
+   */
+  @Test
+  public void testFixBlockCompress() throws Exception {
+final String indexLessMapFile = "testFixBlockCompress.mapfile";
+final int compressBlocksize = 100;
+final int indexInterval = 4;
+final int noBlocks = 4;
+final String value = "value-";
+final int size = noBlocks * compressBlocksize / (4 + value.length());
+
+conf.setInt("io.seqfile.compress.blocksize", compressBlocksize);
+MapFile.Writer.setIndexInterval(conf, indexInterval);
+FileSystem fs = FileSystem.getLocal(conf);
+Path dir = new Path(TEST_DIR, indexLessMapFile);
+

hadoop git commit: HADOOP-15117. open(PathHandle) contract test should be exhaustive for default options

2017-12-30 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4bb765ee2 -> 7fe6f83c8


HADOOP-15117. open(PathHandle) contract test should be exhaustive for default 
options


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fe6f83c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fe6f83c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fe6f83c

Branch: refs/heads/trunk
Commit: 7fe6f83c8f0f67b1456c37d94b0de807e81a904a
Parents: 4bb765e
Author: Chris Douglas 
Authored: Sat Dec 30 17:58:35 2017 -0800
Committer: Chris Douglas 
Committed: Sat Dec 30 17:58:35 2017 -0800

--
 .../fs/contract/AbstractContractOpenTest.java   | 258 ---
 .../AbstractContractPathHandleTest.java | 246 ++
 .../hadoop/hdfs/protocol/HdfsPathHandle.java|   3 +
 .../hdfs/TestHDFSContractPathHandle.java|  55 
 4 files changed, 304 insertions(+), 258 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fe6f83c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
index ab179eb..d475c6e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
@@ -19,27 +19,17 @@ package org.apache.hadoop.fs.contract;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.InvalidPathHandleException;
-import org.apache.hadoop.fs.Options.HandleOpt;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathHandle;
-import org.apache.hadoop.fs.RawPathHandle;
 import org.apache.hadoop.io.IOUtils;
 
-import static org.apache.hadoop.fs.contract.ContractTestUtils.appendFile;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
-import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
-import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyRead;
-import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyFileContents;
 
 import org.junit.Test;
 
@@ -173,252 +163,4 @@ public abstract class AbstractContractOpenTest
 instream.close();
   }
 
-  /**
-   * Skip a test case if the FS doesn't support file references.
-   * The feature is assumed to be unsupported unless stated otherwise.
-   */
-  protected void assumeSupportsFileReference() throws IOException {
-if (getContract().isSupported(SUPPORTS_FILE_REFERENCE, false)) {
-  return;
-}
-skip("Skipping as unsupported feature: " + SUPPORTS_FILE_REFERENCE);
-  }
-
-  /**
-   * Skip a test case if the FS doesn't support content validation.
-   * The feature is assumed to be unsupported unless stated otherwise.
-   */
-  protected void assumeSupportsContentCheck() throws IOException {
-if (getContract().isSupported(SUPPORTS_CONTENT_CHECK, false)) {
-  return;
-}
-skip("Skipping as unsupported feature: " + SUPPORTS_CONTENT_CHECK);
-  }
-
-  private PathHandle getHandleOrSkip(FileStatus stat, HandleOpt... opts) {
-try {
-  return getFileSystem().getPathHandle(stat, opts);
-} catch (UnsupportedOperationException e) {
-  skip("FileSystem does not support " + Arrays.toString(opts));
-}
-// unreachable
-return null;
-  }
-
-  /**
-   * Verify {@link HandleOpt#exact()} handle semantics.
-   * @throws Throwable on error
-   */
-  @Test
-  public void testOpenFileByExact() throws Throwable {
-describe("verify open(getPathHandle(FileStatus, exact())) operations" +
-"detect changes");
-assumeSupportsContentCheck();
-assumeSupportsFileReference();
-Path path1 = path("testopenfilebyexact1");
-Path path2 = path("testopenfilebyexact2");
-byte[] file1 = dataset(TEST_FILE_LEN, 43, 255);
-createFile(getFileSystem(), path1, false, file1);
-FileStatus stat1 = getFileSystem().getFileStatus(path1);
-assertNotNull(stat1);
-assertEquals(path1, stat1.getPath());
-

hadoop git commit: HADOOP-15106. FileSystem::open(PathHandle) should throw a specific exception on validation failure

2017-12-16 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/trunk fc7ec80d8 -> 5e81f32d1


HADOOP-15106. FileSystem::open(PathHandle) should throw a specific exception on validation failure


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e81f32d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e81f32d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e81f32d

Branch: refs/heads/trunk
Commit: 5e81f32d1155ea96c892099008cfeb50799082eb
Parents: fc7ec80
Author: Chris Douglas 
Authored: Sat Dec 16 10:53:10 2017 -0800
Committer: Chris Douglas 
Committed: Sat Dec 16 10:53:10 2017 -0800

--
 .../java/org/apache/hadoop/fs/FileSystem.java   |  6 +++
 .../hadoop/fs/InvalidPathHandleException.java   | 46 
 .../src/site/markdown/filesystem/filesystem.md  |  2 +-
 .../fs/contract/AbstractContractOpenTest.java   |  7 +--
 .../hadoop/hdfs/DistributedFileSystem.java  |  3 ++
 .../hadoop/hdfs/protocol/HdfsPathHandle.java| 16 +++
 6 files changed, 67 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e81f32d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index a364921..6b7dead 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -957,6 +957,8 @@ public abstract class FileSystem extends Configured 
implements Closeable {
* resource directly and verify that the resource referenced
* satisfies constraints specified at its construction.
* @param fd PathHandle object returned by the FS authority.
+   * @throws InvalidPathHandleException If {@link PathHandle} constraints are
+   *not satisfied
* @throws IOException IO failure
* @throws UnsupportedOperationException If {@link #open(PathHandle, int)}
*   not overridden by subclass
@@ -973,6 +975,8 @@ public abstract class FileSystem extends Configured 
implements Closeable {
* satisfies constraints specified at its construction.
* @param fd PathHandle object returned by the FS authority.
* @param bufferSize the size of the buffer to use
+   * @throws InvalidPathHandleException If {@link PathHandle} constraints are
+   *not satisfied
* @throws IOException IO failure
* @throws UnsupportedOperationException If not overridden by subclass
*/
@@ -994,6 +998,8 @@ public abstract class FileSystem extends Configured 
implements Closeable {
* the specified constraints.
*/
   public final PathHandle getPathHandle(FileStatus stat, HandleOpt... opt) {
+// method is final with a default so clients calling getPathHandle(stat)
+// get the same semantics for all FileSystem implementations
 if (null == opt || 0 == opt.length) {
   return createPathHandle(stat, HandleOpt.path());
 }
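A hedged usage sketch of the API documented in the hunk above: obtain a handle with exact() semantics and reopen the file through it, handling the InvalidPathHandleException introduced by this commit. The path is a placeholder, and getPathHandle/open(PathHandle) may also throw UnsupportedOperationException on filesystems that do not implement handles.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.InvalidPathHandleException;
import org.apache.hadoop.fs.Options.HandleOpt;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;

public class PathHandleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);                      // assumes an FS with handle support, e.g. HDFS
    FileStatus stat = fs.getFileStatus(new Path("/tmp/handle-demo"));
    // exact(): the handle remains valid only while the file is neither moved nor modified
    PathHandle handle = fs.getPathHandle(stat, HandleOpt.exact());
    try (FSDataInputStream in = fs.open(handle)) {
      System.out.println("first byte: " + in.read());
    } catch (InvalidPathHandleException e) {
      // thrown, per this commit, when the referenced entity no longer satisfies the handle's constraints
      System.out.println("handle no longer valid: " + e.getMessage());
    }
  }
}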

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e81f32d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InvalidPathHandleException.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InvalidPathHandleException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InvalidPathHandleException.java
new file mode 100644
index 000..8e26ea7
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InvalidPathHandleException.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package 

[40/46] hadoop git commit: HDFS-12905. [READ] Handle decommissioning and under-maintenance Datanodes with Provided storage.

2017-12-15 Thread cdouglas
HDFS-12905. [READ] Handle decommissioning and under-maintenance Datanodes with Provided storage.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f6aa956
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f6aa956
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f6aa956

Branch: refs/heads/trunk
Commit: 0f6aa9564cbe0812a8cab36d999e353269dd6bc9
Parents: 2298f2d
Author: Virajith Jalaparti 
Authored: Fri Dec 8 10:07:40 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:41 2017 -0800

--
 .../blockmanagement/ProvidedStorageMap.java | 13 ++-
 .../TestNameNodeProvidedImplementation.java | 95 
 2 files changed, 107 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f6aa956/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
index 7fbc71a..208ed3e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
@@ -342,14 +342,25 @@ public class ProvidedStorageMap {
   return dn;
 }
   }
+  // prefer live nodes first.
+  DatanodeDescriptor dn = chooseRandomNode(excludedUUids, true);
+  if (dn == null) {
+dn = chooseRandomNode(excludedUUids, false);
+  }
+  return dn;
+}
 
+private DatanodeDescriptor chooseRandomNode(Set excludedUUids,
+boolean preferLiveNodes) {
   Random r = new Random();
   for (int i = dnR.size() - 1; i >= 0; --i) {
 int pos = r.nextInt(i + 1);
 DatanodeDescriptor node = dnR.get(pos);
 String uuid = node.getDatanodeUuid();
 if (!excludedUUids.contains(uuid)) {
-  return node;
+  if (!preferLiveNodes || node.getAdminState() == AdminStates.NORMAL) {
+return node;
+  }
 }
 Collections.swap(dnR, i, pos);
   }
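The selection loop above prefers live nodes and, if none qualify, the caller retries accepting any non-excluded node. A schematic restatement in plain Java (not the HDFS code; the excluded-UUID and admin-state checks are folded into a single predicate for brevity):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.function.Predicate;

public class PreferredRandomPick {
  /** Random scan over the list: returns a random element matching the filter,
   *  visiting each element at most once; null if none match. */
  static <T> T pickRandom(List<T> items, Predicate<T> filter) {
    Random r = new Random();
    for (int i = items.size() - 1; i >= 0; --i) {
      int pos = r.nextInt(i + 1);
      T candidate = items.get(pos);
      if (filter.test(candidate)) {
        return candidate;
      }
      // Push the rejected candidate out of the remaining range, as the hunk above does.
      Collections.swap(items, i, pos);
    }
    return null;
  }

  public static void main(String[] args) {
    List<String> nodes = new ArrayList<>(Arrays.asList("dn1:LIVE", "dn2:DECOMMISSIONING", "dn3:LIVE"));
    // First pass prefers live nodes; second pass accepts any node, mirroring HDFS-12905.
    String pick = pickRandom(nodes, n -> n.endsWith(":LIVE"));
    if (pick == null) {
      pick = pickRandom(nodes, n -> true);
    }
    System.out.println("chose " + pick);
  }
}

Swapping each rejected element to the end of the shrinking range guarantees every candidate is examined at most once, which is why the caller can safely run the loop twice with progressively weaker predicates.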

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f6aa956/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
--
diff --git 
a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
 
b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
index d057247..394e8d8 100644
--- 
a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
+++ 
b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
@@ -56,6 +56,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.ProvidedStorageMap;
@@ -795,4 +796,98 @@ public class TestNameNodeProvidedImplementation {
 FileUtils.deleteDirectory(tempDirectory);
   }
 
+  private DatanodeDescriptor getDatanodeDescriptor(DatanodeManager dnm,
+  int dnIndex) throws Exception {
+return 
dnm.getDatanode(cluster.getDataNodes().get(dnIndex).getDatanodeId());
+  }
+
+  private void startDecommission(FSNamesystem namesystem, DatanodeManager dnm,
+  int dnIndex) throws Exception {
+namesystem.writeLock();
+DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
+dnm.getDatanodeAdminManager().startDecommission(dnDesc);
+namesystem.writeUnlock();
+  }
+
+  private void startMaintenance(FSNamesystem namesystem, DatanodeManager dnm,
+  int dnIndex) throws Exception {
+namesystem.writeLock();
+DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
+dnm.getDatanodeAdminManager().startMaintenance(dnDesc, Long.MAX_VALUE);
+namesystem.writeUnlock();
+  }
+
+  private 

[36/46] hadoop git commit: HDFS-12903. [READ] Fix closing streams in ImageWriter

2017-12-15 Thread cdouglas
HDFS-12903. [READ] Fix closing streams in ImageWriter


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/962b5e72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/962b5e72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/962b5e72

Branch: refs/heads/trunk
Commit: 962b5e722ba86d1c012be11280c6b8fb5e0a2043
Parents: 71ec170
Author: Virajith Jalaparti 
Authored: Thu Dec 7 14:21:24 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:41 2017 -0800

--
 .../java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/962b5e72/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
--
diff --git 
a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
 
b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
index 0abc7a7..c21c282 100644
--- 
a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
+++ 
b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
@@ -183,9 +183,9 @@ public class ImageWriter implements Closeable {
   dirsTmp.deleteOnExit();
   dirsTmpStream = new FileOutputStream(dirsTmp);
   dirs = beginSection(dirsTmpStream);
-} catch (IOException e) {
+} catch (Throwable e) {
   IOUtils.cleanupWithLogger(null, raw, dirsTmpStream);
-  throw e;
+  throw new IOException(e);
 }
 
 try {
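Widening the catch from IOException to Throwable matters because beginSection can fail with an unchecked exception, which would otherwise leak the freshly opened stream. A small, self-contained illustration of the pattern (beginSection below is a hypothetical stand-in for the ImageWriter method):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

import org.apache.hadoop.io.IOUtils;

public class CleanupOnFailureSketch {

  // Hypothetical stand-in for ImageWriter#beginSection; fails with an unchecked exception.
  static OutputStream beginSection(OutputStream out) {
    throw new RuntimeException("codec initialization failed");
  }

  static void openSideFiles() throws IOException {
    FileOutputStream dirsTmpStream = null;
    OutputStream dirs = null;
    try {
      File dirsTmp = File.createTempFile("fsimg_dir", null);
      dirsTmp.deleteOnExit();
      dirsTmpStream = new FileOutputStream(dirsTmp);
      dirs = beginSection(dirsTmpStream);
    } catch (Throwable e) {
      // catch (IOException e) alone would miss the RuntimeException and leak dirsTmpStream
      IOUtils.cleanupWithLogger(null, dirs, dirsTmpStream);
      throw new IOException(e);
    }
  }

  public static void main(String[] args) {
    try {
      openSideFiles();
    } catch (IOException e) {
      System.out.println("failed, but streams were closed: " + e.getCause());
    }
  }
}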





[42/46] hadoop git commit: HDFS-12887. [READ] Allow Datanodes with Provided volumes to start when blocks with the same id exist locally

2017-12-15 Thread cdouglas
HDFS-12887. [READ] Allow Datanodes with Provided volumes to start when blocks with the same id exist locally


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71ec1701
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71ec1701
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71ec1701

Branch: refs/heads/trunk
Commit: 71ec170107e67e42cdbc5052c3f7b23c64751835
Parents: 4531588
Author: Virajith Jalaparti 
Authored: Wed Dec 6 09:42:31 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:41 2017 -0800

--
 .../hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71ec1701/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
index f65fbbc..59ec100 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
@@ -208,8 +208,8 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
 incrNumBlocks();
 incDfsUsed(region.getBlock().getNumBytes());
   } else {
-throw new IOException("A block with id " + newReplica.getBlockId()
-+ " already exists in the volumeMap");
+LOG.warn("A block with id " + newReplica.getBlockId()
++ " exists locally. Skipping PROVIDED replica");
   }
 }
   }





[20/46] hadoop git commit: HDFS-12778. [READ] Report multiple locations for PROVIDED blocks

2017-12-15 Thread cdouglas
HDFS-12778. [READ] Report multiple locations for PROVIDED blocks


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d3be87e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d3be87e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d3be87e

Branch: refs/heads/trunk
Commit: 3d3be87e301d9f8ab1a220bc5dbeae0f032c5a86
Parents: 3b1d303
Author: Virajith Jalaparti 
Authored: Tue Nov 21 14:54:57 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:39 2017 -0800

--
 .../blockmanagement/ProvidedStorageMap.java | 149 +++
 .../server/namenode/FixedBlockResolver.java |   3 +-
 .../TestNameNodeProvidedImplementation.java | 127 +++-
 3 files changed, 151 insertions(+), 128 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d3be87e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
index 2bc8faa..6fec977 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -72,6 +71,7 @@ public class ProvidedStorageMap {
   private final DatanodeStorageInfo providedStorageInfo;
   private boolean providedEnabled;
   private long capacity;
+  private int defaultReplication;
 
   ProvidedStorageMap(RwLock lock, BlockManager bm, Configuration conf)
   throws IOException {
@@ -95,6 +95,8 @@ public class ProvidedStorageMap {
 storageId, State.NORMAL, StorageType.PROVIDED);
 providedDescriptor = new ProvidedDescriptor();
 providedStorageInfo = providedDescriptor.createProvidedStorage(ds);
+this.defaultReplication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
+DFSConfigKeys.DFS_REPLICATION_DEFAULT);
 
 this.bm = bm;
 this.lock = lock;
@@ -198,63 +200,72 @@ public class ProvidedStorageMap {
*/
   class ProvidedBlocksBuilder extends LocatedBlockBuilder {
 
-private ShadowDatanodeInfoWithStorage pending;
-private boolean hasProvidedLocations;
-
 ProvidedBlocksBuilder(int maxBlocks) {
   super(maxBlocks);
-  pending = new ShadowDatanodeInfoWithStorage(
-  providedDescriptor, storageId);
-  hasProvidedLocations = false;
+}
+
+private DatanodeDescriptor chooseProvidedDatanode(
+Set excludedUUids) {
+  DatanodeDescriptor dn = providedDescriptor.choose(null, excludedUUids);
+  if (dn == null) {
+dn = providedDescriptor.choose(null);
+  }
+  return dn;
 }
 
 @Override
 LocatedBlock newLocatedBlock(ExtendedBlock eb,
 DatanodeStorageInfo[] storages, long pos, boolean isCorrupt) {
 
-  DatanodeInfoWithStorage[] locs =
-new DatanodeInfoWithStorage[storages.length];
-  String[] sids = new String[storages.length];
-  StorageType[] types = new StorageType[storages.length];
+  List locs = new ArrayList<>();
+  List sids = new ArrayList<>();
+  List types = new ArrayList<>();
+  boolean isProvidedBlock = false;
+  Set excludedUUids = new HashSet<>();
+
   for (int i = 0; i < storages.length; ++i) {
-sids[i] = storages[i].getStorageID();
-types[i] = storages[i].getStorageType();
-if (StorageType.PROVIDED.equals(storages[i].getStorageType())) {
-  locs[i] = pending;
-  hasProvidedLocations = true;
+DatanodeStorageInfo currInfo = storages[i];
+StorageType storageType = currInfo.getStorageType();
+sids.add(currInfo.getStorageID());
+types.add(storageType);
+if (StorageType.PROVIDED.equals(storageType)) {
+  DatanodeDescriptor dn = chooseProvidedDatanode(excludedUUids);
+  locs.add(
+  new DatanodeInfoWithStorage(
+  dn, currInfo.getStorageID(), currInfo.getStorageType()));
+  

[19/46] hadoop git commit: HDFS-12775. [READ] Fix reporting of Provided volumes

2017-12-15 Thread cdouglas
HDFS-12775. [READ] Fix reporting of Provided volumes


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b1d3030
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b1d3030
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b1d3030

Branch: refs/heads/trunk
Commit: 3b1d30301bcd35bbe525a7e122d3e5acfab92c88
Parents: e1a28f9
Author: Virajith Jalaparti 
Authored: Thu Nov 16 03:52:12 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:39 2017 -0800

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   1 -
 .../server/blockmanagement/BlockManager.java|  19 ++-
 .../blockmanagement/DatanodeDescriptor.java |  24 ++--
 .../blockmanagement/DatanodeStatistics.java |   3 +
 .../server/blockmanagement/DatanodeStats.java   |   4 +-
 .../blockmanagement/HeartbeatManager.java   |   9 +-
 .../blockmanagement/ProvidedStorageMap.java |  60 +++--
 .../blockmanagement/StorageTypeStats.java   |  33 -
 .../fsdataset/impl/DefaultProvidedVolumeDF.java |  58 -
 .../fsdataset/impl/ProvidedVolumeDF.java|  34 -
 .../fsdataset/impl/ProvidedVolumeImpl.java  | 101 ---
 .../federation/metrics/FederationMBean.java |   6 +
 .../federation/metrics/FederationMetrics.java   |   5 +
 .../federation/metrics/NamenodeBeanMetrics.java |  10 ++
 .../resolver/MembershipNamenodeResolver.java|   1 +
 .../resolver/NamenodeStatusReport.java  |  12 +-
 .../router/NamenodeHeartbeatService.java|   3 +-
 .../store/records/MembershipStats.java  |   4 +
 .../records/impl/pb/MembershipStatsPBImpl.java  |  10 ++
 .../hdfs/server/namenode/FSNamesystem.java  |  12 ++
 .../hdfs/server/namenode/NameNodeMXBean.java|  10 +-
 .../namenode/metrics/FSNamesystemMBean.java |   7 +-
 .../src/main/proto/FederationProtocol.proto |   1 +
 .../src/main/resources/hdfs-default.xml |   8 --
 .../src/main/webapps/hdfs/dfshealth.html|   1 +
 .../blockmanagement/TestProvidedStorageMap.java |  39 +++---
 .../fsdataset/impl/TestProvidedImpl.java|  55 ++--
 .../metrics/TestFederationMetrics.java  |   2 +
 .../TestNameNodeProvidedImplementation.java | 125 ---
 29 files changed, 425 insertions(+), 232 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index cb57675..fbdc859 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -331,7 +331,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_PROVIDED_ENABLED = 
"dfs.namenode.provided.enabled";
   public static final boolean DFS_NAMENODE_PROVIDED_ENABLED_DEFAULT = false;
 
-  public static final String DFS_PROVIDER_DF_CLASS = "dfs.provided.df.class";
   public static final String DFS_PROVIDER_STORAGEUUID = 
"dfs.provided.storage.id";
   public static final String DFS_PROVIDER_STORAGEUUID_DEFAULT =  "DS-PROVIDED";
   public static final String DFS_PROVIDED_ALIASMAP_CLASS = 
"dfs.provided.aliasmap.class";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 07502c1..f92c4e8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -103,6 +103,8 @@ import 
org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.hdfs.util.FoldedTreeSet;
 import 

[28/46] hadoop git commit: HDFS-12885. Add visibility/stability annotations. Contributed by Chris Douglas

2017-12-15 Thread cdouglas
HDFS-12885. Add visibility/stability annotations. Contributed by Chris Douglas


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a027055d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a027055d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a027055d

Branch: refs/heads/trunk
Commit: a027055dd2bf5009fe272e9ceb08305bd0a8cc31
Parents: b634053
Author: Virajith Jalaparti 
Authored: Tue Dec 5 09:51:09 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:40 2017 -0800

--
 .../apache/hadoop/hdfs/protocol/ProvidedStorageLocation.java| 4 
 .../org/apache/hadoop/hdfs/protocolPB/AliasMapProtocolPB.java   | 2 ++
 .../hdfs/protocolPB/AliasMapProtocolServerSideTranslatorPB.java | 4 
 .../InMemoryAliasMapProtocolClientSideTranslatorPB.java | 4 
 .../apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMap.java| 4 
 .../hadoop/hdfs/server/aliasmap/InMemoryAliasMapProtocol.java   | 4 
 .../hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java | 4 
 .../hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java  | 4 
 .../java/org/apache/hadoop/hdfs/server/common/BlockAlias.java   | 4 
 .../java/org/apache/hadoop/hdfs/server/common/FileRegion.java   | 4 
 .../hadoop/hdfs/server/common/blockaliasmap/BlockAliasMap.java  | 4 
 .../blockaliasmap/impl/InMemoryLevelDBAliasMapClient.java   | 4 
 .../common/blockaliasmap/impl/LevelDBFileRegionAliasMap.java| 4 
 .../common/blockaliasmap/impl/TextFileRegionAliasMap.java   | 4 
 .../hadoop/hdfs/server/datanode/FinalizedProvidedReplica.java   | 4 
 .../org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java | 4 
 .../hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java | 4 +++-
 .../org/apache/hadoop/hdfs/server/namenode/BlockResolver.java   | 4 
 .../java/org/apache/hadoop/hdfs/server/namenode/FSTreeWalk.java | 4 
 .../org/apache/hadoop/hdfs/server/namenode/FileSystemImage.java | 4 
 .../hdfs/server/namenode/FixedBlockMultiReplicaResolver.java| 4 
 .../apache/hadoop/hdfs/server/namenode/FixedBlockResolver.java  | 4 
 .../org/apache/hadoop/hdfs/server/namenode/FsUGIResolver.java   | 5 +
 .../org/apache/hadoop/hdfs/server/namenode/ImageWriter.java | 4 
 .../apache/hadoop/hdfs/server/namenode/NullBlockAliasMap.java   | 4 
 .../apache/hadoop/hdfs/server/namenode/SingleUGIResolver.java   | 4 
 .../java/org/apache/hadoop/hdfs/server/namenode/TreePath.java   | 4 
 .../java/org/apache/hadoop/hdfs/server/namenode/TreeWalk.java   | 5 +
 .../org/apache/hadoop/hdfs/server/namenode/UGIResolver.java | 4 
 .../org/apache/hadoop/hdfs/server/namenode/RandomTreeWalk.java  | 4 
 30 files changed, 119 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a027055d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ProvidedStorageLocation.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ProvidedStorageLocation.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ProvidedStorageLocation.java
index eee58ba..861ef8e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ProvidedStorageLocation.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ProvidedStorageLocation.java
@@ -16,6 +16,8 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;
 
 import javax.annotation.Nonnull;
@@ -25,6 +27,8 @@ import java.util.Arrays;
  * ProvidedStorageLocation is a location in an external storage system
  * containing the data for a block (~Replica).
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class ProvidedStorageLocation {
   private final Path path;
   private final long offset;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a027055d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/AliasMapProtocolPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/AliasMapProtocolPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/AliasMapProtocolPB.java
index 98b3ee1..4e14fad 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/AliasMapProtocolPB.java
+++ 

[32/46] hadoop git commit: HDFS-12685. [READ] FsVolumeImpl exception when scanning Provided storage volume

2017-12-15 Thread cdouglas
HDFS-12685. [READ] FsVolumeImpl exception when scanning Provided storage volume


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc933cba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc933cba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc933cba

Branch: refs/heads/trunk
Commit: cc933cba77c147153e463415fc192cee2d53a1ef
Parents: 4d59dab
Author: Virajith Jalaparti 
Authored: Thu Nov 30 10:11:12 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:40 2017 -0800

--
 .../impl/TextFileRegionAliasMap.java|  3 +-
 .../hdfs/server/datanode/DirectoryScanner.java  |  3 +-
 .../server/datanode/fsdataset/FsVolumeSpi.java  | 40 ++--
 .../fsdataset/impl/ProvidedVolumeImpl.java  |  4 +-
 .../fsdataset/impl/TestProvidedImpl.java| 19 ++
 5 files changed, 37 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc933cba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
index 80f48c1..bd04d60 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
@@ -439,7 +439,8 @@ public class TextFileRegionAliasMap
 
   @Override
   public void refresh() throws IOException {
-//nothing to do;
+throw new UnsupportedOperationException(
+"Refresh not supported by " + getClass());
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc933cba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 8fb8551..ab9743c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -515,7 +515,8 @@ public class DirectoryScanner implements Runnable {
*
* @return a map of sorted arrays of block information
*/
-  private Map getDiskReport() {
+  @VisibleForTesting
+  public Map getDiskReport() {
 ScanInfoPerBlockPool list = new ScanInfoPerBlockPool();
 ScanInfoPerBlockPool[] dirReports = null;
 // First get list of data directories

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc933cba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
index 15e71f0..20a153d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
@@ -296,8 +296,23 @@ public interface FsVolumeSpi
  */
 public ScanInfo(long blockId, File blockFile, File metaFile,
 FsVolumeSpi vol) {
-  this(blockId, blockFile, metaFile, vol, null,
-  (blockFile != null) ? blockFile.length() : 0);
+  this.blockId = blockId;
+  String condensedVolPath =
+  (vol == null || vol.getBaseURI() == null) ? null :
+  getCondensedPath(new File(vol.getBaseURI()).getAbsolutePath());
+  this.blockSuffix = blockFile == null ? null :
+  getSuffix(blockFile, condensedVolPath);
+  this.blockLength = (blockFile != null) ? blockFile.length() : 0;
+  if (metaFile == null) {
+this.metaSuffix = null;
+  } else if (blockFile == null) {
+this.metaSuffix = getSuffix(metaFile, condensedVolPath);
+  } else {
+this.metaSuffix = getSuffix(metaFile,

[22/46] hadoop git commit: HDFS-12789. [READ] Image generation tool does not close an opened stream

2017-12-15 Thread cdouglas
HDFS-12789. [READ] Image generation tool does not close an opened stream


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87dc026b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87dc026b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87dc026b

Branch: refs/heads/trunk
Commit: 87dc026beec5d69a84771631ebca5fadb2f7195b
Parents: c293cc8
Author: Virajith Jalaparti 
Authored: Wed Nov 8 10:28:50 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:39 2017 -0800

--
 .../hadoop/hdfs/server/namenode/ImageWriter.java   | 17 -
 1 file changed, 12 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87dc026b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
--
diff --git 
a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
 
b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
index ea1888a..390bb39 100644
--- 
a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
+++ 
b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
@@ -165,16 +165,23 @@ public class ImageWriter implements Closeable {
 
 // create directory and inode sections as side-files.
 // The details are written to files to avoid keeping them in memory.
-dirsTmp = File.createTempFile("fsimg_dir", null);
-dirsTmp.deleteOnExit();
-dirs = beginSection(new FileOutputStream(dirsTmp));
+FileOutputStream dirsTmpStream = null;
+try {
+  dirsTmp = File.createTempFile("fsimg_dir", null);
+  dirsTmp.deleteOnExit();
+  dirsTmpStream = new FileOutputStream(dirsTmp);
+  dirs = beginSection(dirsTmpStream);
+} catch (IOException e) {
+  IOUtils.cleanupWithLogger(null, raw, dirsTmpStream);
+  throw e;
+}
+
 try {
   inodesTmp = File.createTempFile("fsimg_inode", null);
   inodesTmp.deleteOnExit();
   inodes = new FileOutputStream(inodesTmp);
 } catch (IOException e) {
-  // appropriate to close raw?
-  IOUtils.cleanup(null, raw, dirs);
+  IOUtils.cleanupWithLogger(null, raw, dirsTmpStream, dirs);
   throw e;
 }
 





[43/46] hadoop git commit: HDFS-12874. Documentation for provided storage. Contributed by Virajith Jalaparti

2017-12-15 Thread cdouglas
HDFS-12874. Documentation for provided storage. Contributed by Virajith Jalaparti


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2298f2d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2298f2d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2298f2d7

Branch: refs/heads/trunk
Commit: 2298f2d76b2cafd84c8f7421ae792336d6f2f37a
Parents: 962b5e7
Author: Chris Douglas 
Authored: Thu Dec 7 17:41:00 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:41 2017 -0800

--
 .../src/main/resources/hdfs-default.xml |   2 +-
 .../src/site/markdown/HdfsProvidedStorage.md| 247 +++
 2 files changed, 248 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2298f2d7/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 3dc583c..7b5ccbc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4643,7 +4643,7 @@
 
   The class that is used to specify the input format of the blocks on
   provided storages. The default is
-  org.apache.hadoop.hdfs.server.common.TextFileRegionAliasMap which uses
+  org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap which uses
   file regions to describe blocks. The file regions are specified as a
   delimited text file. Each file region is a 6-tuple containing the
   block id, remote file path, offset into file, length of block, the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2298f2d7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md
new file mode 100644
index 000..7455044
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md
@@ -0,0 +1,247 @@
+
+
+HDFS Provided Storage
+=
+
+Provided storage allows data *stored outside HDFS* to be mapped to and addressed
+from HDFS. It builds on [heterogeneous storage](./ArchivalStorage.html) by
+introducing a new storage type, `PROVIDED`, to the set of media in a datanode.
+Clients accessing data in
+`PROVIDED` storages can cache replicas in local media, enforce HDFS invariants
+(e.g., security, quotas), and address more data than the cluster could persist
+in the storage attached to DataNodes. This architecture is particularly useful
+in scenarios where HDFS clusters are ephemeral (e.g., cloud scenarios), and/or
+require to read data that lives in other storage systems (e.g., blob stores).
+
+Provided storage is an experimental feature in HDFS.
+
+
+
+Introduction
+
+
+As of this writing, support for mounting external storage as `PROVIDED` blocks
+is limited to creating a *read-only image* of a remote namespace that implements the
+`org.apache.hadoop.fs.FileSystem` interface, and starting a NameNode
+to serve the image. Specifically, reads from a snapshot of a remote namespace are
+supported. Adding a remote namespace to an existing/running namenode, refreshing the
+remote snapshot, unmounting, and writes are not available in this release. One
+can use [ViewFs](./ViewFs.html) and [RBF](HDFSRouterFederation.html) to
+integrate namespaces with `PROVIDED` storage into an existing deployment.
+
+Creating HDFS Clusters with `PROVIDED` Storage
+--
+
+One can create snapshots of the remote namespace using the `fs2img` tool. Given
+a path to a remote `FileSystem`, the tool creates an _image_ mirroring the
+namespace and an _alias map_ that maps blockIDs in the generated image to a
+`FileRegion` in the remote filesystem. A `FileRegion` contains sufficient information to
+address a fixed sequence of bytes in the remote `FileSystem` (e.g., file, offset, length)
+and a nonce to verify that the region is unchanged since the image was generated.
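As a minimal, illustrative sketch of the kind of configuration the next paragraph describes (the keys are taken from the DFSConfigKeys changes quoted elsewhere in this thread; the values are placeholders, and this is not the complete documented setup, which also covers image generation and DataNode volume layout):

import org.apache.hadoop.conf.Configuration;

public class ProvidedStorageConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Enable PROVIDED storage handling on the NameNode.
    conf.setBoolean("dfs.namenode.provided.enabled", true);
    // Storage ID shared by the NameNode and DataNodes for the PROVIDED volume; DS-PROVIDED is the default.
    conf.set("dfs.provided.storage.id", "DS-PROVIDED");
    // Alias map implementation; the text-file based map is the default discussed in this document.
    conf.set("dfs.provided.aliasmap.class",
        "org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap");
    System.out.println("provided enabled = "
        + conf.getBoolean("dfs.namenode.provided.enabled", false));
  }
}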
+
+After the NameNode image and alias map are created, the NameNode and DataNodes
+must be configured to consistently reference this address space. When a DataNode
+registers with an attached, `PROVIDED` storage, the NameNode considers all the
+external blocks to be addressable through that DataNode, and may begin to direct
+clients to it. Symmetrically, the DataNode must be able to map every block in
+the 

[10/46] hadoop git commit: HDFS-12584. [READ] Fix errors in image generation tool from latest rebase

2017-12-15 Thread cdouglas
HDFS-12584. [READ] Fix errors in image generation tool from latest rebase


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17052c4a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17052c4a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17052c4a

Branch: refs/heads/trunk
Commit: 17052c4aff104cb02701bc1e8dc9cd73d1a325fb
Parents: aca023b
Author: Virajith Jalaparti 
Authored: Tue Oct 3 14:44:17 2017 -0700
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:38 2017 -0800

--
 hadoop-tools/hadoop-fs2img/pom.xml  |  4 +--
 .../hdfs/server/namenode/RandomTreeWalk.java| 28 +---
 2 files changed, 14 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17052c4a/hadoop-tools/hadoop-fs2img/pom.xml
--
diff --git a/hadoop-tools/hadoop-fs2img/pom.xml 
b/hadoop-tools/hadoop-fs2img/pom.xml
index 36096b7..e1411f8 100644
--- a/hadoop-tools/hadoop-fs2img/pom.xml
+++ b/hadoop-tools/hadoop-fs2img/pom.xml
@@ -17,12 +17,12 @@
   
 org.apache.hadoop
 hadoop-project
-3.0.0-alpha3-SNAPSHOT
+3.1.0-SNAPSHOT
 ../../hadoop-project
   
   org.apache.hadoop
   hadoop-fs2img
-  3.0.0-alpha3-SNAPSHOT
+  3.1.0-SNAPSHOT
   fs2img
   fs2img
   jar

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17052c4a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/RandomTreeWalk.java
--
diff --git 
a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/RandomTreeWalk.java
 
b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/RandomTreeWalk.java
index c82c489..d002e4a 100644
--- 
a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/RandomTreeWalk.java
+++ 
b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/RandomTreeWalk.java
@@ -113,22 +113,18 @@ public class RandomTreeWalk extends TreeWalk {
 final long len = isDir ? 0 : r.nextInt(Integer.MAX_VALUE);
 final int nblocks = 0 == len ? 0 : (((int)((len - 1) / blocksize)) + 1);
 BlockLocation[] blocks = genBlocks(r, nblocks, blocksize, len);
-try {
-  return new LocatedFileStatus(new FileStatus(
-  len,  /* long length, */
-  isDir,/* boolean isdir,   */
-  1,/* int block_replication,   */
-  blocksize,/* long blocksize,  */
-  0L,   /* long modification_time,  */
-  0L,   /* long access_time,*/
-  null, /* FsPermission permission, */
-  "hadoop", /* String owner,*/
-  "hadoop", /* String group,*/
-  name),/* Path path*/
-  blocks);
-} catch (IOException e) {
-  throw new RuntimeException(e);
-}
+return new LocatedFileStatus(new FileStatus(
+len,  /* long length, */
+isDir,/* boolean isdir,   */
+1,/* int block_replication,   */
+blocksize,/* long blocksize,  */
+0L,   /* long modification_time,  */
+0L,   /* long access_time,*/
+null, /* FsPermission permission, */
+"hadoop", /* String owner,*/
+"hadoop", /* String group,*/
+name),/* Path path*/
+blocks);
   }
 
   BlockLocation[] genBlocks(Random r, int nblocks, int blocksize, long len) {





[27/46] hadoop git commit: HDFS-12591. [READ] Implement LevelDBFileRegionFormat. Contributed by Ewan Higgs.

2017-12-15 Thread cdouglas
HDFS-12591. [READ] Implement LevelDBFileRegionFormat. Contributed by Ewan Higgs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b634053c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b634053c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b634053c

Branch: refs/heads/trunk
Commit: b634053c4daec181511abb314aeef0a8fe851086
Parents: 352f994
Author: Virajith Jalaparti 
Authored: Sat Dec 2 12:22:00 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:40 2017 -0800

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   2 +
 .../impl/LevelDBFileRegionAliasMap.java | 257 +++
 .../impl/TestLevelDBFileRegionAliasMap.java | 115 +
 3 files changed, 374 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b634053c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 00976f9..7db0a8d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -352,6 +352,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_PROVIDED_ALIASMAP_TEXT_CODEC = 
"dfs.provided.aliasmap.text.codec";
   public static final String DFS_PROVIDED_ALIASMAP_TEXT_WRITE_PATH = 
"dfs.provided.aliasmap.text.write.path";
 
+  public static final String DFS_PROVIDED_ALIASMAP_LEVELDB_PATH = 
"dfs.provided.aliasmap.leveldb.read.path";
+
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int DFS_LIST_LIMIT_DEFAULT = 1000;
   public static final String  DFS_CONTENT_SUMMARY_LIMIT_KEY = 
"dfs.content-summary.limit";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b634053c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/LevelDBFileRegionAliasMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/LevelDBFileRegionAliasMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/LevelDBFileRegionAliasMap.java
new file mode 100644
index 000..66971a3
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/LevelDBFileRegionAliasMap.java
@@ -0,0 +1,257 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.common.blockaliasmap.impl;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Optional;
+
+import org.iq80.leveldb.DB;
+import org.iq80.leveldb.DBIterator;
+import static org.fusesource.leveldbjni.JniDBFactory.factory;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation;
+import org.apache.hadoop.hdfs.server.common.FileRegion;
+import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_LEVELDB_PATH;
+import static 
org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap.fromBlockBytes;
+import static 
org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap.fromProvidedStorageLocationBytes;
+import static 
org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap.toProtoBufBytes;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A LevelDB based implementation of {@link 

[05/46] hadoop git commit: HDFS-10675. Datanode support to read from external stores.

2017-12-15 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b668eb91/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
index adec209..15e71f0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
+import org.apache.hadoop.hdfs.server.common.FileRegion;
 import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
@@ -241,10 +242,11 @@ public interface FsVolumeSpi
 
 private final FsVolumeSpi volume;
 
+private final FileRegion fileRegion;
 /**
  * Get the file's length in async block scan
  */
-private final long blockFileLength;
+private final long blockLength;
 
 private final static Pattern CONDENSED_PATH_REGEX =
 Pattern.compile("(?http://git-wip-us.apache.org/repos/asf/hadoop/blob/b668eb91/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/DefaultProvidedVolumeDF.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/DefaultProvidedVolumeDF.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/DefaultProvidedVolumeDF.java
new file mode 100644
index 000..24921c4
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/DefaultProvidedVolumeDF.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * The default usage statistics for a provided volume.
+ */
+public class DefaultProvidedVolumeDF
+implements ProvidedVolumeDF, Configurable {
+
+  @Override
+  public void setConf(Configuration conf) {
+  }
+
+  @Override
+  public Configuration getConf() {
+return null;
+  }
+
+  @Override
+  public long getCapacity() {
+return Long.MAX_VALUE;
+  }
+
+  @Override
+  public long getSpaceUsed() {
+return 0;
+  }
+
+  @Override
+  public long getBlockPoolUsed(String bpid) {
+return 0;
+  }
+
+  @Override
+  public long getAvailable() {
+return Long.MAX_VALUE;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b668eb91/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 095bc8f..db8d60c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -86,6 +86,7 @@ import 
org.apache.hadoop.hdfs.server.datanode.UnexpectedReplicaStateException;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 

[26/46] hadoop git commit: HDFS-12777. [READ] Reduce memory and CPU footprint for PROVIDED volumes.

2017-12-15 Thread cdouglas
HDFS-12777. [READ] Reduce memory and CPU footprint for PROVIDED volumes.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1a28f95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1a28f95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1a28f95

Branch: refs/heads/trunk
Commit: e1a28f95b8ffcb86300148f10a23b710f8388341
Parents: 6cd80b2
Author: Virajith Jalaparti 
Authored: Fri Nov 10 10:19:33 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:39 2017 -0800

--
 .../hdfs/server/datanode/DirectoryScanner.java  |  4 +
 .../datanode/FinalizedProvidedReplica.java  |  8 ++
 .../hdfs/server/datanode/ProvidedReplica.java   | 77 +++-
 .../hdfs/server/datanode/ReplicaBuilder.java| 37 +-
 .../fsdataset/impl/ProvidedVolumeImpl.java  | 30 +++-
 .../fsdataset/impl/TestProvidedImpl.java| 76 ---
 6 files changed, 196 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1a28f95/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 3b6d06c..8fb8551 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -530,6 +530,10 @@ public class DirectoryScanner implements Runnable {
   new HashMap();
 
   for (int i = 0; i < volumes.size(); i++) {
+if (volumes.get(i).getStorageType() == StorageType.PROVIDED) {
+  // Disable scanning PROVIDED volumes to keep overhead low
+  continue;
+}
 ReportCompiler reportCompiler =
 new ReportCompiler(datanode, volumes.get(i));
 Future result =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1a28f95/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedProvidedReplica.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedProvidedReplica.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedProvidedReplica.java
index e23d6be..bcc9a38 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedProvidedReplica.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedProvidedReplica.java
@@ -21,6 +21,7 @@ import java.net.URI;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
@@ -37,6 +38,13 @@ public class FinalizedProvidedReplica extends 
ProvidedReplica {
 remoteFS);
   }
 
+  public FinalizedProvidedReplica(long blockId, Path pathPrefix,
+  String pathSuffix, long fileOffset, long blockLen, long genStamp,
+  FsVolumeSpi volume, Configuration conf, FileSystem remoteFS) {
+super(blockId, pathPrefix, pathSuffix, fileOffset, blockLen,
+genStamp, volume, conf, remoteFS);
+  }
+
   @Override
   public ReplicaState getState() {
 return ReplicaState.FINALIZED;

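Part of the footprint reduction in this patch comes from the new constructor above, which takes a shared path prefix plus a short per-replica suffix instead of a full URI per replica. Below is a minimal sketch of how such a pair recombines into a block location; the names and paths are made up, and only the Path arithmetic is the point.

import org.apache.hadoop.fs.Path;

public class PathPrefixSketch {
  public static void main(String[] args) {
    // One Path object can be shared by every replica on a volume...
    Path sharedPrefix = new Path("hdfs://remote-store/data");
    // ...while each replica keeps only its short, per-block suffix.
    String blockSuffix = "block-0001";
    // Recombining the two yields the full location of the block data.
    Path blockPath = new Path(sharedPrefix, blockSuffix);
    System.out.println(blockPath); // hdfs://remote-store/data/block-0001
  }
}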
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1a28f95/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
index 2b3bd13..8681421 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
@@ -23,6 +23,7 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.URI;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.io.input.BoundedInputStream;
 

[09/46] hadoop git commit: HDFS-12605. [READ] TestNameNodeProvidedImplementation#testProvidedDatanodeFailures fails after rebase

2017-12-15 Thread cdouglas
HDFS-12605. [READ] 
TestNameNodeProvidedImplementation#testProvidedDatanodeFailures fails after 
rebase


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6a9a899
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6a9a899
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6a9a899

Branch: refs/heads/trunk
Commit: d6a9a8997339939b59ce36246225f7cc45b21da5
Parents: 17052c4
Author: Virajith Jalaparti 
Authored: Wed Oct 18 13:53:11 2017 -0700
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:38 2017 -0800

--
 .../hdfs/server/blockmanagement/DatanodeDescriptor.java | 12 
 .../namenode/TestNameNodeProvidedImplementation.java|  6 +++---
 2 files changed, 15 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6a9a899/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 28a3d1a..e3d6582 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -489,6 +489,18 @@ public class DatanodeDescriptor extends DatanodeInfo {
 synchronized (storageMap) {
   DatanodeStorageInfo storage = storageMap.get(s.getStorageID());
   if (null == storage) {
+LOG.info("Adding new storage ID {} for DN {}", s.getStorageID(),
+getXferAddr());
+DFSTopologyNodeImpl parent = null;
+if (getParent() instanceof DFSTopologyNodeImpl) {
+  parent = (DFSTopologyNodeImpl) getParent();
+}
+StorageType type = s.getStorageType();
+if (!hasStorageType(type) && parent != null) {
+  // we are about to add a type this node currently does not have,
+  // inform the parent that a new type is added to this datanode
+  parent.childAddStorage(getName(), type);
+}
 storageMap.put(s.getStorageID(), s);
   } else {
 assert storage == s : "found " + storage + " expected " + s;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6a9a899/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
--
diff --git 
a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
 
b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
index 3f937c4..d622b9e 100644
--- 
a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
+++ 
b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
@@ -481,13 +481,13 @@ public class TestNameNodeProvidedImplementation {
   assertEquals(providedDatanode2.getDatanodeUuid(),
   dnInfos[0].getDatanodeUuid());
 
-  //stop the 2nd provided datanode
-  cluster.stopDataNode(1);
+  // stop the 2nd provided datanode
+  MiniDFSCluster.DataNodeProperties providedDNProperties2 =
+  cluster.stopDataNode(0);
   // make NameNode detect that datanode is down
   BlockManagerTestUtil.noticeDeadDatanode(
   cluster.getNameNode(),
   providedDatanode2.getDatanodeId().getXferAddr());
-
   getAndCheckBlockLocations(client, filename, 0);
 
   //restart the provided datanode


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[17/46] hadoop git commit: HDFS-11792. [READ] Test cases for ProvidedVolumeDF and ProviderBlockIteratorImpl

2017-12-15 Thread cdouglas
HDFS-11792. [READ] Test cases for ProvidedVolumeDF and ProviderBlockIteratorImpl


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55ade54b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55ade54b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55ade54b

Branch: refs/heads/trunk
Commit: 55ade54b8ed36e18f028f478381a96e7b8c6be50
Parents: 4851f06
Author: Virajith Jalaparti 
Authored: Wed May 31 15:17:12 2017 -0700
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:38 2017 -0800

--
 .../fsdataset/impl/ProvidedVolumeImpl.java  |  6 +-
 .../fsdataset/impl/TestProvidedImpl.java| 94 ++--
 2 files changed, 92 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55ade54b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
index a48e117..421b9cc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
@@ -191,7 +191,11 @@ public class ProvidedVolumeImpl extends FsVolumeImpl {
 
   @Override
   long getBlockPoolUsed(String bpid) throws IOException {
-return df.getBlockPoolUsed(bpid);
+if (bpSlices.containsKey(bpid)) {
+  return df.getBlockPoolUsed(bpid);
+} else {
+  throw new IOException("block pool " + bpid + " is not found");
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55ade54b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
index 2c119fe..4753235 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
@@ -83,6 +83,7 @@ public class TestProvidedImpl {
   private static final String BASE_DIR =
   new FileSystemTestHelper().getTestRootDir();
   private static final int NUM_LOCAL_INIT_VOLUMES = 1;
+  // Only one provided volume is supported for now.
   private static final int NUM_PROVIDED_INIT_VOLUMES = 1;
   private static final String[] BLOCK_POOL_IDS = {"bpid-0", "bpid-1"};
   private static final int NUM_PROVIDED_BLKS = 10;
@@ -208,6 +209,39 @@ public class TestProvidedImpl {
 }
   }
 
+  public static class TestProvidedVolumeDF
+  implements ProvidedVolumeDF, Configurable {
+
+@Override
+public void setConf(Configuration conf) {
+}
+
+@Override
+public Configuration getConf() {
+  return null;
+}
+
+@Override
+public long getCapacity() {
+  return Long.MAX_VALUE;
+}
+
+@Override
+public long getSpaceUsed() {
+  return -1;
+}
+
+@Override
+public long getBlockPoolUsed(String bpid) {
+  return -1;
+}
+
+@Override
+public long getAvailable() {
+  return Long.MAX_VALUE;
+}
+  }
+
   private static Storage.StorageDirectory createLocalStorageDirectory(
   File root, Configuration conf)
   throws SecurityException, IOException {
@@ -299,8 +333,8 @@ public class TestProvidedImpl {
   public void setUp() throws IOException {
 datanode = mock(DataNode.class);
 storage = mock(DataStorage.class);
-this.conf = new Configuration();
-this.conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
+conf = new Configuration();
+conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
 
 when(datanode.getConf()).thenReturn(conf);
 final DNConf dnConf = new DNConf(datanode);
@@ -312,8 +346,10 @@ public class TestProvidedImpl {
 new ShortCircuitRegistry(conf);
 when(datanode.getShortCircuitRegistry()).thenReturn(shortCircuitRegistry);
 
-this.conf.setClass(DFSConfigKeys.DFS_PROVIDER_CLASS,
+conf.setClass(DFSConfigKeys.DFS_PROVIDER_CLASS,
 TestFileRegionProvider.class, 

[39/46] hadoop git commit: HDFS-12912. [READ] Fix configuration and implementation of LevelDB-based alias maps

2017-12-15 Thread cdouglas
HDFS-12912. [READ] Fix configuration and implementation of LevelDB-based alias 
maps


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80c3fec3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80c3fec3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80c3fec3

Branch: refs/heads/trunk
Commit: 80c3fec3a13c41051daaae42e5c9a9fedf5c7ee7
Parents: c89b29b
Author: Virajith Jalaparti 
Authored: Wed Dec 13 13:39:21 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:41 2017 -0800

--
 .../hdfs/server/aliasmap/InMemoryAliasMap.java  | 42 ++--
 .../aliasmap/InMemoryLevelDBAliasMapServer.java |  9 +++--
 .../impl/LevelDBFileRegionAliasMap.java |  5 +++
 .../src/site/markdown/HdfsProvidedStorage.md|  4 +-
 .../server/aliasmap/ITestInMemoryAliasMap.java  |  9 +++--
 .../server/aliasmap/TestInMemoryAliasMap.java   |  2 +-
 .../impl/TestInMemoryLevelDBAliasMapClient.java |  2 +
 .../impl/TestLevelDbMockAliasMapClient.java |  2 +-
 .../TestNameNodeProvidedImplementation.java |  2 +
 9 files changed, 45 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/80c3fec3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMap.java
index 3d9eeea..142a040 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMap.java
@@ -59,6 +59,7 @@ public class InMemoryAliasMap implements 
InMemoryAliasMapProtocol,
 
   private final DB levelDb;
   private Configuration conf;
+  private String blockPoolID;
 
   @Override
   public void setConf(Configuration conf) {
@@ -79,32 +80,38 @@ public class InMemoryAliasMap implements 
InMemoryAliasMapProtocol,
 .toString();
   }
 
-  public static @Nonnull InMemoryAliasMap init(Configuration conf)
-  throws IOException {
+  public static @Nonnull InMemoryAliasMap init(Configuration conf,
+  String blockPoolID) throws IOException {
 Options options = new Options();
 options.createIfMissing(true);
 String directory =
 conf.get(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_LEVELDB_DIR);
 LOG.info("Attempting to load InMemoryAliasMap from \"{}\"", directory);
-File path = new File(directory);
-if (!path.exists()) {
+File levelDBpath;
+if (blockPoolID != null) {
+  levelDBpath = new File(directory, blockPoolID);
+} else {
+  levelDBpath = new File(directory);
+}
+if (!levelDBpath.exists()) {
   String error = createPathErrorMessage(directory);
   throw new IOException(error);
 }
-DB levelDb = JniDBFactory.factory.open(path, options);
-InMemoryAliasMap aliasMap = new InMemoryAliasMap(levelDb);
+DB levelDb = JniDBFactory.factory.open(levelDBpath, options);
+InMemoryAliasMap aliasMap = new InMemoryAliasMap(levelDb, blockPoolID);
 aliasMap.setConf(conf);
 return aliasMap;
   }
 
   @VisibleForTesting
-  InMemoryAliasMap(DB levelDb) {
+  InMemoryAliasMap(DB levelDb, String blockPoolID) {
 this.levelDb = levelDb;
+this.blockPoolID = blockPoolID;
   }
 
   @Override
   public IterationResult list(Optional<Block> marker) throws IOException {
-return withIterator((DBIterator iterator) -> {
+try (DBIterator iterator = levelDb.iterator()) {
   Integer batchSize =
   conf.getInt(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_BATCH_SIZE,
   DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_BATCH_SIZE_DEFAULT);
@@ -130,8 +137,7 @@ public class InMemoryAliasMap implements 
InMemoryAliasMapProtocol,
   } else {
 return new IterationResult(batch, Optional.empty());
   }
-
-});
+}
   }
 
   public @Nonnull Optional<ProvidedStorageLocation> read(@Nonnull Block block)
@@ -159,7 +165,7 @@ public class InMemoryAliasMap implements 
InMemoryAliasMapProtocol,
 
   @Override
   public String getBlockPoolId() {
-return null;
+return blockPoolID;
   }
 
   public void close() throws IOException {
@@ -202,21 +208,15 @@ public class InMemoryAliasMap implements 
InMemoryAliasMapProtocol,
 return blockOutputStream.toByteArray();
   }
 
-  private IterationResult withIterator(
-  CheckedFunction func) throws IOException {
-try (DBIterator iterator = levelDb.iterator()) {
-  return func.apply(iterator);

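A usage sketch of the per-block-pool layout introduced above, based on the init(Configuration, String) signature in the hunk; the directory and block pool id are made-up values, and the LevelDB directory (including the block pool subdirectory) must already exist or init throws an IOException, as the hunk shows.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap;

public class AliasMapInitSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Root directory for the LevelDB-backed alias map (hypothetical path).
    conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_LEVELDB_DIR,
        "/var/lib/hdfs/aliasmap");
    // With this change the store is opened under a per-block-pool
    // subdirectory, e.g. /var/lib/hdfs/aliasmap/BP-1234-127.0.0.1-1500000000000.
    InMemoryAliasMap aliasMap =
        InMemoryAliasMap.init(conf, "BP-1234-127.0.0.1-1500000000000");
    try {
      System.out.println("alias map for " + aliasMap.getBlockPoolId());
    } finally {
      aliasMap.close();
    }
  }
}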
[29/46] hadoop git commit: HDFS-12713. [READ] Refactor FileRegion and BlockAliasMap to separate out HDFS metadata and PROVIDED storage metadata. Contributed by Ewan Higgs

2017-12-15 Thread cdouglas
HDFS-12713. [READ] Refactor FileRegion and BlockAliasMap to separate out HDFS 
metadata and PROVIDED storage metadata. Contributed by Ewan Higgs


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c35be86
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c35be86
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c35be86

Branch: refs/heads/trunk
Commit: 9c35be86e17021202823bfd3c2067ff3b312ce5c
Parents: a027055
Author: Virajith Jalaparti 
Authored: Tue Dec 5 13:46:30 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:40 2017 -0800

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 10 +--
 .../AliasMapProtocolServerSideTranslatorPB.java | 10 +++
 ...yAliasMapProtocolClientSideTranslatorPB.java | 17 -
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  2 +-
 .../hdfs/server/aliasmap/InMemoryAliasMap.java  |  7 +-
 .../aliasmap/InMemoryAliasMapProtocol.java  |  7 ++
 .../aliasmap/InMemoryLevelDBAliasMapServer.java | 13 +++-
 .../blockmanagement/ProvidedStorageMap.java |  8 ++-
 .../hadoop/hdfs/server/common/FileRegion.java   | 30 ++--
 .../common/blockaliasmap/BlockAliasMap.java | 14 ++--
 .../impl/InMemoryLevelDBAliasMapClient.java | 24 ++-
 .../impl/LevelDBFileRegionAliasMap.java | 22 --
 .../impl/TextFileRegionAliasMap.java| 76 
 .../fsdataset/impl/ProvidedVolumeImpl.java  | 41 ++-
 .../hadoop/hdfs/server/namenode/NameNode.java   |  6 +-
 .../hdfs/server/protocol/NamespaceInfo.java |  4 ++
 .../src/main/proto/AliasMapProtocol.proto   |  8 +++
 .../src/main/resources/hdfs-default.xml | 23 +-
 .../blockmanagement/TestProvidedStorageMap.java |  4 +-
 .../impl/TestInMemoryLevelDBAliasMapClient.java | 41 +--
 .../impl/TestLevelDBFileRegionAliasMap.java | 10 +--
 .../impl/TestLevelDbMockAliasMapClient.java | 19 +++--
 .../impl/TestTextBlockAliasMap.java | 55 +++---
 .../fsdataset/impl/TestProvidedImpl.java|  9 ++-
 .../hdfs/server/namenode/FileSystemImage.java   |  4 ++
 .../hdfs/server/namenode/ImageWriter.java   | 14 +++-
 .../hdfs/server/namenode/NullBlockAliasMap.java |  6 +-
 .../hadoop/hdfs/server/namenode/TreePath.java   |  3 +-
 .../TestNameNodeProvidedImplementation.java | 24 +++
 29 files changed, 346 insertions(+), 165 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c35be86/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 7db0a8d..2ef2bf0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -342,17 +342,19 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final String DFS_PROVIDER_STORAGEUUID = 
"dfs.provided.storage.id";
   public static final String DFS_PROVIDER_STORAGEUUID_DEFAULT =  "DS-PROVIDED";
   public static final String DFS_PROVIDED_ALIASMAP_CLASS = 
"dfs.provided.aliasmap.class";
+  public static final String DFS_PROVIDED_ALIASMAP_LOAD_RETRIES = 
"dfs.provided.aliasmap.load.retries";
 
   public static final String DFS_PROVIDED_ALIASMAP_TEXT_DELIMITER = 
"dfs.provided.aliasmap.text.delimiter";
   public static final String DFS_PROVIDED_ALIASMAP_TEXT_DELIMITER_DEFAULT = 
",";
 
-  public static final String DFS_PROVIDED_ALIASMAP_TEXT_READ_PATH = 
"dfs.provided.aliasmap.text.read.path";
-  public static final String DFS_PROVIDED_ALIASMAP_TEXT_PATH_DEFAULT = 
"file:///tmp/blocks.csv";
+  public static final String DFS_PROVIDED_ALIASMAP_TEXT_READ_FILE = 
"dfs.provided.aliasmap.text.read.file";
+  public static final String DFS_PROVIDED_ALIASMAP_TEXT_READ_FILE_DEFAULT = 
"file:///tmp/blocks.csv";
 
   public static final String DFS_PROVIDED_ALIASMAP_TEXT_CODEC = 
"dfs.provided.aliasmap.text.codec";
-  public static final String DFS_PROVIDED_ALIASMAP_TEXT_WRITE_PATH = 
"dfs.provided.aliasmap.text.write.path";
+  public static final String DFS_PROVIDED_ALIASMAP_TEXT_WRITE_DIR = 
"dfs.provided.aliasmap.text.write.dir";
+  public static final String DFS_PROVIDED_ALIASMAP_TEXT_WRITE_DIR_DEFAULT = 
"file:///tmp/";
 
-  public static final String DFS_PROVIDED_ALIASMAP_LEVELDB_PATH = 
"dfs.provided.aliasmap.leveldb.read.path";
+  public static final String DFS_PROVIDED_ALIASMAP_LEVELDB_PATH = 
"dfs.provided.aliasmap.leveldb.path";
 
   public static final String  

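The key renames above are easier to see when set side by side. A small, hypothetical configuration sketch using the new names from this patch; the paths are placeholders, not recommended values.

import org.apache.hadoop.conf.Configuration;

public class AliasMapConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Text-based alias map: the read key now names a file, the write key a directory.
    conf.set("dfs.provided.aliasmap.text.read.file", "file:///tmp/blocks.csv");
    conf.set("dfs.provided.aliasmap.text.write.dir", "file:///tmp/");
    // LevelDB-based alias map: the key no longer carries a ".read" component.
    conf.set("dfs.provided.aliasmap.leveldb.path", "/var/lib/hdfs/aliasmap");
    System.out.println(conf.get("dfs.provided.aliasmap.text.read.file"));
    System.out.println(conf.get("dfs.provided.aliasmap.text.write.dir"));
    System.out.println(conf.get("dfs.provided.aliasmap.leveldb.path"));
  }
}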
[34/46] hadoop git commit: HDFS-12665. [AliasMap] Create a version of the AliasMap that runs in memory in the Namenode (leveldb). Contributed by Ewan Higgs.

2017-12-15 Thread cdouglas
HDFS-12665. [AliasMap] Create a version of the AliasMap that runs in memory in 
the Namenode (leveldb). Contributed by Ewan Higgs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/352f994b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/352f994b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/352f994b

Branch: refs/heads/trunk
Commit: 352f994b6484524cdcfcda021046c59905b62f31
Parents: cc933cb
Author: Virajith Jalaparti 
Authored: Thu Nov 30 10:37:28 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:40 2017 -0800

--
 .../hdfs/protocol/ProvidedStorageLocation.java  |  85 +
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  32 ++
 .../src/main/proto/hdfs.proto   |  14 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   7 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 +
 .../hdfs/protocolPB/AliasMapProtocolPB.java |  35 ++
 .../AliasMapProtocolServerSideTranslatorPB.java | 120 +++
 ...yAliasMapProtocolClientSideTranslatorPB.java | 159 +
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  28 ++
 .../hdfs/server/aliasmap/InMemoryAliasMap.java  | 213 
 .../aliasmap/InMemoryAliasMapProtocol.java  |  92 +
 .../aliasmap/InMemoryLevelDBAliasMapServer.java | 141 
 .../hadoop/hdfs/server/common/FileRegion.java   |  89 ++---
 .../common/blockaliasmap/BlockAliasMap.java |  19 +-
 .../impl/InMemoryLevelDBAliasMapClient.java | 156 +
 .../impl/TextFileRegionAliasMap.java|  40 ++-
 .../datanode/FinalizedProvidedReplica.java  |  11 +
 .../hdfs/server/datanode/ReplicaBuilder.java|   7 +-
 .../fsdataset/impl/ProvidedVolumeImpl.java  |  38 +--
 .../hadoop/hdfs/server/namenode/NameNode.java   |  21 ++
 .../src/main/proto/AliasMapProtocol.proto   |  60 
 .../src/main/resources/hdfs-default.xml |  34 ++
 .../server/aliasmap/ITestInMemoryAliasMap.java  | 126 +++
 .../server/aliasmap/TestInMemoryAliasMap.java   |  45 +++
 .../blockmanagement/TestProvidedStorageMap.java |   1 -
 .../impl/TestInMemoryLevelDBAliasMapClient.java | 341 +++
 .../impl/TestLevelDbMockAliasMapClient.java | 116 +++
 .../fsdataset/impl/TestProvidedImpl.java|   9 +-
 hadoop-project/pom.xml  |   8 +-
 hadoop-tools/hadoop-fs2img/pom.xml  |   6 +
 .../hdfs/server/namenode/NullBlockAliasMap.java |   9 +-
 .../TestNameNodeProvidedImplementation.java |  65 +++-
 32 files changed, 2016 insertions(+), 120 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/352f994b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ProvidedStorageLocation.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ProvidedStorageLocation.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ProvidedStorageLocation.java
new file mode 100644
index 000..eee58ba
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ProvidedStorageLocation.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.fs.Path;
+
+import javax.annotation.Nonnull;
+import java.util.Arrays;
+
+/**
+ * ProvidedStorageLocation is a location in an external storage system
+ * containing the data for a block (~Replica).
+ */
+public class ProvidedStorageLocation {
+  private final Path path;
+  private final long offset;
+  private final long length;
+  private final byte[] nonce;
+
+  public ProvidedStorageLocation(Path path, long offset, long length,
+  byte[] nonce) {
+this.path = path;
+this.offset = offset;
+this.length = length;
+this.nonce = Arrays.copyOf(nonce, nonce.length);
+  }
+
+  public @Nonnull Path 

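Before the diff is cut off, the constructor above already shows the shape of the new type. A minimal construction sketch; the external path, offset, and length are invented values, and the empty nonce simply means no verification data is attached.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation;

public class ProvidedStorageLocationSketch {
  public static void main(String[] args) {
    // A block whose bytes live at offset 4096 of an external file and span 1 MiB.
    Path externalFile = new Path("s3a://bucket/warehouse/part-00000");
    ProvidedStorageLocation location = new ProvidedStorageLocation(
        externalFile, 4096L, 1024L * 1024L, new byte[0]);
    System.out.println("block mapped to " + externalFile
        + " [offset=4096, length=" + (1024L * 1024L) + "]");
  }
}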
[37/46] hadoop git commit: HDFS-12712. [9806] Code style cleanup

2017-12-15 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8239e3af/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
--
diff --git 
a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
 
b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
deleted file mode 100644
index 1023616..000
--- 
a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
+++ /dev/null
@@ -1,934 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.io.Writer;
-import java.net.InetSocketAddress;
-import java.nio.ByteBuffer;
-import java.nio.channels.Channels;
-import java.nio.channels.ReadableByteChannel;
-import java.nio.file.Files;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Random;
-import java.util.Set;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap;
-import org.apache.hadoop.hdfs.server.aliasmap.InMemoryLevelDBAliasMapServer;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.ProvidedStorageMap;
-import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
-import 
org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.InMemoryLevelDBAliasMapClient;
-import 
org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
-
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
-import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.net.NodeBase;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static 
org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap.fileNameFromBlockPoolID;
-import static org.apache.hadoop.net.NodeBase.PATH_SEPARATOR_STR;
-import static org.junit.Assert.*;
-
-public class TestNameNodeProvidedImplementation {
-
-  @Rule public TestName name = new TestName();
-  public static final Logger LOG =
-  LoggerFactory.getLogger(TestNameNodeProvidedImplementation.class);
-
-  final Random r = new Random();
-  final File fBASE = new 

[07/46] hadoop git commit: HDFS-11653. [READ] ProvidedReplica should return an InputStream that is bounded by its length

2017-12-15 Thread cdouglas
HDFS-11653. [READ] ProvidedReplica should return an InputStream that is bounded 
by its length


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1108cb76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1108cb76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1108cb76

Branch: refs/heads/trunk
Commit: 1108cb76917debf0a8541d5130e015883eb521af
Parents: d65df0f
Author: Virajith Jalaparti 
Authored: Thu May 4 12:43:48 2017 -0700
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:37 2017 -0800

--
 .../hdfs/server/datanode/ProvidedReplica.java   |   5 +-
 .../datanode/TestProvidedReplicaImpl.java   | 163 +++
 2 files changed, 167 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1108cb76/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
index b021ea2..946ab5a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
@@ -22,6 +22,8 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.URI;
+
+import org.apache.commons.io.input.BoundedInputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -98,7 +100,8 @@ public abstract class ProvidedReplica extends ReplicaInfo {
 if (remoteFS != null) {
   FSDataInputStream ins = remoteFS.open(new Path(fileURI));
   ins.seek(fileOffset + seekOffset);
-  return new FSDataInputStream(ins);
+  return new BoundedInputStream(
+  new FSDataInputStream(ins), getBlockDataLength());
 } else {
   throw new IOException("Remote filesystem for provided replica " + this +
   " does not exist");

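The one-line fix above wraps the remote stream so a reader can never run past the replica's length into whatever follows it in the backing file. A standalone sketch of the same idea with Commons IO; the byte array stands in for a remote file, and the offsets and lengths are made up.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.io.input.BoundedInputStream;

public class BoundedReadSketch {
  public static void main(String[] args) throws IOException {
    // Pretend this is a large remote file holding several blocks back to back.
    byte[] backingFile = new byte[4096];
    InputStream raw = new ByteArrayInputStream(backingFile);
    long blockLength = 512;
    // Seek to the block's offset, then cap all reads at the block's length.
    long skipped = raw.skip(1024);
    try (InputStream block = new BoundedInputStream(raw, blockLength)) {
      byte[] buf = new byte[2048];
      int n = block.read(buf);
      // Prints 512: the bound, not the buffer size, limits the read.
      System.out.println("skipped " + skipped + ", read " + n + " bytes");
    }
  }
}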
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1108cb76/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java
new file mode 100644
index 000..8258c21
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java
@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.channels.Channels;
+import java.nio.channels.ReadableByteChannel;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.io.input.BoundedInputStream;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tests the implementation of {@link ProvidedReplica}.
+ */
+public class TestProvidedReplicaImpl {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestProvidedReplicaImpl.class);
+  private static final String BASE_DIR =

[31/46] hadoop git commit: HDFS-12809. [READ] Fix the randomized selection of locations in {{ProvidedBlocksBuilder}}.

2017-12-15 Thread cdouglas
HDFS-12809. [READ] Fix the randomized selection of locations in 
{{ProvidedBlocksBuilder}}.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d59dabb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d59dabb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d59dabb

Branch: refs/heads/trunk
Commit: 4d59dabb7f6ef1d8565bf2bb2d38aeb91bf7f7cc
Parents: 3d3be87
Author: Virajith Jalaparti 
Authored: Mon Nov 27 17:04:20 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:40 2017 -0800

--
 .../blockmanagement/ProvidedStorageMap.java | 112 +++
 .../TestNameNodeProvidedImplementation.java |  26 -
 2 files changed, 61 insertions(+), 77 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d59dabb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
index 6fec977..c85eb2c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
@@ -19,11 +19,12 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
 import java.util.NavigableMap;
+import java.util.Random;
 import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentSkipListMap;
@@ -229,11 +230,8 @@ public class ProvidedStorageMap {
 sids.add(currInfo.getStorageID());
 types.add(storageType);
 if (StorageType.PROVIDED.equals(storageType)) {
-  DatanodeDescriptor dn = chooseProvidedDatanode(excludedUUids);
-  locs.add(
-  new DatanodeInfoWithStorage(
-  dn, currInfo.getStorageID(), currInfo.getStorageType()));
-  excludedUUids.add(dn.getDatanodeUuid());
+  // Provided location will be added to the list of locations after
+  // examining all local locations.
   isProvidedBlock = true;
 } else {
   locs.add(new DatanodeInfoWithStorage(
@@ -245,11 +243,17 @@ public class ProvidedStorageMap {
 
   int numLocations = locs.size();
   if (isProvidedBlock) {
+// add the first datanode here
+DatanodeDescriptor dn = chooseProvidedDatanode(excludedUUids);
+locs.add(
+new DatanodeInfoWithStorage(dn, storageId, StorageType.PROVIDED));
+excludedUUids.add(dn.getDatanodeUuid());
+numLocations++;
 // add more replicas until we reach the defaultReplication
 for (int count = numLocations + 1;
 count <= defaultReplication && count <= providedDescriptor
 .activeProvidedDatanodes(); count++) {
-  DatanodeDescriptor dn = chooseProvidedDatanode(excludedUUids);
+  dn = chooseProvidedDatanode(excludedUUids);
   locs.add(new DatanodeInfoWithStorage(
   dn, storageId, StorageType.PROVIDED));
   sids.add(storageId);
@@ -284,6 +288,9 @@ public class ProvidedStorageMap {
 
private final NavigableMap<String, DatanodeDescriptor> dns =
new ConcurrentSkipListMap<>();
+// maintain a separate list of the datanodes with provided storage
+// to efficiently choose Datanodes when required.
+private final List<DatanodeDescriptor> dnR = new ArrayList<>();
 public final static String NETWORK_LOCATION = "/REMOTE";
 public final static String NAME = "PROVIDED";
 
@@ -300,8 +307,8 @@ public class ProvidedStorageMap {
 
 DatanodeStorageInfo getProvidedStorage(
 DatanodeDescriptor dn, DatanodeStorage s) {
-  LOG.info("X adding Datanode " + dn.getDatanodeUuid());
   dns.put(dn.getDatanodeUuid(), dn);
+  dnR.add(dn);
   // TODO: maintain separate RPC ident per dn
   return storageMap.get(s.getStorageID());
 }
@@ -315,84 +322,42 @@ public class ProvidedStorageMap {
 }
 
 DatanodeDescriptor choose(DatanodeDescriptor client) {
-  // exact match for now
-  DatanodeDescriptor dn = client != null ?
-  dns.get(client.getDatanodeUuid()) : null;
-  if (null == dn) {
-dn = chooseRandom();
-  }
-  return dn;
+  return choose(client, Collections.emptySet());
 }
 
 
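The patch keeps a separate list (dnR) of datanodes with PROVIDED storage and picks one at random while honouring an exclusion set. The sketch below shows only that selection pattern on plain strings; the real code works on DatanodeDescriptors and excludes datanode UUIDs already chosen for the block.

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Random;
import java.util.Set;

public class ExcludingRandomChoiceSketch {
  private static final Random RAND = new Random();

  /** Pick a random element not in the excluded set, or null if none is left. */
  static String choose(List<String> datanodes, Set<String> excluded) {
    if (datanodes.isEmpty()) {
      return null;
    }
    // Start at a random index and walk the list so every candidate is tried once.
    int start = RAND.nextInt(datanodes.size());
    for (int i = 0; i < datanodes.size(); i++) {
      String dn = datanodes.get((start + i) % datanodes.size());
      if (!excluded.contains(dn)) {
        return dn;
      }
    }
    return null;
  }

  public static void main(String[] args) {
    List<String> dns = Arrays.asList("dn-1", "dn-2", "dn-3");
    Set<String> excluded = new HashSet<>(Arrays.asList("dn-2"));
    System.out.println(choose(dns, excluded)); // dn-1 or dn-3, never dn-2
  }
}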

[01/46] hadoop git commit: HDFS-11703. [READ] Tests for ProvidedStorageMap

2017-12-15 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/trunk 44825f096 -> fc7ec80d8


HDFS-11703. [READ] Tests for ProvidedStorageMap


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89b9faf5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89b9faf5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89b9faf5

Branch: refs/heads/trunk
Commit: 89b9faf5294c93f66ba7bbe08f5ab9083ecb5d72
Parents: aa5ec85
Author: Virajith Jalaparti 
Authored: Thu May 4 13:14:41 2017 -0700
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:37 2017 -0800

--
 .../blockmanagement/ProvidedStorageMap.java |   6 +
 .../blockmanagement/TestProvidedStorageMap.java | 153 +++
 2 files changed, 159 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89b9faf5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
index 518b7e9..0faf16d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
@@ -28,6 +28,7 @@ import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentSkipListMap;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -121,6 +122,11 @@ public class ProvidedStorageMap {
 return dn.getStorageInfo(s.getStorageID());
   }
 
+  @VisibleForTesting
+  public DatanodeStorageInfo getProvidedStorageInfo() {
+return providedStorageInfo;
+  }
+
   public LocatedBlockBuilder newLocatedBlocks(int maxValue) {
 if (!providedEnabled) {
   return new LocatedBlockBuilder(maxValue);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89b9faf5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
new file mode 100644
index 000..50e2fed
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.util.RwLock;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * This class tests the {@link ProvidedStorageMap}.
+ */
+public class TestProvidedStorageMap {
+
+  private Configuration conf;
+  private BlockManager bm;
+  private RwLock nameSystemLock;
+  private String providedStorageID;
+
+  static class 

[30/46] hadoop git commit: HDFS-12894. [READ] Skip setting block count of ProvidedDatanodeStorageInfo on DN registration update

2017-12-15 Thread cdouglas
HDFS-12894. [READ] Skip setting block count of ProvidedDatanodeStorageInfo on 
DN registration update


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb996a32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb996a32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb996a32

Branch: refs/heads/trunk
Commit: fb996a32a98a25c0fe34a8ebb28563b53cd6e20e
Parents: 9c35be8
Author: Virajith Jalaparti 
Authored: Tue Dec 5 17:55:32 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:40 2017 -0800

--
 .../server/blockmanagement/BlockManager.java|  5 +
 .../blockmanagement/DatanodeDescriptor.java |  4 +++-
 .../TestNameNodeProvidedImplementation.java | 20 +++-
 3 files changed, 27 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb996a32/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f92c4e8..916cbaa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4943,4 +4943,9 @@ public class BlockManager implements BlockStatsMXBean {
   public void setBlockRecoveryTimeout(long blockRecoveryTimeout) {
 pendingRecoveryBlocks.setRecoveryTimeoutInterval(blockRecoveryTimeout);
   }
+
+  @VisibleForTesting
+  public ProvidedStorageMap getProvidedStorageMap() {
+return providedStorageMap;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb996a32/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 83c608f..fc58708 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -919,7 +919,9 @@ public class DatanodeDescriptor extends DatanodeInfo {
 
 // must re-process IBR after re-registration
 for(DatanodeStorageInfo storage : getStorageInfos()) {
-  storage.setBlockReportCount(0);
+  if (storage.getStorageType() != StorageType.PROVIDED) {
+storage.setBlockReportCount(0);
+  }
 }
 heartbeatedSinceRegistration = false;
 forceRegistration = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb996a32/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
--
diff --git 
a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
 
b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
index deaf9d5..d057247 100644
--- 
a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
+++ 
b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
@@ -559,7 +559,9 @@ public class TestNameNodeProvidedImplementation {
 DataNode providedDatanode2 = cluster.getDataNodes().get(1);
 
 DFSClient client = new DFSClient(new InetSocketAddress("localhost",
-cluster.getNameNodePort()), cluster.getConfiguration(0));
+cluster.getNameNodePort()), cluster.getConfiguration(0));
+
+DatanodeStorageInfo providedDNInfo = getProvidedDatanodeStorageInfo();
 
 if (numFiles >= 1) {
   String filename = "/" + filePrefix + (numFiles - 1) + fileSuffix;
@@ -596,10 +598,15 @@ public class TestNameNodeProvidedImplementation {
   providedDatanode2.getDatanodeId().getXferAddr());
   getAndCheckBlockLocations(client, filename, baseFileLen, 1, 0);
 
+  // The block report (BR) count for the ProvidedDatanodeStorageInfo should
+  // reset to 0 when all DNs with PROVIDED storage fail.
+  assertEquals(0, 

[12/46] hadoop git commit: HDFS-12093. [READ] Share remoteFS between ProvidedReplica instances.

2017-12-15 Thread cdouglas
HDFS-12093. [READ] Share remoteFS between ProvidedReplica instances.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2407c9b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2407c9b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2407c9b9

Branch: refs/heads/trunk
Commit: 2407c9b93aabb021b76c802b19c928fb6cbb0a85
Parents: 663b3c0
Author: Virajith Jalaparti 
Authored: Mon Aug 7 14:31:15 2017 -0700
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:38 2017 -0800

--
 .../datanode/FinalizedProvidedReplica.java  |  6 +++--
 .../hdfs/server/datanode/ProvidedReplica.java   | 25 +++-
 .../hdfs/server/datanode/ReplicaBuilder.java| 11 +++--
 .../fsdataset/impl/ProvidedVolumeImpl.java  | 17 +
 .../datanode/TestProvidedReplicaImpl.java   |  2 +-
 5 files changed, 40 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2407c9b9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedProvidedReplica.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedProvidedReplica.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedProvidedReplica.java
index 722d573..e23d6be 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedProvidedReplica.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedProvidedReplica.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
 import java.net.URI;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
@@ -31,8 +32,9 @@ public class FinalizedProvidedReplica extends ProvidedReplica 
{
 
   public FinalizedProvidedReplica(long blockId, URI fileURI,
   long fileOffset, long blockLen, long genStamp,
-  FsVolumeSpi volume, Configuration conf) {
-super(blockId, fileURI, fileOffset, blockLen, genStamp, volume, conf);
+  FsVolumeSpi volume, Configuration conf, FileSystem remoteFS) {
+super(blockId, fileURI, fileOffset, blockLen, genStamp, volume, conf,
+remoteFS);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2407c9b9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
index 946ab5a..2b3bd13 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
@@ -65,16 +65,23 @@ public abstract class ProvidedReplica extends ReplicaInfo {
* @param volume the volume this block belongs to
*/
   public ProvidedReplica(long blockId, URI fileURI, long fileOffset,
-  long blockLen, long genStamp, FsVolumeSpi volume, Configuration conf) {
+  long blockLen, long genStamp, FsVolumeSpi volume, Configuration conf,
+  FileSystem remoteFS) {
 super(volume, blockId, blockLen, genStamp);
 this.fileURI = fileURI;
 this.fileOffset = fileOffset;
 this.conf = conf;
-try {
-  this.remoteFS = FileSystem.get(fileURI, this.conf);
-} catch (IOException e) {
-  LOG.warn("Failed to obtain filesystem for " + fileURI);
-  this.remoteFS = null;
+if (remoteFS != null) {
+  this.remoteFS = remoteFS;
+} else {
+  LOG.warn(
+  "Creating an reference to the remote FS for provided block " + this);
+  try {
+this.remoteFS = FileSystem.get(fileURI, this.conf);
+  } catch (IOException e) {
+LOG.warn("Failed to obtain filesystem for " + fileURI);
+this.remoteFS = null;
+  }
 }
   }
 
@@ -83,11 +90,7 @@ public abstract class ProvidedReplica extends ReplicaInfo {
 this.fileURI = r.fileURI;
 this.fileOffset = r.fileOffset;
 this.conf = r.conf;
-try {
-  this.remoteFS = FileSystem.newInstance(fileURI, this.conf);
-} catch (IOException e) {
-  this.remoteFS = null;
-}
+this.remoteFS = r.remoteFS;
   

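The change above resolves the remote FileSystem once and hands the same instance to every replica on the volume, instead of each replica calling FileSystem.get for itself. A small sketch of that sharing; the file:// URI keeps it runnable without a cluster, and the loop stands in for building many replicas.

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class SharedRemoteFsSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    URI base = URI.create("file:///tmp/provided-store/");
    // Resolve the remote FileSystem once per volume...
    FileSystem remoteFS = FileSystem.get(base, conf);
    // ...and pass the same instance into every replica built on that volume.
    for (int blockId = 0; blockId < 3; blockId++) {
      System.out.println("replica " + blockId + " shares " + remoteFS.getUri());
    }
  }
}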
[45/46] hadoop git commit: HDFS-12903. [READ] Fix closing streams in ImageWriter. Contributed by Virajith Jalaparti

2017-12-15 Thread cdouglas
HDFS-12903. [READ] Fix closing streams in ImageWriter. Contributed by Virajith 
Jalaparti


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b3a7859
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b3a7859
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b3a7859

Branch: refs/heads/trunk
Commit: 4b3a785914d890c47745e57d12a5a9abd084ffc1
Parents: e515103
Author: Chris Douglas 
Authored: Fri Dec 15 17:41:46 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:42 2017 -0800

--
 .../dev-support/findbugs-exclude.xml| 28 
 1 file changed, 28 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b3a7859/hadoop-tools/hadoop-fs2img/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-tools/hadoop-fs2img/dev-support/findbugs-exclude.xml 
b/hadoop-tools/hadoop-fs2img/dev-support/findbugs-exclude.xml
new file mode 100644
index 000..b60767f
--- /dev/null
+++ b/hadoop-tools/hadoop-fs2img/dev-support/findbugs-exclude.xml
@@ -0,0 +1,28 @@
+
+
+
+
+  
+  
+
+
+
+  
+
+


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[08/46] hadoop git commit: HDFS-11190. [READ] Namenode support for data stored in external stores.

2017-12-15 Thread cdouglas
HDFS-11190. [READ] Namenode support for data stored in external stores.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d65df0f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d65df0f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d65df0f2

Branch: refs/heads/trunk
Commit: d65df0f27395792c6e25f5e03b6ba1765e2ba925
Parents: 8da3a6e
Author: Virajith Jalaparti 
Authored: Fri Apr 21 11:12:36 2017 -0700
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:37 2017 -0800

--
 .../hadoop/hdfs/protocol/LocatedBlock.java  |  96 -
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   5 +
 .../blockmanagement/BlockFormatProvider.java|  91 
 .../server/blockmanagement/BlockManager.java|  95 +++--
 .../server/blockmanagement/BlockProvider.java   |  65 +++
 .../BlockStoragePolicySuite.java|   6 +
 .../blockmanagement/DatanodeDescriptor.java |  34 +-
 .../server/blockmanagement/DatanodeManager.java |   2 +
 .../blockmanagement/DatanodeStorageInfo.java|   4 +
 .../blockmanagement/LocatedBlockBuilder.java| 109 +
 .../blockmanagement/ProvidedStorageMap.java | 427 +++
 .../src/main/resources/hdfs-default.xml |  30 +-
 .../hadoop/hdfs/TestBlockStoragePolicy.java |   4 +
 .../blockmanagement/TestDatanodeManager.java|  65 ++-
 .../TestNameNodeProvidedImplementation.java | 345 +++
 15 files changed, 1292 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d65df0f2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index 85bec92..5ad0bca 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.protocol;
 
 import java.util.Arrays;
+import java.util.Comparator;
 import java.util.List;
 
 import com.google.common.base.Preconditions;
@@ -62,40 +63,50 @@ public class LocatedBlock {
 
   public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs) {
 // By default, startOffset is unknown(-1) and corrupt is false.
-this(b, locs, null, null, -1, false, EMPTY_LOCS);
+this(b, convert(locs, null, null), null, null, -1, false, EMPTY_LOCS);
   }
 
   public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs,
   String[] storageIDs, StorageType[] storageTypes) {
-this(b, locs, storageIDs, storageTypes, -1, false, EMPTY_LOCS);
+this(b, convert(locs, storageIDs, storageTypes),
+ storageIDs, storageTypes, -1, false, EMPTY_LOCS);
   }
 
-  public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, String[] 
storageIDs,
-  StorageType[] storageTypes, long startOffset,
+  public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs,
+  String[] storageIDs, StorageType[] storageTypes, long startOffset,
+  boolean corrupt, DatanodeInfo[] cachedLocs) {
+this(b, convert(locs, storageIDs, storageTypes),
+storageIDs, storageTypes, startOffset, corrupt,
+null == cachedLocs || 0 == cachedLocs.length ? EMPTY_LOCS : 
cachedLocs);
+  }
+
+  public LocatedBlock(ExtendedBlock b, DatanodeInfoWithStorage[] locs,
+  String[] storageIDs, StorageType[] storageTypes, long startOffset,
   boolean corrupt, DatanodeInfo[] cachedLocs) {
 this.b = b;
 this.offset = startOffset;
 this.corrupt = corrupt;
-if (locs==null) {
-  this.locs = EMPTY_LOCS;
-} else {
-  this.locs = new DatanodeInfoWithStorage[locs.length];
-  for(int i = 0; i < locs.length; i++) {
-DatanodeInfo di = locs[i];
-DatanodeInfoWithStorage storage = new DatanodeInfoWithStorage(di,
-storageIDs != null ? storageIDs[i] : null,
-storageTypes != null ? storageTypes[i] : null);
-this.locs[i] = storage;
-  }
-}
+this.locs = null == locs ? EMPTY_LOCS : locs;
 this.storageIDs = storageIDs;
 this.storageTypes = storageTypes;
+this.cachedLocs = null == cachedLocs || 0 == cachedLocs.length
+  ? EMPTY_LOCS
+  : cachedLocs;
+  }
+
+  private static DatanodeInfoWithStorage[] convert(
+  DatanodeInfo[] infos, String[] storageIDs, StorageType[] storageTypes) {
+if (null == infos) {
+  return EMPTY_LOCS;
+}
 
-if (cachedLocs == null || cachedLocs.length == 0) {
-  
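
The remainder of the convert() helper is truncated above, but its role is visible from the constructor signatures: callers keep passing plain DatanodeInfo locations, and convert() wraps them into DatanodeInfoWithStorage before delegating to the new constructor. A minimal sketch of that wrapping, using only the types shown in this diff; wrapLocations and its locals are illustrative names, not part of the patch:

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

// Illustrative helper: wrap plain locations the way convert() does, then call
// the new DatanodeInfoWithStorage-based constructor directly.
static LocatedBlock wrapLocations(ExtendedBlock blk, DatanodeInfo[] locs,
    String[] storageIDs, StorageType[] storageTypes) {
  DatanodeInfoWithStorage[] storages = new DatanodeInfoWithStorage[locs.length];
  for (int i = 0; i < locs.length; i++) {
    storages[i] = new DatanodeInfoWithStorage(locs[i],
        storageIDs != null ? storageIDs[i] : null,
        storageTypes != null ? storageTypes[i] : null);
  }
  // startOffset unknown (-1), not corrupt; a null cachedLocs maps to EMPTY_LOCS
  return new LocatedBlock(blk, storages, storageIDs, storageTypes, -1, false, null);
}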

[14/46] hadoop git commit: HDFS-11902. [READ] Merge BlockFormatProvider and FileRegionProvider.

2017-12-15 Thread cdouglas
HDFS-11902. [READ] Merge BlockFormatProvider and FileRegionProvider.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/98f5ed5a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/98f5ed5a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/98f5ed5a

Branch: refs/heads/trunk
Commit: 98f5ed5aa377ddd3f35b763b20c499d2ccac2ed5
Parents: d6a9a89
Author: Virajith Jalaparti 
Authored: Fri Nov 3 13:45:56 2017 -0700
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:38 2017 -0800

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  17 +-
 .../blockmanagement/BlockFormatProvider.java|  91 
 .../server/blockmanagement/BlockProvider.java   |  75 
 .../blockmanagement/ProvidedStorageMap.java |  63 ++-
 .../hadoop/hdfs/server/common/BlockFormat.java  |  82 
 .../hdfs/server/common/FileRegionProvider.java  |  37 --
 .../server/common/TextFileRegionFormat.java | 442 --
 .../server/common/TextFileRegionProvider.java   |  88 
 .../common/blockaliasmap/BlockAliasMap.java |  88 
 .../impl/TextFileRegionAliasMap.java| 445 +++
 .../common/blockaliasmap/package-info.java  |  27 ++
 .../fsdataset/impl/ProvidedVolumeImpl.java  |  76 ++--
 .../src/main/resources/hdfs-default.xml |  34 +-
 .../blockmanagement/TestProvidedStorageMap.java |  41 +-
 .../hdfs/server/common/TestTextBlockFormat.java | 160 ---
 .../impl/TestTextBlockAliasMap.java | 161 +++
 .../fsdataset/impl/TestProvidedImpl.java|  75 ++--
 .../hdfs/server/namenode/FileSystemImage.java   |   4 +-
 .../hdfs/server/namenode/ImageWriter.java   |  25 +-
 .../hdfs/server/namenode/NullBlockAliasMap.java |  86 
 .../hdfs/server/namenode/NullBlockFormat.java   |  87 
 .../hadoop/hdfs/server/namenode/TreePath.java   |   8 +-
 .../TestNameNodeProvidedImplementation.java |  25 +-
 23 files changed, 994 insertions(+), 1243 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/98f5ed5a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 7449987..cb57675 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -331,22 +331,19 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final String DFS_NAMENODE_PROVIDED_ENABLED = 
"dfs.namenode.provided.enabled";
   public static final boolean DFS_NAMENODE_PROVIDED_ENABLED_DEFAULT = false;
 
-  public static final String DFS_NAMENODE_BLOCK_PROVIDER_CLASS = 
"dfs.namenode.block.provider.class";
-
-  public static final String DFS_PROVIDER_CLASS = "dfs.provider.class";
   public static final String DFS_PROVIDER_DF_CLASS = "dfs.provided.df.class";
   public static final String DFS_PROVIDER_STORAGEUUID = 
"dfs.provided.storage.id";
   public static final String DFS_PROVIDER_STORAGEUUID_DEFAULT =  "DS-PROVIDED";
-  public static final String DFS_PROVIDER_BLK_FORMAT_CLASS = 
"dfs.provided.blockformat.class";
+  public static final String DFS_PROVIDED_ALIASMAP_CLASS = 
"dfs.provided.aliasmap.class";
 
-  public static final String DFS_PROVIDED_BLOCK_MAP_DELIMITER = 
"dfs.provided.textprovider.delimiter";
-  public static final String DFS_PROVIDED_BLOCK_MAP_DELIMITER_DEFAULT = ",";
+  public static final String DFS_PROVIDED_ALIASMAP_TEXT_DELIMITER = 
"dfs.provided.aliasmap.text.delimiter";
+  public static final String DFS_PROVIDED_ALIASMAP_TEXT_DELIMITER_DEFAULT = 
",";
 
-  public static final String DFS_PROVIDED_BLOCK_MAP_READ_PATH = 
"dfs.provided.textprovider.read.path";
-  public static final String DFS_PROVIDED_BLOCK_MAP_PATH_DEFAULT = 
"file:///tmp/blocks.csv";
+  public static final String DFS_PROVIDED_ALIASMAP_TEXT_READ_PATH = 
"dfs.provided.aliasmap.text.read.path";
+  public static final String DFS_PROVIDED_ALIASMAP_TEXT_PATH_DEFAULT = 
"file:///tmp/blocks.csv";
 
-  public static final String DFS_PROVIDED_BLOCK_MAP_CODEC = 
"dfs.provided.textprovider.read.codec";
-  public static final String DFS_PROVIDED_BLOCK_MAP_WRITE_PATH  = 
"dfs.provided.textprovider.write.path";
+  public static final String DFS_PROVIDED_ALIASMAP_TEXT_CODEC = 
"dfs.provided.aliasmap.text.codec";
+  public static final String DFS_PROVIDED_ALIASMAP_TEXT_WRITE_PATH = 
"dfs.provided.aliasmap.text.write.path";
 
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public 
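
For readers tracking the rename: a hedged sketch of how the new keys fit together in a Configuration. The alias-map class and text path below are placeholders except where the diff shows a default; providedConf is an illustrative helper, not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap;

// Illustrative helper: enable PROVIDED storage and point the NameNode at the
// text-file alias map using the renamed keys from this change.
static Configuration providedConf() {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_PROVIDED_ENABLED, true);
  conf.setClass(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_CLASS,
      TextFileRegionAliasMap.class, BlockAliasMap.class);
  // defaults shown in the diff; override for a real deployment
  conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_TEXT_READ_PATH, "file:///tmp/blocks.csv");
  conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_TEXT_DELIMITER, ",");
  return conf;
}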

[46/46] hadoop git commit: Merge branch 'HDFS-9806' into trunk

2017-12-15 Thread cdouglas
Merge branch 'HDFS-9806' into trunk


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc7ec80d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc7ec80d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc7ec80d

Branch: refs/heads/trunk
Commit: fc7ec80d85a751b2b2b261a2b97ec38c7b58f1df
Parents: 44825f0 4b3a785
Author: Chris Douglas 
Authored: Fri Dec 15 18:06:24 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 18:06:24 2017 -0800

--
 .../java/org/apache/hadoop/fs/StorageType.java  |   3 +-
 .../org/apache/hadoop/fs/shell/TestCount.java   |   3 +-
 .../hadoop/hdfs/protocol/HdfsConstants.java |   3 +
 .../hadoop/hdfs/protocol/LocatedBlock.java  | 103 ++-
 .../hdfs/protocol/ProvidedStorageLocation.java  |  89 ++
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  36 +
 .../src/main/proto/hdfs.proto   |  15 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   7 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  29 +
 .../hdfs/protocolPB/AliasMapProtocolPB.java |  37 +
 .../AliasMapProtocolServerSideTranslatorPB.java | 134 +++
 ...yAliasMapProtocolClientSideTranslatorPB.java | 174 
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  28 +
 .../hdfs/server/aliasmap/InMemoryAliasMap.java  | 222 +
 .../aliasmap/InMemoryAliasMapProtocol.java  | 103 +++
 .../aliasmap/InMemoryLevelDBAliasMapServer.java | 153 +++
 .../hdfs/server/blockmanagement/BlockInfo.java  |  17 +-
 .../server/blockmanagement/BlockManager.java| 149 ++-
 .../BlockStoragePolicySuite.java|   6 +
 .../blockmanagement/DatanodeDescriptor.java |  44 +-
 .../server/blockmanagement/DatanodeManager.java |   2 +
 .../blockmanagement/DatanodeStatistics.java |   3 +
 .../server/blockmanagement/DatanodeStats.java   |   4 +-
 .../blockmanagement/DatanodeStorageInfo.java|  15 +-
 .../blockmanagement/HeartbeatManager.java   |   9 +-
 .../blockmanagement/LocatedBlockBuilder.java| 109 +++
 .../blockmanagement/ProvidedStorageMap.java | 540 +++
 .../blockmanagement/StorageTypeStats.java   |  33 +-
 .../hadoop/hdfs/server/common/BlockAlias.java   |  33 +
 .../hadoop/hdfs/server/common/FileRegion.java   |  85 ++
 .../hadoop/hdfs/server/common/Storage.java  |  71 +-
 .../hadoop/hdfs/server/common/StorageInfo.java  |   6 +
 .../common/blockaliasmap/BlockAliasMap.java | 113 +++
 .../impl/InMemoryLevelDBAliasMapClient.java | 178 
 .../impl/LevelDBFileRegionAliasMap.java | 274 ++
 .../impl/TextFileRegionAliasMap.java| 490 ++
 .../common/blockaliasmap/package-info.java  |  27 +
 .../server/datanode/BlockPoolSliceStorage.java  |  20 +-
 .../hdfs/server/datanode/DataStorage.java   |  44 +-
 .../hdfs/server/datanode/DirectoryScanner.java  |  26 +-
 .../datanode/FinalizedProvidedReplica.java  | 122 +++
 .../hdfs/server/datanode/ProvidedReplica.java   | 350 +++
 .../hdfs/server/datanode/ReplicaBuilder.java| 141 ++-
 .../hdfs/server/datanode/ReplicaInfo.java   |  20 +-
 .../hdfs/server/datanode/StorageLocation.java   |  54 +-
 .../server/datanode/fsdataset/FsDatasetSpi.java |   4 +-
 .../server/datanode/fsdataset/FsVolumeSpi.java  |  38 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  65 +-
 .../datanode/fsdataset/impl/FsDatasetUtil.java  |  25 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  19 +-
 .../fsdataset/impl/FsVolumeImplBuilder.java |   6 +
 .../fsdataset/impl/ProvidedVolumeImpl.java  | 718 ++
 .../federation/metrics/FederationMBean.java |   6 +
 .../federation/metrics/FederationMetrics.java   |   5 +
 .../federation/metrics/NamenodeBeanMetrics.java |  10 +
 .../resolver/MembershipNamenodeResolver.java|   1 +
 .../resolver/NamenodeStatusReport.java  |  12 +-
 .../router/NamenodeHeartbeatService.java|   3 +-
 .../store/records/MembershipStats.java  |   4 +
 .../records/impl/pb/MembershipStatsPBImpl.java  |  10 +
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   2 +-
 .../server/namenode/FSImageCompression.java |   2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  12 +
 .../hadoop/hdfs/server/namenode/NNStorage.java  |  10 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |  21 +
 .../hdfs/server/namenode/NameNodeMXBean.java|  10 +-
 .../namenode/metrics/FSNamesystemMBean.java |   7 +-
 .../hdfs/server/protocol/NamespaceInfo.java |   8 +
 .../src/main/proto/AliasMapProtocol.proto   |  68 ++
 .../src/main/proto/FederationProtocol.proto |   1 +
 .../src/main/resources/hdfs-default.xml | 119 +++
 .../src/main/webapps/hdfs/dfshealth.html|   1 +
 .../src/site/markdown/HdfsProvidedStorage.md| 247 +
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |  30 +-
 

[18/46] hadoop git commit: HDFS-11791. [READ] Test for increasing replication of provided files.

2017-12-15 Thread cdouglas
HDFS-11791. [READ] Test for increasing replication of provided files.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4851f06b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4851f06b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4851f06b

Branch: refs/heads/trunk
Commit: 4851f06bc2df9d2cfc69fc7c4cecf7babcaa7728
Parents: 89b9faf
Author: Virajith Jalaparti 
Authored: Wed May 31 10:29:53 2017 -0700
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:38 2017 -0800

--
 .../TestNameNodeProvidedImplementation.java | 55 
 1 file changed, 55 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4851f06b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
--
diff --git 
a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
 
b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
index 5062439..e171557 100644
--- 
a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
+++ 
b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
@@ -23,6 +23,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.io.Writer;
+import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.nio.channels.Channels;
 import java.nio.channels.ReadableByteChannel;
@@ -34,10 +35,15 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockFormatProvider;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockProvider;
 import org.apache.hadoop.hdfs.server.common.BlockFormat;
@@ -378,4 +384,53 @@ public class TestNameNodeProvidedImplementation {
 assertEquals(1, locations.length);
 assertEquals(2, locations[0].getHosts().length);
   }
+
+  private DatanodeInfo[] getAndCheckBlockLocations(DFSClient client,
+  String filename, int expectedLocations) throws IOException {
+LocatedBlocks locatedBlocks = client.getLocatedBlocks(
+filename, 0, baseFileLen);
+//given the start and length in the above call,
+//only one LocatedBlock in LocatedBlocks
+assertEquals(1, locatedBlocks.getLocatedBlocks().size());
+LocatedBlock locatedBlock = locatedBlocks.getLocatedBlocks().get(0);
+assertEquals(expectedLocations, locatedBlock.getLocations().length);
+return locatedBlock.getLocations();
+  }
+
+  /**
+   * Tests setting replication of provided files.
+   * @throws Exception
+   */
+  @Test
+  public void testSetReplicationForProvidedFiles() throws Exception {
+createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
+FixedBlockResolver.class);
+startCluster(NNDIRPATH, 2, null,
+new StorageType[][] {
+{StorageType.PROVIDED},
+{StorageType.DISK}},
+false);
+
+String filename = "/" + filePrefix + (numFiles - 1) + fileSuffix;
+Path file = new Path(filename);
+FileSystem fs = cluster.getFileSystem();
+
+//set the replication to 2, and test that the file has
+//the required replication.
+fs.setReplication(file, (short) 2);
+DFSTestUtil.waitForReplication((DistributedFileSystem) fs,
+file, (short) 2, 1);
+DFSClient client = new DFSClient(new InetSocketAddress("localhost",
+cluster.getNameNodePort()), cluster.getConfiguration(0));
+getAndCheckBlockLocations(client, filename, 2);
+
+//set the replication back to 1
+fs.setReplication(file, (short) 1);
+DFSTestUtil.waitForReplication((DistributedFileSystem) fs,
+file, (short) 1, 1);
+//the only replica left should be the PROVIDED datanode
+DatanodeInfo[] infos = getAndCheckBlockLocations(client, filename, 1);
+assertEquals(cluster.getDataNodes().get(0).getDatanodeUuid(),
+infos[0].getDatanodeUuid());
+  }
 }



[33/46] hadoop git commit: HDFS-12665. [AliasMap] Create a version of the AliasMap that runs in memory in the Namenode (leveldb). Contributed by Ewan Higgs.

2017-12-15 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/352f994b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
index 1ef2f2b..faf1f83 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.util.RwLock;
 import org.junit.Before;
 import org.junit.Test;
-
 import java.io.IOException;
 
 import static org.junit.Assert.assertNotNull;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/352f994b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java
new file mode 100644
index 000..4a9661b
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java
@@ -0,0 +1,341 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.common.blockaliasmap.impl;
+
+import com.google.common.collect.Lists;
+import com.google.common.io.Files;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation;
+import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap;
+import org.apache.hadoop.hdfs.server.aliasmap.InMemoryLevelDBAliasMapServer;
+import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
+import org.apache.hadoop.hdfs.server.common.FileRegion;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+import java.util.Optional;
+import java.util.Random;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.stream.Collectors;
+
+/**
+ * Tests the {@link InMemoryLevelDBAliasMapClient}.
+ */
+public class TestInMemoryLevelDBAliasMapClient {
+
+  private InMemoryLevelDBAliasMapServer levelDBAliasMapServer;
+  private InMemoryLevelDBAliasMapClient inMemoryLevelDBAliasMapClient;
+  private File tempDir;
+  private Configuration conf;
+
+  @Before
+  public void setUp() throws IOException {
+levelDBAliasMapServer =
+new InMemoryLevelDBAliasMapServer(InMemoryAliasMap::init);
+conf = new Configuration();
+int port = 9876;
+
+conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS,
+"localhost:" + port);
+tempDir = Files.createTempDir();
+conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_LEVELDB_DIR,
+tempDir.getAbsolutePath());
+inMemoryLevelDBAliasMapClient = new InMemoryLevelDBAliasMapClient();
+  }
+
+  @After
+  public void tearDown() throws IOException {
+levelDBAliasMapServer.close();
+inMemoryLevelDBAliasMapClient.close();
+FileUtils.deleteDirectory(tempDir);
+  }
+
+  @Test
+  public void 

[41/46] hadoop git commit: HDFS-11640. [READ] Datanodes should use a unique identifier when reading from external stores

2017-12-15 Thread cdouglas
HDFS-11640. [READ] Datanodes should use a unique identifier when reading from 
external stores


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4531588a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4531588a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4531588a

Branch: refs/heads/trunk
Commit: 4531588a94dcd2b4141b12828cb60ca3b953a58c
Parents: fb996a3
Author: Virajith Jalaparti 
Authored: Wed Dec 6 09:39:56 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:41 2017 -0800

--
 .../hadoop/hdfs/server/common/FileRegion.java   |  7 ++-
 .../impl/TextFileRegionAliasMap.java| 16 --
 .../datanode/FinalizedProvidedReplica.java  | 20 ---
 .../hdfs/server/datanode/ProvidedReplica.java   | 34 ++--
 .../hdfs/server/datanode/ReplicaBuilder.java| 12 -
 .../fsdataset/impl/ProvidedVolumeImpl.java  |  9 
 .../datanode/TestProvidedReplicaImpl.java   |  2 +-
 .../fsdataset/impl/TestProvidedImpl.java| 57 
 .../hadoop/hdfs/server/namenode/FSTreeWalk.java |  6 +--
 .../hdfs/server/namenode/ImageWriter.java   |  2 +-
 .../hadoop/hdfs/server/namenode/TreePath.java   | 40 ++
 .../hdfs/server/namenode/RandomTreeWalk.java|  6 +--
 12 files changed, 174 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4531588a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/FileRegion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/FileRegion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/FileRegion.java
index e6f0d0a..b605234 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/FileRegion.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/FileRegion.java
@@ -37,8 +37,13 @@ public class FileRegion implements BlockAlias {
 
   public FileRegion(long blockId, Path path, long offset,
   long length, long genStamp) {
+this(blockId, path, offset, length, genStamp, new byte[0]);
+  }
+
+  public FileRegion(long blockId, Path path, long offset,
+long length, long genStamp, byte[] nonce) {
 this(new Block(blockId, length, genStamp),
-new ProvidedStorageLocation(path, offset, length, new byte[0]));
+new ProvidedStorageLocation(path, offset, length, nonce));
   }
 
   public FileRegion(long blockId, Path path, long offset, long length) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4531588a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
index 878a208..150371d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
@@ -26,6 +26,7 @@ import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.io.OutputStreamWriter;
+import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.Map;
@@ -353,11 +354,16 @@ public class TextFileRegionAliasMap
 return null;
   }
   String[] f = line.split(delim);
-  if (f.length != 5) {
+  if (f.length != 5 && f.length != 6) {
 throw new IOException("Invalid line: " + line);
   }
+  byte[] nonce = new byte[0];
+  if (f.length == 6) {
+nonce = f[5].getBytes(Charset.forName("UTF-8"));
+  }
   return new FileRegion(Long.parseLong(f[0]), new Path(f[1]),
-  Long.parseLong(f[2]), Long.parseLong(f[3]), Long.parseLong(f[4]));
+  Long.parseLong(f[2]), Long.parseLong(f[3]), Long.parseLong(f[4]),
+  nonce);
 }
 
 public InputStream createStream() throws IOException {
@@ -442,7 +448,11 @@ public class TextFileRegionAliasMap
   out.append(psl.getPath().toString()).append(delim);
   out.append(Long.toString(psl.getOffset())).append(delim);
   out.append(Long.toString(psl.getLength())).append(delim);
-  
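
The writer side is truncated above, but the reader already fixes the on-disk contract: the same five delimiter-separated fields as before, plus an optional sixth field carrying the nonce. A purely illustrative entry and the FileRegion it parses into; the path, sizes and nonce value are made up:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.common.FileRegion;

// blockId, path, offset, length, genStamp[, nonce] -- the order the reader expects
String line = "10001,hdfs://archive/data/part-00000,0,134217728,1001,0f3a9c";
String[] f = line.split(",");
byte[] nonce = (f.length == 6)
    ? f[5].getBytes(StandardCharsets.UTF_8)
    : new byte[0];
FileRegion region = new FileRegion(Long.parseLong(f[0]), new Path(f[1]),
    Long.parseLong(f[2]), Long.parseLong(f[3]), Long.parseLong(f[4]), nonce);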

[02/46] hadoop git commit: HDFS-11663. [READ] Fix NullPointerException in ProvidedBlocksBuilder

2017-12-15 Thread cdouglas
HDFS-11663. [READ] Fix NullPointerException in ProvidedBlocksBuilder


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa5ec85f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa5ec85f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa5ec85f

Branch: refs/heads/trunk
Commit: aa5ec85f7fd2dc6ac568a88716109bab8df8be19
Parents: 1108cb7
Author: Virajith Jalaparti 
Authored: Thu May 4 13:06:53 2017 -0700
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:37 2017 -0800

--
 .../blockmanagement/ProvidedStorageMap.java | 40 ++-
 .../TestNameNodeProvidedImplementation.java | 70 +++-
 2 files changed, 77 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa5ec85f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
index d222344..518b7e9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
@@ -134,11 +134,13 @@ public class ProvidedStorageMap {
   class ProvidedBlocksBuilder extends LocatedBlockBuilder {
 
 private ShadowDatanodeInfoWithStorage pending;
+private boolean hasProvidedLocations;
 
 ProvidedBlocksBuilder(int maxBlocks) {
   super(maxBlocks);
   pending = new ShadowDatanodeInfoWithStorage(
   providedDescriptor, storageId);
+  hasProvidedLocations = false;
 }
 
 @Override
@@ -154,6 +156,7 @@ public class ProvidedStorageMap {
 types[i] = storages[i].getStorageType();
 if (StorageType.PROVIDED.equals(storages[i].getStorageType())) {
   locs[i] = pending;
+  hasProvidedLocations = true;
 } else {
   locs[i] = new DatanodeInfoWithStorage(
   storages[i].getDatanodeDescriptor(), sids[i], types[i]);
@@ -165,25 +168,28 @@ public class ProvidedStorageMap {
 @Override
 LocatedBlocks build(DatanodeDescriptor client) {
   // TODO: to support multiple provided storages, need to pass/maintain map
-  // set all fields of pending DatanodeInfo
-  List<String> excludedUUids = new ArrayList<String>();
-  for (LocatedBlock b: blocks) {
-DatanodeInfo[] infos = b.getLocations();
-StorageType[] types = b.getStorageTypes();
-
-for (int i = 0; i < types.length; i++) {
-  if (!StorageType.PROVIDED.equals(types[i])) {
-excludedUUids.add(infos[i].getDatanodeUuid());
+  if (hasProvidedLocations) {
+// set all fields of pending DatanodeInfo
+List<String> excludedUUids = new ArrayList<String>();
+for (LocatedBlock b : blocks) {
+  DatanodeInfo[] infos = b.getLocations();
+  StorageType[] types = b.getStorageTypes();
+
+  for (int i = 0; i < types.length; i++) {
+if (!StorageType.PROVIDED.equals(types[i])) {
+  excludedUUids.add(infos[i].getDatanodeUuid());
+}
   }
 }
-  }
 
-  DatanodeDescriptor dn = providedDescriptor.choose(client, excludedUUids);
-  if (dn == null) {
-dn = providedDescriptor.choose(client);
+DatanodeDescriptor dn =
+providedDescriptor.choose(client, excludedUUids);
+if (dn == null) {
+  dn = providedDescriptor.choose(client);
+}
+pending.replaceInternal(dn);
   }
 
-  pending.replaceInternal(dn);
   return new LocatedBlocks(
   flen, isUC, blocks, last, lastComplete, feInfo, ecPolicy);
 }
@@ -278,7 +284,8 @@ public class ProvidedStorageMap {
 
 DatanodeDescriptor choose(DatanodeDescriptor client) {
   // exact match for now
-  DatanodeDescriptor dn = dns.get(client.getDatanodeUuid());
+  DatanodeDescriptor dn = client != null ?
+  dns.get(client.getDatanodeUuid()) : null;
   if (null == dn) {
 dn = chooseRandom();
   }
@@ -288,7 +295,8 @@ public class ProvidedStorageMap {
 DatanodeDescriptor choose(DatanodeDescriptor client,
 List<String> excludedUUids) {
   // exact match for now
-  DatanodeDescriptor dn = dns.get(client.getDatanodeUuid());
+  DatanodeDescriptor dn = client != null ?
+  dns.get(client.getDatanodeUuid()) : null;
 
   if (null == dn || excludedUUids.contains(client.getDatanodeUuid())) 

[03/46] hadoop git commit: HDFS-10706. [READ] Add tool generating FSImage from external store

2017-12-15 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8da3a6e3/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSingleUGIResolver.java
--
diff --git 
a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSingleUGIResolver.java
 
b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSingleUGIResolver.java
new file mode 100644
index 000..9aef106
--- /dev/null
+++ 
b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSingleUGIResolver.java
@@ -0,0 +1,148 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import static org.junit.Assert.*;
+
+/**
+ * Validate resolver assigning all paths to a single owner/group.
+ */
+public class TestSingleUGIResolver {
+
+  @Rule public TestName name = new TestName();
+
+  private static final int TESTUID = 10101;
+  private static final int TESTGID = 10102;
+  private static final String TESTUSER = "tenaqvyybdhragqvatbf";
+  private static final String TESTGROUP = "tnyybcvatlnxf";
+
+  private SingleUGIResolver ugi = new SingleUGIResolver();
+
+  @Before
+  public void setup() {
+Configuration conf = new Configuration(false);
+conf.setInt(SingleUGIResolver.UID, TESTUID);
+conf.setInt(SingleUGIResolver.GID, TESTGID);
+conf.set(SingleUGIResolver.USER, TESTUSER);
+conf.set(SingleUGIResolver.GROUP, TESTGROUP);
+ugi.setConf(conf);
+System.out.println(name.getMethodName());
+  }
+
+  @Test
+  public void testRewrite() {
+FsPermission p1 = new FsPermission((short)0755);
+match(ugi.resolve(file("dingo", "dingo", p1)), p1);
+match(ugi.resolve(file(TESTUSER, "dingo", p1)), p1);
+match(ugi.resolve(file("dingo", TESTGROUP, p1)), p1);
+match(ugi.resolve(file(TESTUSER, TESTGROUP, p1)), p1);
+
+FsPermission p2 = new FsPermission((short)0x8000);
+match(ugi.resolve(file("dingo", "dingo", p2)), p2);
+match(ugi.resolve(file(TESTUSER, "dingo", p2)), p2);
+match(ugi.resolve(file("dingo", TESTGROUP, p2)), p2);
+match(ugi.resolve(file(TESTUSER, TESTGROUP, p2)), p2);
+
+Map<Integer, String> ids = ugi.ugiMap();
+assertEquals(2, ids.size());
+assertEquals(TESTUSER, ids.get(10101));
+assertEquals(TESTGROUP, ids.get(10102));
+  }
+
+  @Test
+  public void testDefault() {
+String user;
+try {
+  user = UserGroupInformation.getCurrentUser().getShortUserName();
+} catch (IOException e) {
+  user = "hadoop";
+}
+Configuration conf = new Configuration(false);
+ugi.setConf(conf);
+Map<Integer, String> ids = ugi.ugiMap();
+assertEquals(2, ids.size());
+assertEquals(user, ids.get(0));
+assertEquals(user, ids.get(1));
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void testInvalidUid() {
+Configuration conf = ugi.getConf();
+conf.setInt(SingleUGIResolver.UID, (1 << 24) + 1);
+ugi.setConf(conf);
+ugi.resolve(file(TESTUSER, TESTGROUP, new FsPermission((short)0777)));
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void testInvalidGid() {
+Configuration conf = ugi.getConf();
+conf.setInt(SingleUGIResolver.GID, (1 << 24) + 1);
+ugi.setConf(conf);
+ugi.resolve(file(TESTUSER, TESTGROUP, new FsPermission((short)0777)));
+  }
+
+  @Test(expected=IllegalStateException.class)
+  public void testDuplicateIds() {
+Configuration conf = new Configuration(false);
+conf.setInt(SingleUGIResolver.UID, 4344);
+conf.setInt(SingleUGIResolver.GID, 4344);
+conf.set(SingleUGIResolver.USER, TESTUSER);
+conf.set(SingleUGIResolver.GROUP, TESTGROUP);
+ugi.setConf(conf);
+ugi.ugiMap();
+  }
+
+  static void 

[11/46] hadoop git commit: HDFS-12289. [READ] HDFS-12091 breaks the tests for provided block reads

2017-12-15 Thread cdouglas
HDFS-12289. [READ] HDFS-12091 breaks the tests for provided block reads


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aca023b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aca023b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aca023b7

Branch: refs/heads/trunk
Commit: aca023b72cdb325ca66d196443218f6107efa1ca
Parents: 2407c9b
Author: Virajith Jalaparti 
Authored: Mon Aug 14 10:29:47 2017 -0700
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:38 2017 -0800

--
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  | 30 +++-
 .../TestNameNodeProvidedImplementation.java |  4 ++-
 2 files changed, 32 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aca023b7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 2d710be..c694854 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -147,6 +147,9 @@ public class MiniDFSCluster implements AutoCloseable {
   GenericTestUtils.SYSPROP_TEST_DATA_DIR;
   /** Configuration option to set the data dir: {@value} */
   public static final String HDFS_MINIDFS_BASEDIR = "hdfs.minidfs.basedir";
+  /** Configuration option to set the provided data dir: {@value} */
+  public static final String HDFS_MINIDFS_BASEDIR_PROVIDED =
+  "hdfs.minidfs.basedir.provided";
   public static final String  DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY
   = DFS_NAMENODE_SAFEMODE_EXTENSION_KEY + ".testing";
   public static final String  DFS_NAMENODE_DECOMMISSION_INTERVAL_TESTING_KEY
@@ -1397,7 +1400,12 @@ public class MiniDFSCluster implements AutoCloseable {
   if ((storageTypes != null) && (j >= storageTypes.length)) {
 break;
   }
-  File dir = getInstanceStorageDir(dnIndex, j);
+  File dir;
+  if (storageTypes != null && storageTypes[j] == StorageType.PROVIDED) {
+dir = getProvidedStorageDir(dnIndex, j);
+  } else {
+dir = getInstanceStorageDir(dnIndex, j);
+  }
   dir.mkdirs();
   if (!dir.isDirectory()) {
 throw new IOException("Mkdirs failed to create directory for DataNode 
" + dir);
@@ -2847,6 +2855,26 @@ public class MiniDFSCluster implements AutoCloseable {
   }
 
   /**
+   * Get a storage directory for PROVIDED storages.
+   * The PROVIDED directory to return can be set by using the configuration
+   * parameter {@link #HDFS_MINIDFS_BASEDIR_PROVIDED}. If this parameter is
+   * not set, this function behaves exactly the same as
+   * {@link #getInstanceStorageDir(int, int)}. Currently, the two parameters
+   * are ignored as only one PROVIDED storage is supported in HDFS-9806.
+   *
+   * @param dnIndex datanode index (starts from 0)
+   * @param dirIndex directory index
+   * @return Storage directory
+   */
+  public File getProvidedStorageDir(int dnIndex, int dirIndex) {
+String base = conf.get(HDFS_MINIDFS_BASEDIR_PROVIDED, null);
+if (base == null) {
+  return getInstanceStorageDir(dnIndex, dirIndex);
+}
+return new File(base);
+  }
+
+  /**
* Get a storage directory for a datanode.
* 
* /data/data<2*dnIndex + 1>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aca023b7/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
--
diff --git 
a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
 
b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
index 60b306f..3f937c4 100644
--- 
a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
+++ 
b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
@@ -74,7 +74,7 @@ public class TestNameNodeProvidedImplementation {
   final Random r = new Random();
   final File fBASE = new File(MiniDFSCluster.getBaseDirectory());
   final Path BASE = new Path(fBASE.toURI().toString());
-  final Path NAMEPATH = new Path(BASE, "providedDir");;
+  final Path NAMEPATH = new Path(BASE, "providedDir");
   final Path NNDIRPATH = new Path(BASE, "nnDir");
   final Path 
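
A short sketch of how a test could use the new key; startProvidedCluster and the directory value are assumptions for illustration, and leaving the key unset falls back to getInstanceStorageDir() as the javadoc above states.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Illustrative test helper: pin PROVIDED volumes to a fixed directory and start
// one datanode with a PROVIDED and a DISK storage.
static MiniDFSCluster startProvidedCluster() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR_PROVIDED, "/tmp/provided-dn-storage");
  return new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .storageTypes(new StorageType[][] {{StorageType.PROVIDED, StorageType.DISK}})
      .build();
}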

[44/46] hadoop git commit: Revert "HDFS-12903. [READ] Fix closing streams in ImageWriter"

2017-12-15 Thread cdouglas
Revert "HDFS-12903. [READ] Fix closing streams in ImageWriter"

This reverts commit c1bf2654b0e9118985b8518b0254eac4dd302a2f.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e515103a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e515103a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e515103a

Branch: refs/heads/trunk
Commit: e515103a83e12ad4908c0ca0b4b1aa4a87e2a840
Parents: 8239e3a
Author: Chris Douglas 
Authored: Fri Dec 15 17:40:50 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:42 2017 -0800

--
 .../java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e515103a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
--
diff --git 
a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
 
b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
index 1be5190..14a5f8f 100644
--- 
a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
+++ 
b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
@@ -183,9 +183,9 @@ public class ImageWriter implements Closeable {
   dirsTmp.deleteOnExit();
   dirsTmpStream = new FileOutputStream(dirsTmp);
   dirs = beginSection(dirsTmpStream);
-} catch (Throwable e) {
+} catch (IOException e) {
   IOUtils.cleanupWithLogger(null, raw, dirsTmpStream);
-  throw new IOException(e);
+  throw e;
 }
 
 try {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[23/46] hadoop git commit: HDFS-12671. [READ] Test NameNode restarts when PROVIDED is configured

2017-12-15 Thread cdouglas
HDFS-12671. [READ] Test NameNode restarts when PROVIDED is configured


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c293cc8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c293cc8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c293cc8e

Branch: refs/heads/trunk
Commit: c293cc8e9b032d2c573340725ef8ecc15d49430d
Parents: 71d0a82
Author: Virajith Jalaparti 
Authored: Tue Nov 7 12:54:27 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:39 2017 -0800

--
 .../TestNameNodeProvidedImplementation.java | 52 +++-
 1 file changed, 39 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c293cc8e/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
--
diff --git 
a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
 
b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
index aae04be..f0303b5 100644
--- 
a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
+++ 
b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
@@ -507,16 +507,10 @@ public class TestNameNodeProvidedImplementation {
 DataNode providedDatanode = cluster.getDataNodes().get(0);
 
 DFSClient client = new DFSClient(new InetSocketAddress("localhost",
-cluster.getNameNodePort()), cluster.getConfiguration(0));
+cluster.getNameNodePort()), cluster.getConfiguration(0));
 
 for (int i= 0; i < numFiles; i++) {
-  String filename = "/" + filePrefix + i + fileSuffix;
-
-  DatanodeInfo[] dnInfos = getAndCheckBlockLocations(client, filename, 1);
-  // location should be the provided DN.
-  assertTrue(dnInfos[0].getDatanodeUuid()
-  .equals(providedDatanode.getDatanodeUuid()));
-
+  verifyFileLocation(i);
   // NameNode thinks the datanode is down
   BlockManagerTestUtil.noticeDeadDatanode(
   cluster.getNameNode(),
@@ -524,12 +518,44 @@ public class TestNameNodeProvidedImplementation {
   cluster.waitActive();
   cluster.triggerHeartbeats();
   Thread.sleep(1000);
+  verifyFileLocation(i);
+}
+  }
 
-  // should find the block on the 2nd provided datanode.
-  dnInfos = getAndCheckBlockLocations(client, filename, 1);
-  assertTrue(
-  dnInfos[0].getDatanodeUuid()
-  .equals(providedDatanode.getDatanodeUuid()));
+  @Test(timeout=3)
+  public void testNamenodeRestart() throws Exception {
+createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
+FixedBlockResolver.class);
+// 2 Datanodes, 1 PROVIDED and other DISK
+startCluster(NNDIRPATH, 2, null,
+new StorageType[][] {
+{StorageType.PROVIDED},
+{StorageType.DISK}},
+false);
+
+verifyFileLocation(numFiles - 1);
+cluster.restartNameNodes();
+cluster.waitActive();
+verifyFileLocation(numFiles - 1);
+  }
+
+  /**
+   * verify that the specified file has a valid provided location.
+   * @param fileIndex the index of the file to verify.
+   * @throws Exception
+   */
+  private void verifyFileLocation(int fileIndex)
+  throws Exception {
+DataNode providedDatanode = cluster.getDataNodes().get(0);
+DFSClient client = new DFSClient(
+new InetSocketAddress("localhost", cluster.getNameNodePort()),
+cluster.getConfiguration(0));
+if (fileIndex <= numFiles && fileIndex >= 0) {
+  String filename = "/" + filePrefix + fileIndex + fileSuffix;
+  DatanodeInfo[] dnInfos = getAndCheckBlockLocations(client, filename, 1);
+  // location should be the provided DN
+  assertEquals(providedDatanode.getDatanodeUuid(),
+  dnInfos[0].getDatanodeUuid());
 }
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[21/46] hadoop git commit: HDFS-12607. [READ] Even one dead datanode with PROVIDED storage results in ProvidedStorageInfo being marked as FAILED

2017-12-15 Thread cdouglas
HDFS-12607. [READ] Even one dead datanode with PROVIDED storage results in 
ProvidedStorageInfo being marked as FAILED


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71d0a825
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71d0a825
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71d0a825

Branch: refs/heads/trunk
Commit: 71d0a825711387fe06396323a9ca6a5af0ade415
Parents: 98f5ed5
Author: Virajith Jalaparti 
Authored: Mon Nov 6 11:05:59 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:39 2017 -0800

--
 .../blockmanagement/DatanodeDescriptor.java |  6 ++-
 .../blockmanagement/ProvidedStorageMap.java | 40 +---
 .../TestNameNodeProvidedImplementation.java | 40 
 3 files changed, 71 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71d0a825/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index e3d6582..c17ab4c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -455,8 +455,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
   totalDfsUsed += report.getDfsUsed();
   totalNonDfsUsed += report.getNonDfsUsed();
 
-  if (StorageType.PROVIDED.equals(
-  report.getStorage().getStorageType())) {
+  // for PROVIDED storages, do not call updateStorage() unless
+  // DatanodeStorageInfo already exists!
+  if (StorageType.PROVIDED.equals(report.getStorage().getStorageType())
+  && storageMap.get(report.getStorage().getStorageID()) == null) {
 continue;
   }
   DatanodeStorageInfo storage = updateStorage(report.getStorage());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71d0a825/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
index a848d50..3d19775 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
@@ -66,7 +66,6 @@ public class ProvidedStorageMap {
   // limit to a single provider for now
   private RwLock lock;
   private BlockManager bm;
-  private boolean hasDNs = false;
   private BlockAliasMap aliasMap;
 
   private final String storageId;
@@ -123,6 +122,11 @@ public class ProvidedStorageMap {
   BlockReportContext context) throws IOException {
 if (providedEnabled && storageId.equals(s.getStorageID())) {
   if (StorageType.PROVIDED.equals(s.getStorageType())) {
+if (providedStorageInfo.getState() == State.FAILED
+&& s.getState() == State.NORMAL) {
+  providedStorageInfo.setState(State.NORMAL);
+  LOG.info("Provided storage transitioning to state " + State.NORMAL);
+}
 processProvidedStorageReport(context);
 dn.injectStorage(providedStorageInfo);
 return providedDescriptor.getProvidedStorage(dn, s);
@@ -135,21 +139,14 @@ public class ProvidedStorageMap {
   private void processProvidedStorageReport(BlockReportContext context)
   throws IOException {
 assert lock.hasWriteLock() : "Not holding write lock";
-if (hasDNs) {
-  return;
-}
-if (providedStorageInfo.getBlockReportCount() == 0) {
+if (providedStorageInfo.getBlockReportCount() == 0
+|| providedDescriptor.activeProvidedDatanodes() == 0) {
   LOG.info("Calling process first blk report from storage: "
   + providedStorageInfo);
   // first pass; periodic refresh should call bm.processReport
   bm.processFirstBlockReport(providedStorageInfo,
   new ProvidedBlockList(aliasMap.getReader(null).iterator()));
-} else {
-  bm.processReport(providedStorageInfo,
-  new 

[38/46] hadoop git commit: HDFS-12712. [9806] Code style cleanup

2017-12-15 Thread cdouglas
HDFS-12712. [9806] Code style cleanup


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8239e3af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8239e3af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8239e3af

Branch: refs/heads/trunk
Commit: 8239e3afb31d3c4485817d4b8b8b195b554acbe7
Parents: 80c3fec
Author: Virajith Jalaparti 
Authored: Fri Dec 15 10:15:15 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:41 2017 -0800

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |   1 -
 .../hadoop/hdfs/protocol/LocatedBlock.java  |  59 +-
 .../hdfs/server/blockmanagement/BlockInfo.java  |   2 +-
 .../server/blockmanagement/BlockManager.java|   5 +-
 .../server/blockmanagement/DatanodeManager.java |   2 +-
 .../blockmanagement/ProvidedStorageMap.java |   4 +-
 .../hadoop/hdfs/server/common/Storage.java  |   6 +-
 .../impl/TextFileRegionAliasMap.java|   2 +-
 .../server/datanode/BlockPoolSliceStorage.java  |   3 +-
 .../hdfs/server/datanode/DataStorage.java   |   4 +-
 .../hdfs/server/datanode/ProvidedReplica.java   |   1 -
 .../hdfs/server/datanode/StorageLocation.java   |  12 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   6 +-
 .../fsdataset/impl/ProvidedVolumeImpl.java  |  21 +-
 .../hadoop/hdfs/TestBlockStoragePolicy.java |   8 +-
 .../blockmanagement/TestDatanodeManager.java|   5 +-
 .../blockmanagement/TestProvidedStorageMap.java |  12 +-
 .../datanode/TestProvidedReplicaImpl.java   |  13 +-
 .../fsdataset/impl/TestProvidedImpl.java|  64 +-
 hadoop-tools/hadoop-fs2img/pom.xml  |   4 +-
 .../hdfs/server/namenode/FileSystemImage.java   |   3 +-
 .../hdfs/server/namenode/ImageWriter.java   |   7 +-
 .../hdfs/server/namenode/SingleUGIResolver.java |   4 +-
 .../hadoop/hdfs/server/namenode/TreePath.java   |   3 +-
 .../namenode/ITestProvidedImplementation.java   | 927 ++
 .../hdfs/server/namenode/RandomTreeWalk.java|   4 +-
 .../TestNameNodeProvidedImplementation.java | 934 ---
 27 files changed, 1040 insertions(+), 1076 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8239e3af/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index e9e6103..fd7f9e0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -47,7 +47,6 @@ public final class HdfsConstants {
   public static final String WARM_STORAGE_POLICY_NAME = "WARM";
   public static final byte COLD_STORAGE_POLICY_ID = 2;
   public static final String COLD_STORAGE_POLICY_NAME = "COLD";
-  // branch HDFS-9806 XXX temporary until HDFS-7076
   public static final byte PROVIDED_STORAGE_POLICY_ID = 1;
   public static final String PROVIDED_STORAGE_POLICY_NAME = "PROVIDED";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8239e3af/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index 5ad0bca..29f1b6d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import java.io.Serializable;
 import java.util.Arrays;
 import java.util.Comparator;
 import java.util.List;
@@ -40,6 +41,32 @@ import com.google.common.collect.Lists;
 @InterfaceStability.Evolving
 public class LocatedBlock {
 
+  /**
+   * Comparator that ensures that a PROVIDED storage type is greater than any
+   * other storage type. Any other storage types are considered equal.
+   */
+  private static class ProvidedLastComparator
+  implements Comparator<DatanodeInfoWithStorage>, Serializable {
+
+private static final long serialVersionUID = 6441720011443190984L;
+
+@Override
+public int compare(DatanodeInfoWithStorage dns1,
+DatanodeInfoWithStorage dns2) {
+  if 
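
The compare() body is clipped by the digest; the ordering the javadoc describes (PROVIDED greater than everything else, all other types equal) and how it would be applied can be sketched as below. moveProvidedLast and the lambda are assumptions consistent with that javadoc, not the patch text.

import java.util.Arrays;
import java.util.Comparator;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;

// Illustrative helper: sort a block's locations so PROVIDED replicas come last.
static void moveProvidedLast(DatanodeInfoWithStorage[] locs) {
  Comparator<DatanodeInfoWithStorage> providedLast = (d1, d2) ->
      Boolean.compare(StorageType.PROVIDED.equals(d1.getStorageType()),
                      StorageType.PROVIDED.equals(d2.getStorageType()));
  Arrays.sort(locs, providedLast);  // false (non-PROVIDED) sorts before true (PROVIDED)
}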

[16/46] hadoop git commit: HDFS-11673. [READ] Handle failures of Datanode with PROVIDED storage

2017-12-15 Thread cdouglas
HDFS-11673. [READ] Handle failures of Datanode with PROVIDED storage


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/546b95f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/546b95f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/546b95f4

Branch: refs/heads/trunk
Commit: 546b95f4843f3cbbbdf72d90d202cad551696082
Parents: 55ade54
Author: Virajith Jalaparti 
Authored: Thu Jun 1 16:01:31 2017 -0700
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:38 2017 -0800

--
 .../hdfs/server/blockmanagement/BlockInfo.java  | 12 +++-
 .../server/blockmanagement/BlockManager.java|  5 +-
 .../server/blockmanagement/BlockProvider.java   | 18 +++--
 .../blockmanagement/ProvidedStorageMap.java | 54 +--
 .../blockmanagement/TestProvidedStorageMap.java | 10 ++-
 .../TestNameNodeProvidedImplementation.java | 72 +++-
 6 files changed, 150 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/546b95f4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index e9d235c..eb09b7b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -24,6 +24,7 @@ import java.util.NoSuchElementException;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@@ -188,8 +189,15 @@ public abstract class BlockInfo extends Block
 int len = getCapacity();
 for(int idx = 0; idx < len; idx++) {
   DatanodeStorageInfo cur = getStorageInfo(idx);
-  if(cur != null && cur.getDatanodeDescriptor() == dn) {
-return cur;
+  if(cur != null) {
+if (cur.getStorageType() == StorageType.PROVIDED) {
+  //if block resides on provided storage, only match the storage ids
+  if (dn.getStorageInfo(cur.getStorageID()) != null) {
+return cur;
+  }
+} else if (cur.getDatanodeDescriptor() == dn) {
+  return cur;
+}
   }
 }
 return null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/546b95f4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 0e3eab3..07502c1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1514,6 +1514,7 @@ public class BlockManager implements BlockStatsMXBean {

   /** Remove the blocks associated to the given datanode. */
   void removeBlocksAssociatedTo(final DatanodeDescriptor node) {
+providedStorageMap.removeDatanode(node);
 for (DatanodeStorageInfo storage : node.getStorageInfos()) {
   final Iterator<BlockInfo> it = storage.getBlockIterator();
   //add the BlockInfos to a new collection as the
@@ -2462,7 +2463,7 @@ public class BlockManager implements BlockStatsMXBean {
   // !#! Register DN with provided storage, not with storage owned by DN
   // !#! DN should still have a ref to the DNStorageInfo
   DatanodeStorageInfo storageInfo =
-  providedStorageMap.getStorage(node, storage);
+  providedStorageMap.getStorage(node, storage, context);
 
   if (storageInfo == null) {
 // We handle this for backwards compatibility.
@@ -2589,7 +2590,7 @@ public class BlockManager implements BlockStatsMXBean {
 }
   }
   
-  private Collection<Block> processReport(
+  Collection<Block> processReport(
   final DatanodeStorageInfo storageInfo,
   final BlockListAsLongs report,
   BlockReportContext context) throws IOException {
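
As the "!#! Register DN with provided storage" comments above indicate, a storage report of type PROVIDED is resolved through the ProvidedStorageMap to a record shared by all reporting datanodes, and removeBlocksAssociatedTo now first detaches the failed datanode from that shared record. The sketch below illustrates that routing under simplified, assumed types; it is not the actual ProvidedStorageMap API.

    import java.util.*;

    // Hypothetical sketch of routing a reported storage either to the shared
    // PROVIDED storage record or to the datanode's own storage record.
    class ProvidedStorageMapSketch {
      enum StorageType { DISK, PROVIDED }

      static class DatanodeStorage {          // what the datanode reports
        final String id; final StorageType type;
        DatanodeStorage(String id, StorageType type) { this.id = id; this.type = type; }
      }

      static class StorageRecord {            // what the namenode tracks
        final String id;
        final Set<String> reportingDatanodes = new HashSet<>();
        StorageRecord(String id) { this.id = id; }
      }

      private final String providedStorageId;
      private final StorageRecord providedRecord;
      private final Map<String, StorageRecord> perDatanode = new HashMap<>();

      ProvidedStorageMapSketch(String providedStorageId) {
        this.providedStorageId = providedStorageId;
        this.providedRecord = new StorageRecord(providedStorageId);
      }

      // Analogue of providedStorageMap.getStorage(node, storage, context):
      // PROVIDED reports all map to one shared record; other reports map to
      // a record keyed by (datanode, storage id).
      StorageRecord getStorage(String datanodeId, DatanodeStorage reported) {
        if (reported.type == StorageType.PROVIDED
            && reported.id.equals(providedStorageId)) {
          providedRecord.reportingDatanodes.add(datanodeId);
          return providedRecord;
        }
        return perDatanode.computeIfAbsent(datanodeId + "/" + reported.id,
            StorageRecord::new);
      }

      // Analogue of the removeDatanode(node) call added to
      // removeBlocksAssociatedTo(): a failed datanode is only detached from
      // the shared PROVIDED record, which itself stays alive.
      void removeDatanode(String datanodeId) {
        providedRecord.reportingDatanodes.remove(datanodeId);
      }

      public static void main(String[] args) {
        ProvidedStorageMapSketch map = new ProvidedStorageMapSketch("DS-PROVIDED-0");
        map.getStorage("dn1", new DatanodeStorage("DS-PROVIDED-0", StorageType.PROVIDED));
        map.getStorage("dn2", new DatanodeStorage("DS-PROVIDED-0", StorageType.PROVIDED));
        map.removeDatanode("dn1");
        System.out.println(map.providedRecord.reportingDatanodes); // [dn2]
      }
    }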


[24/46] hadoop git commit: HDFS-12779. [READ] Allow cluster id to be specified to the Image generation tool

2017-12-15 Thread cdouglas
HDFS-12779. [READ] Allow cluster id to be specified to the Image generation tool


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6cd80b25
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6cd80b25
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6cd80b25

Branch: refs/heads/trunk
Commit: 6cd80b2521e6283036d8c7058d8e452a93ff8e4b
Parents: 90d1b47
Author: Virajith Jalaparti 
Authored: Thu Nov 9 14:09:14 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:39 2017 -0800

--
 .../hdfs/server/protocol/NamespaceInfo.java |  4 
 .../hdfs/server/namenode/FileSystemImage.java   |  4 
 .../hdfs/server/namenode/ImageWriter.java   | 11 -
 .../TestNameNodeProvidedImplementation.java | 24 +++-
 4 files changed, 41 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cd80b25/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
index 66ce9ee..433d9b7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
@@ -160,6 +160,10 @@ public class NamespaceInfo extends StorageInfo {
 return state;
   }
 
+  public void setClusterID(String clusterID) {
+this.clusterID = clusterID;
+  }
+
   @Override
   public String toString(){
 return super.toString() + ";bpid=" + blockPoolID;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cd80b25/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileSystemImage.java
--
diff --git 
a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileSystemImage.java
 
b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileSystemImage.java
index 2e57c9f..b66c830 100644
--- 
a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileSystemImage.java
+++ 
b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileSystemImage.java
@@ -68,6 +68,7 @@ public class FileSystemImage implements Tool {
 options.addOption("b", "blockclass", true, "Block output class");
 options.addOption("i", "blockidclass", true, "Block resolver class");
 options.addOption("c", "cachedirs", true, "Max active dirents");
+options.addOption("cid", "clusterID", true, "Cluster ID");
 options.addOption("h", "help", false, "Print usage");
 return options;
   }
@@ -112,6 +113,9 @@ public class FileSystemImage implements Tool {
   case "c":
 opts.cache(Integer.parseInt(o.getValue()));
 break;
+  case "cid":
+opts.clusterID(o.getValue());
+break;
   default:
 throw new UnsupportedOperationException("Internal error");
   }
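
The new "cid" option flows from the command line into the image options, and the ImageWriter hunk further down applies it to the NamespaceInfo before the storage is formatted. A rough sketch of that option wiring follows, assuming Apache Commons CLI (which the tool already uses for its options) and a made-up Opts holder in place of the real FileSystemImage.Options.

    import org.apache.commons.cli.*;

    // Hedged sketch of parsing a cluster-id flag and carrying it into the
    // image-generation options; "Opts" is an illustrative holder, not the
    // real FileSystemImage.Options class.
    class ClusterIdOptionSketch {
      static class Opts {
        String clusterID = "";
        Opts clusterID(String cid) { this.clusterID = cid; return this; }
      }

      public static void main(String[] args) throws ParseException {
        Options options = new Options();
        options.addOption("cid", "clusterID", true, "Cluster ID");
        options.addOption("h", "help", false, "Print usage");

        CommandLine cli = new DefaultParser().parse(options,
            args.length > 0 ? args : new String[] {"-cid", "CID-example-1234"});

        Opts opts = new Opts();
        if (cli.hasOption("cid")) {
          opts.clusterID(cli.getOptionValue("cid"));
        }
        // Before formatting storage, a non-empty cluster id would override the
        // generated one (mirrors info.setClusterID(opts.clusterID) below).
        System.out.println("clusterID = " + opts.clusterID);
      }
    }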

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cd80b25/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
--
diff --git 
a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
 
b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
index 390bb39..9bd8852 100644
--- 
a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
+++ 
b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
@@ -126,13 +126,16 @@ public class ImageWriter implements Closeable {
   throw new IllegalStateException("Incompatible layout " +
   info.getLayoutVersion() + " (expected " + LAYOUT_VERSION);
 }
+// set the cluster id, if given
+if (opts.clusterID.length() > 0) {
+  info.setClusterID(opts.clusterID);
+}
 stor.format(info);
 blockPoolID = info.getBlockPoolID();
   }
   outdir = new Path(tmp, "current");
   out = outfs.create(new Path(outdir, "fsimage_000"));
 } else {
-  // XXX necessary? writing a NNStorage now...
   outdir = null;
   outfs = null;
   out = opts.outStream;
@@ -517,6 +520,7 @@ public class ImageWriter implements Closeable {
 private UGIResolver ugis;
 

[35/46] hadoop git commit: HDFS-12893. [READ] Support replication of Provided blocks with non-default topologies.

2017-12-15 Thread cdouglas
HDFS-12893. [READ] Support replication of Provided blocks with non-default 
topologies.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c89b29bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c89b29bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c89b29bd

Branch: refs/heads/trunk
Commit: c89b29bd421152f0e7e16936f18d9e852895c37a
Parents: 0f6aa95
Author: Virajith Jalaparti 
Authored: Fri Dec 8 14:52:48 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:41 2017 -0800

--
 .../server/blockmanagement/BlockManager.java| 30 +++-
 .../blockmanagement/DatanodeStorageInfo.java| 11 +++--
 .../blockmanagement/ProvidedStorageMap.java | 18 ++-
 .../TestNameNodeProvidedImplementation.java | 49 ++--
 4 files changed, 97 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c89b29bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 916cbaa..c1cd4db 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2151,6 +2151,22 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
+   * Get the associated {@link DatanodeDescriptor} for the storage.
+   * If the storage is of type PROVIDED, one of the nodes that reported
+   * PROVIDED storage is returned. If not, this is equivalent to
+   * {@code storage.getDatanodeDescriptor()}.
+   * @param storage
+   * @return the associated {@link DatanodeDescriptor}.
+   */
+  private DatanodeDescriptor getDatanodeDescriptorFromStorage(
+  DatanodeStorageInfo storage) {
+if (storage.getStorageType() == StorageType.PROVIDED) {
+  return providedStorageMap.chooseProvidedDatanode();
+}
+return storage.getDatanodeDescriptor();
+  }
+
+  /**
* Parse the data-nodes the block belongs to and choose a certain number
* from them to be the recovery sources.
*
@@ -2198,10 +2214,14 @@ public class BlockManager implements BlockStatsMXBean {
 BitSet bitSet = isStriped ?
 new BitSet(((BlockInfoStriped) block).getTotalBlockNum()) : null;
 for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
-  final DatanodeDescriptor node = storage.getDatanodeDescriptor();
+  final DatanodeDescriptor node = getDatanodeDescriptorFromStorage(storage);
   final StoredReplicaState state = checkReplicaOnStorage(numReplicas, block,
   storage, corruptReplicas.getNodes(block), false);
   if (state == StoredReplicaState.LIVE) {
+if (storage.getStorageType() == StorageType.PROVIDED) {
+  storage = new DatanodeStorageInfo(node, storage.getStorageID(),
+  storage.getStorageType(), storage.getState());
+}
 nodesContainingLiveReplicas.add(storage);
   }
   containingNodes.add(node);
@@ -4338,7 +4358,13 @@ public class BlockManager implements BlockStatsMXBean {
 Collection<DatanodeDescriptor> corruptNodes = corruptReplicas
 .getNodes(storedBlock);
 for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
-  final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
+  if (storage.getStorageType() == StorageType.PROVIDED
+  && storage.getState() == State.NORMAL) {
+// assume the policy is satisfied for blocks on PROVIDED storage
+// as long as the storage is in normal state.
+return true;
+  }
+  final DatanodeDescriptor cur = getDatanodeDescriptorFromStorage(storage);
   // Nodes under maintenance should be counted as valid replicas from
   // rack policy point of view.
   if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()
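
The hunks above resolve a PROVIDED storage to a concrete datanode: getDatanodeDescriptorFromStorage picks one of the nodes currently reporting the PROVIDED storage, and a live PROVIDED replica is then re-wrapped in a storage bound to that chosen node so that topology-aware replication can work with it. A compact sketch of that selection follows, with simplified hypothetical types rather than the real DatanodeDescriptor/DatanodeStorageInfo classes.

    import java.util.*;

    // Simplified sketch: when a replica lives on PROVIDED storage, pick one of
    // the datanodes currently reporting that storage instead of a fixed owner.
    // The types and the random choice are illustrative, not Hadoop internals.
    class ProvidedReplicaSourceSketch {
      enum StorageType { DISK, PROVIDED }

      static class Storage {
        final String id; final StorageType type; final String owner; // null for PROVIDED
        Storage(String id, StorageType type, String owner) {
          this.id = id; this.type = type; this.owner = owner;
        }
      }

      private final List<String> providedReporters = new ArrayList<>();
      private final Random rand = new Random();

      void addProvidedReporter(String datanodeId) { providedReporters.add(datanodeId); }

      // Analogue of chooseProvidedDatanode(): any node reporting the PROVIDED
      // storage can serve as the replica's source.
      String chooseProvidedDatanode() {
        return providedReporters.get(rand.nextInt(providedReporters.size()));
      }

      // Analogue of getDatanodeDescriptorFromStorage(): PROVIDED storage is not
      // tied to a single node, so resolve it to one of the reporting datanodes.
      String datanodeFor(Storage storage) {
        return storage.type == StorageType.PROVIDED
            ? chooseProvidedDatanode()
            : storage.owner;
      }

      public static void main(String[] args) {
        ProvidedReplicaSourceSketch m = new ProvidedReplicaSourceSketch();
        m.addProvidedReporter("dn1");
        m.addProvidedReporter("dn2");
        Storage provided = new Storage("DS-PROVIDED-0", StorageType.PROVIDED, null);
        Storage disk = new Storage("DS-LOCAL-1", StorageType.DISK, "dn3");
        System.out.println(m.datanodeFor(provided)); // dn1 or dn2
        System.out.println(m.datanodeFor(disk));     // dn3
      }
    }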

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c89b29bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index 76bf915..3a56ef1 100644
--- 

[25/46] hadoop git commit: HDFS-12776. [READ] Increasing replication for PROVIDED files should create local replicas

2017-12-15 Thread cdouglas
HDFS-12776. [READ] Increasing replication for PROVIDED files should create 
local replicas


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90d1b47a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90d1b47a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90d1b47a

Branch: refs/heads/trunk
Commit: 90d1b47a2a400e07e2b6b812c4bbd9c4f2877786
Parents: 87dc026
Author: Virajith Jalaparti 
Authored: Thu Nov 9 13:03:41 2017 -0800
Committer: Chris Douglas 
Committed: Fri Dec 15 17:51:39 2017 -0800

--
 .../hdfs/server/blockmanagement/BlockInfo.java  |  7 ++--
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 25 +++---
 .../TestNameNodeProvidedImplementation.java | 36 +++-
 3 files changed, 45 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90d1b47a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index eb09b7b..8f59df6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -187,20 +187,23 @@ public abstract class BlockInfo extends Block
*/
   DatanodeStorageInfo findStorageInfo(DatanodeDescriptor dn) {
 int len = getCapacity();
+DatanodeStorageInfo providedStorageInfo = null;
 for(int idx = 0; idx < len; idx++) {
   DatanodeStorageInfo cur = getStorageInfo(idx);
   if(cur != null) {
 if (cur.getStorageType() == StorageType.PROVIDED) {
   //if block resides on provided storage, only match the storage ids
   if (dn.getStorageInfo(cur.getStorageID()) != null) {
-return cur;
+// do not return here as we have to check the other
+// DatanodeStorageInfos for this block which could be local
+providedStorageInfo = cur;
   }
 } else if (cur.getDatanodeDescriptor() == dn) {
   return cur;
 }
   }
 }
-return null;
+return providedStorageInfo;
   }
 
   /**
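
With this change findStorageInfo no longer returns as soon as the PROVIDED storage matches; it keeps scanning so that a replica local to the datanode wins, and falls back to the PROVIDED storage only when no local copy exists. A small sketch of that preference follows, again using simplified stand-in types rather than the real BlockInfo machinery.

    import java.util.*;

    // Sketch of the "prefer a local replica over the PROVIDED one" loop; the
    // types are illustrative stand-ins, not the Hadoop classes.
    class PreferLocalReplicaSketch {
      enum StorageType { DISK, PROVIDED }

      static class Storage {
        final String id; final StorageType type; final String owner;
        Storage(String id, StorageType type, String owner) {
          this.id = id; this.type = type; this.owner = owner;
        }
        public String toString() { return id; }
      }

      static Storage findStorageInfo(List<Storage> storages, String datanodeId,
                                     Set<String> storagesReportedByDatanode) {
        Storage providedMatch = null;
        for (Storage cur : storages) {
          if (cur == null) continue;
          if (cur.type == StorageType.PROVIDED) {
            if (storagesReportedByDatanode.contains(cur.id)) {
              // remember the PROVIDED match, but keep looking for a local replica
              providedMatch = cur;
            }
          } else if (datanodeId.equals(cur.owner)) {
            return cur;                     // a local replica always wins
          }
        }
        return providedMatch;               // fall back to PROVIDED, or null
      }

      public static void main(String[] args) {
        Storage provided = new Storage("DS-PROVIDED-0", StorageType.PROVIDED, null);
        Storage local = new Storage("DS-LOCAL-1", StorageType.DISK, "dn1");
        Set<String> reported = Set.of("DS-PROVIDED-0", "DS-LOCAL-1");
        // With both copies present on dn1, the local one is returned.
        System.out.println(findStorageInfo(List.of(provided, local), "dn1", reported));
      }
    }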

http://git-wip-us.apache.org/repos/asf/hadoop/blob/90d1b47a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index db8d60c..fd06a56 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1512,6 +1512,13 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 }
   }
 
+  private boolean isReplicaProvided(ReplicaInfo replicaInfo) {
+if (replicaInfo == null) {
+  return false;
+}
+return replicaInfo.getVolume().getStorageType() == StorageType.PROVIDED;
+  }
+
   @Override // FsDatasetSpi
   public ReplicaHandler createTemporary(StorageType storageType,
   String storageId, ExtendedBlock b, boolean isTransfer)
@@ -1530,12 +1537,14 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   isInPipeline = currentReplicaInfo.getState() == 
ReplicaState.TEMPORARY
   || currentReplicaInfo.getState() == ReplicaState.RBW;
   /*
-   * If the current block is old, reject.
+   * If the current block is not PROVIDED and old, reject.
* else If transfer request, then accept it.
* else if state is not RBW/Temporary, then reject
+   * If current block is PROVIDED, ignore the replica.
*/
-  if ((currentReplicaInfo.getGenerationStamp() >= 
b.getGenerationStamp())
-  || (!isTransfer && !isInPipeline)) {
+  if (((currentReplicaInfo.getGenerationStamp() >= b
+  .getGenerationStamp()) || (!isTransfer && !isInPipeline))
+  && !isReplicaProvided(currentReplicaInfo)) {
 throw new ReplicaAlreadyExistsException("Block " + b
 + " already exists in state " + currentReplicaInfo.getState()
 + " and thus cannot be 
