hadoop git commit: HDFS-13598. Reduce unnecessary byte-to-string transform operation in INodesInPath#toString. Contributed by Gabor Bota.

2018-05-23 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk d99647995 -> 7a87add4e


HDFS-13598. Reduce unnecessary byte-to-string transform operation in 
INodesInPath#toString. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a87add4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a87add4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a87add4

Branch: refs/heads/trunk
Commit: 7a87add4ea4c317aa9377d1fc8e43fb5e7418a46
Parents: d996479
Author: Yiqun Lin 
Authored: Thu May 24 10:57:35 2018 +0800
Committer: Yiqun Lin 
Committed: Thu May 24 10:57:35 2018 +0800

--
 .../java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a87add4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 8235bf0..50ead61 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -484,7 +484,7 @@ public class INodesInPath {
 }
 
 final StringBuilder b = new StringBuilder(getClass().getSimpleName())
-.append(": path = ").append(DFSUtil.byteArray2PathString(path))
+.append(": path = ").append(getPath())
 .append("\n  inodes = ");
 if (inodes == null) {
   b.append("null");
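
For illustration, a minimal standalone sketch of the pattern this patch applies: toString() should reuse an existing String accessor instead of invoking a separate byte-to-string helper again. PathHolder below is a hypothetical class, not the actual INodesInPath implementation.

import java.nio.charset.StandardCharsets;

class PathHolder {
  // Path stored as byte-array components, as INodesInPath does.
  private final byte[][] components;

  PathHolder(byte[][] components) {
    this.components = components;
  }

  // Converts the byte components to a String; relatively costly per call.
  String getPath() {
    StringBuilder sb = new StringBuilder();
    for (byte[] c : components) {
      sb.append('/').append(new String(c, StandardCharsets.UTF_8));
    }
    return sb.toString();
  }

  @Override
  public String toString() {
    // Before the patch, a second byte-to-string helper was invoked here;
    // after the patch, toString() simply delegates to the existing accessor.
    return getClass().getSimpleName() + ": path = " + getPath();
  }
}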





hadoop git commit: YARN-7530. Refactored YARN service API project location. Contributed by Chandni Singh

2018-05-23 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 fa8af4aab -> 05d905f58


YARN-7530.  Refactored YARN service API project location.
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05d905f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05d905f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05d905f5

Branch: refs/heads/branch-3.1
Commit: 05d905f586bdd2072519bc7e02c70d1739d08bec
Parents: fa8af4a
Author: Eric Yang 
Authored: Wed May 23 22:43:54 2018 -0400
Committer: Eric Yang 
Committed: Wed May 23 22:43:54 2018 -0400

--
 .../hadoop-yarn-services/hadoop-yarn-services-api/pom.xml  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05d905f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml
index d45da09..a4c4cdf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml
@@ -18,7 +18,7 @@
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-yarn-applications</artifactId>
+    <artifactId>hadoop-yarn-services</artifactId>
     <version>3.1.1-SNAPSHOT</version>
   </parent>
   <artifactId>hadoop-yarn-services-api</artifactId>





[1/2] hadoop git commit: YARN-4599. Set OOM control for memory cgroups. (Miklos Szegedi via Haibo Chen)

2018-05-23 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk f09dc7300 -> d99647995


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java
new file mode 100644
index 000..118d172
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java
@@ -0,0 +1,319 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.junit.Test;
+
+import java.io.File;
+import java.nio.charset.Charset;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test for elastic non-strict memory controller based on cgroups.
+ */
+public class TestCGroupElasticMemoryController {
+  private YarnConfiguration conf = new YarnConfiguration();
+  private File script = new File("target/" +
+  TestCGroupElasticMemoryController.class.getName());
+
+  /**
+   * Test that at least one memory type is requested.
+   * @throws YarnException on exception
+   */
+  @Test(expected = YarnException.class)
+  public void testConstructorOff()
+  throws YarnException {
+CGroupElasticMemoryController controller =
+new CGroupElasticMemoryController(
+conf,
+null,
+null,
+false,
+false,
+1
+);
+  }
+
+  /**
+   * Test that the OOM logic is pluggable.
+   * @throws YarnException on exception
+   */
+  @Test
+  public void testConstructorHandler()
+  throws YarnException {
+conf.setClass(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_HANDLER,
+DummyRunnableWithContext.class, Runnable.class);
+CGroupsHandler handler = mock(CGroupsHandler.class);
+when(handler.getPathForCGroup(any(), any())).thenReturn("");
+CGroupElasticMemoryController controller =
+new CGroupElasticMemoryController(
+conf,
+null,
+handler,
+true,
+false,
+1
+);
+  }
+
+  /**
+   * Test that the handler is notified about multiple OOM events.
+   * @throws Exception on exception
+   */
+  @Test
+  public void testMultipleOOMEvents() throws Exception {
+conf.set(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH,
+script.getAbsolutePath());
+try {
+  FileUtils.writeStringToFile(script,
+  "#!/bin/bash\nprintf oomevent;printf oomevent;\n",
+  Charset.defaultCharset(), false);
+  assertTrue("Could not set executable",
+  script.setExecutable(true));
+
+  CGroupsHandler cgroups = mock(CGroupsHandler.class);
+  when(cgroups.getPathForCGroup(any(), any())).thenReturn("");
+  when(cgroups.getCGroupParam(any(), any(), any()))
+  .thenReturn("under_oom 0");
+
+  Runnable handler = 

[2/2] hadoop git commit: YARN-4599. Set OOM control for memory cgroups. (Miklos Szegedi via Haibo Chen)

2018-05-23 Thread haibochen
YARN-4599. Set OOM control for memory cgroups. (Miklos Szegedi via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9964799
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9964799
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9964799

Branch: refs/heads/trunk
Commit: d9964799544eefcf424fcc178d987525f5356cdf
Parents: f09dc73
Author: Haibo Chen 
Authored: Wed May 23 11:29:55 2018 -0700
Committer: Haibo Chen 
Committed: Wed May 23 16:35:37 2018 -0700

--
 .gitignore  |   1 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  26 +-
 .../src/main/resources/yarn-default.xml |  67 ++-
 .../src/CMakeLists.txt  |  19 +
 .../CGroupElasticMemoryController.java  | 476 +++
 .../linux/resources/CGroupsHandler.java |   6 +
 .../linux/resources/CGroupsHandlerImpl.java |   6 +-
 .../CGroupsMemoryResourceHandlerImpl.java   |  15 -
 .../linux/resources/DefaultOOMHandler.java  | 254 ++
 .../monitor/ContainersMonitorImpl.java  |  50 ++
 .../executor/ContainerSignalContext.java|  41 ++
 .../native/oom-listener/impl/oom_listener.c | 171 +++
 .../native/oom-listener/impl/oom_listener.h | 102 
 .../oom-listener/impl/oom_listener_main.c   | 104 
 .../oom-listener/test/oom_listener_test_main.cc | 292 
 .../resources/DummyRunnableWithContext.java |  31 ++
 .../TestCGroupElasticMemoryController.java  | 319 +
 .../TestCGroupsMemoryResourceHandlerImpl.java   |   6 +-
 .../linux/resources/TestDefaultOOMHandler.java  | 307 
 .../monitor/TestContainersMonitor.java  |   1 +
 .../TestContainersMonitorResourceChange.java|   3 +-
 .../site/markdown/NodeManagerCGroupsMemory.md   | 133 ++
 22 files changed, 2391 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 934c009..428950b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,6 +17,7 @@
 target
 build
 dependency-reduced-pom.xml
+make-build-debug
 
 # Filesystem contract test options and credentials
 auth-keys.xml

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8e56cb8..6d08831 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1440,6 +1440,25 @@ public class YarnConfiguration extends Configuration {
 NM_PREFIX + "vmem-pmem-ratio";
   public static final float DEFAULT_NM_VMEM_PMEM_RATIO = 2.1f;
 
+  /** Specifies whether to do memory check on overall usage. */
+  public static final String NM_ELASTIC_MEMORY_CONTROL_ENABLED = NM_PREFIX
+  + "elastic-memory-control.enabled";
+  public static final boolean DEFAULT_NM_ELASTIC_MEMORY_CONTROL_ENABLED = 
false;
+
+  /** Specifies the OOM handler code. */
+  public static final String NM_ELASTIC_MEMORY_CONTROL_OOM_HANDLER = NM_PREFIX
+  + "elastic-memory-control.oom-handler";
+
+  /** The path to the OOM listener.*/
+  public static final String NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH =
+  NM_PREFIX + "elastic-memory-control.oom-listener.path";
+
+  /** Maximum time in seconds to resolve an OOM situation. */
+  public static final String NM_ELASTIC_MEMORY_CONTROL_OOM_TIMEOUT_SEC =
+  NM_PREFIX + "elastic-memory-control.timeout-sec";
+  public static final Integer
+  DEFAULT_NM_ELASTIC_MEMORY_CONTROL_OOM_TIMEOUT_SEC = 5;
+
   /** Number of Virtual CPU Cores which can be allocated for containers.*/
   public static final String NM_VCORES = NM_PREFIX + "resource.cpu-vcores";
   public static final int DEFAULT_NM_VCORES = 8;
@@ -2006,13 +2025,6 @@ public class YarnConfiguration extends Configuration {
   /** The path to the Linux container executor.*/
   public static final String NM_LINUX_CONTAINER_EXECUTOR_PATH =
 NM_PREFIX + "linux-container-executor.path";
-  
-  /** 
-   * The UNIX group that the linux-container-executor should run as.
-   * This is intended to be set as part of container-executor.cfg. 
-   */
-  
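
For reference, a hedged sketch of enabling the elastic memory control feature added by this patch through its new configuration keys. The property names follow the constants in the diff above (NM_PREFIX is "yarn.nodemanager."); the listener path and values here are illustrative, not defaults.

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ElasticMemoryConfigExample {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Turn on the overall-usage memory check (defaults to false).
    conf.setBoolean("yarn.nodemanager.elastic-memory-control.enabled", true);
    // Path to the native oom-listener binary (illustrative path).
    conf.set("yarn.nodemanager.elastic-memory-control.oom-listener.path",
        "/opt/hadoop/bin/oom-listener");
    // Give the OOM handler up to 5 seconds to resolve an OOM situation.
    conf.setInt("yarn.nodemanager.elastic-memory-control.timeout-sec", 5);
    System.out.println(
        conf.get("yarn.nodemanager.elastic-memory-control.enabled"));
  }
}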

[1/5] hadoop git commit: YARN-8327. Fix TestAggregatedLogFormat#testReadAcontainerLogs1 on Windows. Contributed by Giovanni Matteo Fumarola.

2018-05-23 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8f43ade46 -> aa3b20b76
  refs/heads/branch-2.9 50347eaf5 -> 0b2ffb2f1
  refs/heads/branch-3.0 0e7ea7735 -> f7274ca54
  refs/heads/branch-3.1 61b5b2f4f -> fa8af4aab
  refs/heads/trunk d72615611 -> f09dc7300


YARN-8327. Fix TestAggregatedLogFormat#testReadAcontainerLogs1 on Windows. 
Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f09dc730
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f09dc730
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f09dc730

Branch: refs/heads/trunk
Commit: f09dc73001fd5f3319765fa997f4b0ca9e8f2aff
Parents: d726156
Author: Inigo Goiri 
Authored: Wed May 23 15:59:30 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 15:59:30 2018 -0700

--
 .../logaggregation/TestAggregatedLogFormat.java  | 19 ---
 1 file changed, 12 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f09dc730/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
index efbaa4c..f85445e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
@@ -254,13 +254,18 @@ public class TestAggregatedLogFormat {
 // Since we could not open the fileInputStream for stderr, this file is not
 // aggregated.
 String s = writer.toString();
-int expectedLength =
-"LogType:stdout".length()
-+ (logUploadedTime ? ("\nLog Upload Time:" + Times.format(System
-  .currentTimeMillis())).length() : 0)
-+ ("\nLogLength:" + numChars).length()
-+ "\nLog Contents:\n".length() + numChars + "\n".length()
-+ "\nEnd of LogType:stdout\n".length();
+
+int expectedLength = "LogType:stdout".length()
++ (logUploadedTime
+? (System.lineSeparator() + "Log Upload Time:"
++ Times.format(System.currentTimeMillis())).length()
+: 0)
++ (System.lineSeparator() + "LogLength:" + numChars).length()
++ (System.lineSeparator() + "Log Contents:" + System.lineSeparator())
+.length()
++ numChars + ("\n").length() + ("End of LogType:stdout"
++ System.lineSeparator() + System.lineSeparator()).length();
+
 Assert.assertTrue("LogType not matched", s.contains("LogType:stdout"));
 Assert.assertTrue("log file:stderr should not be aggregated.", 
!s.contains("LogType:stderr"));
 Assert.assertTrue("log file:logs should not be aggregated.", 
!s.contains("LogType:logs"));
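
A minimal sketch of the portability issue this patch fixes: an expected string built with a hardcoded "\n" fails on Windows, where the log writer emits platform line endings ("\r\n"). Using System.lineSeparator() keeps the expected length in sync on every platform. This is a standalone JDK example, not Hadoop code.

public class LineSeparatorExample {
  public static void main(String[] args) {
    String header = "LogType:stdout";
    // Fragile: assumes Unix line endings, so the length is wrong on Windows.
    int unixLength = (header + "\n" + "LogLength:42").length();
    // Portable: matches whatever line ending the platform actually produced.
    int portableLength =
        (header + System.lineSeparator() + "LogLength:42").length();
    System.out.println(unixLength + " vs " + portableLength);
  }
}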





[5/5] hadoop git commit: YARN-8327. Fix TestAggregatedLogFormat#testReadAcontainerLogs1 on Windows. Contributed by Giovanni Matteo Fumarola.

2018-05-23 Thread inigoiri
YARN-8327. Fix TestAggregatedLogFormat#testReadAcontainerLogs1 on Windows. 
Contributed by Giovanni Matteo Fumarola.

(cherry picked from commit f09dc73001fd5f3319765fa997f4b0ca9e8f2aff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b2ffb2f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b2ffb2f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b2ffb2f

Branch: refs/heads/branch-2.9
Commit: 0b2ffb2f1f64dc7aca05d7cd08db2e6f8006b161
Parents: 50347ea
Author: Inigo Goiri 
Authored: Wed May 23 15:59:30 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 16:01:50 2018 -0700

--
 .../logaggregation/TestAggregatedLogFormat.java  | 19 ---
 1 file changed, 12 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b2ffb2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
index efbaa4c..f85445e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
@@ -254,13 +254,18 @@ public class TestAggregatedLogFormat {
 // Since we could not open the fileInputStream for stderr, this file is not
 // aggregated.
 String s = writer.toString();
-int expectedLength =
-"LogType:stdout".length()
-+ (logUploadedTime ? ("\nLog Upload Time:" + Times.format(System
-  .currentTimeMillis())).length() : 0)
-+ ("\nLogLength:" + numChars).length()
-+ "\nLog Contents:\n".length() + numChars + "\n".length()
-+ "\nEnd of LogType:stdout\n".length();
+
+int expectedLength = "LogType:stdout".length()
++ (logUploadedTime
+? (System.lineSeparator() + "Log Upload Time:"
++ Times.format(System.currentTimeMillis())).length()
+: 0)
++ (System.lineSeparator() + "LogLength:" + numChars).length()
++ (System.lineSeparator() + "Log Contents:" + System.lineSeparator())
+.length()
++ numChars + ("\n").length() + ("End of LogType:stdout"
++ System.lineSeparator() + System.lineSeparator()).length();
+
 Assert.assertTrue("LogType not matched", s.contains("LogType:stdout"));
 Assert.assertTrue("log file:stderr should not be aggregated.", 
!s.contains("LogType:stderr"));
 Assert.assertTrue("log file:logs should not be aggregated.", 
!s.contains("LogType:logs"));





[3/5] hadoop git commit: YARN-8327. Fix TestAggregatedLogFormat#testReadAcontainerLogs1 on Windows. Contributed by Giovanni Matteo Fumarola.

2018-05-23 Thread inigoiri
YARN-8327. Fix TestAggregatedLogFormat#testReadAcontainerLogs1 on Windows. 
Contributed by Giovanni Matteo Fumarola.

(cherry picked from commit f09dc73001fd5f3319765fa997f4b0ca9e8f2aff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7274ca5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7274ca5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7274ca5

Branch: refs/heads/branch-3.0
Commit: f7274ca54a778aad0fc7a171b06fe946b349648d
Parents: 0e7ea77
Author: Inigo Goiri 
Authored: Wed May 23 15:59:30 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 16:00:47 2018 -0700

--
 .../logaggregation/TestAggregatedLogFormat.java  | 19 ---
 1 file changed, 12 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7274ca5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
index efbaa4c..f85445e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
@@ -254,13 +254,18 @@ public class TestAggregatedLogFormat {
 // Since we could not open the fileInputStream for stderr, this file is not
 // aggregated.
 String s = writer.toString();
-int expectedLength =
-"LogType:stdout".length()
-+ (logUploadedTime ? ("\nLog Upload Time:" + Times.format(System
-  .currentTimeMillis())).length() : 0)
-+ ("\nLogLength:" + numChars).length()
-+ "\nLog Contents:\n".length() + numChars + "\n".length()
-+ "\nEnd of LogType:stdout\n".length();
+
+int expectedLength = "LogType:stdout".length()
++ (logUploadedTime
+? (System.lineSeparator() + "Log Upload Time:"
++ Times.format(System.currentTimeMillis())).length()
+: 0)
++ (System.lineSeparator() + "LogLength:" + numChars).length()
++ (System.lineSeparator() + "Log Contents:" + System.lineSeparator())
+.length()
++ numChars + ("\n").length() + ("End of LogType:stdout"
++ System.lineSeparator() + System.lineSeparator()).length();
+
 Assert.assertTrue("LogType not matched", s.contains("LogType:stdout"));
 Assert.assertTrue("log file:stderr should not be aggregated.", 
!s.contains("LogType:stderr"));
 Assert.assertTrue("log file:logs should not be aggregated.", 
!s.contains("LogType:logs"));





[2/5] hadoop git commit: YARN-8327. Fix TestAggregatedLogFormat#testReadAcontainerLogs1 on Windows. Contributed by Giovanni Matteo Fumarola.

2018-05-23 Thread inigoiri
YARN-8327. Fix TestAggregatedLogFormat#testReadAcontainerLogs1 on Windows. 
Contributed by Giovanni Matteo Fumarola.

(cherry picked from commit f09dc73001fd5f3319765fa997f4b0ca9e8f2aff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa8af4aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa8af4aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa8af4aa

Branch: refs/heads/branch-3.1
Commit: fa8af4aab544d2aab37c5b0e8188ddeb57d3e97c
Parents: 61b5b2f
Author: Inigo Goiri 
Authored: Wed May 23 15:59:30 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 15:59:59 2018 -0700

--
 .../logaggregation/TestAggregatedLogFormat.java  | 19 ---
 1 file changed, 12 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa8af4aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
index efbaa4c..f85445e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
@@ -254,13 +254,18 @@ public class TestAggregatedLogFormat {
 // Since we could not open the fileInputStream for stderr, this file is not
 // aggregated.
 String s = writer.toString();
-int expectedLength =
-"LogType:stdout".length()
-+ (logUploadedTime ? ("\nLog Upload Time:" + Times.format(System
-  .currentTimeMillis())).length() : 0)
-+ ("\nLogLength:" + numChars).length()
-+ "\nLog Contents:\n".length() + numChars + "\n".length()
-+ "\nEnd of LogType:stdout\n".length();
+
+int expectedLength = "LogType:stdout".length()
++ (logUploadedTime
+? (System.lineSeparator() + "Log Upload Time:"
++ Times.format(System.currentTimeMillis())).length()
+: 0)
++ (System.lineSeparator() + "LogLength:" + numChars).length()
++ (System.lineSeparator() + "Log Contents:" + System.lineSeparator())
+.length()
++ numChars + ("\n").length() + ("End of LogType:stdout"
++ System.lineSeparator() + System.lineSeparator()).length();
+
 Assert.assertTrue("LogType not matched", s.contains("LogType:stdout"));
 Assert.assertTrue("log file:stderr should not be aggregated.", 
!s.contains("LogType:stderr"));
 Assert.assertTrue("log file:logs should not be aggregated.", 
!s.contains("LogType:logs"));





[4/5] hadoop git commit: YARN-8327. Fix TestAggregatedLogFormat#testReadAcontainerLogs1 on Windows. Contributed by Giovanni Matteo Fumarola.

2018-05-23 Thread inigoiri
YARN-8327. Fix TestAggregatedLogFormat#testReadAcontainerLogs1 on Windows. 
Contributed by Giovanni Matteo Fumarola.

(cherry picked from commit f09dc73001fd5f3319765fa997f4b0ca9e8f2aff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa3b20b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa3b20b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa3b20b7

Branch: refs/heads/branch-2
Commit: aa3b20b76268ed0130fcf5dc03157537f6b007dc
Parents: 8f43ade
Author: Inigo Goiri 
Authored: Wed May 23 15:59:30 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 16:01:37 2018 -0700

--
 .../logaggregation/TestAggregatedLogFormat.java  | 19 ---
 1 file changed, 12 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa3b20b7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
index efbaa4c..f85445e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
@@ -254,13 +254,18 @@ public class TestAggregatedLogFormat {
 // Since we could not open the fileInputStream for stderr, this file is not
 // aggregated.
 String s = writer.toString();
-int expectedLength =
-"LogType:stdout".length()
-+ (logUploadedTime ? ("\nLog Upload Time:" + Times.format(System
-  .currentTimeMillis())).length() : 0)
-+ ("\nLogLength:" + numChars).length()
-+ "\nLog Contents:\n".length() + numChars + "\n".length()
-+ "\nEnd of LogType:stdout\n".length();
+
+int expectedLength = "LogType:stdout".length()
++ (logUploadedTime
+? (System.lineSeparator() + "Log Upload Time:"
++ Times.format(System.currentTimeMillis())).length()
+: 0)
++ (System.lineSeparator() + "LogLength:" + numChars).length()
++ (System.lineSeparator() + "Log Contents:" + System.lineSeparator())
+.length()
++ numChars + ("\n").length() + ("End of LogType:stdout"
++ System.lineSeparator() + System.lineSeparator()).length();
+
 Assert.assertTrue("LogType not matched", s.contains("LogType:stdout"));
 Assert.assertTrue("log file:stderr should not be aggregated.", 
!s.contains("LogType:stderr"));
 Assert.assertTrue("log file:logs should not be aggregated.", 
!s.contains("LogType:logs"));





[39/50] [abbrv] hadoop git commit: HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.

2018-05-23 Thread xyao
HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51ce02bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51ce02bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51ce02bb

Branch: refs/heads/HDDS-4
Commit: 51ce02bb54d6047a8191624a86d427b0c9445cb1
Parents: aa23d49
Author: Arpit Agarwal 
Authored: Wed May 23 10:30:12 2018 -0700
Committer: Arpit Agarwal 
Committed: Wed May 23 10:30:12 2018 -0700

--
 .../src/main/java/org/apache/hadoop/net/NetworkTopology.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51ce02bb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 256f07b..1f077a7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -105,7 +105,7 @@ public class NetworkTopology {
   private boolean clusterEverBeenMultiRack = false;
 
   /** the lock used to manage access */
-  protected ReadWriteLock netlock = new ReentrantReadWriteLock();
+  protected ReadWriteLock netlock = new ReentrantReadWriteLock(true);
 
   // keeping the constructor because other components like MR still uses this.
   public NetworkTopology() {
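
A hedged sketch of what the one-line change above does: constructing a ReentrantReadWriteLock with fairness enabled, so a writer waiting to update the topology is not starved indefinitely by a steady stream of readers. The class and methods below are illustrative stand-ins, not Hadoop code.

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class FairTopology {
  // 'true' selects the fair ordering policy: threads acquire the lock
  // roughly in arrival order, at some throughput cost.
  private final ReadWriteLock netlock = new ReentrantReadWriteLock(true);

  void addNode(String node) {
    netlock.writeLock().lock();
    try {
      // mutate topology state
    } finally {
      netlock.writeLock().unlock();
    }
  }

  boolean contains(String node) {
    netlock.readLock().lock();
    try {
      return true; // read topology state
    } finally {
      netlock.readLock().unlock();
    }
  }
}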





[16/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

2018-05-23 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/navbar.html
--
diff --git a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/navbar.html 
b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/navbar.html
new file mode 100644
index 000..b73a769
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/navbar.html
@@ -0,0 +1,33 @@
+
+
+  
+
+  
+Toggle navigation
+
+
+
+  
+  Apache Hadoop Ozone/HDDS 
documentation
+
+
+  
+https://github.com/apache/hadoop;>Source
+https://hadoop.apache.org;>Apache Hadoop
+https://apache.org;>ASF
+  
+
+  
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/sidebar.html
--
diff --git a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/sidebar.html 
b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/sidebar.html
new file mode 100644
index 000..b043911
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/sidebar.html
@@ -0,0 +1,43 @@
+
+
+  
+{{ $currentPage := . }}
+{{ range .Site.Menus.main }}
+{{ if .HasChildren }}
+
+
+{{ .Pre }}
+{{ .Name }}
+
+
+{{ range .Children }}
+
+{{ .Name }}
+
+{{ end }}
+
+
+{{ else }}
+
+
+{{ .Pre }}
+{{ .Name }}
+
+
+{{ end }}
+{{ end }}
+  
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css
--
diff --git 
a/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css 
b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css
new file mode 100644
index 000..5e39401
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css
@@ -0,0 +1,6 @@
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ 
*/.btn-danger,.btn-default,.btn-info,.btn-primary,.btn-success,.btn-warning{text-shadow:0
 -1px 0 rgba(0,0,0,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 
1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 
1px 
rgba(0,0,0,.075)}.btn-danger.active,.btn-danger:active,.btn-default.active,.btn-default:active,.btn-info.active,.btn-info:active,.btn-primary.active,.btn-primary:active,.btn-success.active,.btn-success:active,.btn-warning.active,.btn-warning:active{-webkit-box-shadow:inset
 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px 
rgba(0,0,0,.125)}.btn-danger.disabled,.btn-danger[disabled],.btn-default.disabled,.btn-default[disabled],.btn-info.disabled,.btn-info[disabled],.btn-primary.disabled,.btn-primary[disabled],.btn-success.disabled,.btn-success[disabled],.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled]
 .btn-danger,fieldset[disabled] .btn-default,fieldset[disabled] 
.btn-info,fieldset[disabled] .btn-primary,fieldset[disab
 led] .btn-success,fieldset[disabled] 
.btn-warning{-webkit-box-shadow:none;box-shadow:none}.btn-danger 
.badge,.btn-default .badge,.btn-info .badge,.btn-primary .badge,.btn-success 
.badge,.btn-warning 
.badge{text-shadow:none}.btn.active,.btn:active{background-image:none}.btn-default{text-shadow:0
 1px 0 #fff;background-image:-webkit-linear-gradient(top,#fff 0,#e0e0e0 
100%);background-image:-o-linear-gradient(top,#fff 0,#e0e0e0 
100%);background-image:-webkit-gradient(linear,left top,left 
bottom,from(#fff),to(#e0e0e0));background-image:linear-gradient(to bottom,#fff 
0,#e0e0e0 
100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#',
 endColorstr='#ffe0e0e0', 
GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#dbdbdb;border-color:#ccc}.btn-default:focus,.btn-default:hover{background-color:#e0e0e0;background-position:0
 -15px}.btn-default.active,.btn-default:active{background-color:#e0e0e0;border-c
 
olor:#dbdbdb}.btn-default.disabled,.btn-default.disabled.active,.btn-default.disabled.focus,.btn-default.disabled:active,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled],.btn-default[disabled].active,.btn-default[disabled].focus,.btn-default[disabled]:active,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled]
 .btn-default,fieldset[disabled] 

[45/50] [abbrv] hadoop git commit: Revert "Bad merge with 996a627b289947af3894bf83e7b63ec702a665cd"

2018-05-23 Thread xyao
Revert "Bad merge with 996a627b289947af3894bf83e7b63ec702a665cd"

This reverts commit 996a627b289947af3894bf83e7b63ec702a665cd.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d4a29d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d4a29d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d4a29d7

Branch: refs/heads/HDDS-4
Commit: 1d4a29d7c9dd33a629c2a36db0d426a825bdcb77
Parents: 293d4d6
Author: Xiaoyu Yao 
Authored: Tue May 15 16:56:24 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed May 23 14:53:51 2018 -0700

--
 hadoop-hdds/common/src/main/resources/ozone-default.xml | 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d4a29d7/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 1857fc4..9f7fc84 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -129,6 +129,18 @@
 
   </property>
   <property>
+    <name>dfs.ratis.client.request.timeout.duration</name>
+    <value>3s</value>
+    <tag>OZONE, RATIS, MANAGEMENT</tag>
+    <description>The timeout duration for ratis client request.</description>
+  </property>
+  <property>
+    <name>dfs.ratis.server.request.timeout.duration</name>
+    <value>3s</value>
+    <tag>OZONE, RATIS, MANAGEMENT</tag>
+    <description>The timeout duration for ratis server request.</description>
+  </property>
+  <property>
     <name>ozone.container.report.interval</name>
     <value>6ms</value>
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>





[29/50] [abbrv] hadoop git commit: HDDS-85. Send Container State Info while sending the container report from Datanode to SCM. Contributed by Shashikant Banerjee.

2018-05-23 Thread xyao
HDDS-85. Send Container State Info while sending the container report from 
Datanode to SCM. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fed2bef6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fed2bef6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fed2bef6

Branch: refs/heads/HDDS-4
Commit: fed2bef647d9a15fe020ad5d3bb89fcb77ed30e6
Parents: 745f203
Author: Mukul Kumar Singh 
Authored: Wed May 23 14:15:35 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Wed May 23 14:15:35 2018 +0530

--
 .../main/proto/DatanodeContainerProtocol.proto  |  1 +
 .../container/common/helpers/ContainerData.java |  8 
 .../common/impl/ContainerManagerImpl.java   | 45 ++--
 .../common/interfaces/ContainerManager.java |  2 +-
 .../commandhandler/ContainerReportHandler.java  |  4 +-
 .../container/ozoneimpl/OzoneContainer.java |  4 +-
 .../common/impl/TestContainerPersistence.java   |  2 +-
 7 files changed, 57 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fed2bef6/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 1138297..53da18a 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -131,6 +131,7 @@ enum Result {
   UNCLOSED_CONTAINER_IO = 25;
   DELETE_ON_OPEN_CONTAINER = 26;
   CLOSED_CONTAINER_RETRY = 27;
+  INVALID_CONTAINER_STATE = 28;
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fed2bef6/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
index 14ee33a..d1746f2 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
@@ -340,6 +340,14 @@ public class ContainerData {
   }
 
   /**
+   * checks if the container is closed.
+   * @return - boolean
+   */
+  public synchronized  boolean isClosed() {
+return ContainerLifeCycleState.CLOSED == state;
+  }
+
+  /**
* Marks this container as closed.
*/
   public synchronized void closeContainer() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fed2bef6/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index faee5d0..9355364 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers
 .StorageContainerException;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -100,6 +102,8 @@ import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .Result.UNCLOSED_CONTAINER_IO;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .Result.UNSUPPORTED_REQUEST;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
+Result.INVALID_CONTAINER_STATE;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
 
 /**
@@ -707,6 +711,39 @@ public class ContainerManagerImpl implements 
ContainerManager {
   }
 
   /**
+   * Returns LifeCycle State of the container
+   * @param 

[47/50] [abbrv] hadoop git commit: HDDS-7. Enable kerberos auth for Ozone client in hadoop rpc. Contributed by Ajay Kumar.

2018-05-23 Thread xyao
HDDS-7. Enable kerberos auth for Ozone client in hadoop rpc. Contributed by 
Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be3b57a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be3b57a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be3b57a0

Branch: refs/heads/HDDS-4
Commit: be3b57a0be65725d6ecddc218fa14b56b5dc7d32
Parents: 1d4a29d
Author: Xiaoyu Yao 
Authored: Fri May 18 13:09:17 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed May 23 14:53:51 2018 -0700

--
 .../src/test/compose/compose-secure/.env| 17 
 .../compose/compose-secure/docker-compose.yaml  | 66 ++
 .../test/compose/compose-secure/docker-config   | 66 ++
 .../acceptance/ozone-secure.robot   | 95 
 .../hadoop/ozone/client/rest/RestClient.java|  4 +-
 .../hadoop/ozone/client/rpc/RpcClient.java  |  6 +-
 6 files changed, 248 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be3b57a0/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
--
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env 
b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
new file mode 100644
index 000..3254735
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+OZONEDIR=../../../hadoop-dist/target/ozone
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be3b57a0/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
--
diff --git 
a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
 
b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
new file mode 100644
index 000..2661163
--- /dev/null
+++ 
b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
@@ -0,0 +1,66 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: "3"
+services:
+   ozone.kdc:
+  image: ahadoop/kdc:v1
+   namenode:
+  image: ahadoop/ozone:v1
+  hostname: namenode
+  volumes:
+ - ${OZONEDIR}:/opt/hadoop
+  ports:
+ - 9000:9000
+  environment:
+  ENSURE_NAMENODE_DIR: /data/namenode
+  env_file:
+ - ./docker-config
+  command: ["/opt/hadoop/bin/hdfs","namenode"]
+   datanode:
+  image: ahadoop/ozone:v1
+  hostname: datanode
+  volumes:
+- ${OZONEDIR}:/opt/hadoop
+  ports:
+- 9874
+  env_file:
+- ./docker-config
+  command: ["/opt/hadoop/bin/ozone","datanode"]
+   ksm:
+  image: ahadoop/ozone:v1
+  hostname: ksm
+  volumes:
+ - ${OZONEDIR}:/opt/hadoop
+  ports:
+ - 9874:9874
+  environment:
+ ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+  env_file:
+  - ./docker-config
+  command: ["/opt/hadoop/bin/ozone","ksm"]
+   scm:
+  image: ahadoop/ozone:v1
+  hostname: scm
+  volumes:
+ - ${OZONEDIR}:/opt/hadoop
+ 

[13/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

2018-05-23 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/css/ozonedoc.css
--
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/css/ozonedoc.css 
b/hadoop-ozone/docs/themes/ozonedoc/static/css/ozonedoc.css
new file mode 100644
index 000..39fae72
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/static/css/ozonedoc.css
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Base structure
+ */
+
+/* Move down content because we have a fixed navbar that is 50px tall */
+body {
+  padding-top: 50px;
+  font-size: 150%;
+}
+
+
+/*
+ * Global add-ons
+ */
+
+.sub-header {
+  padding-bottom: 10px;
+  border-bottom: 1px solid #eee;
+}
+
+/*
+ * Top navigation
+ * Hide default border to remove 1px line.
+ */
+.navbar-fixed-top {
+  border: 0;
+}
+
+/*
+ * Sidebar
+ */
+
+/* Hide for mobile, show later */
+.sidebar {
+  display: none;
+}
+@media (min-width: 768px) {
+  .sidebar {
+position: fixed;
+top: 51px;
+bottom: 0;
+left: 0;
+z-index: 1000;
+display: block;
+padding: 20px;
+overflow-x: hidden;
+overflow-y: auto; /* Scrollable contents if viewport is shorter than 
content. */
+background-color: #f5f5f5;
+border-right: 1px solid #eee;
+  }
+}
+
+/* Sidebar navigation */
+.nav-sidebar {
+  margin-right: -21px; /* 20px padding + 1px border */
+  margin-bottom: 20px;
+  margin-left: -20px;
+}
+.nav-sidebar > li > a {
+  padding-right: 20px;
+  padding-left: 20px;
+}
+.nav-sidebar > li > ul > li > a {
+  padding-right: 40px;
+  padding-left: 40px;
+}
+.nav-sidebar  .active > a,
+.nav-sidebar  .active > a:hover,
+.nav-sidebar  .active > a:focus {
+  color: #fff;
+  background-color: #428bca;
+}
+
+
+/*
+ * Main content
+ */
+
+.main {
+  padding: 20px;
+}
+@media (min-width: 768px) {
+  .main {
+padding-right: 40px;
+padding-left: 40px;
+  }
+}
+.main .page-header {
+  margin-top: 0;
+}
+
+
+/*
+ * Placeholder dashboard ideas
+ */
+
+.placeholders {
+  margin-bottom: 30px;
+  text-align: center;
+}
+.placeholders h4 {
+  margin-bottom: 0;
+}
+.placeholder {
+  margin-bottom: 20px;
+}
+.placeholder img {
+  display: inline-block;
+  border-radius: 50%;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.eot
--
diff --git 
a/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.eot
 
b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.eot
new file mode 100644
index 000..b93a495
Binary files /dev/null and 
b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.eot
 differ





[41/50] [abbrv] hadoop git commit: YARN-8336. Fix potential connection leak in SchedConfCLI and YarnWebServiceUtils. Contributed by Giovanni Matteo Fumarola.

2018-05-23 Thread xyao
YARN-8336. Fix potential connection leak in SchedConfCLI and 
YarnWebServiceUtils. Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e30938af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e30938af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e30938af

Branch: refs/heads/HDDS-4
Commit: e30938af1270e079587e7bc06b755f9e93e660a5
Parents: c13dea8
Author: Inigo Goiri 
Authored: Wed May 23 11:55:31 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 11:55:31 2018 -0700

--
 .../hadoop/yarn/client/cli/SchedConfCLI.java| 42 
 .../yarn/webapp/util/YarnWebServiceUtils.java   | 17 +---
 2 files changed, 38 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e30938af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
index 11bfdd7..a5f3b80 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
@@ -132,25 +132,35 @@ public class SchedConfCLI extends Configured implements 
Tool {
 }
 
 Client webServiceClient = Client.create();
-WebResource webResource = webServiceClient.resource(WebAppUtils.
-getRMWebAppURLWithScheme(getConf()));
-ClientResponse response = webResource.path("ws").path("v1").path("cluster")
-.path("scheduler-conf").accept(MediaType.APPLICATION_JSON)
-.entity(YarnWebServiceUtils.toJson(updateInfo,
-SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
-.put(ClientResponse.class);
-if (response != null) {
-  if (response.getStatus() == Status.OK.getStatusCode()) {
-System.out.println("Configuration changed successfully.");
-return 0;
+WebResource webResource = webServiceClient
+.resource(WebAppUtils.getRMWebAppURLWithScheme(getConf()));
+ClientResponse response = null;
+
+try {
+  response =
+  webResource.path("ws").path("v1").path("cluster")
+  .path("scheduler-conf").accept(MediaType.APPLICATION_JSON)
+  .entity(YarnWebServiceUtils.toJson(updateInfo,
+  SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
+  .put(ClientResponse.class);
+  if (response != null) {
+if (response.getStatus() == Status.OK.getStatusCode()) {
+  System.out.println("Configuration changed successfully.");
+  return 0;
+} else {
+  System.err.println("Configuration change unsuccessful: "
+  + response.getEntity(String.class));
+}
   } else {
-System.err.println("Configuration change unsuccessful: "
-+ response.getEntity(String.class));
+System.err.println("Configuration change unsuccessful: null response");
   }
-} else {
-  System.err.println("Configuration change unsuccessful: null response");
+  return -1;
+} finally {
+  if (response != null) {
+response.close();
+  }
+  webServiceClient.destroy();
 }
-return -1;
   }
 
   @VisibleForTesting

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e30938af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
index 1cf1e97..e7bca2c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
@@ -58,11 +58,18 @@ public final class YarnWebServiceUtils {
 
 WebResource webResource = webServiceClient.resource(webAppAddress);
 
-ClientResponse response = webResource.path("ws").path("v1")
-.path("cluster").path("nodes")
-.path(nodeId).accept(MediaType.APPLICATION_JSON)
-
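
A hedged sketch of the leak-avoidance pattern this patch applies, using the Jersey 1.x client API: always close the ClientResponse and destroy the Client in a finally block, even when the request throws. The URL and resource path here are illustrative.

import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;

public class ClosingClientExample {
  public static void main(String[] args) {
    Client client = Client.create();
    ClientResponse response = null;
    try {
      WebResource resource = client.resource("http://localhost:8088");
      response = resource.path("ws").path("v1").path("cluster")
          .get(ClientResponse.class);
      System.out.println("HTTP " + response.getStatus());
    } finally {
      if (response != null) {
        response.close();  // releases the pooled connection
      }
      client.destroy();    // shuts down the underlying client
    }
  }
}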

[25/50] [abbrv] hadoop git commit: YARN-8310. Handle old NMTokenIdentifier, AMRMTokenIdentifier, and ContainerTokenIdentifier formats. Contributed by Robert Kanter.

2018-05-23 Thread xyao
YARN-8310. Handle old NMTokenIdentifier, AMRMTokenIdentifier, and 
ContainerTokenIdentifier formats. Contributed by Robert Kanter.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e5f7ea9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e5f7ea9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e5f7ea9

Branch: refs/heads/HDDS-4
Commit: 3e5f7ea986600e084fcac723b0423e7de1b3bb8a
Parents: 68c7fd8
Author: Miklos Szegedi 
Authored: Tue May 22 18:10:33 2018 -0700
Committer: Miklos Szegedi 
Committed: Tue May 22 18:10:33 2018 -0700

--
 .../main/java/org/apache/hadoop/io/IOUtils.java |  20 +++
 .../yarn/security/AMRMTokenIdentifier.java  |  33 -
 .../yarn/security/ContainerTokenIdentifier.java |  98 ---
 .../hadoop/yarn/security/NMTokenIdentifier.java |  32 -
 .../yarn/security/TestYARNTokenIdentifier.java  | 121 ++-
 5 files changed, 278 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e5f7ea9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index 7288812..3708a3b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -513,4 +513,24 @@ public class IOUtils {
   throw exception;
 }
   }
+
+  /**
+   * Reads a DataInput until EOF and returns a byte array.  Make sure not to
+   * pass in an infinite DataInput or this will never return.
+   *
+   * @param in A DataInput
+   * @return a byte array containing the data from the DataInput
+   * @throws IOException on I/O error, other than EOF
+   */
+  public static byte[] readFullyToByteArray(DataInput in) throws IOException {
+ByteArrayOutputStream baos = new ByteArrayOutputStream();
+try {
+  while (true) {
+baos.write(in.readByte());
+  }
+} catch (EOFException eof) {
+  // finished reading, do nothing
+}
+return baos.toByteArray();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e5f7ea9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenIdentifier.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenIdentifier.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenIdentifier.java
index 56411a7..ed83b06 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenIdentifier.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenIdentifier.java
@@ -18,20 +18,26 @@
 
 package org.apache.hadoop.yarn.security;
 
+import java.io.ByteArrayInputStream;
 import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
 import 
org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.AMRMTokenIdentifierProto;
 
@@ -45,6 +51,8 @@ import com.google.protobuf.TextFormat;
 @Evolving
 public class AMRMTokenIdentifier extends TokenIdentifier {
 
+  private static final Log LOG = LogFactory.getLog(AMRMTokenIdentifier.class);
+
   public static final Text KIND_NAME = new Text("YARN_AM_RM_TOKEN");
   private AMRMTokenIdentifierProto proto;
 
@@ -78,7 +86,30 @@ public class AMRMTokenIdentifier extends TokenIdentifier {
 
   @Override
   public void readFields(DataInput in) throws IOException {
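
The hunk is truncated here in the archive. A plausible sketch of the
dual-format fallback this patch introduces: buffer the whole input with the
new IOUtils.readFullyToByteArray, try the protobuf format first, and fall
back to the old Writable layout when protobuf parsing fails. Anything past
what the diff shows, such as the readFieldsInOldFormat helper, is an
assumption:

  @Override
  public void readFields(DataInput in) throws IOException {
    // Buffer everything so the same bytes can be parsed twice if needed.
    byte[] data = IOUtils.readFullyToByteArray(in);
    try {
      proto = AMRMTokenIdentifierProto.parseFrom(data);
    } catch (InvalidProtocolBufferException e) {
      LOG.debug("Falling back to the old AMRMTokenIdentifier format", e);
      // Hypothetical helper: re-read the buffered bytes using the
      // pre-protobuf Writable encoding of this identifier.
      readFieldsInOldFormat(
          new DataInputStream(new ByteArrayInputStream(data)));
    }
  }

This is why the patch adds the ByteArrayInputStream, IOUtils, and
InvalidProtocolBufferException imports shown above.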

[22/50] [abbrv] hadoop git commit: YARN-8273. Log aggregation does not warn if HDFS quota in target directory is exceeded (grepas via rkanter)

2018-05-23 Thread xyao
YARN-8273. Log aggregation does not warn if HDFS quota in target directory is 
exceeded (grepas via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b22f56c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b22f56c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b22f56c4

Branch: refs/heads/HDDS-4
Commit: b22f56c4719e63bd4f6edc2a075e0bcdb9442255
Parents: 83f53e5
Author: Robert Kanter 
Authored: Tue May 22 14:24:38 2018 -0700
Committer: Robert Kanter 
Committed: Tue May 22 14:24:38 2018 -0700

--
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  |  4 ++
 .../logaggregation/AggregatedLogFormat.java | 14 +++-
 .../LogAggregationDFSException.java | 45 
 .../LogAggregationFileController.java   |  4 +-
 .../tfile/LogAggregationTFileController.java| 13 +++-
 .../logaggregation/TestContainerLogsUtils.java  |  4 +-
 .../logaggregation/AppLogAggregatorImpl.java| 49 ++---
 .../TestAppLogAggregatorImpl.java   | 75 +---
 .../nodemanager/webapp/TestNMWebServices.java   |  7 +-
 9 files changed, 183 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b22f56c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index db6c11a..a25c524 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -40,6 +40,10 @@
   hadoop-common
   provided
 
+
+  org.apache.hadoop
+  hadoop-hdfs-client
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b22f56c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index af3066e..81d5053 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SecureIOUtils;
 import org.apache.hadoop.io.Writable;
@@ -547,7 +548,7 @@ public class AggregatedLogFormat {
 }
 
 @Override
-public void close() {
+public void close() throws DSQuotaExceededException {
   try {
 if (writer != null) {
   writer.close();
@@ -555,7 +556,16 @@ public class AggregatedLogFormat {
   } catch (Exception e) {
 LOG.warn("Exception closing writer", e);
   } finally {
-IOUtils.cleanupWithLogger(LOG, this.fsDataOStream);
+try {
+  this.fsDataOStream.close();
+} catch (DSQuotaExceededException e) {
+  LOG.error("Exception in closing {}",
+  this.fsDataOStream.getClass(), e);
+  throw e;
+} catch (Throwable e) {
+  LOG.error("Exception in closing {}",
+  this.fsDataOStream.getClass(), e);
+}
   }
 }
   }
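
A short sketch of what the rethrow enables on the caller side, using the
LogAggregationDFSException named in the diffstat; the wrapping constructor
and the surrounding method are assumptions, not part of the excerpt shown:

  // close() may now rethrow DSQuotaExceededException, so the aggregator
  // can attach a clear diagnostic to the application instead of silently
  // dropping logs when the target directory is over its HDFS quota.
  private void closeWriter(LogWriter writer)
      throws LogAggregationDFSException {
    try {
      writer.close();
    } catch (DSQuotaExceededException e) {
      throw new LogAggregationDFSException(e);  // assumed constructor
    }
  }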

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b22f56c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationDFSException.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationDFSException.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationDFSException.java
new file mode 100644
index 000..19953e4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationDFSException.java
@@ -0,0 +1,45 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  

[44/50] [abbrv] hadoop git commit: YARN-8348. Incorrect and missing AfterClass in HBase-tests to fix NPE failures. Contributed by Giovanni Matteo Fumarola.

2018-05-23 Thread xyao
YARN-8348. Incorrect and missing AfterClass in HBase-tests to fix NPE failures. 
Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7261561
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7261561
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7261561

Branch: refs/heads/HDDS-4
Commit: d72615611cfa6bd82756270d4b10136ec1e56741
Parents: e99e5bf
Author: Inigo Goiri 
Authored: Wed May 23 14:43:59 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 14:43:59 2018 -0700

--
 .../storage/TestHBaseTimelineStorageApps.java| 4 +++-
 .../storage/TestHBaseTimelineStorageDomain.java  | 8 
 .../storage/TestHBaseTimelineStorageEntities.java| 4 +++-
 .../storage/TestHBaseTimelineStorageSchema.java  | 8 
 .../storage/flow/TestHBaseStorageFlowActivity.java   | 4 +++-
 .../storage/flow/TestHBaseStorageFlowRun.java| 4 +++-
 .../storage/flow/TestHBaseStorageFlowRunCompaction.java  | 4 +++-
 7 files changed, 31 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7261561/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
index bc33427..0dee442 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
@@ -1936,6 +1936,8 @@ public class TestHBaseTimelineStorageApps {
 
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
-util.shutdownMiniCluster();
+if (util != null) {
+  util.shutdownMiniCluster();
+}
   }
 }
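
The guard matters because JUnit 4 runs @AfterClass even when @BeforeClass
throws, so the static field can legitimately still be null. A minimal sketch
of the pattern, assuming an HBaseTestingUtility field like the ones in these
tests:

  private static HBaseTestingUtility util;

  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    util = new HBaseTestingUtility();
    util.startMiniCluster();   // may throw; teardown still runs afterwards
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // If setup failed before "util" was assigned, an unguarded call here
    // throws an NPE that masks the original setup failure.
    if (util != null) {
      util.shutdownMiniCluster();
    }
  }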

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7261561/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java
index 2932e0c..1f59088 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java
@@ -32,6 +32,7 @@ import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelp
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.domain.DomainColumn;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.domain.DomainRowKey;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.domain.DomainTableRW;
+import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -123,4 +124,11 @@ public class TestHBaseTimelineStorageDomain {
 assertEquals("user1,user2 group1,group2", readers);
 assertEquals("writer1,writer2", writers);
   }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+if (util != null) {
+  util.shutdownMiniCluster();
+}
+  }
 }


[09/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

2018-05-23 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/js/ozonedoc.js
--
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/js/ozonedoc.js 
b/hadoop-ozone/docs/themes/ozonedoc/static/js/ozonedoc.js
new file mode 100644
index 000..3f96f00
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/static/js/ozonedoc.js
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+$(
+  function(){
+$("table").addClass("table table-condensed table-bordered table-striped");
+  }
+);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/theme.toml
--
diff --git a/hadoop-ozone/docs/themes/ozonedoc/theme.toml 
b/hadoop-ozone/docs/themes/ozonedoc/theme.toml
new file mode 100644
index 000..9f427fe
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/theme.toml
@@ -0,0 +1,2 @@
+
+name = "Ozonedoc"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md 
b/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md
deleted file mode 100644
index fc63742..000
--- a/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md
+++ /dev/null
@@ -1,150 +0,0 @@
-
-
-Ozone Command Shell
-===
-
-Ozone command shell gives a command shell interface to work against ozone.
-Please note that this document assumes that the cluster is deployed
-with simple authentication.
-
-The Ozone commands take the following format.
-
-* `ozone oz --command_ http://hostname:port/volume/bucket/key -user
- <name> -root`
-
-The *port* specified in command should match the port mentioned in the config
-property `hdds.rest.http-address`. This property can be set in 
`ozone-site.xml`.
-The default value for the port is `9880` and is used in below commands.
-
-The *-root* option is a command line short cut that allows *ozone oz*
-commands to be run as the user that started the cluster. This is useful to
-indicate that you want the commands to be run as some admin user. The only
-reason for this option is that it makes the life of a lazy developer
-easier.
-
-Ozone Volume Commands
-
-
-The volume commands allow users to create, delete and list the volumes in the
-ozone cluster.
-
-### Create Volume
-
-Volumes can be created only by Admins. Here is an example of creating a volume.
-
-* `ozone oz -createVolume http://localhost:9880/hive -user bilbo -quota
-100TB -root`
-
-The above command creates a volume called `hive` owned by user `bilbo`. The
-`-root` option allows the command to be executed as user `hdfs` which is an
-admin in the cluster.
-
-### Update Volume
-
-Updates information like ownership and quota on an existing volume.
-
-* `ozone oz  -updateVolume  http://localhost:9880/hive -quota 500TB -root`
-
-The above command changes the volume quota of hive from 100TB to 500TB.
-
-### Delete Volume
-Deletes a Volume if it is empty.
-
-* `ozone oz -deleteVolume http://localhost:9880/hive -root`
-
-
-### Info Volume
-Info volume command allows the owner or the administrator of the cluster to 
read meta-data about a specific volume.
-
-* `ozone oz -infoVolume http://localhost:9880/hive -root`
-
-### List Volumes
-
-List volume command can be used by administrator to list volumes of any user. 
It can also be used by a user to list volumes owned by him.
-
-* `ozone oz -listVolume http://localhost:9880/ -user bilbo -root`
-
-The above command lists all volumes owned by user bilbo.
-
-Ozone Bucket Commands
-
-
-Bucket commands follow a similar pattern as volume commands. However bucket 
commands are designed to be run by the owner of the volume.
-Following examples assume that these commands are run by the owner of the 
volume or bucket.
-
-
-### Create Bucket
-
-Create bucket call allows the owner of a volume to create a bucket.
-
-* `ozone oz 

[33/50] [abbrv] hadoop git commit: HDDS-84. The root directory of ozone.tar.gz should contain the version string. Contributed by Elek, Marton.

2018-05-23 Thread xyao
HDDS-84. The root directory of ozone.tar.gz should contain the version string. 
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63fc5873
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63fc5873
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63fc5873

Branch: refs/heads/HDDS-4
Commit: 63fc5873cee41b883e988ead00fc6f6cf74fae97
Parents: f61e3e7
Author: Mukul Kumar Singh 
Authored: Wed May 23 21:07:37 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Wed May 23 21:07:37 2018 +0530

--
 dev-support/bin/ozone-dist-tar-stitching | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63fc5873/dev-support/bin/ozone-dist-tar-stitching
--
diff --git a/dev-support/bin/ozone-dist-tar-stitching 
b/dev-support/bin/ozone-dist-tar-stitching
index decfa23..d1116e4 100755
--- a/dev-support/bin/ozone-dist-tar-stitching
+++ b/dev-support/bin/ozone-dist-tar-stitching
@@ -41,7 +41,7 @@ function run()
 #To include the version name in the root directory of the tar file
 # we create a symbolic link and dereference it during the tar creation
 ln -s -f ozone ozone-${VERSION}
-run tar -c --dereference -f "ozone-${VERSION}.tar" "ozone"
+run tar -c --dereference -f "ozone-${VERSION}.tar" "ozone-${VERSION}"
 run gzip -f "ozone-${VERSION}.tar"
 echo
 echo "Ozone dist tar available at: ${BASEDIR}/ozone-${VERSION}.tar.gz"





[48/50] [abbrv] hadoop git commit: HDDS-5. Enable OzoneManager kerberos auth. Contributed by Ajay Kumar.

2018-05-23 Thread xyao
HDDS-5. Enable OzoneManager kerberos auth. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97ce8bff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97ce8bff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97ce8bff

Branch: refs/heads/HDDS-4
Commit: 97ce8bff18f983b9bbd3a70ca88589e3b845d4e5
Parents: f8ab025
Author: Xiaoyu Yao 
Authored: Mon May 14 09:36:57 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed May 23 14:53:51 2018 -0700

--
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |   4 +
 .../common/src/main/resources/ozone-default.xml |  33 +++-
 .../apache/hadoop/ozone/ksm/KSMConfigKeys.java  |   5 +
 .../ksm/protocol/KeySpaceManagerProtocol.java   |   4 +
 .../protocolPB/KeySpaceManagerProtocolPB.java   |   5 +
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |   3 +-
 .../ozone/TestOzoneConfigurationFields.java |   3 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java| 169 +++
 .../hadoop/ozone/ksm/KeySpaceManager.java   |  53 +-
 .../ozone/ksm/KeySpaceManagerHttpServer.java|   5 +-
 10 files changed, 238 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97ce8bff/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index dec2c1c..a12d6ac 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -20,4 +20,8 @@ package org.apache.hadoop.hdds;
 public final class HddsConfigKeys {
   private HddsConfigKeys() {
   }
+  public static final String HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY = "hdds.ksm."
+  + "kerberos.keytab.file";
+  public static final String HDDS_KSM_KERBEROS_PRINCIPAL_KEY = "hdds.ksm"
+  + ".kerberos.principal";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97ce8bff/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 7012946..9f7fc84 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1099,7 +1099,23 @@
 ozone.scm.kerberos.principal
 
  OZONE, SECURITY
-The SCM service principal. Ex scm/_HOST@REALM.TLD.
+The SCM service principal. Ex scm/_HOST@REALM.COM
+  
+
+  
+hdds.ksm.kerberos.keytab.file
+
+ HDDS, SECURITY
+ The keytab file used by KSM daemon to login as its
+  service principal. The principal name is configured with
+  hdds.ksm.kerberos.principal.
+
+  
+  
+hdds.ksm.kerberos.principal
+
+ HDDS, SECURITY
+The KSM service principal. Ex ksm/_HOST@REALM.COM
   
 
   
@@ -1111,4 +1127,19 @@
 /etc/security/keytabs/HTTP.keytab
   
 
+  
+hdds.ksm.web.authentication.kerberos.principal
+HTTP/_HOST@EXAMPLE.COM
+
+  KSM http server kerberos principal.
+
+  
+  
+hdds.ksm.web.authentication.kerberos.keytab
+/etc/security/keytabs/HTTP.keytab
+
+  KSM http server kerberos keytab.
+
+  
+
 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97ce8bff/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
--
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
index 75cf613..d911bcb 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
@@ -78,4 +78,9 @@ public final class KSMConfigKeys {
   public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK =
   "ozone.key.deleting.limit.per.task";
   public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000;
+
+  public static final String KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
+  "hdds.ksm.web.authentication.kerberos.principal";
+  public static final String KSM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE =
+  "hdds.ksm.web.authentication.kerberos.keytab";
 }
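
For context, a hedged sketch of how a daemon typically consumes these keys
at startup through Hadoop's SecurityUtil; only the configuration keys come
from this patch, the wiring below is an assumption:

  import java.io.IOException;
  import java.net.InetAddress;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdds.HddsConfigKeys;
  import org.apache.hadoop.security.SecurityUtil;
  import org.apache.hadoop.security.UserGroupInformation;

  // Log the KSM daemon in from its keytab when Kerberos is enabled;
  // SecurityUtil.login resolves _HOST in the principal to the local host.
  static void loginKsmUser(Configuration conf) throws IOException {
    if (SecurityUtil.getAuthenticationMethod(conf).equals(
        UserGroupInformation.AuthenticationMethod.KERBEROS)) {
      UserGroupInformation.setConfiguration(conf);
      SecurityUtil.login(conf,
          HddsConfigKeys.HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY,
          HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY,
          InetAddress.getLocalHost().getCanonicalHostName());
    }
  }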

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97ce8bff/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java

[15/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

2018-05-23 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css
--
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css 
b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css
new file mode 100644
index 000..ed3905e
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css
@@ -0,0 +1,6 @@
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ *//*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css 
*/html{font-family:sans-serif;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px
 dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{margin:.67em 
0;font-size:2em}mark{color:#000;background:#ff0}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em
 
40px}hr{height:0;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;fo
 
nt-size:1em}button,input,optgroup,select,textarea{margin:0;font:inherit;color:inherit}button{overflow:visible}button,select{text-transform:none}button,html
 
input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html
 
input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}input{line-height:normal}input[type=checkbox],input[type=radio]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{padding:.35em
 .625em .75em;margin:0 2px;border:1px solid 
silver}legend{padding:0;border:0}textarea{overflow:
 
auto}optgroup{font-weight:700}table{border-spacing:0;border-collapse:collapse}td,th{padding:0}/*!
 Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css 
*/@media 
print{*,:after,:before{color:#000!important;text-shadow:none!important;background:0
 
0!important;-webkit-box-shadow:none!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:"
 (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) 
")"}a[href^="javascript:"]:after,a[href^="#"]:after{content:""}blockquote,pre{border:1px
 solid 
#999;page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}.navbar{display:none}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px
 solid #000}.table{border-collapse:collapse!important}.table td,.table 
th{background-color:#fff!important}.table-bordered td,.table-bordered 
th{border:1px so
 lid #ddd!important}}@font-face{font-family:'Glyphicons 
Halflings';src:url(../fonts/glyphicons-halflings-regular.eot);src:url(../fonts/glyphicons-halflings-regular.eot?#iefix)
 format('embedded-opentype'),url(../fonts/glyphicons-halflings-regular.woff2) 
format('woff2'),url(../fonts/glyphicons-halflings-regular.woff) 
format('woff'),url(../fonts/glyphicons-halflings-regular.ttf) 
format('truetype'),url(../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular)
 
format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons
 
Halflings';font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\002a"}.glyphicon-plus:before{content:"\002b"}.glyphicon-eur:before,.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\
 

[06/50] [abbrv] hadoop git commit: YARN-8206. Sending a kill does not immediately kill docker containers. Contributed by Eric Badger

2018-05-23 Thread xyao
YARN-8206. Sending a kill does not immediately kill docker containers. 
Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f11288e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f11288e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f11288e

Branch: refs/heads/HDDS-4
Commit: 5f11288e41fca2e414dcbea130c7702e29d4d610
Parents: 57c2feb
Author: Jason Lowe 
Authored: Tue May 22 09:27:08 2018 -0500
Committer: Jason Lowe 
Committed: Tue May 22 09:27:08 2018 -0500

--
 .../runtime/DockerLinuxContainerRuntime.java|  93 --
 .../runtime/TestDockerContainerRuntime.java | 301 +--
 2 files changed, 198 insertions(+), 196 deletions(-)
--
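
The crux of the fix, condensed from the hunk below: both KILL and TERM used
to fall into handleContainerStop, which maps to a graceful "docker stop" and
its timeout, so a kill request was not immediate. The new dispatch separates
the two; the docker CLI equivalents noted here are inferred from the handler
names, not shown in this excerpt:

  if (ContainerExecutor.Signal.NULL.equals(signal)) {
    executeLivelinessCheck(ctx);             // no signal, just a probe
  } else if (ContainerExecutor.Signal.TERM.equals(signal)) {
    String containerId = ctx.getContainer().getContainerId().toString();
    handleContainerStop(containerId, env);   // graceful, like "docker stop"
  } else {
    handleContainerKill(ctx, env, signal);   // immediate, like "docker kill"
  }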


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f11288e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 40cb031..787e892 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -617,19 +617,8 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
*/
   private boolean allowPrivilegedContainerExecution(Container container)
   throws ContainerExecutionException {
-Map environment = container.getLaunchContext()
-.getEnvironment();
-String runPrivilegedContainerEnvVar = environment
-.get(ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER);
-
-if (runPrivilegedContainerEnvVar == null) {
-  return false;
-}
 
-if (!runPrivilegedContainerEnvVar.equalsIgnoreCase("true")) {
-  LOG.warn("NOT running a privileged container. Value of " +
-  ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER
-  + "is invalid: " + runPrivilegedContainerEnvVar);
+if(!isContainerRequestedAsPrivileged(container)) {
   return false;
 }
 
@@ -669,6 +658,20 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 return true;
   }
 
+  /**
+   * This function only returns whether a privileged container was requested,
+   * not whether the container was or will be launched as privileged.
+   * @param container
+   * @return
+   */
+  private boolean isContainerRequestedAsPrivileged(
+  Container container) {
+String runPrivilegedContainerEnvVar = container.getLaunchContext()
+.getEnvironment().get(ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER);
+return Boolean.parseBoolean(runPrivilegedContainerEnvVar);
+  }
+
+  @VisibleForTesting
   private String mountReadOnlyPath(String mount,
   Map localizedResources)
   throws ContainerExecutionException {
@@ -963,19 +966,16 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   public void signalContainer(ContainerRuntimeContext ctx)
   throws ContainerExecutionException {
 ContainerExecutor.Signal signal = ctx.getExecutionAttribute(SIGNAL);
-String containerId = ctx.getContainer().getContainerId().toString();
 Map env =
 ctx.getContainer().getLaunchContext().getEnvironment();
 try {
   if (ContainerExecutor.Signal.NULL.equals(signal)) {
 executeLivelinessCheck(ctx);
+  } else if (ContainerExecutor.Signal.TERM.equals(signal)) {
+String containerId = ctx.getContainer().getContainerId().toString();
+handleContainerStop(containerId, env);
   } else {
-if (ContainerExecutor.Signal.KILL.equals(signal)
-|| ContainerExecutor.Signal.TERM.equals(signal)) {
-  handleContainerStop(containerId, env);
-} else {
-  handleContainerKill(containerId, env, signal);
-}
+handleContainerKill(ctx, env, signal);
   }
 } catch (ContainerExecutionException e) {
   LOG.warn("Signal docker container failed. Exception: ", e);
@@ 

[11/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

2018-05-23 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.ttf
--
diff --git 
a/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.ttf
 
b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.ttf
new file mode 100644
index 000..1413fc6
Binary files /dev/null and 
b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.ttf
 differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff
--
diff --git 
a/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff
 
b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff
new file mode 100644
index 000..9e61285
Binary files /dev/null and 
b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff
 differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff2
--
diff --git 
a/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff2
 
b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff2
new file mode 100644
index 000..64539b5
Binary files /dev/null and 
b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff2
 differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/js/bootstrap.min.js
--
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/js/bootstrap.min.js 
b/hadoop-ozone/docs/themes/ozonedoc/static/js/bootstrap.min.js
new file mode 100644
index 000..9bcd2fc
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/static/js/bootstrap.min.js
@@ -0,0 +1,7 @@
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under the MIT license
+ */
+if("undefined"==typeof jQuery)throw new Error("Bootstrap's JavaScript requires 
jQuery");+function(a){"use strict";var b=a.fn.jquery.split(" 
")[0].split(".");if(b[0]<2&[1]<9||1==b[0]&&9==b[1]&[2]<1||b[0]>3)throw new 
Error("Bootstrap's JavaScript requires jQuery version 1.9.1 or higher, but 
lower than version 4")}(jQuery),+function(a){"use strict";function b(){var 
a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd
 otransitionend",transition:"transitionend"};for(var c in b)if(void 
0!==a.style[c])return{end:b[c]};return!1}a.fn.emulateTransitionEnd=function(b){var
 c=!1,d=this;a(this).one("bsTransitionEnd",function(){c=!0});var 
e=function(){c||a(d).trigger(a.support.transition.end)};return 
setTimeout(e,b),this},a(function(){a.support.transition=b(),a.support.transition&&(a.event.special.bsTransitionEnd={bindType:a.support.transition.end,delegateType:a.support.transition.end,handle:function(b){if(a(b
 .target).is(this))return 
b.handleObj.handler.apply(this,arguments)}})})}(jQuery),+function(a){"use 
strict";function b(b){return this.each(function(){var 
c=a(this),e=c.data("bs.alert");e||c.data("bs.alert",e=new 
d(this)),"string"==typeof b&[b].call(c)})}var 
c='[data-dismiss="alert"]',d=function(b){a(b).on("click",c,this.close)};d.VERSION="3.3.7",d.TRANSITION_DURATION=150,d.prototype.close=function(b){function
 c(){g.detach().trigger("closed.bs.alert").remove()}var 
e=a(this),f=e.attr("data-target");f||(f=e.attr("href"),f=f&(/.*(?=#[^\s]*$)/,""));var
 
g=a("#"===f?[]:f);b&(),g.length||(g=e.closest(".alert")),g.trigger(b=a.Event("close.bs.alert")),b.isDefaultPrevented()||(g.removeClass("in"),a.support.transition&("fade")?g.one("bsTransitionEnd",c).emulateTransitionEnd(d.TRANSITION_DURATION):c())};var
 
e=a.fn.alert;a.fn.alert=b,a.fn.alert.Constructor=d,a.fn.alert.noConflict=function(){return
 a.fn.alert=e,this},a(document).on("click.bs.alert.data-api",c
 ,d.prototype.close)}(jQuery),+function(a){"use strict";function b(b){return 
this.each(function(){var d=a(this),e=d.data("bs.button"),f="object"==typeof 
b&e||d.data("bs.button",e=new 
c(this,f)),"toggle"==b?e.toggle():b&(b)})}var 
c=function(b,d){this.$element=a(b),this.options=a.extend({},c.DEFAULTS,d),this.isLoading=!1};c.VERSION="3.3.7",c.DEFAULTS={loadingText:"loading..."},c.prototype.setState=function(b){var
 

[21/50] [abbrv] hadoop git commit: YARN-8332. Incorrect min/max allocation property name in resource types doc. (Weiwei Yang via wangda)

2018-05-23 Thread xyao
YARN-8332. Incorrect min/max allocation property name in resource types doc. 
(Weiwei Yang via wangda)

Change-Id: If74f1ceed9c045a2cb2d6593741278b65ac44a9f


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83f53e5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83f53e5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83f53e5c

Branch: refs/heads/HDDS-4
Commit: 83f53e5c6236de30c213dc41878cebfb02597e26
Parents: bd15d23
Author: Wangda Tan 
Authored: Tue May 22 13:29:21 2018 -0700
Committer: Wangda Tan 
Committed: Tue May 22 13:33:33 2018 -0700

--
 .../hadoop-yarn-site/src/site/markdown/ResourceModel.md | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83f53e5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
index f968b5f..ac16d53 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
@@ -49,8 +49,8 @@ The following configuration properties are supported. See 
below for details.
 |: |: |
 | `yarn.resource-types` | Comma-separated list of additional resources. May 
not include `memory`, `memory-mb`, or `vcores` |
 | `yarn.resource-types.<resource>.units` | Default unit for the specified resource type |
-| `yarn.resource-types.<resource>.minimum` | The minimum request for the specified resource type |
-| `yarn.resource-types.<resource>.maximum` | The maximum request for the specified resource type |
+| `yarn.resource-types.<resource>.minimum-allocation` | The minimum request for the specified resource type |
+| `yarn.resource-types.<resource>.maximum-allocation` | The maximum request for the specified resource type |
 
 `node-resources.xml`
 
@@ -127,8 +127,8 @@ set the default unit for the resource type. Valid values 
are:
 
 The property must be named `yarn.resource-types.<resource>.units`. Each defined
 resource may also have optional minimum and maximum properties. The properties
-must be named `yarn.resource-types.<resource>.minimum` and
-`yarn.resource-types.<resource>.maximum`.
+must be named `yarn.resource-types.<resource>.minimum-allocation` and
+`yarn.resource-types.<resource>.maximum-allocation`.
 
 The `yarn.resource-types` property and any unit, minimum, or maximum properties
 may be defined in either the usual `yarn-site.xml` file or in a file named
@@ -147,12 +147,12 @@ may be defined in either the usual `yarn-site.xml` file 
or in a file named
   
 
   
-yarn.resource-types.resource2.minimum
+yarn.resource-types.resource2.minimum-allocation
 1
   
 
   
-yarn.resource-types.resource2.maximum
+yarn.resource-types.resource2.maximum-allocation
 1024
   
 





[36/50] [abbrv] hadoop git commit: HDDS-110. Checkstyle is not working in the HDDS precommit hook. Contributed by Elek, Marton.

2018-05-23 Thread xyao
HDDS-110. Checkstyle is not working in the HDDS precommit hook.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/699a6918
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/699a6918
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/699a6918

Branch: refs/heads/HDDS-4
Commit: 699a6918aca2b57ae9ad0bff2c3aaf5a776da614
Parents: c0c9b7a
Author: Anu Engineer 
Authored: Wed May 23 09:42:21 2018 -0700
Committer: Anu Engineer 
Committed: Wed May 23 10:01:53 2018 -0700

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/699a6918/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 0e7b23a..13f9255 100644
--- a/pom.xml
+++ b/pom.xml
@@ -322,7 +322,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 
   org.apache.hadoop
   hadoop-build-tools
-  ${project.version}
+  ${hadoop.version}
 
 
   com.puppycrawl.tools





[38/50] [abbrv] hadoop git commit: HADOOP-15457. Add Security-Related HTTP Response Header in WEBUIs. (kanwaljeets via rkanter)

2018-05-23 Thread xyao
HADOOP-15457. Add Security-Related HTTP Response Header in WEBUIs. (kanwaljeets 
via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa23d49f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa23d49f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa23d49f

Branch: refs/heads/HDDS-4
Commit: aa23d49fc8b9c2537529dbdc13512000e2ab295a
Parents: bc6d9d4
Author: Robert Kanter 
Authored: Wed May 23 10:23:17 2018 -0700
Committer: Robert Kanter 
Committed: Wed May 23 10:24:09 2018 -0700

--
 .../org/apache/hadoop/http/HttpServer2.java | 79 +++-
 .../org/apache/hadoop/http/TestHttpServer.java  | 61 +++
 2 files changed, 121 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa23d49f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 47ca841..c273c78 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -34,6 +34,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
@@ -172,10 +174,16 @@ public final class HttpServer2 implements FilterContainer 
{
   private final SignerSecretProvider secretProvider;
   private XFrameOption xFrameOption;
   private boolean xFrameOptionIsEnabled;
-  private static final String X_FRAME_VALUE = "xFrameOption";
-  private static final String X_FRAME_ENABLED = "X_FRAME_ENABLED";
-
-
+  public static final String HTTP_HEADER_PREFIX = "hadoop.http.header.";
+  private static final String HTTP_HEADER_REGEX =
+  "hadoop\\.http\\.header\\.([a-zA-Z\\-_]+)";
+  static final String X_XSS_PROTECTION  =
+  "X-XSS-Protection:1; mode=block";
+  static final String X_CONTENT_TYPE_OPTIONS =
+  "X-Content-Type-Options:nosniff";
+  private static final String X_FRAME_OPTIONS = "X-FRAME-OPTIONS";
+  private static final Pattern PATTERN_HTTP_HEADER_REGEX =
+  Pattern.compile(HTTP_HEADER_REGEX);
   /**
* Class to construct instances of HTTP server with specific options.
*/
@@ -574,10 +582,7 @@ public final class HttpServer2 implements FilterContainer {
 addDefaultApps(contexts, appDir, conf);
 webServer.setHandler(handlers);
 
-Map xFrameParams = new HashMap<>();
-xFrameParams.put(X_FRAME_ENABLED,
-String.valueOf(this.xFrameOptionIsEnabled));
-xFrameParams.put(X_FRAME_VALUE,  this.xFrameOption.toString());
+Map xFrameParams = setHeaders(conf);
 addGlobalFilter("safety", QuotingInputFilter.class.getName(), 
xFrameParams);
 final FilterInitializer[] initializers = getFilterInitializers(conf);
 if (initializers != null) {
@@ -1475,9 +1480,11 @@ public final class HttpServer2 implements 
FilterContainer {
   public static class QuotingInputFilter implements Filter {
 
 private FilterConfig config;
+private Map headerMap;
 
 public static class RequestQuoter extends HttpServletRequestWrapper {
   private final HttpServletRequest rawRequest;
+
   public RequestQuoter(HttpServletRequest rawRequest) {
 super(rawRequest);
 this.rawRequest = rawRequest;
@@ -1566,6 +1573,7 @@ public final class HttpServer2 implements FilterContainer 
{
 @Override
 public void init(FilterConfig config) throws ServletException {
   this.config = config;
+  initHttpHeaderMap();
 }
 
 @Override
@@ -1593,11 +1601,7 @@ public final class HttpServer2 implements 
FilterContainer {
   } else if (mime.startsWith("application/xml")) {
 httpResponse.setContentType("text/xml; charset=utf-8");
   }
-
-  if(Boolean.valueOf(this.config.getInitParameter(X_FRAME_ENABLED))) {
-httpResponse.addHeader("X-FRAME-OPTIONS",
-this.config.getInitParameter(X_FRAME_VALUE));
-  }
+  headerMap.forEach((k, v) -> httpResponse.addHeader(k, v));
   chain.doFilter(quoted, httpResponse);
 }
 
@@ -1613,14 +1617,25 @@ public final class HttpServer2 implements 
FilterContainer {
   return (mime == null) ? null : mime;
 }
 
+private void initHttpHeaderMap() {
+  Enumeration params = this.config.getInitParameterNames();
+  headerMap = 
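
The hunk is truncated here by the archive. A plausible completion of
initHttpHeaderMap, matching the PATTERN_HTTP_HEADER_REGEX constant declared
earlier in this patch; the exact handling of the built-in default headers is
an assumption:

  private void initHttpHeaderMap() {
    Map<String, String> headers = new HashMap<>();
    Enumeration<String> params = this.config.getInitParameterNames();
    while (params.hasMoreElements()) {
      String key = params.nextElement();
      Matcher m = PATTERN_HTTP_HEADER_REGEX.matcher(key);
      if (m.matches()) {
        // e.g. hadoop.http.header.X-FRAME-OPTIONS=SAMEORIGIN becomes the
        // response header "X-FRAME-OPTIONS: SAMEORIGIN"
        headers.put(m.group(1), config.getInitParameter(key));
      }
    }
    headerMap = headers;
  }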

[08/50] [abbrv] hadoop git commit: YARN-7960. Added security flag no-new-privileges for YARN Docker integration. Contributed by Eric Badger

2018-05-23 Thread xyao
YARN-7960.  Added security flag no-new-privileges for YARN Docker integration.
Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6176d2b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6176d2b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6176d2b3

Branch: refs/heads/HDDS-4
Commit: 6176d2b35c85715aae93526236c29540f71ecac8
Parents: bcc8e76
Author: Eric Yang 
Authored: Tue May 22 13:44:58 2018 -0400
Committer: Eric Yang 
Committed: Tue May 22 13:44:58 2018 -0400

--
 .../hadoop-yarn/conf/container-executor.cfg |  1 +
 .../container-executor/impl/utils/docker-util.c | 12 +++
 .../test/utils/test_docker_util.cc  | 90 
 .../src/site/markdown/DockerContainers.md   |  1 +
 4 files changed, 104 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6176d2b3/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
--
diff --git a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg 
b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
index 36676b0..d19874f 100644
--- a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
+++ b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
@@ -15,6 +15,7 @@ feature.tc.enabled=false
#  docker.allowed.rw-mounts=## comma separated volumes that can be mounted as 
read-write, add the yarn local and log dirs to this list to run Hadoop jobs
#  docker.privileged-containers.enabled=false
#  docker.allowed.volume-drivers=## comma separated list of allowed 
volume-drivers
+#  docker.no-new-privileges.enabled=## enable/disable the no-new-privileges 
flag for docker run. Set to "true" to enable, disabled by default
 
 # The configs below deal with settings for FPGA resource
 #[fpga]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6176d2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
index 099e5b5..d34a5b2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
@@ -1374,6 +1374,18 @@ int get_docker_run_command(const char *command_file, 
const struct configuration
   reset_args(args);
   return BUFFER_TOO_SMALL;
 }
+char *no_new_privileges_enabled =
+get_configuration_value("docker.no-new-privileges.enabled",
+CONTAINER_EXECUTOR_CFG_DOCKER_SECTION, conf);
+if (no_new_privileges_enabled != NULL &&
+strcasecmp(no_new_privileges_enabled, "True") == 0) {
+  ret = add_to_args(args, "--security-opt=no-new-privileges");
+  if (ret != 0) {
+reset_args(args);
+return BUFFER_TOO_SMALL;
+  }
+}
+free(no_new_privileges_enabled);
   }
   free(privileged);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6176d2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
index e18bf63..613755c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
@@ -1545,4 +1545,94 @@ namespace ContainerExecutor {
 
 run_docker_command_test(file_cmd_vec, bad_file_cmd_vec, 
get_docker_volume_command);
   }
+
+  TEST_F(TestDockerUtil, test_docker_no_new_privileges) {
+
+std::string container_executor_contents[] = {"[docker]\n"
+ "  
docker.privileged-containers.registries=hadoop\n"
+ 

[34/50] [abbrv] hadoop git commit: HDDS-44. Ozone: start-ozone.sh fail to start datanode because of incomplete classpaths. Contributed by Mukul Kumar Singh.

2018-05-23 Thread xyao
HDDS-44. Ozone: start-ozone.sh fail to start datanode because of incomplete 
classpaths.
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e83b943f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e83b943f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e83b943f

Branch: refs/heads/HDDS-4
Commit: e83b943fed53c8082a699e0601c2f8e8db0f8ffe
Parents: 63fc587
Author: Anu Engineer 
Authored: Wed May 23 09:29:35 2018 -0700
Committer: Anu Engineer 
Committed: Wed May 23 09:29:35 2018 -0700

--
 hadoop-ozone/common/src/main/bin/start-ozone.sh | 116 ++-
 1 file changed, 111 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e83b943f/hadoop-ozone/common/src/main/bin/start-ozone.sh
--
diff --git a/hadoop-ozone/common/src/main/bin/start-ozone.sh 
b/hadoop-ozone/common/src/main/bin/start-ozone.sh
index dda0a1c..92bc4a8 100644
--- a/hadoop-ozone/common/src/main/bin/start-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/start-ozone.sh
@@ -47,6 +47,26 @@ else
   exit 1
 fi
 
+# get arguments
+if [[ $# -ge 1 ]]; then
+  startOpt="$1"
+  shift
+  case "$startOpt" in
+-upgrade)
+  nameStartOpt="$startOpt"
+;;
+-rollback)
+  dataStartOpt="$startOpt"
+;;
+*)
+  hadoop_exit_with_usage 1
+;;
+  esac
+fi
+
+#Add other possible options
+nameStartOpt="$nameStartOpt $*"
+
 SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey 
hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
 SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf 
-confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
 
@@ -65,11 +85,97 @@ fi
 
 #-
 # Start hdfs before starting ozone daemons
-if [[ -f "${bin}/start-dfs.sh" ]]; then
-  "${bin}/start-dfs.sh"
-else
-  echo "ERROR: Cannot execute ${bin}/start-dfs.sh." 2>&1
-  exit 1
+
+#-
+# namenodes
+
+NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes 2>/dev/null)
+
+if [[ -z "${NAMENODES}" ]]; then
+  NAMENODES=$(hostname)
+fi
+
+echo "Starting namenodes on [${NAMENODES}]"
+hadoop_uservar_su hdfs namenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
+--workers \
+--config "${HADOOP_CONF_DIR}" \
+--hostnames "${NAMENODES}" \
+--daemon start \
+namenode ${nameStartOpt}
+
+HADOOP_JUMBO_RETCOUNTER=$?
+
+#-
+# datanodes (using default workers file)
+
+echo "Starting datanodes"
+hadoop_uservar_su hdfs datanode "${HADOOP_HDFS_HOME}/bin/ozone" \
+--workers \
+--config "${HADOOP_CONF_DIR}" \
+--daemon start \
+datanode ${dataStartOpt}
+(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
+
+#-
+# secondary namenodes (if any)
+
+SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf 
-secondarynamenodes 2>/dev/null)
+
+if [[ -n "${SECONDARY_NAMENODES}" ]]; then
+
+  if [[ "${NAMENODES}" =~ , ]]; then
+
+hadoop_error "WARNING: Highly available NameNode is configured."
+hadoop_error "WARNING: Skipping SecondaryNameNode."
+
+  else
+
+if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
+  SECONDARY_NAMENODES=$(hostname)
+fi
+
+echo "Starting secondary namenodes [${SECONDARY_NAMENODES}]"
+
+hadoop_uservar_su hdfs secondarynamenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
+  --workers \
+  --config "${HADOOP_CONF_DIR}" \
+  --hostnames "${SECONDARY_NAMENODES}" \
+  --daemon start \
+  secondarynamenode
+(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
+  fi
+fi
+
+#-
+# quorumjournal nodes (if any)
+
+JOURNAL_NODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -journalNodes 2>&-)
+
+if [[ "${#JOURNAL_NODES}" != 0 ]]; then
+  echo "Starting journal nodes [${JOURNAL_NODES}]"
+
+  hadoop_uservar_su hdfs journalnode "${HADOOP_HDFS_HOME}/bin/hdfs" \
+--workers \
+--config "${HADOOP_CONF_DIR}" \
+--hostnames "${JOURNAL_NODES}" \
+--daemon start \
+journalnode
+   (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
+fi
+
+#-
+# ZK Failover controllers, if auto-HA is enabled
+AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey 
dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')
+if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
+  echo "Starting ZK Failover Controllers on NN hosts [${NAMENODES}]"
+
+  hadoop_uservar_su hdfs zkfc 

[23/50] [abbrv] hadoop git commit: HDDS-89. Addendum Patch-1. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

2018-05-23 Thread xyao
HDDS-89. Addendum Patch-1. Create ozone specific inline documentation as part 
of the build.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43be9ab4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43be9ab4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43be9ab4

Branch: refs/heads/HDDS-4
Commit: 43be9ab44f27ae847e100efdc6810b192202fc55
Parents: b22f56c
Author: Anu Engineer 
Authored: Tue May 22 14:29:06 2018 -0700
Committer: Anu Engineer 
Committed: Tue May 22 14:29:06 2018 -0700

--
 hadoop-ozone/docs/dev-support/bin/generate-site.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43be9ab4/hadoop-ozone/docs/dev-support/bin/generate-site.sh
--
diff --git a/hadoop-ozone/docs/dev-support/bin/generate-site.sh 
b/hadoop-ozone/docs/dev-support/bin/generate-site.sh
index 3323935..374e74b 100755
--- a/hadoop-ozone/docs/dev-support/bin/generate-site.sh
+++ b/hadoop-ozone/docs/dev-support/bin/generate-site.sh
@@ -19,7 +19,7 @@ DOCDIR="$DIR/../.."
 
 if [ ! "$(which hugo)" ]; then
echo "Hugo is not yet installed. Doc generation is skipped."
-   exit -1
+   exit 0
 fi
 
 DESTDIR="$DOCDIR/target/classes/webapps/docs"





[18/50] [abbrv] hadoop git commit: HDDS-74. Rename name of properties related to configuration tags. Contributed by Sandeep Nemuri.

2018-05-23 Thread xyao
HDDS-74. Rename name of properties related to configuration tags.
Contributed by Sandeep Nemuri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60821fb2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60821fb2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60821fb2

Branch: refs/heads/HDDS-4
Commit: 60821fb20ecee55735ddd0a379cb64841ccb1e2e
Parents: 481bfdb
Author: Anu Engineer 
Authored: Tue May 22 11:38:11 2018 -0700
Committer: Anu Engineer 
Committed: Tue May 22 11:38:11 2018 -0700

--
 .../main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java   | 2 +-
 hadoop-hdds/common/src/main/resources/ozone-default.xml  | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60821fb2/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
index b8d0b24..521408b 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
@@ -154,7 +154,7 @@ public class HddsConfServlet extends HttpServlet {
 
 switch (cmd) {
 case "getOzoneTags":
-  out.write(gson.toJson(config.get("ozone.system.tags").split(",")));
+  out.write(gson.toJson(config.get("ozone.tags.system").split(",")));
   break;
 case "getPropertyByTag":
   String tags = request.getParameter("tags");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60821fb2/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 648ba05..e0aca67 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1006,12 +1006,12 @@
   </property>
 
   <property>
-    <name>hadoop.custom.tags</name>
+    <name>hadoop.tags.custom</name>
     <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,KSM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE</value>
   </property>
 
   <property>
-    <name>ozone.system.tags</name>
+    <name>ozone.tags.system</name>
     <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,KSM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE</value>
   </property>
 
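The rename is mechanical, but any code still reading the old key silently gets null afterwards, which is why the HddsConfServlet hunk above had to change in the same commit. A small sketch of the post-rename lookup (the TagLookup wrapper is illustrative, not from the patch):

import org.apache.hadoop.conf.Configuration;

public class TagLookup {
  public static void main(String[] args) {
    Configuration config = new Configuration();
    // Assumes ozone-default.xml is on the classpath, as it is for Ozone services.
    config.addResource("ozone-default.xml");
    // The old key "ozone.system.tags" would now return null; guard with a default.
    for (String tag : config.get("ozone.tags.system", "").split(",")) {
      System.out.println(tag.trim());
    }
  }
}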


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[04/50] [abbrv] hadoop git commit: HDDS-82. Merge ContainerData and ContainerStatus classes. Contributed by Bharat Viswanadham.

2018-05-23 Thread xyao
HDDS-82. Merge ContainerData and ContainerStatus classes. Contributed by Bharat 
Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e881267
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e881267
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e881267

Branch: refs/heads/HDDS-4
Commit: 5e88126776e6d682a48f737d8ab1ad0e04d3e767
Parents: 0b4c44b
Author: Xiaoyu Yao 
Authored: Mon May 21 16:09:24 2018 -0700
Committer: Xiaoyu Yao 
Committed: Mon May 21 16:09:24 2018 -0700

--
 .../main/proto/DatanodeContainerProtocol.proto  |   1 +
 .../container/common/helpers/ContainerData.java | 163 +-
 .../common/impl/ContainerManagerImpl.java   | 144 ++--
 .../container/common/impl/ContainerStatus.java  | 217 ---
 .../RandomContainerDeletionChoosingPolicy.java  |  10 +-
 ...NOrderedContainerDeletionChoosingPolicy.java |  20 +-
 .../ContainerDeletionChoosingPolicy.java|   3 +-
 .../common/impl/TestContainerPersistence.java   |  19 +-
 8 files changed, 257 insertions(+), 320 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e881267/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index e7e5b2b..95b7cbb 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -152,6 +152,7 @@ enum ContainerLifeCycleState {
 OPEN = 1;
 CLOSING = 2;
 CLOSED = 3;
+INVALID = 4;
 }
 
 message ContainerCommandRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e881267/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
index 2a079b0..14ee33a 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
@@ -52,6 +52,17 @@ public class ContainerData {
   private ContainerType containerType;
   private String containerDBType;
 
+
+  /**
+   * Number of pending deletion blocks in container.
+   */
+  private int numPendingDeletionBlocks;
+  private AtomicLong readBytes;
+  private AtomicLong writeBytes;
+  private AtomicLong readCount;
+  private AtomicLong writeCount;
+
+
   /**
* Constructs a  ContainerData Object.
*
@@ -66,6 +77,34 @@ public class ContainerData {
 this.bytesUsed =  new AtomicLong(0L);
 this.containerID = containerID;
 this.state = ContainerLifeCycleState.OPEN;
+this.numPendingDeletionBlocks = 0;
+this.readCount = new AtomicLong(0L);
+this.readBytes =  new AtomicLong(0L);
+this.writeCount =  new AtomicLong(0L);
+this.writeBytes =  new AtomicLong(0L);
+  }
+
+  /**
+   * Constructs a  ContainerData Object.
+   *
+   * @param containerID - ID
+   * @param conf - Configuration
+   * @param state - ContainerLifeCycleState
+   * @param
+   */
+  public ContainerData(long containerID, Configuration conf,
+   ContainerLifeCycleState state) {
+this.metadata = new TreeMap<>();
+this.maxSize = conf.getLong(ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_SIZE_KEY,
+ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT) * OzoneConsts.GB;
+this.bytesUsed =  new AtomicLong(0L);
+this.containerID = containerID;
+this.state = state;
+this.numPendingDeletionBlocks = 0;
+this.readCount = new AtomicLong(0L);
+this.readBytes =  new AtomicLong(0L);
+this.writeCount =  new AtomicLong(0L);
+this.writeBytes =  new AtomicLong(0L);
   }
 
   /**
@@ -293,6 +332,14 @@ public class ContainerData {
   }
 
   /**
+   * checks if the container is invalid.
+   * @return - boolean
+   */
+  public boolean isValid() {
+return !(ContainerLifeCycleState.INVALID == state);
+  }
+
+  /**
* Marks this container as closed.
*/
   public synchronized void closeContainer() {
@@ -317,11 +364,119 @@ public class ContainerData {
 this.bytesUsed.set(used);
   }
 
-  public long addBytesUsed(long delta) {
-return this.bytesUsed.addAndGet(delta);
-  }
-
+  /**
+   * Get the number of bytes used by the container.
+   * @return 

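The merged class keeps its I/O statistics in AtomicLongs so concurrent read and write paths can update them without locking. The pattern, reduced to a self-contained sketch (IoStats is an illustrative name, not a class from this patch):

import java.util.concurrent.atomic.AtomicLong;

class IoStats {
  private final AtomicLong readBytes = new AtomicLong(0L);
  private final AtomicLong readCount = new AtomicLong(0L);

  void onRead(long bytes) {
    readBytes.addAndGet(bytes);   // total bytes served so far
    readCount.incrementAndGet();  // total read operations so far
  }

  long getReadBytes() {
    return readBytes.get();
  }
}
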
[37/50] [abbrv] hadoop git commit: HDFS-13589: Add dfsAdmin command to query if upgrade is finalized. Contributed by Hanisha Koneru

2018-05-23 Thread xyao
HDFS-13589: Add dfsAdmin command to query if upgrade is finalized. Contributed 
by Hanisha Koneru


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc6d9d4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc6d9d4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc6d9d4c

Branch: refs/heads/HDDS-4
Commit: bc6d9d4c796d3c9d27dbbe3266031bf2adecde4f
Parents: 699a691
Author: Bharat Viswanadham 
Authored: Wed May 23 10:15:40 2018 -0700
Committer: Bharat Viswanadham 
Committed: Wed May 23 10:15:40 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 10 ++
 .../hadoop/hdfs/DistributedFileSystem.java  | 10 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java|  9 ++
 .../hadoop/hdfs/protocol/HdfsConstants.java |  7 ++
 .../ClientNamenodeProtocolTranslatorPB.java | 17 
 .../src/main/proto/ClientNamenodeProtocol.proto |  9 ++
 .../federation/router/RouterRpcServer.java  |  7 ++
 ...tNamenodeProtocolServerSideTranslatorPB.java | 17 
 .../hdfs/server/namenode/NameNodeRpcServer.java |  6 ++
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 97 
 .../src/site/markdown/HDFSCommands.md   |  2 +
 .../markdown/HDFSHighAvailabilityWithQJM.md |  2 +
 .../hadoop/hdfs/tools/TestDFSAdminWithHA.java   | 67 ++
 13 files changed, 260 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 09154d0..5f1b2bb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2341,6 +2341,16 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 }
   }
 
+  /**
+   * @see ClientProtocol#upgradeStatus()
+   */
+  public boolean upgradeStatus() throws IOException {
+checkOpen();
+try (TraceScope ignored = tracer.newScope("isUpgradeFinalized")) {
+  return namenode.upgradeStatus();
+}
+  }
+
   RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
   throws IOException {
 checkOpen();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 1e9ed09..82cdd8c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1534,6 +1534,16 @@ public class DistributedFileSystem extends FileSystem
   }
 
   /**
+   * Get status of upgrade - finalized or not.
+   * @return true if upgrade is finalized or if no upgrade is in progress and
+   * false otherwise.
+   * @throws IOException
+   */
+  public boolean upgradeStatus() throws IOException {
+return dfs.upgradeStatus();
+  }
+
+  /**
* Rolling upgrade: prepare/finalize/query.
*/
   public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index f5d5e82..7729e10 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -941,6 +941,15 @@ public interface ClientProtocol {
   void finalizeUpgrade() throws IOException;
 
   /**
+   * Get status of upgrade - finalized or not.
+   * @return true if upgrade is finalized or if no upgrade is in progress and
+   * false otherwise.
+   * @throws IOException
+   */
+  @Idempotent
+  boolean upgradeStatus() throws 

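End to end, the new RPC surfaces through DistributedFileSystem#upgradeStatus(), which the new DFSAdmin subcommand calls. A minimal client sketch using the API this commit adds (the UpgradeQuery wrapper itself is illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class UpgradeQuery {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    if (fs instanceof DistributedFileSystem) {
      // true if the upgrade is finalized, or if no upgrade is in progress
      boolean finalized = ((DistributedFileSystem) fs).upgradeStatus();
      System.out.println("Upgrade finalized: " + finalized);
    }
  }
}
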
[02/50] [abbrv] hadoop git commit: HDDS-71. Send ContainerType to Datanode during container creation. Contributed by Bharat Viswanadham.

2018-05-23 Thread xyao
HDDS-71. Send ContainerType to Datanode during container creation. Contributed 
by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/132a547d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/132a547d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/132a547d

Branch: refs/heads/HDDS-4
Commit: 132a547dea4081948c39c149c59d6453003fa277
Parents: 73e9120
Author: Mukul Kumar Singh 
Authored: Mon May 21 22:57:08 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Mon May 21 23:08:20 2018 +0530

--
 .../scm/storage/ContainerProtocolCalls.java |  2 ++
 .../main/proto/DatanodeContainerProtocol.proto  |  6 
 .../container/common/helpers/ContainerData.java | 36 
 .../common/impl/ContainerManagerImpl.java   |  7 
 4 files changed, 51 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/132a547d/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 5fbf373..d3af083 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -243,6 +243,8 @@ public final class ContainerProtocolCalls  {
 ContainerProtos.ContainerData.Builder containerData = ContainerProtos
 .ContainerData.newBuilder();
 containerData.setContainerID(containerID);
+containerData.setContainerType(ContainerProtos.ContainerType
+.KeyValueContainer);
 createRequest.setContainerData(containerData.build());
 
 String id = client.getPipeline().getLeader().getUuidString();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/132a547d/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 3479866..e7e5b2b 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -225,6 +225,12 @@ message ContainerData {
   optional int64 size = 7;
   optional int64 keyCount = 8;
   optional ContainerLifeCycleState state = 9 [default = OPEN];
+  optional ContainerType containerType = 10 [default = KeyValueContainer];
+  optional string containerDBType = 11;
+}
+
+enum ContainerType {
+  KeyValueContainer = 1;
 }
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/132a547d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
index 63111c8..2a079b0 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
@@ -22,6 +22,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+.ContainerType;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerLifeCycleState;
 import org.apache.hadoop.ozone.OzoneConsts;
 
@@ -47,6 +49,8 @@ public class ContainerData {
   private long maxSize;
   private long containerID;
   private ContainerLifeCycleState state;
+  private ContainerType containerType;
+  private String containerDBType;
 
   /**
* Constructs a  ContainerData Object.
@@ -99,9 +103,26 @@ public class ContainerData {
 if (protoData.hasSize()) {
   data.setMaxSize(protoData.getSize());
 }
+
+if(protoData.hasContainerType()) {
+  data.setContainerType(protoData.getContainerType());
+}
+
+if(protoData.hasContainerDBType()) {
+  data.setContainerDBType(protoData.getContainerDBType());
+}
+
 return data;
   }
 
+  public String getContainerDBType() {
+return containerDBType;
+  }
+
+  public void 

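The client-side effect of the new proto field, reduced to a sketch that mirrors the ContainerProtocolCalls hunk above (the helper class is illustrative):

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;

class CreateContainerSketch {
  static ContainerProtos.ContainerData dataWithType(long containerID) {
    return ContainerProtos.ContainerData.newBuilder()
        .setContainerID(containerID)
        // New in this change: the datanode is told which container
        // implementation to instantiate.
        .setContainerType(ContainerProtos.ContainerType.KeyValueContainer)
        .build();
  }
}
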
[03/50] [abbrv] hadoop git commit: YARN-8179: Preemption does not happen due to natural_termination_factor when DRF is used. Contributed by Kyungwan Nam.

2018-05-23 Thread xyao
YARN-8179: Preemption does not happen due to natural_termination_factor when 
DRF is used. Contributed by Kyungwan Nam.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b4c44bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b4c44bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b4c44bd

Branch: refs/heads/HDDS-4
Commit: 0b4c44bdeef62945b592d5761666ad026b629c0b
Parents: 132a547
Author: Eric E Payne 
Authored: Mon May 21 20:14:58 2018 +
Committer: Eric E Payne 
Committed: Mon May 21 20:14:58 2018 +

--
 .../capacity/PreemptableResourceCalculator.java |  7 ++-
 ...pacityPreemptionPolicyInterQueueWithDRF.java | 56 
 2 files changed, 61 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b4c44bd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
index 2d2cdf6..676c14f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
@@ -197,8 +197,11 @@ public class PreemptableResourceCalculator
*/
   Resource resToObtain = qT.toBePreempted;
   if (!isReservedPreemptionCandidatesSelector) {
-resToObtain = Resources.multiply(qT.toBePreempted,
-context.getNaturalTerminationFactor());
+if (Resources.greaterThan(rc, clusterResource, resToObtain,
+Resource.newInstance(0, 0))) {
+  resToObtain = Resources.multiplyAndNormalizeUp(rc, qT.toBePreempted,
+  context.getNaturalTerminationFactor(), Resource.newInstance(1, 1));
+}
   }
 
   // Only add resToObtain when it >= 0

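The underlying problem: with DRF and a small to-be-preempted amount, multiplying by a natural_termination_factor below 1 could floor the resource to zero, so preemption was never requested. multiplyAndNormalizeUp instead rounds up to the given step. A hedged illustration, assuming the usual Resources utility semantics (the demo class and values are made up):

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;

class NaturalTerminationDemo {
  public static void main(String[] args) {
    Resource toBePreempted = Resource.newInstance(4, 1); // 4 MB, 1 vcore
    // Plain multiply truncates: 4 * 0.2 and 1 * 0.2 can both floor to 0.
    Resource floored = Resources.multiply(toBePreempted, 0.2);
    // The fix rounds each dimension up to at least the step <1 MB, 1 vcore>.
    Resource stepped = Resources.multiplyAndNormalizeUp(
        new DominantResourceCalculator(), toBePreempted, 0.2,
        Resource.newInstance(1, 1));
    System.out.println(floored + " vs " + stepped);
  }
}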
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b4c44bd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
index 0d6d350..c8a1f0f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
@@ -18,15 +18,28 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
+import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
+import org.junit.Before;
 import org.junit.Test;
 
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import static org.mockito.Matchers.argThat;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 public class TestProportionalCapacityPreemptionPolicyInterQueueWithDRF
 extends ProportionalCapacityPreemptionPolicyMockFramework {
+
+  @Before
+  public void setup() {
+super.setup();
+rc = new DominantResourceCalculator();
+when(cs.getResourceCalculator()).thenReturn(rc);
+

[35/50] [abbrv] hadoop git commit: HDFS-13588. Fix TestFsDatasetImpl test failures on Windows. Contributed by Xiao Liang.

2018-05-23 Thread xyao
HDFS-13588. Fix TestFsDatasetImpl test failures on Windows. Contributed by Xiao 
Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0c9b7a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0c9b7a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0c9b7a8

Branch: refs/heads/HDDS-4
Commit: c0c9b7a8ef2618b7641a0452d9277abd26815de2
Parents: e83b943
Author: Inigo Goiri 
Authored: Wed May 23 09:46:35 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 09:46:35 2018 -0700

--
 .../server/datanode/fsdataset/impl/TestFsDatasetImpl.java| 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0c9b7a8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index d684950..9270be8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.BlockReader;
@@ -666,7 +667,8 @@ public class TestFsDatasetImpl {
   TimeUnit.MILLISECONDS);
  config.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
 
-  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
+  cluster = new MiniDFSCluster.Builder(config,
+  GenericTestUtils.getRandomizedTestDir()).numDataNodes(1).build();
   cluster.waitActive();
   FileSystem fs = cluster.getFileSystem();
   DataNode dataNode = cluster.getDataNodes().get(0);
@@ -688,7 +690,7 @@ public class TestFsDatasetImpl {
 // Remove write and execute access so that checkDiskErrorThread detects
 // this volume is bad.
 finalizedDir.setExecutable(false);
-finalizedDir.setWritable(false);
+assertTrue(FileUtil.setWritable(finalizedDir, false));
   }
   Assert.assertTrue("Reference count for the volume should be greater "
   + "than 0", volume.getReferenceCount() > 0);
@@ -709,7 +711,7 @@ public class TestFsDatasetImpl {
   } catch (IOException ioe) {
 GenericTestUtils.assertExceptionContains(info.getXferAddr(), ioe);
   }
-  finalizedDir.setWritable(true);
+  assertTrue(FileUtil.setWritable(finalizedDir, true));
   finalizedDir.setExecutable(true);
 } finally {
 cluster.shutdown();

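Two things change in the test: the return value of setWritable is now asserted, and the call goes through the platform-aware FileUtil wrapper, since java.io.File#setWritable(false) is effectively a silent no-op for directories on Windows. The pattern, isolated (class and method names are illustrative):

import java.io.File;
import org.apache.hadoop.fs.FileUtil;
import static org.junit.Assert.assertTrue;

class PermissionToggle {
  static void denyWrites(File dir) {
    // FileUtil.setWritable handles Windows as well; asserting on its return
    // value turns a silent permission failure into a visible test failure.
    assertTrue(FileUtil.setWritable(dir, false));
  }
}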

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[43/50] [abbrv] hadoop git commit: YARN-8344. Missing nm.stop() in TestNodeManagerResync to fix testKillContainersOnResync. Contributed by Giovanni Matteo Fumarola.

2018-05-23 Thread xyao
YARN-8344. Missing nm.stop() in TestNodeManagerResync to fix 
testKillContainersOnResync. Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e99e5bf1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e99e5bf1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e99e5bf1

Branch: refs/heads/HDDS-4
Commit: e99e5bf104e9664bc1b43a2639d87355d47a77e2
Parents: cddbbe5
Author: Inigo Goiri 
Authored: Wed May 23 14:15:26 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 14:15:26 2018 -0700

--
 .../nodemanager/TestNodeManagerResync.java  | 87 +++-
 1 file changed, 48 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e99e5bf1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
index 97e9922..cf33775 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
@@ -150,7 +150,6 @@ public class TestNodeManagerResync {
 testContainerPreservationOnResyncImpl(nm, true);
   }
 
-  @SuppressWarnings("unchecked")
   protected void testContainerPreservationOnResyncImpl(TestNodeManager1 nm,
   boolean isWorkPreservingRestartEnabled)
   throws IOException, YarnException, InterruptedException {
@@ -186,32 +185,35 @@ public class TestNodeManagerResync {
 }
   }
 
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings("resource")
   @Test(timeout=1)
   public void testNMshutdownWhenResyncThrowException() throws IOException,
   InterruptedException, YarnException {
 NodeManager nm = new TestNodeManager3();
 YarnConfiguration conf = createNMConfig();
-nm.init(conf);
-nm.start();
-Assert.assertEquals(1, ((TestNodeManager3) nm).getNMRegistrationCount());
-nm.getNMDispatcher().getEventHandler()
-.handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
-
-synchronized (isNMShutdownCalled) {
-  while (isNMShutdownCalled.get() == false) {
-try {
-  isNMShutdownCalled.wait();
-} catch (InterruptedException e) {
+try {
+  nm.init(conf);
+  nm.start();
+  Assert.assertEquals(1, ((TestNodeManager3) nm).getNMRegistrationCount());
+  nm.getNMDispatcher().getEventHandler()
+  .handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
+
+  synchronized (isNMShutdownCalled) {
+while (!isNMShutdownCalled.get()) {
+  try {
+isNMShutdownCalled.wait();
+  } catch (InterruptedException e) {
+  }
 }
   }
-}
 
-Assert.assertTrue("NM shutdown not called.",isNMShutdownCalled.get());
-nm.stop();
+  Assert.assertTrue("NM shutdown not called.", isNMShutdownCalled.get());
+} finally {
+  nm.stop();
+}
   }
 
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings("resource")
   @Test(timeout=6)
   public void testContainerResourceIncreaseIsSynchronizedWithRMResync()
   throws IOException, InterruptedException, YarnException {
@@ -219,28 +221,32 @@ public class TestNodeManagerResync {
 YarnConfiguration conf = createNMConfig();
 conf.setBoolean(
 YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
-nm.init(conf);
-nm.start();
-// Start a container and make sure it is in RUNNING state
-((TestNodeManager4)nm).startContainer();
-// Simulate a container resource increase in a separate thread
-((TestNodeManager4)nm).updateContainerResource();
-// Simulate RM restart by sending a RESYNC event
-LOG.info("Sending out RESYNC event");
-nm.getNMDispatcher().getEventHandler().handle(
-new NodeManagerEvent(NodeManagerEventType.RESYNC));
 try {
-  syncBarrier.await();
-} catch (BrokenBarrierException e) {
-  e.printStackTrace();
+  nm.init(conf);
+  nm.start();
+  // Start a container and make sure it is in RUNNING state
+  ((TestNodeManager4) nm).startContainer();
+  // Simulate a container 

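The fix is the classic lifecycle pattern: stop the service in finally so a failed assertion cannot leak a running NodeManager into later tests. Reduced to its shape (the wrapper class is illustrative):

import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;

class ResyncTestShape {
  void runSafely(NodeManager nm, YarnConfiguration conf) {
    try {
      nm.init(conf);
      nm.start();
      // ... drive the RESYNC event and assert on the outcome ...
    } finally {
      nm.stop(); // previously skipped when an assertion threw
    }
  }
}
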
[49/50] [abbrv] hadoop git commit: HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.

2018-05-23 Thread xyao
HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/293d4d69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/293d4d69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/293d4d69

Branch: refs/heads/HDDS-4
Commit: 293d4d69ba77c63755dabbcf1731337786bf5b15
Parents: 97ce8bf
Author: Xiaoyu Yao 
Authored: Wed May 9 15:56:03 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed May 23 14:53:51 2018 -0700

--
 hadoop-hdds/common/src/main/resources/ozone-default.xml | 12 
 1 file changed, 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/293d4d69/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 9f7fc84..1857fc4 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -129,18 +129,6 @@
 
   </property>
   <property>
-    <name>dfs.ratis.client.request.timeout.duration</name>
-    <value>3s</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for ratis client request.</description>
-  </property>
-  <property>
-    <name>dfs.ratis.server.request.timeout.duration</name>
-    <value>3s</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for ratis server request.</description>
-  </property>
-  <property>
     <name>ozone.container.report.interval</name>
     <value>6ms</value>
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[14/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

2018-05-23 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css.map
--
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css.map 
b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css.map
new file mode 100644
index 000..6c7fa40
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css.map
@@ -0,0 +1 @@
+{"version":3,"sources":["less/normalize.less","less/print.less","bootstrap.css","dist/css/bootstrap.css","less/glyphicons.less","less/scaffolding.less","less/mixins/vendor-prefixes.less","less/mixins/tab-focus.less","less/mixins/image.less","less/type.less","less/mixins/text-emphasis.less","less/mixins/background-variant.less","less/mixins/text-overflow.less","less/code.less","less/grid.less","less/mixins/grid.less","less/mixins/grid-framework.less","less/tables.less","less/mixins/table-row.less","less/forms.less","less/mixins/forms.less","less/buttons.less","less/mixins/buttons.less","less/mixins/opacity.less","less/component-animations.less","less/dropdowns.less","less/mixins/nav-divider.less","less/mixins/reset-filter.less","less/button-groups.less","less/mixins/border-radius.less","less/input-groups.less","less/navs.less","less/navbar.less","less/mixins/nav-vertical-align.less","less/utilities.less","less/breadcrumbs.less","less/pagination.less","less/mixins/pagination.less","le
 
ss/pager.less","less/labels.less","less/mixins/labels.less","less/badges.less","less/jumbotron.less","less/thumbnails.less","less/alerts.less","less/mixins/alerts.less","less/progress-bars.less","less/mixins/gradients.less","less/mixins/progress-bar.less","less/media.less","less/list-group.less","less/mixins/list-group.less","less/panels.less","less/mixins/panels.less","less/responsive-embed.less","less/wells.less","less/close.less","less/modals.less","less/tooltip.less","less/mixins/reset-text.less","less/popovers.less","less/carousel.less","less/mixins/clearfix.less","less/mixins/center-block.less","less/mixins/hide-text.less","less/responsive-utilities.less","less/mixins/responsive-visibility.less"],"names":[],"mappings":"4EAQA,KACE,YAAA,WACA,yBAAA,KACA,qBAAA,KAOF,KACE,OAAA,EAaF,QAAA,MAAA,QAAA,WAAA,OAAA,OAAA,OAAA,OAAA,KAAA,KAAA,IAAA,QAAA,QAaE,QAAA,MAQF,MAAA,OAAA,SAAA,MAIE,QAAA,aACA,eAAA,SAQF,sBACE,QAAA,KACA,OAAA,EAQF,SAAA,SAEE,QAAA,KAUF,EACE,iBAAA,YAQF,SAAA,QAEE,QAAA,EAUF,YAC
 
E,cAAA,IAAA,OAOF,EAAA,OAEE,YAAA,IAOF,IACE,WAAA,OAQF,GACE,OAAA,MAAA,EACA,UAAA,IAOF,KACE,MAAA,KACA,WAAA,KAOF,MACE,UAAA,IAOF,IAAA,IAEE,SAAA,SACA,UAAA,IACA,YAAA,EACA,eAAA,SAGF,IACE,IAAA,MAGF,IACE,OAAA,OAUF,IACE,OAAA,EAOF,eACE,SAAA,OAUF,OACE,OAAA,IAAA,KAOF,GACE,OAAA,EAAA,mBAAA,YAAA,gBAAA,YACA,WAAA,YAOF,IACE,SAAA,KAOF,KAAA,IAAA,IAAA,KAIE,YAAA,UAAA,UACA,UAAA,IAkBF,OAAA,MAAA,SAAA,OAAA,SAKE,OAAA,EACA,KAAA,QACA,MAAA,QAOF,OACE,SAAA,QAUF,OAAA,OAEE,eAAA,KAWF,OAAA,wBAAA,kBAAA,mBAIE,mBAAA,OACA,OAAA,QAOF,iBAAA,qBAEE,OAAA,QAOF,yBAAA,wBAEE,QAAA,EACA,OAAA,EAQF,MACE,YAAA,OAWF,qBAAA,kBAEE,mBAAA,WAAA,gBAAA,WAAA,WAAA,WACA,QAAA,EASF,8CAAA,8CAEE,OAAA,KAQF,mBACE,mBAAA,YACA,gBAAA,YAAA,WAAA,YAAA,mBAAA,UASF,iDAAA,8CAEE,mBAAA,KAOF,SACE,QAAA,MAAA,OAAA,MACA,OAAA,EAAA,IACA,OAAA,IAAA,MAAA,OAQF,OACE,QAAA,EACA,OAAA,EAOF,SACE,SAAA,KAQF,SACE,YAAA,IAUF,MACE,eAAA,EACA,gBAAA,SAGF,GAAA,GAEE,QAAA,uFCjUF,aA7FI,EAAA,OAAA,QAGI,MAAA,eACA,YAAA,eACA,WAAA,cAAA,mBAAA,eACA,WAAA,eAGJ,EAAA,UAEI,gBAAA,UAGJ,cACI,QAAA,KAAA,WAAA,IAGJ,kBACI
 
,QAAA,KAAA,YAAA,IAKJ,6BAAA,mBAEI,QAAA,GAGJ,WAAA,IAEI,OAAA,IAAA,MAAA,KC4KL,kBAAA,MDvKK,MC0KL,QAAA,mBDrKK,IE8KN,GDLC,kBAAA,MDrKK,ICwKL,UAAA,eCUD,GF5KM,GE2KN,EF1KM,QAAA,ECuKL,OAAA,ECSD,GF3KM,GCsKL,iBAAA,MD/JK,QCkKL,QAAA,KCSD,YFtKU,oBCiKT,iBAAA,eD7JK,OCgKL,OAAA,IAAA,MAAA,KD5JK,OC+JL,gBAAA,mBCSD,UFpKU,UC+JT,iBAAA,eDzJS,mBEkKV,mBDLC,OAAA,IAAA,MAAA,gBEjPD,WACA,YAAA,uBFsPD,IAAA,+CE7OC,IAAK,sDAAuD,4BAA6B,iDAAkD,gBAAiB,gDAAiD,eAAgB,+CAAgD,mBAAoB,2EAA4E,cAE7W,WACA,SAAA,SACA,IAAA,IACA,QAAA,aACA,YAAA,uBACA,WAAA,OACA,YAAA,IACA,YAAA,EAIkC,uBAAA,YAAW,wBAAA,UACX,2BAAW,QAAA,QAEX,uBDuPlC,QAAS,QCtPyB,sBFiPnC,uBEjP8C,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,2BAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,6BAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,2BAAW,QAAA,QACX,qBAAW,QAAA,QACX,0BAAW,QAAA,QACX,qBAAW,QAAA,QACX,yBAAW,QAAA,QACX,0BAAW,QAAA,QACX,2BAAW,QAAA,QACX,sBAAW,QAAA,QACX,yBAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX
 

[10/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

2018-05-23 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/js/jquery.min.js
--
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/js/jquery.min.js 
b/hadoop-ozone/docs/themes/ozonedoc/static/js/jquery.min.js
new file mode 100644
index 000..e836475
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/static/js/jquery.min.js
@@ -0,0 +1,5 @@
+/*! jQuery v1.12.4 | (c) jQuery Foundation | jquery.org/license */
+!function(a,b){"object"==typeof module&&"object"==typeof 
module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw
 new Error("jQuery requires a window with a document");return 
b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var 
c=[],d=a.document,e=c.slice,f=c.concat,g=c.push,h=c.indexOf,i={},j=i.toString,k=i.hasOwnProperty,l={},m="1.12.4",n=function(a,b){return
 new 
n.fn.init(a,b)},o=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,p=/^-ms-/,q=/-([\da-z])/gi,r=function(a,b){return
 
b.toUpperCase()};n.fn=n.prototype={jquery:m,constructor:n,selector:"",length:0,toArray:function(){return
 e.call(this)},get:function(a){return 
null!=a?0>a?this[a+this.length]:this[a]:e.call(this)},pushStack:function(a){var 
b=n.merge(this.constructor(),a);return 
b.prevObject=this,b.context=this.context,b},each:function(a){return 
n.each(this,a)},map:function(a){return 
this.pushStack(n.map(this,function(b,c){return 
a.call(b,c,b)}))},slice:function(){return this.pushStack(e.apply(thi
 s,arguments))},first:function(){return this.eq(0)},last:function(){return 
this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return 
this.pushStack(c>=0&>c?[this[c]]:[])},end:function(){return 
this.prevObject||this.constructor()},push:g,sort:c.sort,splice:c.splice},n.extend=n.fn.extend=function(){var
 
a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof
 g&&(j=g,g=arguments[h]||{},h++),"object"==typeof 
g||n.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(e=arguments[h]))for(d
 in 
e)a=g[d],c=e[d],g!==c&&(j&&&(n.isPlainObject(c)||(b=n.isArray(c)))?(b?(b=!1,f=a&(a)?a:[]):f=a&(a)?a:{},g[d]=n.extend(j,f,c)):void
 0!==c&&(g[d]=c));return 
g},n.extend({expando:"jQuery"+(m+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw
 new 
Error(a)},noop:function(){},isFunction:function(a){return"function"===n.type(a)},isArray:Array.isArray||function(a){return"array"===n.type(a)},isWindow:function(a){return
 null!=
 a&==a.window},isNumeric:function(a){var 
b=a&();return!n.isArray(a)&(b)+1>=0},isEmptyObject:function(a){var
 b;for(b in a)return!1;return!0},isPlainObject:function(a){var 
b;if(!a||"object"!==n.type(a)||a.nodeType||n.isWindow(a))return!1;try{if(a.constructor&&!k.call(a,"constructor")&&!k.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}if(!l.ownFirst)for(b
 in a)return k.call(a,b);for(b in a);return void 
0===b||k.call(a,b)},type:function(a){return null==a?a+"":"object"==typeof 
a||"function"==typeof a?i[j.call(a)]||"object":typeof 
a},globalEval:function(b){b&(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return
 a.replace(p,"ms-").replace(q,r)},nodeName:function(a,b){return 
a.nodeName&()===b.toLowerCase()},each:function(a,b){var 
c,d=0;if(s(a)){for(c=a.length;c>d;d++)if(b.call(a[d],d,a[d])===!1)break}else 
for(d in a)if(b.call(a[d],d,a[d])===!1)break;return a},trim:function(a){retur
 n null==a?"":(a+"").replace(o,"")},makeArray:function(a,b){var c=b||[];return 
null!=a&&(s(Object(a))?n.merge(c,"string"==typeof 
a?[a]:a):g.call(c,a)),c},inArray:function(a,b,c){var d;if(b){if(h)return 
h.call(b,a,c);for(d=b.length,c=c?0>c?Math.max(0,d+c):c:0;d>c;c++)if(c in 
b&[c]===a)return c}return-1},merge:function(a,b){var 
c=+b.length,d=0,e=a.length;while(c>d)a[e++]=b[d++];if(c!==c)while(void 
0!==b[d])a[e++]=b[d++];return a.length=e,a},grep:function(a,b,c){for(var 
d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&(a[f]);return 
e},map:function(a,b,c){var 
d,e,g=0,h=[];if(s(a))for(d=a.length;d>g;g++)e=b(a[g],g,c),null!=e&(e);else
 for(g in a)e=b(a[g],g,c),null!=e&(e);return 
f.apply([],h)},guid:1,proxy:function(a,b){var c,d,f;return"string"==typeof 
b&&(f=a[b],b=a,a=f),n.isFunction(a)?(c=e.call(arguments,2),d=function(){return 
a.apply(b||this,c.concat(e.call(arguments)))},d.guid=a.guid=a.guid||n.guid++,d):void
 0},now:function(){return+new Date},support:l}),"fu
 nction"==typeof 
Symbol&&(n.fn[Symbol.iterator]=c[Symbol.iterator]),n.each("Boolean Number 
String Function Array Date RegExp Object Error Symbol".split(" 
"),function(a,b){i["[object "+b+"]"]=b.toLowerCase()});function s(a){var 
b=!!a&&"length"in 
a&,c=n.type(a);return"function"===c||n.isWindow(a)?!1:"array"===c||0===b||"number"==typeof
 b&>0& in a}var t=function(a){var 
b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new 
Date,v=a.document,w=0,x=0,y=ga(),z=ga(),A=ga(),B=function(a,b){return 

[05/50] [abbrv] hadoop git commit: HADOOP-15474. Rename properties introduced for <tags>. Contributed by Zsolt 

2018-05-23 Thread xyao
HADOOP-15474. Rename properties introduced for . Contributed by Zsolt 
Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57c2feb0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57c2feb0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57c2feb0

Branch: refs/heads/HDDS-4
Commit: 57c2feb0d3ed0bb4f8642300433a35f5e28071c9
Parents: 5e88126
Author: Mukul Kumar Singh 
Authored: Tue May 22 13:33:31 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Tue May 22 13:33:31 2018 +0530

--
 .../org/apache/hadoop/conf/Configuration.java | 18 +++---
 .../hadoop/fs/CommonConfigurationKeysPublic.java  | 15 +++
 .../src/main/resources/core-default.xml   | 11 ++-
 .../org/apache/hadoop/conf/TestConfiguration.java |  4 ++--
 4 files changed, 42 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57c2feb0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 52f20b0..19bd5da 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -198,8 +198,8 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank;
  * Tags
  *
  * Optionally we can tag related properties together by using tag
- * attributes. System tags are defined by hadoop.system.tags property. Users
- * can define there own custom tags in  hadoop.custom.tags property.
+ * attributes. System tags are defined by hadoop.tags.system property. Users
+ * can define there own custom tags in  hadoop.tags.custom property.
  *
  * For example, we can tag existing property as:
  * 
@@ -3180,12 +3180,24 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   }
 
   /**
-   * Add tags defined in HADOOP_SYSTEM_TAGS, HADOOP_CUSTOM_TAGS.
+   * Add tags defined in HADOOP_TAGS_SYSTEM, HADOOP_TAGS_CUSTOM.
* @param prop
*/
   public void addTags(Properties prop) {
 // Get all system tags
 try {
+  if (prop.containsKey(CommonConfigurationKeys.HADOOP_TAGS_SYSTEM)) {
+String systemTags = prop.getProperty(CommonConfigurationKeys
+.HADOOP_TAGS_SYSTEM);
+Arrays.stream(systemTags.split(",")).forEach(tag -> TAGS.add(tag));
+  }
+  // Get all custom tags
+  if (prop.containsKey(CommonConfigurationKeys.HADOOP_TAGS_CUSTOM)) {
+String customTags = prop.getProperty(CommonConfigurationKeys
+.HADOOP_TAGS_CUSTOM);
+Arrays.stream(customTags.split(",")).forEach(tag -> TAGS.add(tag));
+  }
+
   if (prop.containsKey(CommonConfigurationKeys.HADOOP_SYSTEM_TAGS)) {
 String systemTags = prop.getProperty(CommonConfigurationKeys
 .HADOOP_SYSTEM_TAGS);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57c2feb0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 8cd753a..8837cfb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -881,7 +881,22 @@ public class CommonConfigurationKeysPublic {
   "credential$",
   "oauth.*token$",
   HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS);
+
+  /**
+   * @deprecated Please use
+   * {@link CommonConfigurationKeysPublic#HADOOP_TAGS_SYSTEM} instead
+   * See https://issues.apache.org/jira/browse/HADOOP-15474
+   */
   public static final String HADOOP_SYSTEM_TAGS = "hadoop.system.tags";
+
+  /**
+   * @deprecated Please use
+   * {@link CommonConfigurationKeysPublic#HADOOP_TAGS_CUSTOM} instead
+   * See https://issues.apache.org/jira/browse/HADOOP-15474
+   */
   public static final String HADOOP_CUSTOM_TAGS = "hadoop.custom.tags";
+
+  public static final String HADOOP_TAGS_SYSTEM = "hadoop.tags.system";
+  public static final String HADOOP_TAGS_CUSTOM = "hadoop.tags.custom";
 }
 
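In use, the renamed keys only change where the tag lists are declared; tag queries themselves go through the existing API. A hedged sketch, assuming Configuration#getAllPropertiesByTag (the tag-query method in this line of Hadoop) and a core-site.xml that sets hadoop.tags.custom to MYTEAM and tags some property with it:

import org.apache.hadoop.conf.Configuration;

public class TagsByName {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Returns every property carrying <tag>MYTEAM</tag>; only the key names
    // (hadoop.tags.system / hadoop.tags.custom) changed in this commit.
    System.out.println(conf.getAllPropertiesByTag("MYTEAM"));
  }
}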


[32/50] [abbrv] hadoop git commit: YARN-8297. Incorrect ATS Url used for Wire encrypted cluster.(addendum). Contributed by Sunil G.

2018-05-23 Thread xyao
YARN-8297. Incorrect ATS Url used for Wire encrypted cluster.(addendum). 
Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f61e3e75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f61e3e75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f61e3e75

Branch: refs/heads/HDDS-4
Commit: f61e3e752eb1cf4a08030da04bc3d6c5a2b3926d
Parents: 9837ca9
Author: Rohith Sharma K S 
Authored: Wed May 23 18:31:03 2018 +0530
Committer: Rohith Sharma K S 
Committed: Wed May 23 18:31:03 2018 +0530

--
 .../src/main/webapp/app/initializers/loader.js  | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f61e3e75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
index 53f9c44..01daa7a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -31,7 +31,7 @@ function getYarnHttpProtocolScheme(rmhost, application) {
   $.ajax({
 type: 'GET',
 dataType: 'json',
-async: true,
+async: false,
 context: this,
 url: httpUrl,
 success: function(data) {
@@ -44,7 +44,7 @@ function getYarnHttpProtocolScheme(rmhost, application) {
   application.advanceReadiness();
 }
   });
-  return protocolScheme == "HTTPS_ONLY";
+  return protocolScheme;
 }
 
 function getTimeLineURL(rmhost, isHttpsSchemeEnabled) {
@@ -97,7 +97,9 @@ function updateConfigs(application) {
 
   Ember.Logger.log("RM Address: " + rmhost);
 
-  var isHttpsSchemeEnabled = getYarnHttpProtocolScheme(rmhost, application);
+  var protocolSchemeFromRM = getYarnHttpProtocolScheme(rmhost, application);
+  Ember.Logger.log("Is protocol scheme https? " + (protocolSchemeFromRM == 
"HTTPS_ONLY"));
+  var isHttpsSchemeEnabled = (protocolSchemeFromRM == "HTTPS_ONLY");
   if(!ENV.hosts.timelineWebAddress) {
 var timelinehost = "";
 $.ajax({
@@ -137,7 +139,7 @@ function updateConfigs(application) {
 $.ajax({
   type: 'GET',
   dataType: 'json',
-  async: true,
+  async: false,
   context: this,
   url: getTimeLineV1URL(rmhost, isHttpsSchemeEnabled),
   success: function(data) {
@@ -171,7 +173,7 @@ function updateConfigs(application) {
 $.ajax({
   type: 'GET',
   dataType: 'json',
-  async: true,
+  async: false,
   context: this,
   url: getSecurityURL(rmhost),
   success: function(data) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[31/50] [abbrv] hadoop git commit: YARN-8285. Remove unused environment variables from the Docker runtime. Contributed by Eric Badger

2018-05-23 Thread xyao
YARN-8285. Remove unused environment variables from the Docker runtime. 
Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9837ca9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9837ca9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9837ca9c

Branch: refs/heads/HDDS-4
Commit: 9837ca9cc746573571029f9fb996a1be10b588ab
Parents: 34e8b9f
Author: Shane Kumpf 
Authored: Wed May 23 06:43:44 2018 -0600
Committer: Shane Kumpf 
Committed: Wed May 23 06:43:44 2018 -0600

--
 .../linux/runtime/DockerLinuxContainerRuntime.java  | 9 -
 1 file changed, 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9837ca9c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 787e892..e131e9d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -106,9 +106,6 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  * will be used to launch the Docker container.
  *   
  *   
- * {@code YARN_CONTAINER_RUNTIME_DOCKER_IMAGE_FILE} is currently ignored.
- *   
- *   
  * {@code YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE} controls
  * whether the Docker container's default command is overridden.  When set
  * to {@code true}, the Docker container's command will be
@@ -198,9 +195,6 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   public static final String ENV_DOCKER_CONTAINER_IMAGE =
   "YARN_CONTAINER_RUNTIME_DOCKER_IMAGE";
   @InterfaceAudience.Private
-  public static final String ENV_DOCKER_CONTAINER_IMAGE_FILE =
-  "YARN_CONTAINER_RUNTIME_DOCKER_IMAGE_FILE";
-  @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_RUN_OVERRIDE_DISABLE =
   "YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE";
   @InterfaceAudience.Private
@@ -216,9 +210,6 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   public static final String ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER =
   "YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER";
   @InterfaceAudience.Private
-  public static final String ENV_DOCKER_CONTAINER_RUN_ENABLE_USER_REMAPPING =
-  "YARN_CONTAINER_RUNTIME_DOCKER_RUN_ENABLE_USER_REMAPPING";
-  @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_MOUNTS =
   "YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS";
   @InterfaceAudience.Private

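The variables that remain are the ones an application actually sets on its container launch context to opt into the Docker runtime; the two removed here were declared but never read anywhere. For context, a hedged sketch of typical usage (YARN_CONTAINER_RUNTIME_TYPE is the standard runtime selector; the helper and map contents are illustrative):

import java.util.HashMap;
import java.util.Map;

class DockerEnvSketch {
  static Map<String, String> dockerEnv(String image) {
    Map<String, String> env = new HashMap<>();
    env.put("YARN_CONTAINER_RUNTIME_TYPE", "docker");          // select Docker runtime
    env.put("YARN_CONTAINER_RUNTIME_DOCKER_IMAGE", image);     // image to launch
    return env;
  }
}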

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[40/50] [abbrv] hadoop git commit: HDFS-13587. TestQuorumJournalManager fails on Windows. Contributed by Anbang Hu.

2018-05-23 Thread xyao
HDFS-13587. TestQuorumJournalManager fails on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c13dea87
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c13dea87
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c13dea87

Branch: refs/heads/HDDS-4
Commit: c13dea87d9de7a9872fc8b0c939b41b1666a61e5
Parents: 51ce02b
Author: Inigo Goiri 
Authored: Wed May 23 11:36:03 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 11:36:03 2018 -0700

--
 .../org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java | 5 +
 .../hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java   | 3 ++-
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c13dea87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
index 2314e22..f936d75 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager;
 import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
 
 import com.google.common.base.Joiner;
@@ -50,6 +51,10 @@ public class MiniJournalCluster {
 private int numJournalNodes = 3;
 private boolean format = true;
 private final Configuration conf;
+
+static {
+  DefaultMetricsSystem.setMiniClusterMode(true);
+}
 
 public Builder(Configuration conf) {
   this.conf = conf;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c13dea87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
index 34a0348..69856ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@ -93,7 +93,8 @@ public class TestQuorumJournalManager {
 
 conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
 
 cluster = new MiniJournalCluster.Builder(conf)
-  .build();
+.baseDir(GenericTestUtils.getRandomizedTestDir().getAbsolutePath())
+.build();
 cluster.waitActive();
 
 qjm = createSpyingQJM();

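The fix combines two ingredients: a per-run base directory, so Windows file locks left by a previous run cannot collide, and mini-cluster metrics mode, so several JournalNodes in one JVM do not trip "metrics source already exists". Isolated into a sketch (the wrapper class is illustrative):

import java.io.File;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.test.GenericTestUtils;

class WindowsSafeTestSetup {
  static File testBaseDir() {
    // Allow duplicate metrics sources when multiple daemons share a JVM.
    DefaultMetricsSystem.setMiniClusterMode(true);
    // Fresh randomized directory per run, as the builder change above uses.
    return GenericTestUtils.getRandomizedTestDir();
  }
}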

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[28/50] [abbrv] hadoop git commit: Additional check when unpacking archives. Contributed by Jason Lowe and Akira Ajisaka.

2018-05-23 Thread xyao
Additional check when unpacking archives. Contributed by Jason Lowe and Akira 
Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/745f203e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/745f203e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/745f203e

Branch: refs/heads/HDDS-4
Commit: 745f203e577bacb35b042206db94615141fa5e6f
Parents: 1d2640b
Author: Akira Ajisaka 
Authored: Wed May 23 17:15:57 2018 +0900
Committer: Akira Ajisaka 
Committed: Wed May 23 17:16:23 2018 +0900

--
 .../java/org/apache/hadoop/fs/FileUtil.java | 18 -
 .../java/org/apache/hadoop/fs/TestFileUtil.java | 40 +---
 2 files changed, 51 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/745f203e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 8743be5..5ef78f2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -617,11 +617,16 @@ public class FileUtil {
   throws IOException {
 try (ZipInputStream zip = new ZipInputStream(inputStream)) {
   int numOfFailedLastModifiedSet = 0;
+  String targetDirPath = toDir.getCanonicalPath() + File.separator;
   for(ZipEntry entry = zip.getNextEntry();
   entry != null;
   entry = zip.getNextEntry()) {
 if (!entry.isDirectory()) {
   File file = new File(toDir, entry.getName());
+  if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+throw new IOException("expanding " + entry.getName()
++ " would create file outside of " + toDir);
+  }
   File parent = file.getParentFile();
   if (!parent.mkdirs() &&
   !parent.isDirectory()) {
@@ -656,12 +661,17 @@ public class FileUtil {
 
 try {
   entries = zipFile.entries();
+  String targetDirPath = unzipDir.getCanonicalPath() + File.separator;
   while (entries.hasMoreElements()) {
 ZipEntry entry = entries.nextElement();
 if (!entry.isDirectory()) {
   InputStream in = zipFile.getInputStream(entry);
   try {
 File file = new File(unzipDir, entry.getName());
+if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+  throw new IOException("expanding " + entry.getName()
+  + " would create file outside of " + unzipDir);
+}
 if (!file.getParentFile().mkdirs()) {
   if (!file.getParentFile().isDirectory()) {
 throw new IOException("Mkdirs failed to create " +
@@ -944,6 +954,13 @@ public class FileUtil {
 
   private static void unpackEntries(TarArchiveInputStream tis,
   TarArchiveEntry entry, File outputDir) throws IOException {
+String targetDirPath = outputDir.getCanonicalPath() + File.separator;
+File outputFile = new File(outputDir, entry.getName());
+if (!outputFile.getCanonicalPath().startsWith(targetDirPath)) {
+  throw new IOException("expanding " + entry.getName()
+  + " would create entry outside of " + outputDir);
+}
+
 if (entry.isDirectory()) {
   File subDir = new File(outputDir, entry.getName());
   if (!subDir.mkdirs() && !subDir.isDirectory()) {
@@ -966,7 +983,6 @@ public class FileUtil {
   return;
 }
 
-File outputFile = new File(outputDir, entry.getName());
 if (!outputFile.getParentFile().exists()) {
   if (!outputFile.getParentFile().mkdirs()) {
 throw new IOException("Mkdirs failed to create tar internal dir "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/745f203e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
index 39f2f6b..7218a1b 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNull;
 import static 
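
The canonical-path check added above is the standard defense against zip-slip path traversal, where an archive entry named along the lines of "../../etc/evil" would escape the extraction directory. A self-contained sketch of the same pattern in generic Java (illustrative, not the Hadoop FileUtil code itself):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;

public class SafeUnzip {
  /** Extracts a zip stream, rejecting entries that resolve outside toDir. */
  public static void unzip(InputStream in, File toDir) throws IOException {
    // Canonicalize once; the trailing separator prevents a prefix match
    // against a sibling such as /tmp/out-evil when toDir is /tmp/out.
    String targetDirPath = toDir.getCanonicalPath() + File.separator;
    try (ZipInputStream zip = new ZipInputStream(in)) {
      for (ZipEntry entry = zip.getNextEntry(); entry != null;
           entry = zip.getNextEntry()) {
        if (entry.isDirectory()) {
          continue;
        }
        File file = new File(toDir, entry.getName());
        // Reject "../" tricks: the canonical path must stay under toDir.
        if (!file.getCanonicalPath().startsWith(targetDirPath)) {
          throw new IOException("expanding " + entry.getName()
              + " would create file outside of " + toDir);
        }
        File parent = file.getParentFile();
        if (!parent.mkdirs() && !parent.isDirectory()) {
          throw new IOException("Mkdirs failed to create " + parent);
        }
        try (FileOutputStream out = new FileOutputStream(file)) {
          zip.transferTo(out);  // InputStream.transferTo, Java 9+
        }
      }
    }
  }
}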

[46/50] [abbrv] hadoop git commit: HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.

2018-05-23 Thread xyao
HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8ab025a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8ab025a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8ab025a

Branch: refs/heads/HDDS-4
Commit: f8ab025a8ed91966d26579ec8ab979de9d0d9f90
Parents: d726156
Author: Xiaoyu Yao 
Authored: Wed May 9 15:56:03 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed May 23 14:53:51 2018 -0700

--
 .../authentication/util/KerberosUtil.java   |   2 +-
 .../conf/TestConfigurationFieldsBase.java   |   2 +
 .../java/org/apache/hadoop/hdds/HddsUtils.java  |  13 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  10 +-
 .../scm/protocol/ScmBlockLocationProtocol.java  |   3 +
 .../StorageContainerLocationProtocol.java   |   4 +
 .../protocolPB/ScmBlockLocationProtocolPB.java  |   6 +
 .../StorageContainerLocationProtocolPB.java |   4 +
 .../apache/hadoop/ozone/OzoneConfigKeys.java|   5 +
 .../common/src/main/resources/ozone-default.xml |  41 +++-
 .../StorageContainerDatanodeProtocol.java   |   4 +
 .../StorageContainerDatanodeProtocolPB.java |   6 +
 .../scm/server/StorageContainerManager.java |  49 -
 .../StorageContainerManagerHttpServer.java  |   5 +-
 .../ozone/client/protocol/ClientProtocol.java   |   3 +
 hadoop-ozone/common/src/main/bin/start-ozone.sh |   7 +
 hadoop-ozone/common/src/main/bin/stop-ozone.sh  |  13 +-
 hadoop-ozone/integration-test/pom.xml   |   6 +
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |  17 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java| 205 +++
 .../ozone/TestStorageContainerManager.java  |   4 +-
 21 files changed, 368 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8ab025a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
index c011045..4459928 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
@@ -167,7 +167,7 @@ public class KerberosUtil {
   }
 
   /* Return fqdn of the current host */
-  static String getLocalHostName() throws UnknownHostException {
+  public static String getLocalHostName() throws UnknownHostException {
 return InetAddress.getLocalHost().getCanonicalHostName();
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8ab025a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
index 7f27d7d..c20733d 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
@@ -436,6 +436,8 @@ public abstract class TestConfigurationFieldsBase {
 // Create XML key/value map
 LOG_XML.debug("Reading XML property files\n");
 xmlKeyValueMap = extractPropertiesFromXml(xmlFilename);
+// Remove hadoop property set in ozone-default.xml
+xmlKeyValueMap.remove("hadoop.custom.tags");
 LOG_XML.debug("\n=\n");
 
 // Create default configuration variable key/value map

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8ab025a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 48c6dce..17c99bb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -241,18 +241,7 @@ public final class HddsUtils {
   }
 
   public static boolean isHddsEnabled(Configuration conf) {
-String securityEnabled =
-conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
- 

[24/50] [abbrv] hadoop git commit: HDDS-79. Remove ReportState from SCMHeartbeatRequestProto. Contributed by Nanda kumar.

2018-05-23 Thread xyao
HDDS-79. Remove ReportState from SCMHeartbeatRequestProto. Contributed by Nanda 
kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68c7fd8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68c7fd8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68c7fd8e

Branch: refs/heads/HDDS-4
Commit: 68c7fd8e6092e8436ecf96852c608708f311f262
Parents: 43be9ab
Author: Xiaoyu Yao 
Authored: Tue May 22 15:46:59 2018 -0700
Committer: Xiaoyu Yao 
Committed: Tue May 22 15:46:59 2018 -0700

--
 .../common/impl/ContainerManagerImpl.java   | 14 +---
 .../common/impl/ContainerReportManagerImpl.java | 43 +++-
 .../common/interfaces/ContainerManager.java |  7 --
 .../interfaces/ContainerReportManager.java  |  8 +--
 .../statemachine/DatanodeStateMachine.java  |  1 -
 .../common/statemachine/StateContext.java   | 38 --
 .../states/endpoint/HeartbeatEndpointTask.java  |  3 +-
 .../container/ozoneimpl/OzoneContainer.java |  9 ---
 .../StorageContainerDatanodeProtocol.java   |  5 +-
 .../protocol/StorageContainerNodeProtocol.java  |  5 +-
 ...rDatanodeProtocolClientSideTranslatorPB.java |  5 +-
 ...rDatanodeProtocolServerSideTranslatorPB.java |  3 +-
 .../StorageContainerDatanodeProtocol.proto  | 39 ---
 .../ozone/container/common/ScmTestMock.java | 13 +---
 .../common/TestDatanodeStateMachine.java|  7 --
 .../hdds/scm/node/HeartbeatQueueItem.java   | 23 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java| 30 +---
 .../scm/server/SCMDatanodeProtocolServer.java   |  6 +-
 .../hdds/scm/container/MockNodeManager.java |  5 +-
 .../hdds/scm/node/TestContainerPlacement.java   |  9 +--
 .../hadoop/hdds/scm/node/TestNodeManager.java   | 74 +---
 .../ozone/container/common/TestEndPoint.java| 11 +--
 .../testutils/ReplicationNodeManagerMock.java   |  5 +-
 .../ozone/TestStorageContainerManager.java  |  5 +-
 24 files changed, 63 insertions(+), 305 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 3a78c70..faee5d0 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -35,8 +35,6 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
-.StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
@@ -1072,16 +1070,8 @@ public class ContainerManagerImpl implements 
ContainerManager {
   @Override
   public long getNumKeys(long containerId) {
 ContainerData cData = containerMap.get(containerId);
-return cData.getKeyCount();  }
-
-  /**
-   * Get the container report state to send via HB to SCM.
-   *
-   * @return container report state.
-   */
-  @Override
-  public ReportState getContainerReportState() {
-return containerReportManager.getContainerReportState();
+return cData.getKeyCount();
   }
 
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
index 6c83c66..f1d3f7f 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
@@ -19,15 +19,12 @@ package org.apache.hadoop.ozone.container.common.impl;
 
 import org.apache.commons.lang3.RandomUtils;
 import 

[01/50] [abbrv] hadoop git commit: Skip the proxy user check if the ugi has not been initialized. Contributed by Daryn Sharp [Forced Update!]

2018-05-23 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/HDDS-4 3fa6787c5 -> f25b2357c (forced update)


Skip the proxy user check if the ugi has not been initialized. Contributed by 
Daryn Sharp


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73e9120a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73e9120a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73e9120a

Branch: refs/heads/HDDS-4
Commit: 73e9120ad79c73703de21e0084591861813f3279
Parents: f48fec8
Author: Rushabh Shah 
Authored: Mon May 21 12:33:00 2018 -0500
Committer: Rushabh Shah 
Committed: Mon May 21 12:33:00 2018 -0500

--
 .../src/main/java/org/apache/hadoop/conf/Configuration.java  | 2 +-
 .../org/apache/hadoop/security/UserGroupInformation.java | 8 ++--
 2 files changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e9120a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index f1e2a9d..52f20b0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -281,7 +281,7 @@ public class Configuration implements 
Iterable>,
 }
 
 private static boolean getRestrictParserDefault(Object resource) {
-  if (resource instanceof String) {
+  if (resource instanceof String || !UserGroupInformation.isInitialized()) 
{
 return false;
   }
   UserGroupInformation user;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e9120a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index cb132b3..3872810 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -288,14 +288,18 @@ public class UserGroupInformation {
   public static final String HADOOP_TOKEN_FILE_LOCATION = 
 "HADOOP_TOKEN_FILE_LOCATION";
   
+  public static boolean isInitialized() {
+return conf != null;
+  }
+
   /** 
* A method to initialize the fields that depend on a configuration.
* Must be called before useKerberos or groups is used.
*/
   private static void ensureInitialized() {
-if (conf == null) {
+if (!isInitialized()) {
   synchronized(UserGroupInformation.class) {
-if (conf == null) { // someone might have beat us
+if (!isInitialized()) { // someone might have beat us
   initialize(new Configuration(), false);
 }
   }
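
The ensureInitialized path above is the classic double-checked locking idiom: a cheap unsynchronized check first, then a second check under the lock because another thread may have finished initialization in between. A generic sketch of the idiom (illustrative, not the UserGroupInformation code; note the volatile, which safe double-checked publication requires in the general case):

public class LazySingleton {
  // volatile is needed when the guarded reference is read outside the lock.
  private static volatile LazySingleton instance;

  public static LazySingleton get() {
    if (instance == null) {                 // fast path, no lock taken
      synchronized (LazySingleton.class) {
        if (instance == null) {             // someone might have beat us
          instance = new LazySingleton();
        }
      }
    }
    return instance;
  }

  private LazySingleton() {
    // expensive one-time initialization goes here
  }
}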


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[42/50] [abbrv] hadoop git commit: HDFS-13493. Reduce the HttpServer2 thread count on DataNodes. Contributed by Erik Krogen.

2018-05-23 Thread xyao
HDFS-13493. Reduce the HttpServer2 thread count on DataNodes. Contributed by 
Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cddbbe5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cddbbe5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cddbbe5f

Branch: refs/heads/HDDS-4
Commit: cddbbe5f690e4617413f6e986adc6fa900629f03
Parents: e30938a
Author: Inigo Goiri 
Authored: Wed May 23 12:12:08 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 12:12:08 2018 -0700

--
 .../hdfs/server/datanode/web/DatanodeHttpServer.java  | 14 +-
 1 file changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cddbbe5f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index 0ce327a..4349c26 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -89,6 +89,13 @@ public class DatanodeHttpServer implements Closeable {
   private InetSocketAddress httpsAddress;
   static final Log LOG = LogFactory.getLog(DatanodeHttpServer.class);
 
+  // HttpServer threads are only used for the web UI and basic servlets, so
+  // set them to the minimum possible
+  private static final int HTTP_SELECTOR_THREADS = 1;
+  private static final int HTTP_ACCEPTOR_THREADS = 1;
+  private static final int HTTP_MAX_THREADS =
+  HTTP_SELECTOR_THREADS + HTTP_ACCEPTOR_THREADS + 1;
+
   public DatanodeHttpServer(final Configuration conf,
   final DataNode datanode,
   final ServerSocketChannel externalHttpChannel)
@@ -97,7 +104,12 @@ public class DatanodeHttpServer implements Closeable {
 this.conf = conf;
 
 Configuration confForInfoServer = new Configuration(conf);
-confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
+confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY,
+HTTP_MAX_THREADS);
+confForInfoServer.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY,
+HTTP_SELECTOR_THREADS);
+confForInfoServer.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY,
+HTTP_ACCEPTOR_THREADS);
 int proxyPort =
 confForInfoServer.getInt(DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT, 0);
 HttpServer2.Builder builder = new HttpServer2.Builder()
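
The sizing rule behind HTTP_MAX_THREADS is worth making explicit: each selector and each acceptor permanently occupies one pool thread, so at least one more thread is required before any servlet request can run. A tiny illustration of the arithmetic (not the HttpServer2 API):

public class MinimalHttpPool {
  public static void main(String[] args) {
    int selectors = 1;  // threads parked in select() waiting for I/O readiness
    int acceptors = 1;  // threads blocked in accept() for new connections
    // One extra worker is the bare minimum to actually serve a request.
    int maxThreads = selectors + acceptors + 1;
    System.out.println("UI-only server pool size: " + maxThreads);  // 3
  }
}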


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[17/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

2018-05-23 Thread xyao
HDDS-89. Create ozone specific inline documentation as part of the build.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/481bfdb9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/481bfdb9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/481bfdb9

Branch: refs/heads/HDDS-4
Commit: 481bfdb94ff2dd3038fd20b1604358ac78e422d4
Parents: 6176d2b
Author: Anu Engineer 
Authored: Tue May 22 10:49:10 2018 -0700
Committer: Anu Engineer 
Committed: Tue May 22 10:49:10 2018 -0700

--
 .gitignore  |   2 +
 dev-support/bin/ozone-dist-layout-stitching |   4 +
 hadoop-dist/pom.xml |   6 +
 hadoop-ozone/docs/README.md |  52 ++
 hadoop-ozone/docs/archetypes/default.md |  17 +
 hadoop-ozone/docs/config.toml   |  23 +
 hadoop-ozone/docs/content/CommandShell.md   | 153 +
 hadoop-ozone/docs/content/GettingStarted.md | 352 
 hadoop-ozone/docs/content/Metrics.md| 170 ++
 hadoop-ozone/docs/content/Rest.md   | 553 +++
 hadoop-ozone/docs/content/_index.md | 102 
 .../docs/dev-support/bin/generate-site.sh   |  29 +
 hadoop-ozone/docs/pom.xml   |  58 ++
 hadoop-ozone/docs/static/OzoneOverview.png  | Bin 0 -> 41729 bytes
 hadoop-ozone/docs/static/OzoneOverview.svg  | 225 
 hadoop-ozone/docs/static/SCMBlockDiagram.png| Bin 0 -> 14714 bytes
 .../ozonedoc/layouts/_default/single.html   |  32 ++
 .../docs/themes/ozonedoc/layouts/index.html |  21 +
 .../ozonedoc/layouts/partials/footer.html   |  19 +
 .../ozonedoc/layouts/partials/header.html   |  31 ++
 .../ozonedoc/layouts/partials/navbar.html   |  33 ++
 .../ozonedoc/layouts/partials/sidebar.html  |  43 ++
 .../ozonedoc/static/css/bootstrap-theme.min.css |   6 +
 .../static/css/bootstrap-theme.min.css.map  |   1 +
 .../ozonedoc/static/css/bootstrap.min.css   |   6 +
 .../ozonedoc/static/css/bootstrap.min.css.map   |   1 +
 .../themes/ozonedoc/static/css/ozonedoc.css | 128 +
 .../fonts/glyphicons-halflings-regular.eot  | Bin 0 -> 20127 bytes
 .../fonts/glyphicons-halflings-regular.svg  | 288 ++
 .../fonts/glyphicons-halflings-regular.ttf  | Bin 0 -> 45404 bytes
 .../fonts/glyphicons-halflings-regular.woff | Bin 0 -> 23424 bytes
 .../fonts/glyphicons-halflings-regular.woff2| Bin 0 -> 18028 bytes
 .../themes/ozonedoc/static/js/bootstrap.min.js  |   7 +
 .../themes/ozonedoc/static/js/jquery.min.js |   5 +
 .../docs/themes/ozonedoc/static/js/ozonedoc.js  |  23 +
 hadoop-ozone/docs/themes/ozonedoc/theme.toml|   2 +
 .../src/main/site/markdown/OzoneCommandShell.md | 150 -
 .../site/markdown/OzoneGettingStarted.md.vm | 347 
 .../src/main/site/markdown/OzoneMetrics.md  | 166 --
 .../src/main/site/markdown/OzoneOverview.md |  88 ---
 .../src/main/site/markdown/OzoneRest.md | 549 --
 hadoop-ozone/pom.xml|   1 +
 hadoop-project/pom.xml  |   5 +
 43 files changed, 2398 insertions(+), 1300 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 3883ce2..934c009 100644
--- a/.gitignore
+++ b/.gitignore
@@ -53,3 +53,5 @@ 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn-error.log
 log.html
 output.xml
 report.html
+
+hadoop-ozone/docs/public

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/dev-support/bin/ozone-dist-layout-stitching
--
diff --git a/dev-support/bin/ozone-dist-layout-stitching 
b/dev-support/bin/ozone-dist-layout-stitching
index 1b0b224..ad8abe2 100755
--- a/dev-support/bin/ozone-dist-layout-stitching
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -145,6 +145,10 @@ run copy 
"${ROOT}/hadoop-ozone/ozone-manager/target/hadoop-ozone-ozone-manager-$
 run copy 
"${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}"
 .
 run copy 
"${ROOT}/hadoop-ozone/client/target/hadoop-ozone-client-${HDDS_VERSION}" .
 run copy 
"${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}" .
+# Optional documentation, could be missing
+cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" 
./share/hadoop/ozone/webapps/ksm/
+cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" 
./share/hadoop/hdds/webapps/scm/
+
 
 mkdir -p ./share/hadoop/mapreduce
 mkdir -p ./share/hadoop/yarn


[07/50] [abbrv] hadoop git commit: HADOOP-15450. Avoid fsync storm triggered by DiskChecker and handle disk full situation. Contributed by Arpit Agarwal.

2018-05-23 Thread xyao
HADOOP-15450. Avoid fsync storm triggered by DiskChecker and handle disk full 
situation. Contributed by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcc8e76b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcc8e76b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcc8e76b

Branch: refs/heads/HDDS-4
Commit: bcc8e76badc1341a6cf995c8e44fa5e422158de8
Parents: 5f11288
Author: Kihwal Lee 
Authored: Tue May 22 11:19:15 2018 -0500
Committer: Kihwal Lee 
Committed: Tue May 22 11:20:51 2018 -0500

--
 .../org/apache/hadoop/util/DiskChecker.java |  46 -
 .../org/apache/hadoop/util/TestDiskChecker.java | 102 ---
 .../hadoop/util/TestDiskCheckerWithDiskIo.java  | 173 +++
 3 files changed, 217 insertions(+), 104 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc8e76b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
index a4fa8fd..595aeed 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
@@ -74,12 +74,30 @@ public class DiskChecker {
* @throws DiskErrorException
*/
   public static void checkDir(File dir) throws DiskErrorException {
+checkDirInternal(dir);
+  }
+
+  /**
+   * Create the directory if it doesn't exist and check that dir is
+   * readable, writable and executable. Perform some disk IO to
+   * ensure that the disk is usable for writes.
+   *
+   * @param dir
+   * @throws DiskErrorException
+   */
+  public static void checkDirWithDiskIo(File dir)
+  throws DiskErrorException {
+checkDirInternal(dir);
+doDiskIo(dir);
+  }
+
+  private static void checkDirInternal(File dir)
+  throws DiskErrorException {
 if (!mkdirsWithExistsCheck(dir)) {
   throw new DiskErrorException("Cannot create directory: "
+ dir.toString());
 }
 checkAccessByFileMethods(dir);
-doDiskIo(dir);
   }
 
   /**
@@ -94,10 +112,34 @@ public class DiskChecker {
*/
   public static void checkDir(LocalFileSystem localFS, Path dir,
   FsPermission expected)
+  throws DiskErrorException, IOException {
+checkDirInternal(localFS, dir, expected);
+  }
+
+
+  /**
+   * Create the local directory if necessary, also ensure permissions
+   * allow it to be read from and written into. Perform some diskIO
+   * to ensure that the disk is usable for writes. 
+   *
+   * @param localFS local filesystem
+   * @param dir directory
+   * @param expected permission
+   * @throws DiskErrorException
+   * @throws IOException
+   */  
+  public static void checkDirWithDiskIo(LocalFileSystem localFS, Path dir,
+FsPermission expected) 
+  throws DiskErrorException, IOException {
+checkDirInternal(localFS, dir, expected);
+doDiskIo(localFS.pathToFile(dir));
+  }  
+
+  private static void checkDirInternal(LocalFileSystem localFS, Path dir,
+   FsPermission expected)
   throws DiskErrorException, IOException {
 mkdirsWithExistsAndPermissionCheck(localFS, dir, expected);
 checkAccessByFileMethods(localFS.pathToFile(dir));
-doDiskIo(localFS.pathToFile(dir));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc8e76b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
index bd8e1dd..6b6c6c8 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.util;
 
 import java.io.*;
 import java.nio.file.Files;
-import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.util.DiskChecker.FileIoProvider;
 import org.junit.After;
@@ -214,105 +213,4 @@ public class TestDiskChecker {
 }
 localDir.delete();
   }
-
-  /**
-   * Verify DiskChecker ignores at least 2 transient file creation errors.
-   */
-  @Test(timeout = 3)
- 
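
The refactor above layers the checks: checkDir keeps only the cheap metadata probes (directory creation plus access bits), while checkDirWithDiskIo adds a real write-and-sync probe for callers that can afford it, which is what lets routine health checks avoid an fsync storm. A compact sketch of the same layering (hypothetical names, not the Hadoop DiskChecker API):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

public class DirProbe {
  /** Cheap checks only: directory exists and basic access bits are set. */
  static void checkDir(File dir) throws IOException {
    if (!dir.mkdirs() && !dir.isDirectory()) {
      throw new IOException("Cannot create directory: " + dir);
    }
    if (!dir.canRead() || !dir.canWrite() || !dir.canExecute()) {
      throw new IOException("Insufficient permissions on " + dir);
    }
  }

  /** Heavier variant: also prove the disk accepts and persists a write. */
  static void checkDirWithDiskIo(File dir) throws IOException {
    checkDir(dir);  // reuse the cheap checks
    File probe = new File(dir, ".probe-" + System.nanoTime());
    try (FileOutputStream out = new FileOutputStream(probe)) {
      out.write(1);
      out.getFD().sync();  // force the byte through to the device
    } finally {
      probe.delete();
    }
  }
}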

[27/50] [abbrv] hadoop git commit: HDFS-13601. Optimize ByteString conversions in PBHelper.

2018-05-23 Thread xyao
HDFS-13601. Optimize ByteString conversions in PBHelper.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d2640b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d2640b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d2640b6

Branch: refs/heads/HDDS-4
Commit: 1d2640b6132e8308c07476badd2d1482be68a298
Parents: 5a91406
Author: Andrew Wang 
Authored: Tue May 22 23:55:20 2018 -0700
Committer: Andrew Wang 
Committed: Tue May 22 23:55:20 2018 -0700

--
 .../dev-support/findbugsExcludeFile.xml |  5 ++
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 50 +--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 67 +---
 .../TestDataXceiverBackwardsCompat.java | 10 +++
 4 files changed, 118 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d2640b6/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 8e2bc94..fa9654b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -91,5 +91,10 @@
[hunk body stripped by the mail archiver: per the diffstat, five XML lines adding a findbugs exclusion entry]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d2640b6/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index af720c7..718661e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.protocol;
 
+import com.google.protobuf.ByteString;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -44,7 +45,9 @@ public class DatanodeID implements Comparable {
   "null", "null", 0, 0, 0, 0);
 
   private String ipAddr; // IP address
+  private ByteString ipAddrBytes; // ipAddr ByteString to save on PB serde
   private String hostName;   // hostname claimed by datanode
+  private ByteString hostNameBytes; // hostName ByteString to save on PB serde
   private String peerHostName; // hostname from the actual connection
   private int xferPort;  // data streaming port
   private int infoPort;  // info server port
@@ -58,6 +61,8 @@ public class DatanodeID implements Comparable {
* For newly formatted Datanodes it is a UUID.
*/
   private final String datanodeUuid;
+  // datanodeUuid ByteString to save on PB serde
+  private final ByteString datanodeUuidBytes;
 
   public DatanodeID(DatanodeID from) {
 this(from.getDatanodeUuid(), from);
@@ -66,8 +71,11 @@ public class DatanodeID implements Comparable {
   @VisibleForTesting
   public DatanodeID(String datanodeUuid, DatanodeID from) {
 this(from.getIpAddr(),
+from.getIpAddrBytes(),
 from.getHostName(),
+from.getHostNameBytes(),
 datanodeUuid,
+getByteString(datanodeUuid),
 from.getXferPort(),
 from.getInfoPort(),
 from.getInfoSecurePort(),
@@ -89,22 +97,43 @@ public class DatanodeID implements Comparable {
*/
   public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
   int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
-setIpAndXferPort(ipAddr, xferPort);
+this(ipAddr, getByteString(ipAddr),
+hostName, getByteString(hostName),
+datanodeUuid, getByteString(datanodeUuid),
+xferPort, infoPort, infoSecurePort, ipcPort);
+  }
+
+  private DatanodeID(String ipAddr, ByteString ipAddrBytes,
+  String hostName, ByteString hostNameBytes,
+  String datanodeUuid, ByteString datanodeUuidBytes,
+  int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
+setIpAndXferPort(ipAddr, ipAddrBytes, xferPort);
 this.hostName = hostName;
+this.hostNameBytes = hostNameBytes;
 this.datanodeUuid = checkDatanodeUuid(datanodeUuid);
+this.datanodeUuidBytes = datanodeUuidBytes;
 this.infoPort = infoPort;
 this.infoSecurePort = infoSecurePort;
 this.ipcPort = ipcPort;
   }
 
+  private static ByteString getByteString(String str) {
+  
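
The optimization pattern here is memoizing the String-to-ByteString conversion: DatanodeID fields go into protobuf messages on every heartbeat and block report, so encoding each immutable string once and reusing the resulting ByteString saves a UTF-8 encode and an array copy per message. A stripped-down sketch of the idea (illustrative, not the actual PBHelper code):

import com.google.protobuf.ByteString;

public class CachedField {
  private final String hostName;
  // Computed once: ByteString is immutable, so the same instance can be
  // shared safely across every protobuf message that carries this field.
  private final ByteString hostNameBytes;

  public CachedField(String hostName) {
    this.hostName = hostName;
    this.hostNameBytes = hostName == null
        ? ByteString.EMPTY
        : ByteString.copyFromUtf8(hostName);
  }

  public String getHostName() { return hostName; }

  public ByteString getHostNameBytes() {
    return hostNameBytes;  // reused on every serialization, no re-encode
  }
}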

[12/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

2018-05-23 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.svg
--
diff --git 
a/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.svg
 
b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.svg
new file mode 100644
index 000..94fb549
--- /dev/null
+++ 
b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.svg
@@ -0,0 +1,288 @@
[288 added lines of SVG glyph markup for the Glyphicons Halflings web font; the mail archiver stripped the XML tags, leaving nothing readable to reproduce here]

[30/50] [abbrv] hadoop git commit: HDFS-13540. DFSStripedInputStream should only allocate new buffers when reading. Contributed by Xiao Chen.

2018-05-23 Thread xyao
HDFS-13540. DFSStripedInputStream should only allocate new buffers when 
reading. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34e8b9f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34e8b9f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34e8b9f9

Branch: refs/heads/HDDS-4
Commit: 34e8b9f9a86fb03156861482643fba11bdee1dd4
Parents: fed2bef
Author: Sammi Chen 
Authored: Wed May 23 19:10:09 2018 +0800
Committer: Sammi Chen 
Committed: Wed May 23 19:10:09 2018 +0800

--
 .../apache/hadoop/io/ElasticByteBufferPool.java | 12 ++
 .../hadoop/hdfs/DFSStripedInputStream.java  | 12 +++---
 .../hadoop/hdfs/TestDFSStripedInputStream.java  | 45 
 3 files changed, 64 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34e8b9f9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
index 023f37f..9dd7771 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
@@ -116,4 +116,16 @@ public final class ElasticByteBufferPool implements 
ByteBufferPool {
   // poor granularity.
 }
   }
+
+  /**
+   * Get the size of the buffer pool, for the specified buffer type.
+   *
+   * @param direct Whether the size is returned for direct buffers
+   * @return The size
+   */
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  public int size(boolean direct) {
+return getBufferTree(direct).size();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34e8b9f9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index f3b16e0..5557a50 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -116,12 +116,14 @@ public class DFSStripedInputStream extends DFSInputStream 
{
 return decoder.preferDirectBuffer();
   }
 
-  void resetCurStripeBuffer() {
-if (curStripeBuf == null) {
+  private void resetCurStripeBuffer(boolean shouldAllocateBuf) {
+if (shouldAllocateBuf && curStripeBuf == null) {
   curStripeBuf = BUFFER_POOL.getBuffer(useDirectBuffer(),
   cellSize * dataBlkNum);
 }
-curStripeBuf.clear();
+if (curStripeBuf != null) {
+  curStripeBuf.clear();
+}
 curStripeRange = new StripeRange(0, 0);
   }
 
@@ -206,7 +208,7 @@ public class DFSStripedInputStream extends DFSInputStream {
*/
   @Override
   protected void closeCurrentBlockReaders() {
-resetCurStripeBuffer();
+resetCurStripeBuffer(false);
 if (blockReaders ==  null || blockReaders.length == 0) {
   return;
 }
@@ -296,7 +298,7 @@ public class DFSStripedInputStream extends DFSInputStream {
*/
   private void readOneStripe(CorruptedBlocks corruptedBlocks)
   throws IOException {
-resetCurStripeBuffer();
+resetCurStripeBuffer(true);
 
 // compute stripe range based on pos
 final long offsetInBlockGroup = getOffsetInBlockGroup();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34e8b9f9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index cdebee0..422746e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
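
The essence of the fix: closeCurrentBlockReaders used to call resetCurStripeBuffer unconditionally, so even a stream that was opened and closed without a single read pulled a cellSize * dataBlkNum buffer from the shared pool. Threading a shouldAllocateBuf flag through means the pool is touched only on the read path. A minimal sketch of allocate-on-use against the same pool class (hypothetical wrapper, not the DFSStripedInputStream code):

import java.nio.ByteBuffer;
import org.apache.hadoop.io.ElasticByteBufferPool;

public class LazyStripeBuffer {
  private static final ElasticByteBufferPool POOL = new ElasticByteBufferPool();
  private final int stripeSize;
  private ByteBuffer buf;  // stays null until a read actually needs it

  LazyStripeBuffer(int stripeSize) { this.stripeSize = stripeSize; }

  private void reset(boolean shouldAllocate) {
    if (shouldAllocate && buf == null) {
      buf = POOL.getBuffer(false, stripeSize);  // heap buffer from the pool
    }
    if (buf != null) {
      buf.clear();  // harmless when nothing was ever allocated
    }
  }

  void onRead() { reset(true); }    // read path: buffer must exist

  void onClose() {
    reset(false);                   // close path: never allocate
    if (buf != null) {
      POOL.putBuffer(buf);          // return it so other streams can reuse it
      buf = null;
    }
  }
}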
 

[20/50] [abbrv] hadoop git commit: YARN-8290. SystemMetricsPublisher.appACLsUpdated should be invoked after application information is published to ATS to avoid "User is not set in the application rep

2018-05-23 Thread xyao
YARN-8290. SystemMetricsPublisher.appACLsUpdated should be invoked after 
application information is published to ATS to avoid "User is not set in the 
application report" Exception. (Eric Yang via wangda)

Change-Id: I0ac6ddd19740d1aa7dd07111cd11af71ddc2fcaf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd15d239
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd15d239
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd15d239

Branch: refs/heads/HDDS-4
Commit: bd15d2396ef0c24fb6b60c6393d16b37651b828e
Parents: 523f602
Author: Wangda Tan 
Authored: Tue May 22 13:25:15 2018 -0700
Committer: Wangda Tan 
Committed: Tue May 22 13:33:33 2018 -0700

--
 .../apache/hadoop/yarn/server/resourcemanager/RMAppManager.java | 5 -
 .../hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java | 5 +
 .../hadoop/yarn/server/resourcemanager/TestAppManager.java  | 4 
 .../hadoop/yarn/server/resourcemanager/TestRMRestart.java   | 1 +
 4 files changed, 6 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd15d239/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 2983077..3e64cfc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
@@ -466,10 +465,6 @@ public class RMAppManager implements 
EventHandler,
 // Inform the ACLs Manager
 this.applicationACLsManager.addApplication(applicationId,
 submissionContext.getAMContainerSpec().getApplicationACLs());
-String appViewACLs = submissionContext.getAMContainerSpec()
-.getApplicationACLs().get(ApplicationAccessType.VIEW_APP);
-rmContext.getSystemMetricsPublisher().appACLsUpdated(
-application, appViewACLs, System.currentTimeMillis());
 return application;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd15d239/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index daf14c4..6aee813 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.ipc.CallerContext;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.StringInterner;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
@@ -2020,6 +2021,10 @@ public class RMAppImpl implements RMApp, Recoverable {
   private void sendATSCreateEvent() {
 rmContext.getRMApplicationHistoryWriter().applicationStarted(this);
 

[26/50] [abbrv] hadoop git commit: HDDS-49. Standalone protocol should use grpc in place of netty. Contributed by Mukul Kumar Singh.

2018-05-23 Thread xyao
HDDS-49. Standalone protocol should use grpc in place of netty.
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a914069
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a914069
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a914069

Branch: refs/heads/HDDS-4
Commit: 5a9140690aba295ba1226a3190b52f34347a8372
Parents: 3e5f7ea
Author: Anu Engineer 
Authored: Tue May 22 16:51:43 2018 -0700
Committer: Anu Engineer 
Committed: Tue May 22 19:56:15 2018 -0700

--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  | 217 +++
 .../hadoop/hdds/scm/XceiverClientManager.java   |  21 +-
 .../hadoop/hdds/scm/XceiverClientMetrics.java   |   8 +-
 .../common/dev-support/findbugsExcludeFile.xml  |   3 +
 hadoop-hdds/common/pom.xml  |  17 ++
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   4 +
 .../main/proto/DatanodeContainerProtocol.proto  |   7 +
 .../common/src/main/resources/ozone-default.xml |   9 +
 .../common/helpers/ContainerMetrics.java|  14 +-
 .../transport/server/GrpcXceiverService.java|  82 +++
 .../transport/server/XceiverServerGrpc.java | 105 +
 .../container/ozoneimpl/OzoneContainer.java |  11 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |  10 +-
 .../ozone/scm/TestXceiverClientManager.java |  67 --
 hadoop-project/pom.xml  |   1 +
 15 files changed, 540 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a914069/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
new file mode 100644
index 000..84790e8
--- /dev/null
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc.XceiverClientProtocolServiceStub;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.util.Time;
+import org.apache.ratis.shaded.io.grpc.ManagedChannel;
+import org.apache.ratis.shaded.io.grpc.netty.NettyChannelBuilder;
+import org.apache.ratis.shaded.io.grpc.stub.StreamObserver;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A Client for the storageContainer protocol.
+ */
+public class XceiverClientGrpc extends XceiverClientSpi {
+  static final Logger LOG = LoggerFactory.getLogger(XceiverClientGrpc.class);
+  private final Pipeline pipeline;
+  private final Configuration config;
+  private XceiverClientProtocolServiceStub asyncStub;
+  private XceiverClientMetrics metrics;
+  private ManagedChannel channel;
+  private final Semaphore semaphore;
+
+  /**
+   * 
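
Under the new transport, each request goes out on an async gRPC stub and completes through a StreamObserver callback, with a Semaphore capping in-flight requests. The usual way to bridge that callback style back to a CompletableFuture, as this client does for its async sends, looks roughly like the following (generic grpc-java sketch; Ozone actually uses the Ratis-shaded copy of these classes):

import java.util.concurrent.CompletableFuture;
import io.grpc.stub.StreamObserver;

public class FutureObserver<T> implements StreamObserver<T> {
  // Completes exactly once, with the single response or with the failure.
  private final CompletableFuture<T> future = new CompletableFuture<>();

  @Override public void onNext(T value)      { future.complete(value); }
  @Override public void onError(Throwable t) { future.completeExceptionally(t); }
  @Override public void onCompleted()        { /* unary call: nothing left */ }

  public CompletableFuture<T> toFuture() { return future; }
}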

[50/50] [abbrv] hadoop git commit: HDDS-70. Fix config names for secure ksm and scm. Contributed by Ajay Kumar.

2018-05-23 Thread xyao
HDDS-70. Fix config names for secure ksm and scm. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f25b2357
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f25b2357
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f25b2357

Branch: refs/heads/HDDS-4
Commit: f25b2357c63e37bc12a62f363d1eb926525c01c2
Parents: be3b57a
Author: Xiaoyu Yao 
Authored: Tue May 22 13:32:28 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed May 23 14:54:22 2018 -0700

--
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |  4 --
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   | 14 ++---
 .../scm/protocol/ScmBlockLocationProtocol.java  |  2 +-
 .../StorageContainerLocationProtocol.java   |  3 +-
 .../protocolPB/ScmBlockLocationProtocolPB.java  |  4 +-
 .../StorageContainerLocationProtocolPB.java |  2 +-
 .../apache/hadoop/ozone/OzoneConfigKeys.java|  8 +--
 .../common/src/main/resources/ozone-default.xml | 54 ++--
 .../StorageContainerDatanodeProtocol.java   |  2 +-
 .../StorageContainerDatanodeProtocolPB.java |  2 +-
 .../scm/server/StorageContainerManager.java | 13 +++--
 .../StorageContainerManagerHttpServer.java  |  4 +-
 .../compose/compose-secure/docker-compose.yaml  |  6 +--
 .../test/compose/compose-secure/docker-config   | 12 ++---
 .../acceptance/ozone-secure.robot   | 12 ++---
 .../ozone/client/protocol/ClientProtocol.java   |  2 +-
 .../apache/hadoop/ozone/ksm/KSMConfigKeys.java  | 10 ++--
 .../ksm/protocol/KeySpaceManagerProtocol.java   |  4 +-
 .../protocolPB/KeySpaceManagerProtocolPB.java   |  3 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java| 32 ++--
 .../hadoop/ozone/ksm/KeySpaceManager.java   | 13 ++---
 .../ozone/ksm/KeySpaceManagerHttpServer.java|  4 +-
 22 files changed, 89 insertions(+), 121 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f25b2357/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index a12d6ac..dec2c1c 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -20,8 +20,4 @@ package org.apache.hadoop.hdds;
 public final class HddsConfigKeys {
   private HddsConfigKeys() {
   }
-  public static final String HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY = "hdds.ksm."
-  + "kerberos.keytab.file";
-  public static final String HDDS_KSM_KERBEROS_PRINCIPAL_KEY = "hdds.ksm"
-  + ".kerberos.principal";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f25b2357/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index ba8f310..7929a08 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -132,9 +132,9 @@ public final class ScmConfigKeys {
   "ozone.scm.http-address";
   public static final String OZONE_SCM_HTTPS_ADDRESS_KEY =
   "ozone.scm.https-address";
-  public static final String OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY =
-  "ozone.scm.kerberos.keytab.file";
-  public static final String OZONE_SCM_KERBEROS_PRINCIPAL_KEY = 
"ozone.scm.kerberos.principal";
+  public static final String HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY =
+  "hdds.scm.kerberos.keytab.file";
+  public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY = 
"hdds.scm.kerberos.principal";
   public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
   public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
   public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
@@ -281,10 +281,10 @@ public final class ScmConfigKeys {
   "ozone.scm.container.close.threshold";
   public static final float OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f;
 
-  public static final String SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY =
-  "ozone.scm.web.authentication.kerberos.principal";
-  public static final String SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY =
-  "ozone.scm.web.authentication.kerberos.keytab";
+  public static final String 
HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY =
+  "hdds.scm.web.authentication.kerberos.principal";

hadoop git commit: YARN-8348. Incorrect and missing AfterClass in HBase-tests to fix NPE failures. Contributed by Giovanni Matteo Fumarola.

2018-05-23 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/trunk e99e5bf10 -> d72615611


YARN-8348. Incorrect and missing AfterClass in HBase-tests to fix NPE failures. 
Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7261561
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7261561
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7261561

Branch: refs/heads/trunk
Commit: d72615611cfa6bd82756270d4b10136ec1e56741
Parents: e99e5bf
Author: Inigo Goiri 
Authored: Wed May 23 14:43:59 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 14:43:59 2018 -0700

--
 .../storage/TestHBaseTimelineStorageApps.java| 4 +++-
 .../storage/TestHBaseTimelineStorageDomain.java  | 8 
 .../storage/TestHBaseTimelineStorageEntities.java| 4 +++-
 .../storage/TestHBaseTimelineStorageSchema.java  | 8 
 .../storage/flow/TestHBaseStorageFlowActivity.java   | 4 +++-
 .../storage/flow/TestHBaseStorageFlowRun.java| 4 +++-
 .../storage/flow/TestHBaseStorageFlowRunCompaction.java  | 4 +++-
 7 files changed, 31 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7261561/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
index bc33427..0dee442 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
@@ -1936,6 +1936,8 @@ public class TestHBaseTimelineStorageApps {
 
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
-util.shutdownMiniCluster();
+if (util != null) {
+  util.shutdownMiniCluster();
+}
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7261561/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java
index 2932e0c..1f59088 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java
@@ -32,6 +32,7 @@ import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelp
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.domain.DomainColumn;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.domain.DomainRowKey;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.domain.DomainTableRW;
+import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -123,4 +124,11 @@ public class TestHBaseTimelineStorageDomain {
 assertEquals("user1,user2 group1,group2", readers);
 assertEquals("writer1,writer2", writers);
   }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+if (util != null) {
+  util.shutdownMiniCluster();
+}
+  }
 }
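
The null guard matters because JUnit still runs @AfterClass when @BeforeClass fails partway through: if the mini cluster was never assigned, an unguarded shutdownMiniCluster() throws a NullPointerException that masks the original setup failure. In outline (hypothetical MiniCluster stand-in, not the HBase test utility):

import org.junit.AfterClass;
import org.junit.BeforeClass;

public class GuardedTeardownTest {
  private static MiniCluster cluster;  // may remain null if setup fails

  @BeforeClass
  public static void setUp() throws Exception {
    cluster = MiniCluster.start();  // if this throws, cluster stays null
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // JUnit invokes @AfterClass even after a failed @BeforeClass, so an
    // unguarded shutdown would NPE and hide the real failure.
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  // Hypothetical stand-in for a test mini cluster.
  static class MiniCluster {
    static MiniCluster start() { return new MiniCluster(); }
    void shutdown() { }
  }
}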


hadoop git commit: HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.

2018-05-23 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 7c5a5f31d -> 0e7ea7735


HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e7ea773
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e7ea773
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e7ea773

Branch: refs/heads/branch-3.0
Commit: 0e7ea77354406d403f59c92a6ae138bed7587f02
Parents: 7c5a5f3
Author: Arpit Agarwal 
Authored: Wed May 23 10:30:12 2018 -0700
Committer: Arpit Agarwal 
Committed: Wed May 23 14:25:08 2018 -0700

--
 .../src/main/java/org/apache/hadoop/net/NetworkTopology.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e7ea773/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 256f07b..1f077a7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -105,7 +105,7 @@ public class NetworkTopology {
   private boolean clusterEverBeenMultiRack = false;
 
   /** the lock used to manage access */
-  protected ReadWriteLock netlock = new ReentrantReadWriteLock();
+  protected ReadWriteLock netlock = new ReentrantReadWriteLock(true);
 
   // keeping the constructor because other components like MR still uses this.
   public NetworkTopology() {
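
For context on the one-line change above: the boolean constructor argument of ReentrantReadWriteLock selects a fair (FIFO) acquisition policy. Under the default non-fair policy, a steady stream of readers can starve a queued writer; a fair lock grants access in roughly arrival order, at some cost in throughput. A small, hypothetical illustration (not NetworkTopology itself):

  import java.util.concurrent.locks.ReadWriteLock;
  import java.util.concurrent.locks.ReentrantReadWriteLock;

  public class FairLockExample {
    // true => fair: waiting threads acquire in ~FIFO order, so a
    // writer queued behind many readers eventually gets the lock.
    private final ReadWriteLock lock = new ReentrantReadWriteLock(true);

    public void read() {
      lock.readLock().lock();
      try {
        // read shared state
      } finally {
        lock.readLock().unlock();
      }
    }

    public void write() {
      lock.writeLock().lock();
      try {
        // mutate shared state
      } finally {
        lock.writeLock().unlock();
      }
    }
  }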





[2/5] hadoop git commit: YARN-8344. Missing nm.stop() in TestNodeManagerResync to fix testKillContainersOnResync. Contributed by Giovanni Matteo Fumarola.

2018-05-23 Thread inigoiri
YARN-8344. Missing nm.stop() in TestNodeManagerResync to fix 
testKillContainersOnResync. Contributed by Giovanni Matteo Fumarola.

(cherry picked from commit e99e5bf104e9664bc1b43a2639d87355d47a77e2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61b5b2f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61b5b2f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61b5b2f4

Branch: refs/heads/branch-3.1
Commit: 61b5b2f4f7de971f5a18e3af206c86ba116dbfe5
Parents: f57e91a
Author: Inigo Goiri 
Authored: Wed May 23 14:15:26 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 14:16:03 2018 -0700

--
 .../nodemanager/TestNodeManagerResync.java  | 87 +++-
 1 file changed, 48 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61b5b2f4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
index 97e9922..cf33775 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
@@ -150,7 +150,6 @@ public class TestNodeManagerResync {
 testContainerPreservationOnResyncImpl(nm, true);
   }
 
-  @SuppressWarnings("unchecked")
   protected void testContainerPreservationOnResyncImpl(TestNodeManager1 nm,
   boolean isWorkPreservingRestartEnabled)
   throws IOException, YarnException, InterruptedException {
@@ -186,32 +185,35 @@ public class TestNodeManagerResync {
 }
   }
 
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings("resource")
   @Test(timeout=1)
   public void testNMshutdownWhenResyncThrowException() throws IOException,
   InterruptedException, YarnException {
 NodeManager nm = new TestNodeManager3();
 YarnConfiguration conf = createNMConfig();
-nm.init(conf);
-nm.start();
-Assert.assertEquals(1, ((TestNodeManager3) nm).getNMRegistrationCount());
-nm.getNMDispatcher().getEventHandler()
-.handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
-
-synchronized (isNMShutdownCalled) {
-  while (isNMShutdownCalled.get() == false) {
-try {
-  isNMShutdownCalled.wait();
-} catch (InterruptedException e) {
+try {
+  nm.init(conf);
+  nm.start();
+  Assert.assertEquals(1, ((TestNodeManager3) nm).getNMRegistrationCount());
+  nm.getNMDispatcher().getEventHandler()
+  .handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
+
+  synchronized (isNMShutdownCalled) {
+while (!isNMShutdownCalled.get()) {
+  try {
+isNMShutdownCalled.wait();
+  } catch (InterruptedException e) {
+  }
 }
   }
-}
 
-Assert.assertTrue("NM shutdown not called.",isNMShutdownCalled.get());
-nm.stop();
+  Assert.assertTrue("NM shutdown not called.", isNMShutdownCalled.get());
+} finally {
+  nm.stop();
+}
   }
 
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings("resource")
   @Test(timeout=6)
   public void testContainerResourceIncreaseIsSynchronizedWithRMResync()
   throws IOException, InterruptedException, YarnException {
@@ -219,28 +221,32 @@ public class TestNodeManagerResync {
 YarnConfiguration conf = createNMConfig();
 conf.setBoolean(
 YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
-nm.init(conf);
-nm.start();
-// Start a container and make sure it is in RUNNING state
-((TestNodeManager4)nm).startContainer();
-// Simulate a container resource increase in a separate thread
-((TestNodeManager4)nm).updateContainerResource();
-// Simulate RM restart by sending a RESYNC event
-LOG.info("Sending out RESYNC event");
-nm.getNMDispatcher().getEventHandler().handle(
-new NodeManagerEvent(NodeManagerEventType.RESYNC));
 try {
-  syncBarrier.await();
-} catch (BrokenBarrierException e) {
-  e.printStackTrace();
+  nm.init(conf);
+  nm.start();
+  // Start a container and make sure it is in RUNNING state
+  
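
The shape of the fix is the same in each of these tests: the NodeManager lifecycle and assertions move into a try block and nm.stop() moves into finally, so a failing assertion can no longer leak a running service into later tests. A hedged sketch of the pattern against Hadoop's generic Service interface (the class and helper names are illustrative):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.service.Service;

  public final class StopInFinallyExample {
    private StopInFinallyExample() {
    }

    // Runs a service-based test body and always stops the service,
    // even when an assertion in the body throws.
    static void runAndAlwaysStop(Service service, Configuration conf)
        throws Exception {
      try {
        service.init(conf);
        service.start();
        // ... test assertions here ...
      } finally {
        service.stop();  // Hadoop services tolerate stop() after a failure
      }
    }
  }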

[5/5] hadoop git commit: YARN-8344. Missing nm.stop() in TestNodeManagerResync to fix testKillContainersOnResync. Contributed by Giovanni Matteo Fumarola.

2018-05-23 Thread inigoiri
YARN-8344. Missing nm.stop() in TestNodeManagerResync to fix 
testKillContainersOnResync. Contributed by Giovanni Matteo Fumarola.

(cherry picked from commit e99e5bf104e9664bc1b43a2639d87355d47a77e2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50347eaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50347eaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50347eaf

Branch: refs/heads/branch-2.9
Commit: 50347eaf5696c8395c83a4d5733a258b4da107a3
Parents: 8347a6c
Author: Inigo Goiri 
Authored: Wed May 23 14:15:26 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 14:17:33 2018 -0700

--
 .../nodemanager/TestNodeManagerResync.java  | 87 +++-
 1 file changed, 48 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50347eaf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
index 97e9922..cf33775 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
@@ -150,7 +150,6 @@ public class TestNodeManagerResync {
 testContainerPreservationOnResyncImpl(nm, true);
   }
 
-  @SuppressWarnings("unchecked")
   protected void testContainerPreservationOnResyncImpl(TestNodeManager1 nm,
   boolean isWorkPreservingRestartEnabled)
   throws IOException, YarnException, InterruptedException {
@@ -186,32 +185,35 @@ public class TestNodeManagerResync {
 }
   }
 
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings("resource")
   @Test(timeout=1)
   public void testNMshutdownWhenResyncThrowException() throws IOException,
   InterruptedException, YarnException {
 NodeManager nm = new TestNodeManager3();
 YarnConfiguration conf = createNMConfig();
-nm.init(conf);
-nm.start();
-Assert.assertEquals(1, ((TestNodeManager3) nm).getNMRegistrationCount());
-nm.getNMDispatcher().getEventHandler()
-.handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
-
-synchronized (isNMShutdownCalled) {
-  while (isNMShutdownCalled.get() == false) {
-try {
-  isNMShutdownCalled.wait();
-} catch (InterruptedException e) {
+try {
+  nm.init(conf);
+  nm.start();
+  Assert.assertEquals(1, ((TestNodeManager3) nm).getNMRegistrationCount());
+  nm.getNMDispatcher().getEventHandler()
+  .handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
+
+  synchronized (isNMShutdownCalled) {
+while (!isNMShutdownCalled.get()) {
+  try {
+isNMShutdownCalled.wait();
+  } catch (InterruptedException e) {
+  }
 }
   }
-}
 
-Assert.assertTrue("NM shutdown not called.",isNMShutdownCalled.get());
-nm.stop();
+  Assert.assertTrue("NM shutdown not called.", isNMShutdownCalled.get());
+} finally {
+  nm.stop();
+}
   }
 
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings("resource")
   @Test(timeout=6)
   public void testContainerResourceIncreaseIsSynchronizedWithRMResync()
   throws IOException, InterruptedException, YarnException {
@@ -219,28 +221,32 @@ public class TestNodeManagerResync {
 YarnConfiguration conf = createNMConfig();
 conf.setBoolean(
 YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
-nm.init(conf);
-nm.start();
-// Start a container and make sure it is in RUNNING state
-((TestNodeManager4)nm).startContainer();
-// Simulate a container resource increase in a separate thread
-((TestNodeManager4)nm).updateContainerResource();
-// Simulate RM restart by sending a RESYNC event
-LOG.info("Sending out RESYNC event");
-nm.getNMDispatcher().getEventHandler().handle(
-new NodeManagerEvent(NodeManagerEventType.RESYNC));
 try {
-  syncBarrier.await();
-} catch (BrokenBarrierException e) {
-  e.printStackTrace();
+  nm.init(conf);
+  nm.start();
+  // Start a container and make sure it is in RUNNING state
+  

[1/5] hadoop git commit: YARN-8344. Missing nm.stop() in TestNodeManagerResync to fix testKillContainersOnResync. Contributed by Giovanni Matteo Fumarola.

2018-05-23 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c5ff9553f -> 8f43ade46
  refs/heads/branch-2.9 8347a6cb7 -> 50347eaf5
  refs/heads/branch-3.0 506f61e18 -> 7c5a5f31d
  refs/heads/branch-3.1 f57e91a34 -> 61b5b2f4f
  refs/heads/trunk cddbbe5f6 -> e99e5bf10


YARN-8344. Missing nm.stop() in TestNodeManagerResync to fix 
testKillContainersOnResync. Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e99e5bf1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e99e5bf1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e99e5bf1

Branch: refs/heads/trunk
Commit: e99e5bf104e9664bc1b43a2639d87355d47a77e2
Parents: cddbbe5
Author: Inigo Goiri 
Authored: Wed May 23 14:15:26 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 14:15:26 2018 -0700

--
 .../nodemanager/TestNodeManagerResync.java  | 87 +++-
 1 file changed, 48 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e99e5bf1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
index 97e9922..cf33775 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
@@ -150,7 +150,6 @@ public class TestNodeManagerResync {
 testContainerPreservationOnResyncImpl(nm, true);
   }
 
-  @SuppressWarnings("unchecked")
   protected void testContainerPreservationOnResyncImpl(TestNodeManager1 nm,
   boolean isWorkPreservingRestartEnabled)
   throws IOException, YarnException, InterruptedException {
@@ -186,32 +185,35 @@ public class TestNodeManagerResync {
 }
   }
 
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings("resource")
   @Test(timeout=1)
   public void testNMshutdownWhenResyncThrowException() throws IOException,
   InterruptedException, YarnException {
 NodeManager nm = new TestNodeManager3();
 YarnConfiguration conf = createNMConfig();
-nm.init(conf);
-nm.start();
-Assert.assertEquals(1, ((TestNodeManager3) nm).getNMRegistrationCount());
-nm.getNMDispatcher().getEventHandler()
-.handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
-
-synchronized (isNMShutdownCalled) {
-  while (isNMShutdownCalled.get() == false) {
-try {
-  isNMShutdownCalled.wait();
-} catch (InterruptedException e) {
+try {
+  nm.init(conf);
+  nm.start();
+  Assert.assertEquals(1, ((TestNodeManager3) nm).getNMRegistrationCount());
+  nm.getNMDispatcher().getEventHandler()
+  .handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
+
+  synchronized (isNMShutdownCalled) {
+while (!isNMShutdownCalled.get()) {
+  try {
+isNMShutdownCalled.wait();
+  } catch (InterruptedException e) {
+  }
 }
   }
-}
 
-Assert.assertTrue("NM shutdown not called.",isNMShutdownCalled.get());
-nm.stop();
+  Assert.assertTrue("NM shutdown not called.", isNMShutdownCalled.get());
+} finally {
+  nm.stop();
+}
   }
 
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings("resource")
   @Test(timeout=6)
   public void testContainerResourceIncreaseIsSynchronizedWithRMResync()
   throws IOException, InterruptedException, YarnException {
@@ -219,28 +221,32 @@ public class TestNodeManagerResync {
 YarnConfiguration conf = createNMConfig();
 conf.setBoolean(
 YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
-nm.init(conf);
-nm.start();
-// Start a container and make sure it is in RUNNING state
-((TestNodeManager4)nm).startContainer();
-// Simulate a container resource increase in a separate thread
-((TestNodeManager4)nm).updateContainerResource();
-// Simulate RM restart by sending a RESYNC event
-LOG.info("Sending out RESYNC event");
-nm.getNMDispatcher().getEventHandler().handle(
-new NodeManagerEvent(NodeManagerEventType.RESYNC));
 try {
-  

[4/5] hadoop git commit: YARN-8344. Missing nm.stop() in TestNodeManagerResync to fix testKillContainersOnResync. Contributed by Giovanni Matteo Fumarola.

2018-05-23 Thread inigoiri
YARN-8344. Missing nm.stop() in TestNodeManagerResync to fix 
testKillContainersOnResync. Contributed by Giovanni Matteo Fumarola.

(cherry picked from commit e99e5bf104e9664bc1b43a2639d87355d47a77e2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f43ade4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f43ade4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f43ade4

Branch: refs/heads/branch-2
Commit: 8f43ade46a05d850cd9c4ccf30fdcb6b45f62165
Parents: c5ff955
Author: Inigo Goiri 
Authored: Wed May 23 14:15:26 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 14:17:20 2018 -0700

--
 .../nodemanager/TestNodeManagerResync.java  | 87 +++-
 1 file changed, 48 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f43ade4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
index 97e9922..cf33775 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
@@ -150,7 +150,6 @@ public class TestNodeManagerResync {
 testContainerPreservationOnResyncImpl(nm, true);
   }
 
-  @SuppressWarnings("unchecked")
   protected void testContainerPreservationOnResyncImpl(TestNodeManager1 nm,
   boolean isWorkPreservingRestartEnabled)
   throws IOException, YarnException, InterruptedException {
@@ -186,32 +185,35 @@ public class TestNodeManagerResync {
 }
   }
 
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings("resource")
   @Test(timeout=1)
   public void testNMshutdownWhenResyncThrowException() throws IOException,
   InterruptedException, YarnException {
 NodeManager nm = new TestNodeManager3();
 YarnConfiguration conf = createNMConfig();
-nm.init(conf);
-nm.start();
-Assert.assertEquals(1, ((TestNodeManager3) nm).getNMRegistrationCount());
-nm.getNMDispatcher().getEventHandler()
-.handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
-
-synchronized (isNMShutdownCalled) {
-  while (isNMShutdownCalled.get() == false) {
-try {
-  isNMShutdownCalled.wait();
-} catch (InterruptedException e) {
+try {
+  nm.init(conf);
+  nm.start();
+  Assert.assertEquals(1, ((TestNodeManager3) nm).getNMRegistrationCount());
+  nm.getNMDispatcher().getEventHandler()
+  .handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
+
+  synchronized (isNMShutdownCalled) {
+while (!isNMShutdownCalled.get()) {
+  try {
+isNMShutdownCalled.wait();
+  } catch (InterruptedException e) {
+  }
 }
   }
-}
 
-Assert.assertTrue("NM shutdown not called.",isNMShutdownCalled.get());
-nm.stop();
+  Assert.assertTrue("NM shutdown not called.", isNMShutdownCalled.get());
+} finally {
+  nm.stop();
+}
   }
 
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings("resource")
   @Test(timeout=6)
   public void testContainerResourceIncreaseIsSynchronizedWithRMResync()
   throws IOException, InterruptedException, YarnException {
@@ -219,28 +221,32 @@ public class TestNodeManagerResync {
 YarnConfiguration conf = createNMConfig();
 conf.setBoolean(
 YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
-nm.init(conf);
-nm.start();
-// Start a container and make sure it is in RUNNING state
-((TestNodeManager4)nm).startContainer();
-// Simulate a container resource increase in a separate thread
-((TestNodeManager4)nm).updateContainerResource();
-// Simulate RM restart by sending a RESYNC event
-LOG.info("Sending out RESYNC event");
-nm.getNMDispatcher().getEventHandler().handle(
-new NodeManagerEvent(NodeManagerEventType.RESYNC));
 try {
-  syncBarrier.await();
-} catch (BrokenBarrierException e) {
-  e.printStackTrace();
+  nm.init(conf);
+  nm.start();
+  // Start a container and make sure it is in RUNNING state
+  

[3/5] hadoop git commit: YARN-8344. Missing nm.stop() in TestNodeManagerResync to fix testKillContainersOnResync. Contributed by Giovanni Matteo Fumarola.

2018-05-23 Thread inigoiri
YARN-8344. Missing nm.stop() in TestNodeManagerResync to fix 
testKillContainersOnResync. Contributed by Giovanni Matteo Fumarola.

(cherry picked from commit e99e5bf104e9664bc1b43a2639d87355d47a77e2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c5a5f31
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c5a5f31
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c5a5f31

Branch: refs/heads/branch-3.0
Commit: 7c5a5f31dc08c2d8f258832ff87fb7a13afe8f40
Parents: 506f61e
Author: Inigo Goiri 
Authored: Wed May 23 14:15:26 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 14:16:37 2018 -0700

--
 .../nodemanager/TestNodeManagerResync.java  | 87 +++-
 1 file changed, 48 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c5a5f31/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
index 97e9922..cf33775 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
@@ -150,7 +150,6 @@ public class TestNodeManagerResync {
 testContainerPreservationOnResyncImpl(nm, true);
   }
 
-  @SuppressWarnings("unchecked")
   protected void testContainerPreservationOnResyncImpl(TestNodeManager1 nm,
   boolean isWorkPreservingRestartEnabled)
   throws IOException, YarnException, InterruptedException {
@@ -186,32 +185,35 @@ public class TestNodeManagerResync {
 }
   }
 
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings("resource")
   @Test(timeout=1)
   public void testNMshutdownWhenResyncThrowException() throws IOException,
   InterruptedException, YarnException {
 NodeManager nm = new TestNodeManager3();
 YarnConfiguration conf = createNMConfig();
-nm.init(conf);
-nm.start();
-Assert.assertEquals(1, ((TestNodeManager3) nm).getNMRegistrationCount());
-nm.getNMDispatcher().getEventHandler()
-.handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
-
-synchronized (isNMShutdownCalled) {
-  while (isNMShutdownCalled.get() == false) {
-try {
-  isNMShutdownCalled.wait();
-} catch (InterruptedException e) {
+try {
+  nm.init(conf);
+  nm.start();
+  Assert.assertEquals(1, ((TestNodeManager3) nm).getNMRegistrationCount());
+  nm.getNMDispatcher().getEventHandler()
+  .handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
+
+  synchronized (isNMShutdownCalled) {
+while (!isNMShutdownCalled.get()) {
+  try {
+isNMShutdownCalled.wait();
+  } catch (InterruptedException e) {
+  }
 }
   }
-}
 
-Assert.assertTrue("NM shutdown not called.",isNMShutdownCalled.get());
-nm.stop();
+  Assert.assertTrue("NM shutdown not called.", isNMShutdownCalled.get());
+} finally {
+  nm.stop();
+}
   }
 
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings("resource")
   @Test(timeout=6)
   public void testContainerResourceIncreaseIsSynchronizedWithRMResync()
   throws IOException, InterruptedException, YarnException {
@@ -219,28 +221,32 @@ public class TestNodeManagerResync {
 YarnConfiguration conf = createNMConfig();
 conf.setBoolean(
 YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
-nm.init(conf);
-nm.start();
-// Start a container and make sure it is in RUNNING state
-((TestNodeManager4)nm).startContainer();
-// Simulate a container resource increase in a separate thread
-((TestNodeManager4)nm).updateContainerResource();
-// Simulate RM restart by sending a RESYNC event
-LOG.info("Sending out RESYNC event");
-nm.getNMDispatcher().getEventHandler().handle(
-new NodeManagerEvent(NodeManagerEventType.RESYNC));
 try {
-  syncBarrier.await();
-} catch (BrokenBarrierException e) {
-  e.printStackTrace();
+  nm.init(conf);
+  nm.start();
+  // Start a container and make sure it is in RUNNING state
+  

hadoop git commit: YARN-7402. [GPG] Fix potential connection leak in GPGUtils. Contributed by Giovanni Matteo Fumarola.

2018-05-23 Thread botong
Repository: hadoop
Updated Branches:
  refs/heads/YARN-7402 f9c69ca3e -> db183f2ea


YARN-7402. [GPG] Fix potential connection leak in GPGUtils. Contributed by 
Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db183f2e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db183f2e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db183f2e

Branch: refs/heads/YARN-7402
Commit: db183f2ea0a34aeb329fbc0d1553a87f7cf103b7
Parents: f9c69ca
Author: Botong Huang 
Authored: Wed May 23 12:45:32 2018 -0700
Committer: Botong Huang 
Committed: Wed May 23 12:45:32 2018 -0700

--
 .../server/globalpolicygenerator/GPGUtils.java  | 31 +---
 1 file changed, 20 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db183f2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
index 429bec4..31cee1c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
@@ -18,21 +18,22 @@
 
 package org.apache.hadoop.yarn.server.globalpolicygenerator;
 
+import static javax.servlet.http.HttpServletResponse.SC_OK;
+
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 
-import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.core.MediaType;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
 
 import com.sun.jersey.api.client.Client;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.WebResource;
-import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
-import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
 
 /**
  * GPGUtils contains utility functions for the GPG.
@@ -53,15 +54,23 @@ public final class GPGUtils {
 T obj = null;
 
 WebResource webResource = client.resource(webAddr);
-ClientResponse response = webResource.path("ws/v1/cluster").path(path)
-.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
-if (response.getStatus() == HttpServletResponse.SC_OK) {
-  obj = response.getEntity(returnType);
-} else {
-  throw new YarnRuntimeException("Bad response from remote web service: "
-  + response.getStatus());
+ClientResponse response = null;
+try {
+  response = webResource.path("ws/v1/cluster").path(path)
+  .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+  if (response.getStatus() == SC_OK) {
+obj = response.getEntity(returnType);
+  } else {
+throw new YarnRuntimeException(
+"Bad response from remote web service: " + response.getStatus());
+  }
+  return obj;
+} finally {
+  if (response != null) {
+response.close();
+  }
+  client.destroy();
 }
-return obj;
   }
 
   /**
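
The point of the rework above: in Jersey 1.x a ClientResponse pins the underlying HTTP connection until close() is called, and a Client holds its own resources until destroy(); doing both in finally guarantees release on the error path as well. A minimal sketch of the same pattern with a placeholder method, URL, and path (not GPGUtils itself):

  import javax.ws.rs.core.MediaType;

  import com.sun.jersey.api.client.Client;
  import com.sun.jersey.api.client.ClientResponse;
  import com.sun.jersey.api.client.WebResource;

  public final class JerseyNoLeakExample {
    private JerseyNoLeakExample() {
    }

    static String fetchXml(String webAddr, String path) {
      Client client = Client.create();
      ClientResponse response = null;
      try {
        WebResource resource = client.resource(webAddr);
        response = resource.path(path)
            .accept(MediaType.APPLICATION_XML)
            .get(ClientResponse.class);
        if (response.getStatus() != 200) {
          throw new RuntimeException(
              "Bad response from remote web service: "
                  + response.getStatus());
        }
        return response.getEntity(String.class);
      } finally {
        if (response != null) {
          response.close();  // releases the pooled HTTP connection
        }
        client.destroy();    // frees client-side resources
      }
    }
  }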





[3/3] hadoop git commit: HDFS-13493. Reduce the HttpServer2 thread count on DataNodes. Contributed by Erik Krogen.

2018-05-23 Thread inigoiri
HDFS-13493. Reduce the HttpServer2 thread count on DataNodes. Contributed by 
Erik Krogen.

(cherry picked from commit cddbbe5f690e4617413f6e986adc6fa900629f03)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/506f61e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/506f61e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/506f61e1

Branch: refs/heads/branch-3.0
Commit: 506f61e186e90a6798de721e01fe43122d31c5c1
Parents: c5f29d6
Author: Inigo Goiri 
Authored: Wed May 23 12:12:08 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 12:13:06 2018 -0700

--
 .../hdfs/server/datanode/web/DatanodeHttpServer.java  | 14 +-
 1 file changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/506f61e1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index 0ce327a..4349c26 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -89,6 +89,13 @@ public class DatanodeHttpServer implements Closeable {
   private InetSocketAddress httpsAddress;
   static final Log LOG = LogFactory.getLog(DatanodeHttpServer.class);
 
+  // HttpServer threads are only used for the web UI and basic servlets, so
+  // set them to the minimum possible
+  private static final int HTTP_SELECTOR_THREADS = 1;
+  private static final int HTTP_ACCEPTOR_THREADS = 1;
+  private static final int HTTP_MAX_THREADS =
+  HTTP_SELECTOR_THREADS + HTTP_ACCEPTOR_THREADS + 1;
+
   public DatanodeHttpServer(final Configuration conf,
   final DataNode datanode,
   final ServerSocketChannel externalHttpChannel)
@@ -97,7 +104,12 @@ public class DatanodeHttpServer implements Closeable {
 this.conf = conf;
 
 Configuration confForInfoServer = new Configuration(conf);
-confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
+confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY,
+HTTP_MAX_THREADS);
+confForInfoServer.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY,
+HTTP_SELECTOR_THREADS);
+confForInfoServer.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY,
+HTTP_ACCEPTOR_THREADS);
 int proxyPort =
 confForInfoServer.getInt(DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT, 0);
 HttpServer2.Builder builder = new HttpServer2.Builder()
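
The arithmetic behind HTTP_MAX_THREADS above: Jetty needs one thread per selector, one per acceptor, and at least one left over to actually serve requests, so the minimum pool is selectors + acceptors + 1. A hedged sketch of applying the same sizing through the configuration keys the patch uses (the wrapper class and method are illustrative):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.http.HttpServer2;

  public final class MinimalHttpThreadsExample {
    private MinimalHttpThreadsExample() {
    }

    static Configuration minimalHttpThreads(Configuration base) {
      Configuration conf = new Configuration(base);
      int selectors = 1;
      int acceptors = 1;
      conf.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY, selectors);
      conf.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY, acceptors);
      // One spare worker thread beyond the selectors and acceptors.
      conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY,
          selectors + acceptors + 1);
      return conf;
    }
  }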





[1/3] hadoop git commit: HDFS-13493. Reduce the HttpServer2 thread count on DataNodes. Contributed by Erik Krogen.

2018-05-23 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 c5f29d62e -> 506f61e18
  refs/heads/branch-3.1 a13849637 -> f57e91a34
  refs/heads/trunk e30938af1 -> cddbbe5f6


HDFS-13493. Reduce the HttpServer2 thread count on DataNodes. Contributed by 
Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cddbbe5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cddbbe5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cddbbe5f

Branch: refs/heads/trunk
Commit: cddbbe5f690e4617413f6e986adc6fa900629f03
Parents: e30938a
Author: Inigo Goiri 
Authored: Wed May 23 12:12:08 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 12:12:08 2018 -0700

--
 .../hdfs/server/datanode/web/DatanodeHttpServer.java  | 14 +-
 1 file changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cddbbe5f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index 0ce327a..4349c26 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -89,6 +89,13 @@ public class DatanodeHttpServer implements Closeable {
   private InetSocketAddress httpsAddress;
   static final Log LOG = LogFactory.getLog(DatanodeHttpServer.class);
 
+  // HttpServer threads are only used for the web UI and basic servlets, so
+  // set them to the minimum possible
+  private static final int HTTP_SELECTOR_THREADS = 1;
+  private static final int HTTP_ACCEPTOR_THREADS = 1;
+  private static final int HTTP_MAX_THREADS =
+  HTTP_SELECTOR_THREADS + HTTP_ACCEPTOR_THREADS + 1;
+
   public DatanodeHttpServer(final Configuration conf,
   final DataNode datanode,
   final ServerSocketChannel externalHttpChannel)
@@ -97,7 +104,12 @@ public class DatanodeHttpServer implements Closeable {
 this.conf = conf;
 
 Configuration confForInfoServer = new Configuration(conf);
-confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
+confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY,
+HTTP_MAX_THREADS);
+confForInfoServer.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY,
+HTTP_SELECTOR_THREADS);
+confForInfoServer.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY,
+HTTP_ACCEPTOR_THREADS);
 int proxyPort =
 confForInfoServer.getInt(DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT, 0);
 HttpServer2.Builder builder = new HttpServer2.Builder()





[2/3] hadoop git commit: HDFS-13493. Reduce the HttpServer2 thread count on DataNodes. Contributed by Erik Krogen.

2018-05-23 Thread inigoiri
HDFS-13493. Reduce the HttpServer2 thread count on DataNodes. Contributed by 
Erik Krogen.

(cherry picked from commit cddbbe5f690e4617413f6e986adc6fa900629f03)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f57e91a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f57e91a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f57e91a3

Branch: refs/heads/branch-3.1
Commit: f57e91a3483188089fc9aac1e493c08ec9045b8c
Parents: a138496
Author: Inigo Goiri 
Authored: Wed May 23 12:12:08 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 12:12:40 2018 -0700

--
 .../hdfs/server/datanode/web/DatanodeHttpServer.java  | 14 +-
 1 file changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f57e91a3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index 0ce327a..4349c26 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -89,6 +89,13 @@ public class DatanodeHttpServer implements Closeable {
   private InetSocketAddress httpsAddress;
   static final Log LOG = LogFactory.getLog(DatanodeHttpServer.class);
 
+  // HttpServer threads are only used for the web UI and basic servlets, so
+  // set them to the minimum possible
+  private static final int HTTP_SELECTOR_THREADS = 1;
+  private static final int HTTP_ACCEPTOR_THREADS = 1;
+  private static final int HTTP_MAX_THREADS =
+  HTTP_SELECTOR_THREADS + HTTP_ACCEPTOR_THREADS + 1;
+
   public DatanodeHttpServer(final Configuration conf,
   final DataNode datanode,
   final ServerSocketChannel externalHttpChannel)
@@ -97,7 +104,12 @@ public class DatanodeHttpServer implements Closeable {
 this.conf = conf;
 
 Configuration confForInfoServer = new Configuration(conf);
-confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
+confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY,
+HTTP_MAX_THREADS);
+confForInfoServer.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY,
+HTTP_SELECTOR_THREADS);
+confForInfoServer.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY,
+HTTP_ACCEPTOR_THREADS);
 int proxyPort =
 confForInfoServer.getInt(DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT, 0);
 HttpServer2.Builder builder = new HttpServer2.Builder()





hadoop git commit: YARN-8336. Fix potential connection leak in SchedConfCLI and YarnWebServiceUtils. Contributed by Giovanni Matteo Fumarola.

2018-05-23 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/trunk c13dea87d -> e30938af1


YARN-8336. Fix potential connection leak in SchedConfCLI and 
YarnWebServiceUtils. Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e30938af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e30938af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e30938af

Branch: refs/heads/trunk
Commit: e30938af1270e079587e7bc06b755f9e93e660a5
Parents: c13dea8
Author: Inigo Goiri 
Authored: Wed May 23 11:55:31 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 11:55:31 2018 -0700

--
 .../hadoop/yarn/client/cli/SchedConfCLI.java| 42 
 .../yarn/webapp/util/YarnWebServiceUtils.java   | 17 +---
 2 files changed, 38 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e30938af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
index 11bfdd7..a5f3b80 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
@@ -132,25 +132,35 @@ public class SchedConfCLI extends Configured implements 
Tool {
 }
 
 Client webServiceClient = Client.create();
-WebResource webResource = webServiceClient.resource(WebAppUtils.
-getRMWebAppURLWithScheme(getConf()));
-ClientResponse response = webResource.path("ws").path("v1").path("cluster")
-.path("scheduler-conf").accept(MediaType.APPLICATION_JSON)
-.entity(YarnWebServiceUtils.toJson(updateInfo,
-SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
-.put(ClientResponse.class);
-if (response != null) {
-  if (response.getStatus() == Status.OK.getStatusCode()) {
-System.out.println("Configuration changed successfully.");
-return 0;
+WebResource webResource = webServiceClient
+.resource(WebAppUtils.getRMWebAppURLWithScheme(getConf()));
+ClientResponse response = null;
+
+try {
+  response =
+  webResource.path("ws").path("v1").path("cluster")
+  .path("scheduler-conf").accept(MediaType.APPLICATION_JSON)
+  .entity(YarnWebServiceUtils.toJson(updateInfo,
+  SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
+  .put(ClientResponse.class);
+  if (response != null) {
+if (response.getStatus() == Status.OK.getStatusCode()) {
+  System.out.println("Configuration changed successfully.");
+  return 0;
+} else {
+  System.err.println("Configuration change unsuccessful: "
+  + response.getEntity(String.class));
+}
   } else {
-System.err.println("Configuration change unsuccessful: "
-+ response.getEntity(String.class));
+System.err.println("Configuration change unsuccessful: null response");
   }
-} else {
-  System.err.println("Configuration change unsuccessful: null response");
+  return -1;
+} finally {
+  if (response != null) {
+response.close();
+  }
+  webServiceClient.destroy();
 }
-return -1;
   }
 
   @VisibleForTesting

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e30938af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
index 1cf1e97..e7bca2c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
@@ -58,11 +58,18 @@ public final class YarnWebServiceUtils {
 
 WebResource webResource = webServiceClient.resource(webAppAddress);
 
-ClientResponse response = webResource.path("ws").path("v1")
-.path("cluster").path("nodes")

[2/5] hadoop git commit: HDFS-13587. TestQuorumJournalManager fails on Windows. Contributed by Anbang Hu.

2018-05-23 Thread inigoiri
HDFS-13587. TestQuorumJournalManager fails on Windows. Contributed by Anbang Hu.

(cherry picked from commit c13dea87d9de7a9872fc8b0c939b41b1666a61e5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1384963
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1384963
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1384963

Branch: refs/heads/branch-3.1
Commit: a138496379e5d8f2a908899780075fac2c7de08f
Parents: 76f38f3
Author: Inigo Goiri 
Authored: Wed May 23 11:36:03 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 11:36:34 2018 -0700

--
 .../org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java | 5 +
 .../hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java   | 3 ++-
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1384963/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
index 2314e22..f936d75 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager;
 import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
 
 import com.google.common.base.Joiner;
@@ -50,6 +51,10 @@ public class MiniJournalCluster {
 private int numJournalNodes = 3;
 private boolean format = true;
 private final Configuration conf;
+
+static {
+  DefaultMetricsSystem.setMiniClusterMode(true);
+}
 
 public Builder(Configuration conf) {
   this.conf = conf;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1384963/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
index 34a0348..69856ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@ -93,7 +93,8 @@ public class TestQuorumJournalManager {
 
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 
0);
 
 cluster = new MiniJournalCluster.Builder(conf)
-  .build();
+.baseDir(GenericTestUtils.getRandomizedTestDir().getAbsolutePath())
+.build();
 cluster.waitActive();
 
 qjm = createSpyingQJM();
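
Two independent fixes are at work above: the static initializer puts the metrics system into mini-cluster mode, so several JournalNodes in one JVM do not collide on metrics registration, and the randomized base directory keeps each run's files separate, which matters on Windows where stale file locks from a previous run can block reuse of a fixed path. A hedged sketch of the resulting test setup (the wrapper class and the try/finally shutdown are illustrative, not part of the patch):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
  import org.apache.hadoop.test.GenericTestUtils;

  public final class RandomizedJournalDirExample {
    private RandomizedJournalDirExample() {
    }

    static void runAgainstFreshCluster() throws Exception {
      Configuration conf = new Configuration();
      MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf)
          // Fresh directory per run; avoids Windows lock collisions.
          .baseDir(GenericTestUtils.getRandomizedTestDir().getAbsolutePath())
          .build();
      cluster.waitActive();
      try {
        // ... exercise the journal nodes ...
      } finally {
        cluster.shutdown();
      }
    }
  }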





[1/5] hadoop git commit: HDFS-13587. TestQuorumJournalManager fails on Windows. Contributed by Anbang Hu.

2018-05-23 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 50c39db1a -> c5ff9553f
  refs/heads/branch-2.9 3e16a7475 -> 8347a6cb7
  refs/heads/branch-3.0 8d8ef081a -> c5f29d62e
  refs/heads/branch-3.1 76f38f3f8 -> a13849637
  refs/heads/trunk 51ce02bb5 -> c13dea87d


HDFS-13587. TestQuorumJournalManager fails on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c13dea87
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c13dea87
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c13dea87

Branch: refs/heads/trunk
Commit: c13dea87d9de7a9872fc8b0c939b41b1666a61e5
Parents: 51ce02b
Author: Inigo Goiri 
Authored: Wed May 23 11:36:03 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 11:36:03 2018 -0700

--
 .../org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java | 5 +
 .../hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java   | 3 ++-
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c13dea87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
index 2314e22..f936d75 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager;
 import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
 
 import com.google.common.base.Joiner;
@@ -50,6 +51,10 @@ public class MiniJournalCluster {
 private int numJournalNodes = 3;
 private boolean format = true;
 private final Configuration conf;
+
+static {
+  DefaultMetricsSystem.setMiniClusterMode(true);
+}
 
 public Builder(Configuration conf) {
   this.conf = conf;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c13dea87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
index 34a0348..69856ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@ -93,7 +93,8 @@ public class TestQuorumJournalManager {
 
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 
0);
 
 cluster = new MiniJournalCluster.Builder(conf)
-  .build();
+.baseDir(GenericTestUtils.getRandomizedTestDir().getAbsolutePath())
+.build();
 cluster.waitActive();
 
 qjm = createSpyingQJM();





[5/5] hadoop git commit: HDFS-13587. TestQuorumJournalManager fails on Windows. Contributed by Anbang Hu.

2018-05-23 Thread inigoiri
HDFS-13587. TestQuorumJournalManager fails on Windows. Contributed by Anbang Hu.

(cherry picked from commit c13dea87d9de7a9872fc8b0c939b41b1666a61e5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8347a6cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8347a6cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8347a6cb

Branch: refs/heads/branch-2.9
Commit: 8347a6cb77554e920bbe3d403d6f16dc5e37686d
Parents: 3e16a74
Author: Inigo Goiri 
Authored: Wed May 23 11:36:03 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 11:38:09 2018 -0700

--
 .../org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java | 5 +
 .../hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java   | 3 ++-
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8347a6cb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
index 7b974c3..f896bba 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager;
 import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
 
 import com.google.common.base.Joiner;
@@ -50,6 +51,10 @@ public class MiniJournalCluster {
 private int numJournalNodes = 3;
 private boolean format = true;
 private final Configuration conf;
+
+static {
+  DefaultMetricsSystem.setMiniClusterMode(true);
+}
 
 public Builder(Configuration conf) {
   this.conf = conf;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8347a6cb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
index 7d770e0..91b1fdb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@ -94,7 +94,8 @@ public class TestQuorumJournalManager {
 
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 
0);
 
 cluster = new MiniJournalCluster.Builder(conf)
-  .build();
+.baseDir(GenericTestUtils.getRandomizedTestDir().getAbsolutePath())
+.build();
 cluster.waitActive();
 
 qjm = createSpyingQJM();





[3/5] hadoop git commit: HDFS-13587. TestQuorumJournalManager fails on Windows. Contributed by Anbang Hu.

2018-05-23 Thread inigoiri
HDFS-13587. TestQuorumJournalManager fails on Windows. Contributed by Anbang Hu.

(cherry picked from commit c13dea87d9de7a9872fc8b0c939b41b1666a61e5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5f29d62
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5f29d62
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5f29d62

Branch: refs/heads/branch-3.0
Commit: c5f29d62eba0cf1bf9b96c09e300d1958156e6da
Parents: 8d8ef08
Author: Inigo Goiri 
Authored: Wed May 23 11:36:03 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 11:37:27 2018 -0700

--
 .../org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java | 5 +
 .../hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java   | 3 ++-
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5f29d62/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
index 2314e22..f936d75 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager;
 import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
 
 import com.google.common.base.Joiner;
@@ -50,6 +51,10 @@ public class MiniJournalCluster {
 private int numJournalNodes = 3;
 private boolean format = true;
 private final Configuration conf;
+
+static {
+  DefaultMetricsSystem.setMiniClusterMode(true);
+}
 
 public Builder(Configuration conf) {
   this.conf = conf;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5f29d62/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
index 34a0348..69856ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@ -93,7 +93,8 @@ public class TestQuorumJournalManager {
 
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 
0);
 
 cluster = new MiniJournalCluster.Builder(conf)
-  .build();
+.baseDir(GenericTestUtils.getRandomizedTestDir().getAbsolutePath())
+.build();
 cluster.waitActive();
 
 qjm = createSpyingQJM();





[4/5] hadoop git commit: HDFS-13587. TestQuorumJournalManager fails on Windows. Contributed by Anbang Hu.

2018-05-23 Thread inigoiri
HDFS-13587. TestQuorumJournalManager fails on Windows. Contributed by Anbang Hu.

(cherry picked from commit c13dea87d9de7a9872fc8b0c939b41b1666a61e5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5ff9553
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5ff9553
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5ff9553

Branch: refs/heads/branch-2
Commit: c5ff9553fb941e0a46b0d240caf54d662c089947
Parents: 50c39db
Author: Inigo Goiri 
Authored: Wed May 23 11:36:03 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 11:37:55 2018 -0700

--
 .../org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java | 5 +
 .../hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java   | 3 ++-
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5ff9553/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
index 7b974c3..f896bba 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager;
 import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
 
 import com.google.common.base.Joiner;
@@ -50,6 +51,10 @@ public class MiniJournalCluster {
 private int numJournalNodes = 3;
 private boolean format = true;
 private final Configuration conf;
+
+static {
+  DefaultMetricsSystem.setMiniClusterMode(true);
+}
 
 public Builder(Configuration conf) {
   this.conf = conf;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5ff9553/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
index 7d770e0..91b1fdb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@ -94,7 +94,8 @@ public class TestQuorumJournalManager {
 
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 
0);
 
 cluster = new MiniJournalCluster.Builder(conf)
-  .build();
+.baseDir(GenericTestUtils.getRandomizedTestDir().getAbsolutePath())
+.build();
 cluster.waitActive();
 
 qjm = createSpyingQJM();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] Git Push Summary

2018-05-23 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-15407 [created] 51ce02bb5

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[3/6] hadoop git commit: HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.

2018-05-23 Thread arp
HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50c39db1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50c39db1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50c39db1

Branch: refs/heads/branch-2
Commit: 50c39db1ad60b0cd131c554f989c9ae4c7474251
Parents: 411a2f6
Author: Arpit Agarwal 
Authored: Wed May 23 10:30:12 2018 -0700
Committer: Arpit Agarwal 
Committed: Wed May 23 10:30:48 2018 -0700

--
 .../src/main/java/org/apache/hadoop/net/NetworkTopology.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50c39db1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 256f07b..1f077a7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -105,7 +105,7 @@ public class NetworkTopology {
   private boolean clusterEverBeenMultiRack = false;
 
   /** the lock used to manage access */
-  protected ReadWriteLock netlock = new ReentrantReadWriteLock();
+  protected ReadWriteLock netlock = new ReentrantReadWriteLock(true);
 
   // keeping the constructor because other components like MR still uses this.
   public NetworkTopology() {
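For context on the one-argument change above: passing true to the ReentrantReadWriteLock constructor selects the fair acquisition policy, under which threads acquire the lock roughly in arrival order instead of being allowed to barge. On a hot NetworkTopology a continuous stream of readers could otherwise delay a writer indefinitely; fairness trades a little raw throughput for a bounded wait. A small JDK-only sketch of the pattern:

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class FairRwLockSketch {
  // true = fair: a writer queued behind many readers is not starved,
  // because newly arriving readers queue up behind it instead of barging.
  private final ReadWriteLock netlock = new ReentrantReadWriteLock(true);

  void mutateTopology(Runnable change) {
    netlock.writeLock().lock();
    try {
      change.run();   // e.g. add or remove a node from the cluster map
    } finally {
      netlock.writeLock().unlock();
    }
  }
}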


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[5/6] hadoop git commit: HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.

2018-05-23 Thread arp
HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/806f86a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/806f86a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/806f86a8

Branch: refs/heads/branch-2.8
Commit: 806f86a80460ada3f94f5b3dae0b741501a02f31
Parents: 12258c7
Author: Arpit Agarwal 
Authored: Wed May 23 10:30:12 2018 -0700
Committer: Arpit Agarwal 
Committed: Wed May 23 10:31:20 2018 -0700

--
 .../src/main/java/org/apache/hadoop/net/NetworkTopology.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/806f86a8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 636620a..6c62a25 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -387,7 +387,7 @@ public class NetworkTopology {
   private boolean clusterEverBeenMultiRack = false;
 
   /** the lock used to manage access */
-  protected ReadWriteLock netlock = new ReentrantReadWriteLock();
+  protected ReadWriteLock netlock = new ReentrantReadWriteLock(true);
 
   public NetworkTopology() {
 clusterMap = new InnerNode(InnerNode.ROOT);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/6] hadoop git commit: HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.

2018-05-23 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 411a2f609 -> 50c39db1a
  refs/heads/branch-2.7 7be1bc52b -> be22e242a
  refs/heads/branch-2.8 12258c7cf -> 806f86a80
  refs/heads/branch-2.9 3265ff69f -> 3e16a7475
  refs/heads/branch-3.1 d0d3d5b2a -> 76f38f3f8
  refs/heads/trunk aa23d49fc -> 51ce02bb5


HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51ce02bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51ce02bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51ce02bb

Branch: refs/heads/trunk
Commit: 51ce02bb54d6047a8191624a86d427b0c9445cb1
Parents: aa23d49
Author: Arpit Agarwal 
Authored: Wed May 23 10:30:12 2018 -0700
Committer: Arpit Agarwal 
Committed: Wed May 23 10:30:12 2018 -0700

--
 .../src/main/java/org/apache/hadoop/net/NetworkTopology.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51ce02bb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 256f07b..1f077a7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -105,7 +105,7 @@ public class NetworkTopology {
   private boolean clusterEverBeenMultiRack = false;
 
   /** the lock used to manage access */
-  protected ReadWriteLock netlock = new ReentrantReadWriteLock();
+  protected ReadWriteLock netlock = new ReentrantReadWriteLock(true);
 
   // keeping the constructor because other components like MR still uses this.
   public NetworkTopology() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[4/6] hadoop git commit: HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.

2018-05-23 Thread arp
HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e16a747
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e16a747
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e16a747

Branch: refs/heads/branch-2.9
Commit: 3e16a7475476a6f07764418bda2d078d53590227
Parents: 3265ff6
Author: Arpit Agarwal 
Authored: Wed May 23 10:30:12 2018 -0700
Committer: Arpit Agarwal 
Committed: Wed May 23 10:31:03 2018 -0700

--
 .../src/main/java/org/apache/hadoop/net/NetworkTopology.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e16a747/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 256f07b..1f077a7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -105,7 +105,7 @@ public class NetworkTopology {
   private boolean clusterEverBeenMultiRack = false;
 
   /** the lock used to manage access */
-  protected ReadWriteLock netlock = new ReentrantReadWriteLock();
+  protected ReadWriteLock netlock = new ReentrantReadWriteLock(true);
 
   // keeping the constructor because other components like MR still uses this.
   public NetworkTopology() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/6] hadoop git commit: HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.

2018-05-23 Thread arp
HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76f38f3f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76f38f3f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76f38f3f

Branch: refs/heads/branch-3.1
Commit: 76f38f3f8b2553f2d0b4b907d71b30c111ccea08
Parents: d0d3d5b
Author: Arpit Agarwal 
Authored: Wed May 23 10:30:12 2018 -0700
Committer: Arpit Agarwal 
Committed: Wed May 23 10:30:37 2018 -0700

--
 .../src/main/java/org/apache/hadoop/net/NetworkTopology.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76f38f3f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 256f07b..1f077a7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -105,7 +105,7 @@ public class NetworkTopology {
   private boolean clusterEverBeenMultiRack = false;
 
   /** the lock used to manage access */
-  protected ReadWriteLock netlock = new ReentrantReadWriteLock();
+  protected ReadWriteLock netlock = new ReentrantReadWriteLock(true);
 
   // keeping the constructor because other components like MR still uses this.
   public NetworkTopology() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[6/6] hadoop git commit: HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.

2018-05-23 Thread arp
HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be22e242
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be22e242
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be22e242

Branch: refs/heads/branch-2.7
Commit: be22e242ac66322958f47dec14db8424f908e7d3
Parents: 7be1bc5
Author: Arpit Agarwal 
Authored: Wed May 23 10:30:12 2018 -0700
Committer: Arpit Agarwal 
Committed: Wed May 23 10:31:39 2018 -0700

--
 .../src/main/java/org/apache/hadoop/net/NetworkTopology.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be22e242/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index d242119..5eeb0d0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -389,7 +389,7 @@ public class NetworkTopology {
   private boolean clusterEverBeenMultiRack = false;
 
   /** the lock used to manage access */
-  protected ReadWriteLock netlock = new ReentrantReadWriteLock();
+  protected ReadWriteLock netlock = new ReentrantReadWriteLock(true);
 
   public NetworkTopology() {
 clusterMap = new InnerNode(InnerNode.ROOT);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13589: Add dfsAdmin command to query if upgrade is finalized. Contributed by Hanisha Koneru

2018-05-23 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 0c268a075 -> d0d3d5b2a


HDFS-13589: Add dfsAdmin command to query if upgrade is finalized. Contributed 
by Hanisha Koneru

(cherry picked from commit bc6d9d4c796d3c9d27dbbe3266031bf2adecde4f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0d3d5b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0d3d5b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0d3d5b2

Branch: refs/heads/branch-3.1
Commit: d0d3d5b2ad03c177ec8927e13faf219d6636492c
Parents: 0c268a0
Author: Bharat Viswanadham 
Authored: Wed May 23 10:15:40 2018 -0700
Committer: Bharat Viswanadham 
Committed: Wed May 23 10:23:45 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 10 ++
 .../hadoop/hdfs/DistributedFileSystem.java  | 10 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java|  9 ++
 .../hadoop/hdfs/protocol/HdfsConstants.java |  7 ++
 .../ClientNamenodeProtocolTranslatorPB.java | 17 
 .../src/main/proto/ClientNamenodeProtocol.proto |  9 ++
 .../federation/router/RouterRpcServer.java  |  7 ++
 ...tNamenodeProtocolServerSideTranslatorPB.java | 17 
 .../hdfs/server/namenode/NameNodeRpcServer.java |  6 ++
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 97 
 .../src/site/markdown/HDFSCommands.md   |  2 +
 .../markdown/HDFSHighAvailabilityWithQJM.md |  2 +
 .../hadoop/hdfs/tools/TestDFSAdminWithHA.java   | 67 ++
 13 files changed, 260 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0d3d5b2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 09154d0..5f1b2bb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2341,6 +2341,16 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
+  /**
+   * @see ClientProtocol#upgradeStatus()
+   */
+  public boolean upgradeStatus() throws IOException {
+checkOpen();
+try (TraceScope ignored = tracer.newScope("isUpgradeFinalized")) {
+  return namenode.upgradeStatus();
+}
+  }
+
   RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
   throws IOException {
 checkOpen();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0d3d5b2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 1e9ed09..82cdd8c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1534,6 +1534,16 @@ public class DistributedFileSystem extends FileSystem
   }
 
   /**
+   * Get status of upgrade - finalized or not.
+   * @return true if upgrade is finalized or if no upgrade is in progress and
+   * false otherwise.
+   * @throws IOException
+   */
+  public boolean upgradeStatus() throws IOException {
+return dfs.upgradeStatus();
+  }
+
+  /**
* Rolling upgrade: prepare/finalize/query.
*/
   public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0d3d5b2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index f5d5e82..7729e10 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -941,6 +941,15 @@ public interface ClientProtocol {
   void finalizeUpgrade() throws IOException;
 
   /**
+   * Get status of upgrade - finalized or not.
+   * @return true if upgrade is 
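As a usage sketch for the new client API: a caller can ask the NameNode whether the current upgrade has been finalized without shelling out to the admin tool. The NameNode URI below is purely illustrative:

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class UpgradeStatusSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Illustrative address; point this at your own NameNode.
    try (FileSystem fs =
        FileSystem.get(URI.create("hdfs://localhost:8020"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // true if the upgrade is finalized, or if no upgrade is in progress.
      boolean finalized = dfs.upgradeStatus();
      System.out.println("upgrade finalized: " + finalized);
    }
  }
}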

hadoop git commit: HADOOP-15457. Add Security-Related HTTP Response Header in WEBUIs. (kanwaljeets via rkanter)

2018-05-23 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk bc6d9d4c7 -> aa23d49fc


HADOOP-15457. Add Security-Related HTTP Response Header in WEBUIs. (kanwaljeets 
via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa23d49f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa23d49f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa23d49f

Branch: refs/heads/trunk
Commit: aa23d49fc8b9c2537529dbdc13512000e2ab295a
Parents: bc6d9d4
Author: Robert Kanter 
Authored: Wed May 23 10:23:17 2018 -0700
Committer: Robert Kanter 
Committed: Wed May 23 10:24:09 2018 -0700

--
 .../org/apache/hadoop/http/HttpServer2.java | 79 +++-
 .../org/apache/hadoop/http/TestHttpServer.java  | 61 +++
 2 files changed, 121 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa23d49f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 47ca841..c273c78 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -34,6 +34,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
@@ -172,10 +174,16 @@ public final class HttpServer2 implements FilterContainer 
{
   private final SignerSecretProvider secretProvider;
   private XFrameOption xFrameOption;
   private boolean xFrameOptionIsEnabled;
-  private static final String X_FRAME_VALUE = "xFrameOption";
-  private static final String X_FRAME_ENABLED = "X_FRAME_ENABLED";
-
-
+  public static final String HTTP_HEADER_PREFIX = "hadoop.http.header.";
+  private static final String HTTP_HEADER_REGEX =
+  "hadoop\\.http\\.header\\.([a-zA-Z\\-_]+)";
+  static final String X_XSS_PROTECTION  =
+  "X-XSS-Protection:1; mode=block";
+  static final String X_CONTENT_TYPE_OPTIONS =
+  "X-Content-Type-Options:nosniff";
+  private static final String X_FRAME_OPTIONS = "X-FRAME-OPTIONS";
+  private static final Pattern PATTERN_HTTP_HEADER_REGEX =
+  Pattern.compile(HTTP_HEADER_REGEX);
   /**
* Class to construct instances of HTTP server with specific options.
*/
@@ -574,10 +582,7 @@ public final class HttpServer2 implements FilterContainer {
 addDefaultApps(contexts, appDir, conf);
 webServer.setHandler(handlers);
 
-Map<String, String> xFrameParams = new HashMap<>();
-xFrameParams.put(X_FRAME_ENABLED,
-String.valueOf(this.xFrameOptionIsEnabled));
-xFrameParams.put(X_FRAME_VALUE,  this.xFrameOption.toString());
+Map<String, String> xFrameParams = setHeaders(conf);
 addGlobalFilter("safety", QuotingInputFilter.class.getName(), 
xFrameParams);
 final FilterInitializer[] initializers = getFilterInitializers(conf);
 if (initializers != null) {
@@ -1475,9 +1480,11 @@ public final class HttpServer2 implements 
FilterContainer {
   public static class QuotingInputFilter implements Filter {
 
 private FilterConfig config;
+private Map<String, String> headerMap;
 
 public static class RequestQuoter extends HttpServletRequestWrapper {
   private final HttpServletRequest rawRequest;
+
   public RequestQuoter(HttpServletRequest rawRequest) {
 super(rawRequest);
 this.rawRequest = rawRequest;
@@ -1566,6 +1573,7 @@ public final class HttpServer2 implements FilterContainer 
{
 @Override
 public void init(FilterConfig config) throws ServletException {
   this.config = config;
+  initHttpHeaderMap();
 }
 
 @Override
@@ -1593,11 +1601,7 @@ public final class HttpServer2 implements 
FilterContainer {
   } else if (mime.startsWith("application/xml")) {
 httpResponse.setContentType("text/xml; charset=utf-8");
   }
-
-  if(Boolean.valueOf(this.config.getInitParameter(X_FRAME_ENABLED))) {
-httpResponse.addHeader("X-FRAME-OPTIONS",
-this.config.getInitParameter(X_FRAME_VALUE));
-  }
+  headerMap.forEach((k, v) -> httpResponse.addHeader(k, v));
   chain.doFilter(quoted, httpResponse);
 }
 
@@ -1613,14 +1617,25 @@ public final class HttpServer2 implements 
FilterContainer {
   return (mime == null) ? null : mime;
 }
 
+private void initHttpHeaderMap() {
+ 
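From the visible part of the patch, any configuration key under the hadoop.http.header. prefix whose suffix matches the [a-zA-Z\-_]+ pattern becomes a response header that the QuotingInputFilter adds to every reply, while X-XSS-Protection and X-Content-Type-Options appear to ship as built-in defaults (see the constants above). A sketch of supplying extra headers; the header names and values here are examples, not defaults:

import org.apache.hadoop.conf.Configuration;

public class SecurityHeaderConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Each key under the prefix becomes one response header: the part
    // after "hadoop.http.header." is the header name, and the property
    // value is the header value.
    conf.set("hadoop.http.header.Content-Security-Policy",
        "default-src 'self'");
    conf.set("hadoop.http.header.Strict-Transport-Security",
        "max-age=63072000; includeSubDomains");
    // An HttpServer2 built from this conf would emit both headers.
  }
}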

hadoop git commit: HDFS-13589: Add dfsAdmin command to query if upgrade is finalized. Contributed by Hanisha Koneru

2018-05-23 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/trunk 699a6918a -> bc6d9d4c7


HDFS-13589: Add dfsAdmin command to query if upgrade is finalized. Contributed 
by Hanisha Koneru


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc6d9d4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc6d9d4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc6d9d4c

Branch: refs/heads/trunk
Commit: bc6d9d4c796d3c9d27dbbe3266031bf2adecde4f
Parents: 699a691
Author: Bharat Viswanadham 
Authored: Wed May 23 10:15:40 2018 -0700
Committer: Bharat Viswanadham 
Committed: Wed May 23 10:15:40 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 10 ++
 .../hadoop/hdfs/DistributedFileSystem.java  | 10 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java|  9 ++
 .../hadoop/hdfs/protocol/HdfsConstants.java |  7 ++
 .../ClientNamenodeProtocolTranslatorPB.java | 17 
 .../src/main/proto/ClientNamenodeProtocol.proto |  9 ++
 .../federation/router/RouterRpcServer.java  |  7 ++
 ...tNamenodeProtocolServerSideTranslatorPB.java | 17 
 .../hdfs/server/namenode/NameNodeRpcServer.java |  6 ++
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 97 
 .../src/site/markdown/HDFSCommands.md   |  2 +
 .../markdown/HDFSHighAvailabilityWithQJM.md |  2 +
 .../hadoop/hdfs/tools/TestDFSAdminWithHA.java   | 67 ++
 13 files changed, 260 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 09154d0..5f1b2bb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2341,6 +2341,16 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
+  /**
+   * @see ClientProtocol#upgradeStatus()
+   */
+  public boolean upgradeStatus() throws IOException {
+checkOpen();
+try (TraceScope ignored = tracer.newScope("isUpgradeFinalized")) {
+  return namenode.upgradeStatus();
+}
+  }
+
   RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
   throws IOException {
 checkOpen();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 1e9ed09..82cdd8c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1534,6 +1534,16 @@ public class DistributedFileSystem extends FileSystem
   }
 
   /**
+   * Get status of upgrade - finalized or not.
+   * @return true if upgrade is finalized or if no upgrade is in progress and
+   * false otherwise.
+   * @throws IOException
+   */
+  public boolean upgradeStatus() throws IOException {
+return dfs.upgradeStatus();
+  }
+
+  /**
* Rolling upgrade: prepare/finalize/query.
*/
   public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index f5d5e82..7729e10 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -941,6 +941,15 @@ public interface ClientProtocol {
   void finalizeUpgrade() throws IOException;
 
   /**
+   * Get status of upgrade - finalized or not.
+   * @return true if upgrade is finalized or if no upgrade is in progress and
+   * false otherwise.
+   * 

[08/18] hadoop git commit: HDDS-49. Standalone protocol should use grpc in place of netty. Contributed by Mukul Kumar Singh.

2018-05-23 Thread hanishakoneru
HDDS-49. Standalone protocol should use grpc in place of netty.
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a914069
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a914069
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a914069

Branch: refs/heads/HDDS-48
Commit: 5a9140690aba295ba1226a3190b52f34347a8372
Parents: 3e5f7ea
Author: Anu Engineer 
Authored: Tue May 22 16:51:43 2018 -0700
Committer: Anu Engineer 
Committed: Tue May 22 19:56:15 2018 -0700

--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  | 217 +++
 .../hadoop/hdds/scm/XceiverClientManager.java   |  21 +-
 .../hadoop/hdds/scm/XceiverClientMetrics.java   |   8 +-
 .../common/dev-support/findbugsExcludeFile.xml  |   3 +
 hadoop-hdds/common/pom.xml  |  17 ++
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   4 +
 .../main/proto/DatanodeContainerProtocol.proto  |   7 +
 .../common/src/main/resources/ozone-default.xml |   9 +
 .../common/helpers/ContainerMetrics.java|  14 +-
 .../transport/server/GrpcXceiverService.java|  82 +++
 .../transport/server/XceiverServerGrpc.java | 105 +
 .../container/ozoneimpl/OzoneContainer.java |  11 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |  10 +-
 .../ozone/scm/TestXceiverClientManager.java |  67 --
 hadoop-project/pom.xml  |   1 +
 15 files changed, 540 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a914069/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
new file mode 100644
index 000..84790e8
--- /dev/null
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc.XceiverClientProtocolServiceStub;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.util.Time;
+import org.apache.ratis.shaded.io.grpc.ManagedChannel;
+import org.apache.ratis.shaded.io.grpc.netty.NettyChannelBuilder;
+import org.apache.ratis.shaded.io.grpc.stub.StreamObserver;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A Client for the storageContainer protocol.
+ */
+public class XceiverClientGrpc extends XceiverClientSpi {
+  static final Logger LOG = LoggerFactory.getLogger(XceiverClientGrpc.class);
+  private final Pipeline pipeline;
+  private final Configuration config;
+  private XceiverClientProtocolServiceStub asyncStub;
+  private XceiverClientMetrics metrics;
+  private ManagedChannel channel;
+  private final Semaphore semaphore;
+
+  /**
+   * 
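The concurrency shape of this client is worth calling out: requests go out on a gRPC async stub, each reply completes a CompletableFuture, and a Semaphore caps the number of requests in flight. A JDK-only sketch of that pattern with the transport abstracted away — everything here is illustrative, not the real HDDS types:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;
import java.util.function.Consumer;

public class BoundedAsyncSendSketch {
  // Caps outstanding requests, like the semaphore in XceiverClientGrpc.
  private final Semaphore inFlight = new Semaphore(100);

  CompletableFuture<String> send(String request,
      Consumer<Consumer<String>> transport) throws InterruptedException {
    inFlight.acquire();                    // wait if too many outstanding
    CompletableFuture<String> reply = new CompletableFuture<>();
    // The transport invokes our callback when the response arrives,
    // standing in for StreamObserver#onNext on the gRPC stream.
    transport.accept(response -> {
      reply.complete(response);
      inFlight.release();                  // free a slot for the next send
    });
    return reply;
  }
}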

[12/18] hadoop git commit: HDFS-13540. DFSStripedInputStream should only allocate new buffers when reading. Contributed by Xiao Chen.

2018-05-23 Thread hanishakoneru
HDFS-13540. DFSStripedInputStream should only allocate new buffers when 
reading. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34e8b9f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34e8b9f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34e8b9f9

Branch: refs/heads/HDDS-48
Commit: 34e8b9f9a86fb03156861482643fba11bdee1dd4
Parents: fed2bef
Author: Sammi Chen 
Authored: Wed May 23 19:10:09 2018 +0800
Committer: Sammi Chen 
Committed: Wed May 23 19:10:09 2018 +0800

--
 .../apache/hadoop/io/ElasticByteBufferPool.java | 12 ++
 .../hadoop/hdfs/DFSStripedInputStream.java  | 12 +++---
 .../hadoop/hdfs/TestDFSStripedInputStream.java  | 45 
 3 files changed, 64 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34e8b9f9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
index 023f37f..9dd7771 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
@@ -116,4 +116,16 @@ public final class ElasticByteBufferPool implements 
ByteBufferPool {
   // poor granularity.
 }
   }
+
+  /**
+   * Get the size of the buffer pool, for the specified buffer type.
+   *
+   * @param direct Whether the size is returned for direct buffers
+   * @return The size
+   */
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  public int size(boolean direct) {
+return getBufferTree(direct).size();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34e8b9f9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index f3b16e0..5557a50 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -116,12 +116,14 @@ public class DFSStripedInputStream extends DFSInputStream 
{
 return decoder.preferDirectBuffer();
   }
 
-  void resetCurStripeBuffer() {
-if (curStripeBuf == null) {
+  private void resetCurStripeBuffer(boolean shouldAllocateBuf) {
+if (shouldAllocateBuf && curStripeBuf == null) {
   curStripeBuf = BUFFER_POOL.getBuffer(useDirectBuffer(),
   cellSize * dataBlkNum);
 }
-curStripeBuf.clear();
+if (curStripeBuf != null) {
+  curStripeBuf.clear();
+}
 curStripeRange = new StripeRange(0, 0);
   }
 
@@ -206,7 +208,7 @@ public class DFSStripedInputStream extends DFSInputStream {
*/
   @Override
   protected void closeCurrentBlockReaders() {
-resetCurStripeBuffer();
+resetCurStripeBuffer(false);
 if (blockReaders ==  null || blockReaders.length == 0) {
   return;
 }
@@ -296,7 +298,7 @@ public class DFSStripedInputStream extends DFSInputStream {
*/
   private void readOneStripe(CorruptedBlocks corruptedBlocks)
   throws IOException {
-resetCurStripeBuffer();
+resetCurStripeBuffer(true);
 
 // compute stripe range based on pos
 final long offsetInBlockGroup = getOffsetInBlockGroup();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34e8b9f9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index cdebee0..422746e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
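The new size(boolean) accessor exists so tests like the one above can observe the pool: after the fix, opening and closing a striped stream without reading must not change the number of pooled buffers, because the stripe buffer is now only allocated on an actual read. A short sketch of the pool API itself, using only the method added here plus the existing getBuffer/putBuffer pair:

import java.nio.ByteBuffer;
import org.apache.hadoop.io.ElasticByteBufferPool;

public class BufferPoolSketch {
  public static void main(String[] args) {
    ElasticByteBufferPool pool = new ElasticByteBufferPool();
    ByteBuffer buf = pool.getBuffer(true, 64 * 1024);  // direct, 64 KiB
    // ... fill and drain buf ...
    pool.putBuffer(buf);             // return it so later readers reuse it
    // One direct buffer is now pooled; a test can assert on this count.
    System.out.println("pooled direct buffers: " + pool.size(true));
  }
}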
 

[05/18] hadoop git commit: HDDS-89. Addendum Patch-1. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

2018-05-23 Thread hanishakoneru
HDDS-89. Addendum Patch-1. Create ozone specific inline documentation as part 
of the build.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43be9ab4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43be9ab4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43be9ab4

Branch: refs/heads/HDDS-48
Commit: 43be9ab44f27ae847e100efdc6810b192202fc55
Parents: b22f56c
Author: Anu Engineer 
Authored: Tue May 22 14:29:06 2018 -0700
Committer: Anu Engineer 
Committed: Tue May 22 14:29:06 2018 -0700

--
 hadoop-ozone/docs/dev-support/bin/generate-site.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43be9ab4/hadoop-ozone/docs/dev-support/bin/generate-site.sh
--
diff --git a/hadoop-ozone/docs/dev-support/bin/generate-site.sh 
b/hadoop-ozone/docs/dev-support/bin/generate-site.sh
index 3323935..374e74b 100755
--- a/hadoop-ozone/docs/dev-support/bin/generate-site.sh
+++ b/hadoop-ozone/docs/dev-support/bin/generate-site.sh
@@ -19,7 +19,7 @@ DOCDIR="$DIR/../.."
 
 if [ ! "$(which hugo)" ]; then
echo "Hugo is not yet installed. Doc generation is skipped."
-   exit -1
+   exit 0
 fi
 
 DESTDIR="$DOCDIR/target/classes/webapps/docs"


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[01/18] hadoop git commit: HDDS-89. Addendum Patch. Create ozone specific inline documentation as part of the build Contributed by Elek, Marton.

2018-05-23 Thread hanishakoneru
Repository: hadoop
Updated Branches:
  refs/heads/HDDS-48 60821fb20 -> 699a6918a


HDDS-89. Addendum Patch. Create ozone specific inline documentation as part of 
the build
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/523f602f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/523f602f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/523f602f

Branch: refs/heads/HDDS-48
Commit: 523f602f81eafd56e4adfadd70d7c9a672b5813a
Parents: 60821fb
Author: Anu Engineer 
Authored: Tue May 22 13:20:42 2018 -0700
Committer: Anu Engineer 
Committed: Tue May 22 13:20:42 2018 -0700

--
 hadoop-dist/pom.xml | 12 +---
 1 file changed, 5 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/523f602f/hadoop-dist/pom.xml
--
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 41e040f..dfbf818 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -68,13 +68,6 @@
       <artifactId>hadoop-client-integration-tests</artifactId>
       <scope>provided</scope>
     </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-docs</artifactId>
-      <scope>provided</scope>
-    </dependency>
-
-
 
   
 
@@ -267,6 +260,11 @@
       <artifactId>hadoop-ozone-tools</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-docs</artifactId>
+      <scope>provided</scope>
+    </dependency>
   
   
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[02/18] hadoop git commit: YARN-8332. Incorrect min/max allocation property name in resource types doc. (Weiwei Yang via wangda)

2018-05-23 Thread hanishakoneru
YARN-8332. Incorrect min/max allocation property name in resource types doc. 
(Weiwei Yang via wangda)

Change-Id: If74f1ceed9c045a2cb2d6593741278b65ac44a9f


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83f53e5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83f53e5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83f53e5c

Branch: refs/heads/HDDS-48
Commit: 83f53e5c6236de30c213dc41878cebfb02597e26
Parents: bd15d23
Author: Wangda Tan 
Authored: Tue May 22 13:29:21 2018 -0700
Committer: Wangda Tan 
Committed: Tue May 22 13:33:33 2018 -0700

--
 .../hadoop-yarn-site/src/site/markdown/ResourceModel.md | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83f53e5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
index f968b5f..ac16d53 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
@@ -49,8 +49,8 @@ The following configuration properties are supported. See 
below for details.
 |:---- |:---- |
 | `yarn.resource-types` | Comma-separated list of additional resources. May not include `memory`, `memory-mb`, or `vcores` |
 | `yarn.resource-types.<resource>.units` | Default unit for the specified resource type |
-| `yarn.resource-types.<resource>.minimum` | The minimum request for the specified resource type |
-| `yarn.resource-types.<resource>.maximum` | The maximum request for the specified resource type |
+| `yarn.resource-types.<resource>.minimum-allocation` | The minimum request for the specified resource type |
+| `yarn.resource-types.<resource>.maximum-allocation` | The maximum request for the specified resource type |
 
 `node-resources.xml`
 
@@ -127,8 +127,8 @@ set the default unit for the resource type. Valid values 
are:
 
 The property must be named `yarn.resource-types.<resource>.units`. Each defined
 resource may also have optional minimum and maximum properties. The properties
-must be named `yarn.resource-types.<resource>.minimum` and
-`yarn.resource-types.<resource>.maximum`.
+must be named `yarn.resource-types.<resource>.minimum-allocation` and
+`yarn.resource-types.<resource>.maximum-allocation`.
 
 The `yarn.resource-types` property and any unit, minimum, or maximum properties
 may be defined in either the usual `yarn-site.xml` file or in a file named
@@ -147,12 +147,12 @@ may be defined in either the usual `yarn-site.xml` file 
or in a file named
   
 
   
-    <name>yarn.resource-types.resource2.minimum</name>
+    <name>yarn.resource-types.resource2.minimum-allocation</name>
     <value>1</value>
   </property>
 
   <property>
-    <name>yarn.resource-types.resource2.maximum</name>
+    <name>yarn.resource-types.resource2.maximum-allocation</name>
     <value>1024</value>
   </property>
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
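The corrected names in use, set programmatically for illustration (in practice these belong in yarn-site.xml or resource-types.xml):

import org.apache.hadoop.conf.Configuration;

public class ResourceTypeConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("yarn.resource-types", "resource2");
    conf.set("yarn.resource-types.resource2.units", "G");
    // The "-allocation" suffix is exactly what this doc fix corrects:
    conf.set("yarn.resource-types.resource2.minimum-allocation", "1");
    conf.set("yarn.resource-types.resource2.maximum-allocation", "1024");
  }
}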



[09/18] hadoop git commit: HDFS-13601. Optimize ByteString conversions in PBHelper.

2018-05-23 Thread hanishakoneru
HDFS-13601. Optimize ByteString conversions in PBHelper.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d2640b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d2640b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d2640b6

Branch: refs/heads/HDDS-48
Commit: 1d2640b6132e8308c07476badd2d1482be68a298
Parents: 5a91406
Author: Andrew Wang 
Authored: Tue May 22 23:55:20 2018 -0700
Committer: Andrew Wang 
Committed: Tue May 22 23:55:20 2018 -0700

--
 .../dev-support/findbugsExcludeFile.xml |  5 ++
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 50 +--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 67 +---
 .../TestDataXceiverBackwardsCompat.java | 10 +++
 4 files changed, 118 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d2640b6/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 8e2bc94..fa9654b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -91,5 +91,10 @@
 
 
   
+  
+
+
+
+  
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d2640b6/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index af720c7..718661e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.protocol;
 
+import com.google.protobuf.ByteString;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -44,7 +45,9 @@ public class DatanodeID implements Comparable<DatanodeID> {
   "null", "null", 0, 0, 0, 0);
 
   private String ipAddr; // IP address
+  private ByteString ipAddrBytes; // ipAddr ByteString to save on PB serde
   private String hostName;   // hostname claimed by datanode
+  private ByteString hostNameBytes; // hostName ByteString to save on PB serde
   private String peerHostName; // hostname from the actual connection
   private int xferPort;  // data streaming port
   private int infoPort;  // info server port
@@ -58,6 +61,8 @@ public class DatanodeID implements Comparable<DatanodeID> {
* For newly formatted Datanodes it is a UUID.
*/
   private final String datanodeUuid;
+  // datanodeUuid ByteString to save on PB serde
+  private final ByteString datanodeUuidBytes;
 
   public DatanodeID(DatanodeID from) {
 this(from.getDatanodeUuid(), from);
@@ -66,8 +71,11 @@ public class DatanodeID implements Comparable<DatanodeID> {
   @VisibleForTesting
   public DatanodeID(String datanodeUuid, DatanodeID from) {
 this(from.getIpAddr(),
+from.getIpAddrBytes(),
 from.getHostName(),
+from.getHostNameBytes(),
 datanodeUuid,
+getByteString(datanodeUuid),
 from.getXferPort(),
 from.getInfoPort(),
 from.getInfoSecurePort(),
@@ -89,22 +97,43 @@ public class DatanodeID implements Comparable<DatanodeID> {
*/
   public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
   int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
-setIpAndXferPort(ipAddr, xferPort);
+this(ipAddr, getByteString(ipAddr),
+hostName, getByteString(hostName),
+datanodeUuid, getByteString(datanodeUuid),
+xferPort, infoPort, infoSecurePort, ipcPort);
+  }
+
+  private DatanodeID(String ipAddr, ByteString ipAddrBytes,
+  String hostName, ByteString hostNameBytes,
+  String datanodeUuid, ByteString datanodeUuidBytes,
+  int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
+setIpAndXferPort(ipAddr, ipAddrBytes, xferPort);
 this.hostName = hostName;
+this.hostNameBytes = hostNameBytes;
 this.datanodeUuid = checkDatanodeUuid(datanodeUuid);
+this.datanodeUuidBytes = datanodeUuidBytes;
 this.infoPort = infoPort;
 this.infoSecurePort = infoSecurePort;
 this.ipcPort = ipcPort;
   }
 
+  private static ByteString getByteString(String str) {
+ 
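The heart of the optimization is visible above: each string field gains a precomputed ByteString twin, so converting a DatanodeID to protobuf no longer re-encodes the same IP, hostname, and UUID strings on every heartbeat or block report. The idea in isolation, using the protobuf-java API the patch relies on; the wrapper class is illustrative:

import com.google.protobuf.ByteString;

public class CachedByteStringSketch {
  private final String hostName;
  private final ByteString hostNameBytes;  // encoded once, reused on every serde

  public CachedByteStringSketch(String hostName) {
    this.hostName = hostName;
    // Pay the UTF-8 encoding cost a single time instead of per conversion.
    this.hostNameBytes = hostName == null
        ? ByteString.EMPTY
        : ByteString.copyFromUtf8(hostName);
  }

  public ByteString getHostNameBytes() {
    return hostNameBytes;
  }
}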

[18/18] hadoop git commit: HDDS-110. Checkstyle is not working in the HDDS precommit hook. Contributed by Elek, Marton.

2018-05-23 Thread hanishakoneru
HDDS-110. Checkstyle is not working in the HDDS precommit hook.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/699a6918
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/699a6918
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/699a6918

Branch: refs/heads/HDDS-48
Commit: 699a6918aca2b57ae9ad0bff2c3aaf5a776da614
Parents: c0c9b7a
Author: Anu Engineer 
Authored: Wed May 23 09:42:21 2018 -0700
Committer: Anu Engineer 
Committed: Wed May 23 10:01:53 2018 -0700

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/699a6918/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 0e7b23a..13f9255 100644
--- a/pom.xml
+++ b/pom.xml
@@ -322,7 +322,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-build-tools</artifactId>
-          <version>${project.version}</version>
+          <version>${hadoop.version}</version>
         </dependency>
         <dependency>
           <groupId>com.puppycrawl.tools</groupId>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[13/18] hadoop git commit: YARN-8285. Remove unused environment variables from the Docker runtime. Contributed by Eric Badger

2018-05-23 Thread hanishakoneru
YARN-8285. Remove unused environment variables from the Docker runtime. 
Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9837ca9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9837ca9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9837ca9c

Branch: refs/heads/HDDS-48
Commit: 9837ca9cc746573571029f9fb996a1be10b588ab
Parents: 34e8b9f
Author: Shane Kumpf 
Authored: Wed May 23 06:43:44 2018 -0600
Committer: Shane Kumpf 
Committed: Wed May 23 06:43:44 2018 -0600

--
 .../linux/runtime/DockerLinuxContainerRuntime.java  | 9 -
 1 file changed, 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9837ca9c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 787e892..e131e9d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -106,9 +106,6 @@ import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  *     will be used to launch the Docker container.
  *   </li>
  *   <li>
- *     {@code YARN_CONTAINER_RUNTIME_DOCKER_IMAGE_FILE} is currently ignored.
- *   </li>
- *   <li>
  * {@code YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE} controls
  * whether the Docker container's default command is overridden.  When set
  * to {@code true}, the Docker container's command will be
@@ -198,9 +195,6 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   public static final String ENV_DOCKER_CONTAINER_IMAGE =
   "YARN_CONTAINER_RUNTIME_DOCKER_IMAGE";
   @InterfaceAudience.Private
-  public static final String ENV_DOCKER_CONTAINER_IMAGE_FILE =
-  "YARN_CONTAINER_RUNTIME_DOCKER_IMAGE_FILE";
-  @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_RUN_OVERRIDE_DISABLE =
   "YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE";
   @InterfaceAudience.Private
@@ -216,9 +210,6 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   public static final String ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER =
   "YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER";
   @InterfaceAudience.Private
-  public static final String ENV_DOCKER_CONTAINER_RUN_ENABLE_USER_REMAPPING =
-  "YARN_CONTAINER_RUNTIME_DOCKER_RUN_ENABLE_USER_REMAPPING";
-  @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_MOUNTS =
   "YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS";
   @InterfaceAudience.Private


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[11/18] hadoop git commit: HDDS-85. Send Container State Info while sending the container report from Datanode to SCM. Contributed by Shashikant Banerjee.

2018-05-23 Thread hanishakoneru
HDDS-85. Send Container State Info while sending the container report from 
Datanode to SCM. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fed2bef6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fed2bef6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fed2bef6

Branch: refs/heads/HDDS-48
Commit: fed2bef647d9a15fe020ad5d3bb89fcb77ed30e6
Parents: 745f203
Author: Mukul Kumar Singh 
Authored: Wed May 23 14:15:35 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Wed May 23 14:15:35 2018 +0530

--
 .../main/proto/DatanodeContainerProtocol.proto  |  1 +
 .../container/common/helpers/ContainerData.java |  8 
 .../common/impl/ContainerManagerImpl.java   | 45 ++--
 .../common/interfaces/ContainerManager.java |  2 +-
 .../commandhandler/ContainerReportHandler.java  |  4 +-
 .../container/ozoneimpl/OzoneContainer.java |  4 +-
 .../common/impl/TestContainerPersistence.java   |  2 +-
 7 files changed, 57 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fed2bef6/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 1138297..53da18a 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -131,6 +131,7 @@ enum Result {
   UNCLOSED_CONTAINER_IO = 25;
   DELETE_ON_OPEN_CONTAINER = 26;
   CLOSED_CONTAINER_RETRY = 27;
+  INVALID_CONTAINER_STATE = 28;
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fed2bef6/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
index 14ee33a..d1746f2 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
@@ -340,6 +340,14 @@ public class ContainerData {
   }
 
   /**
+   * checks if the container is closed.
+   * @return - boolean
+   */
+  public synchronized  boolean isClosed() {
+return ContainerLifeCycleState.CLOSED == state;
+  }
+
+  /**
* Marks this container as closed.
*/
   public synchronized void closeContainer() {
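
A minimal sketch of the lifecycle guard this change enables, assuming a
hypothetical update path; isClosed() and closeContainer() mirror the methods
in the diff above, and INVALID_CONTAINER_STATE is the Result value added to
the proto:

import java.io.IOException;

public class ContainerStateGuard {
  enum ContainerLifeCycleState { OPEN, CLOSING, CLOSED }

  private ContainerLifeCycleState state = ContainerLifeCycleState.OPEN;

  public synchronized boolean isClosed() {
    return ContainerLifeCycleState.CLOSED == state;
  }

  public synchronized void closeContainer() {
    state = ContainerLifeCycleState.CLOSED;
  }

  // Hypothetical write path: refuse mutations once CLOSED; this is the
  // condition the new INVALID_CONTAINER_STATE result code describes.
  public void update(String key, String value) throws IOException {
    if (isClosed()) {
      throw new IOException("INVALID_CONTAINER_STATE: container is closed");
    }
    // ... apply the update ...
  }
}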

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fed2bef6/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index faee5d0..9355364 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers
 .StorageContainerException;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -100,6 +102,8 @@ import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .Result.UNCLOSED_CONTAINER_IO;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .Result.UNSUPPORTED_REQUEST;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
+Result.INVALID_CONTAINER_STATE;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
 
 /**
@@ -707,6 +711,39 @@ public class ContainerManagerImpl implements 
ContainerManager {
   }
 
   /**
+   * Returns LifeCycle State of the container
+   * @param 

[17/18] hadoop git commit: HDFS-13588. Fix TestFsDatasetImpl test failures on Windows. Contributed by Xiao Liang.

2018-05-23 Thread hanishakoneru
HDFS-13588. Fix TestFsDatasetImpl test failures on Windows. Contributed by Xiao 
Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0c9b7a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0c9b7a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0c9b7a8

Branch: refs/heads/HDDS-48
Commit: c0c9b7a8ef2618b7641a0452d9277abd26815de2
Parents: e83b943
Author: Inigo Goiri 
Authored: Wed May 23 09:46:35 2018 -0700
Committer: Inigo Goiri 
Committed: Wed May 23 09:46:35 2018 -0700

--
 .../server/datanode/fsdataset/impl/TestFsDatasetImpl.java| 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0c9b7a8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index d684950..9270be8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.BlockReader;
@@ -666,7 +667,8 @@ public class TestFsDatasetImpl {
   TimeUnit.MILLISECONDS);
   config.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 
1);
 
-  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
+  cluster = new MiniDFSCluster.Builder(config,
+  GenericTestUtils.getRandomizedTestDir()).numDataNodes(1).build();
   cluster.waitActive();
   FileSystem fs = cluster.getFileSystem();
   DataNode dataNode = cluster.getDataNodes().get(0);
@@ -688,7 +690,7 @@ public class TestFsDatasetImpl {
 // Remove write and execute access so that checkDiskErrorThread detects
 // this volume is bad.
 finalizedDir.setExecutable(false);
-finalizedDir.setWritable(false);
+assertTrue(FileUtil.setWritable(finalizedDir, false));
   }
   Assert.assertTrue("Reference count for the volume should be greater "
   + "than 0", volume.getReferenceCount() > 0);
@@ -709,7 +711,7 @@ public class TestFsDatasetImpl {
   } catch (IOException ioe) {
 GenericTestUtils.assertExceptionContains(info.getXferAddr(), ioe);
   }
-  finalizedDir.setWritable(true);
+  assertTrue(FileUtil.setWritable(finalizedDir, true));
   finalizedDir.setExecutable(true);
 } finally {
 cluster.shutdown();





[14/18] hadoop git commit: YARN-8297. Incorrect ATS URL used for wire-encrypted cluster (addendum). Contributed by Sunil G.

2018-05-23 Thread hanishakoneru
YARN-8297. Incorrect ATS URL used for wire-encrypted cluster (addendum). 
Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f61e3e75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f61e3e75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f61e3e75

Branch: refs/heads/HDDS-48
Commit: f61e3e752eb1cf4a08030da04bc3d6c5a2b3926d
Parents: 9837ca9
Author: Rohith Sharma K S 
Authored: Wed May 23 18:31:03 2018 +0530
Committer: Rohith Sharma K S 
Committed: Wed May 23 18:31:03 2018 +0530

--
 .../src/main/webapp/app/initializers/loader.js  | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f61e3e75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
index 53f9c44..01daa7a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -31,7 +31,7 @@ function getYarnHttpProtocolScheme(rmhost, application) {
   $.ajax({
 type: 'GET',
 dataType: 'json',
-async: true,
+async: false,
 context: this,
 url: httpUrl,
 success: function(data) {
@@ -44,7 +44,7 @@ function getYarnHttpProtocolScheme(rmhost, application) {
   application.advanceReadiness();
 }
   });
-  return protocolScheme == "HTTPS_ONLY";
+  return protocolScheme;
 }
 
 function getTimeLineURL(rmhost, isHttpsSchemeEnabled) {
@@ -97,7 +97,9 @@ function updateConfigs(application) {
 
   Ember.Logger.log("RM Address: " + rmhost);
 
-  var isHttpsSchemeEnabled = getYarnHttpProtocolScheme(rmhost, application);
+  var protocolSchemeFromRM = getYarnHttpProtocolScheme(rmhost, application);
+  Ember.Logger.log("Is protocol scheme https? " + (protocolSchemeFromRM == 
"HTTPS_ONLY"));
+  var isHttpsSchemeEnabled = (protocolSchemeFromRM == "HTTPS_ONLY");
   if(!ENV.hosts.timelineWebAddress) {
 var timelinehost = "";
 $.ajax({
@@ -137,7 +139,7 @@ function updateConfigs(application) {
 $.ajax({
   type: 'GET',
   dataType: 'json',
-  async: true,
+  async: false,
   context: this,
   url: getTimeLineV1URL(rmhost, isHttpsSchemeEnabled),
   success: function(data) {
@@ -171,7 +173,7 @@ function updateConfigs(application) {
 $.ajax({
   type: 'GET',
   dataType: 'json',
-  async: true,
+  async: false,
   context: this,
   url: getSecurityURL(rmhost),
   success: function(data) {
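
The underlying bug pattern here is returning a value that is only assigned
inside an asynchronous callback: with async: true the function returns
before the success handler runs, so callers always see the initial value,
while async: false blocks until the response has arrived. The same pitfall,
sketched in Java with CompletableFuture (all names hypothetical):

import java.util.concurrent.CompletableFuture;

public class AsyncReturnPitfall {
  // Stand-in for the AJAX call; completes some time later.
  static CompletableFuture<String> fetchScheme() {
    return CompletableFuture.supplyAsync(() -> "HTTPS_ONLY");
  }

  // Mirrors async: true; the callback usually has not run yet when we
  // return, so the caller sees the initial value.
  static String getSchemeBroken() {
    String[] scheme = {""};
    fetchScheme().thenAccept(s -> scheme[0] = s);
    return scheme[0]; // almost always still ""
  }

  // Mirrors async: false; block until the value exists.
  static String getSchemeBlocking() {
    return fetchScheme().join();
  }

  public static void main(String[] args) {
    System.out.println("broken:   " + getSchemeBroken());
    System.out.println("blocking: " + getSchemeBlocking());
  }
}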





[15/18] hadoop git commit: HDDS-84. The root directory of ozone.tar.gz should contain the version string. Contributed by Elek, Marton.

2018-05-23 Thread hanishakoneru
HDDS-84. The root directory of ozone.tar.gz should contain the version string. 
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63fc5873
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63fc5873
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63fc5873

Branch: refs/heads/HDDS-48
Commit: 63fc5873cee41b883e988ead00fc6f6cf74fae97
Parents: f61e3e7
Author: Mukul Kumar Singh 
Authored: Wed May 23 21:07:37 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Wed May 23 21:07:37 2018 +0530

--
 dev-support/bin/ozone-dist-tar-stitching | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63fc5873/dev-support/bin/ozone-dist-tar-stitching
--
diff --git a/dev-support/bin/ozone-dist-tar-stitching 
b/dev-support/bin/ozone-dist-tar-stitching
index decfa23..d1116e4 100755
--- a/dev-support/bin/ozone-dist-tar-stitching
+++ b/dev-support/bin/ozone-dist-tar-stitching
@@ -41,7 +41,7 @@ function run()
 #To include the version name in the root directory of the tar file
 # we create a symbolic link and dereference it during the tar creation
 ln -s -f ozone ozone-${VERSION}
-run tar -c --dereference -f "ozone-${VERSION}.tar" "ozone"
+run tar -c --dereference -f "ozone-${VERSION}.tar" "ozone-${VERSION}"
 run gzip -f "ozone-${VERSION}.tar"
 echo
 echo "Ozone dist tar available at: ${BASEDIR}/ozone-${VERSION}.tar.gz"





[10/18] hadoop git commit: Additional check when unpacking archives. Contributed by Jason Lowe and Akira Ajisaka.

2018-05-23 Thread hanishakoneru
Additional check when unpacking archives. Contributed by Jason Lowe and Akira 
Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/745f203e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/745f203e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/745f203e

Branch: refs/heads/HDDS-48
Commit: 745f203e577bacb35b042206db94615141fa5e6f
Parents: 1d2640b
Author: Akira Ajisaka 
Authored: Wed May 23 17:15:57 2018 +0900
Committer: Akira Ajisaka 
Committed: Wed May 23 17:16:23 2018 +0900

--
 .../java/org/apache/hadoop/fs/FileUtil.java | 18 -
 .../java/org/apache/hadoop/fs/TestFileUtil.java | 40 +---
 2 files changed, 51 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/745f203e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 8743be5..5ef78f2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -617,11 +617,16 @@ public class FileUtil {
   throws IOException {
 try (ZipInputStream zip = new ZipInputStream(inputStream)) {
   int numOfFailedLastModifiedSet = 0;
+  String targetDirPath = toDir.getCanonicalPath() + File.separator;
   for(ZipEntry entry = zip.getNextEntry();
   entry != null;
   entry = zip.getNextEntry()) {
 if (!entry.isDirectory()) {
   File file = new File(toDir, entry.getName());
+  if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+throw new IOException("expanding " + entry.getName()
++ " would create file outside of " + toDir);
+  }
   File parent = file.getParentFile();
   if (!parent.mkdirs() &&
   !parent.isDirectory()) {
@@ -656,12 +661,17 @@ public class FileUtil {
 
 try {
   entries = zipFile.entries();
+  String targetDirPath = unzipDir.getCanonicalPath() + File.separator;
   while (entries.hasMoreElements()) {
 ZipEntry entry = entries.nextElement();
 if (!entry.isDirectory()) {
   InputStream in = zipFile.getInputStream(entry);
   try {
 File file = new File(unzipDir, entry.getName());
+if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+  throw new IOException("expanding " + entry.getName()
+  + " would create file outside of " + unzipDir);
+}
 if (!file.getParentFile().mkdirs()) {
   if (!file.getParentFile().isDirectory()) {
 throw new IOException("Mkdirs failed to create " +
@@ -944,6 +954,13 @@ public class FileUtil {
 
   private static void unpackEntries(TarArchiveInputStream tis,
   TarArchiveEntry entry, File outputDir) throws IOException {
+String targetDirPath = outputDir.getCanonicalPath() + File.separator;
+File outputFile = new File(outputDir, entry.getName());
+if (!outputFile.getCanonicalPath().startsWith(targetDirPath)) {
+  throw new IOException("expanding " + entry.getName()
+  + " would create entry outside of " + outputDir);
+}
+
 if (entry.isDirectory()) {
   File subDir = new File(outputDir, entry.getName());
   if (!subDir.mkdirs() && !subDir.isDirectory()) {
@@ -966,7 +983,6 @@ public class FileUtil {
   return;
 }
 
-File outputFile = new File(outputDir, entry.getName());
 if (!outputFile.getParentFile().exists()) {
   if (!outputFile.getParentFile().mkdirs()) {
 throw new IOException("Mkdirs failed to create tar internal dir "
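
The check added in all three unpack paths is the standard guard against
archive entries whose names contain ".." or absolute components and would
therefore land outside the extraction directory: canonicalize the would-be
output path and require it to start with the canonicalized target plus a
trailing separator (so that "/tmp/out-evil" cannot pass for "/tmp/out").
A standalone sketch of the same guard; the method name is hypothetical:

import java.io.File;
import java.io.IOException;

public class UnpackGuard {
  // Resolve entryName under toDir, refusing paths that escape it.
  static File resolveSafely(File toDir, String entryName)
      throws IOException {
    String targetDirPath = toDir.getCanonicalPath() + File.separator;
    File file = new File(toDir, entryName);
    if (!file.getCanonicalPath().startsWith(targetDirPath)) {
      throw new IOException("expanding " + entryName
          + " would create file outside of " + toDir);
    }
    return file;
  }

  public static void main(String[] args) throws IOException {
    File out = new File("out");
    System.out.println(resolveSafely(out, "a/b.txt"));     // accepted
    System.out.println(resolveSafely(out, "../evil.txt")); // throws
  }
}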

http://git-wip-us.apache.org/repos/asf/hadoop/blob/745f203e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
index 39f2f6b..7218a1b 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNull;
 import static 

[06/18] hadoop git commit: HDDS-79. Remove ReportState from SCMHeartbeatRequestProto. Contributed by Nanda kumar.

2018-05-23 Thread hanishakoneru
HDDS-79. Remove ReportState from SCMHeartbeatRequestProto. Contributed by Nanda 
kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68c7fd8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68c7fd8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68c7fd8e

Branch: refs/heads/HDDS-48
Commit: 68c7fd8e6092e8436ecf96852c608708f311f262
Parents: 43be9ab
Author: Xiaoyu Yao 
Authored: Tue May 22 15:46:59 2018 -0700
Committer: Xiaoyu Yao 
Committed: Tue May 22 15:46:59 2018 -0700

--
 .../common/impl/ContainerManagerImpl.java   | 14 +---
 .../common/impl/ContainerReportManagerImpl.java | 43 +++-
 .../common/interfaces/ContainerManager.java |  7 --
 .../interfaces/ContainerReportManager.java  |  8 +--
 .../statemachine/DatanodeStateMachine.java  |  1 -
 .../common/statemachine/StateContext.java   | 38 --
 .../states/endpoint/HeartbeatEndpointTask.java  |  3 +-
 .../container/ozoneimpl/OzoneContainer.java |  9 ---
 .../StorageContainerDatanodeProtocol.java   |  5 +-
 .../protocol/StorageContainerNodeProtocol.java  |  5 +-
 ...rDatanodeProtocolClientSideTranslatorPB.java |  5 +-
 ...rDatanodeProtocolServerSideTranslatorPB.java |  3 +-
 .../StorageContainerDatanodeProtocol.proto  | 39 ---
 .../ozone/container/common/ScmTestMock.java | 13 +---
 .../common/TestDatanodeStateMachine.java|  7 --
 .../hdds/scm/node/HeartbeatQueueItem.java   | 23 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java| 30 +---
 .../scm/server/SCMDatanodeProtocolServer.java   |  6 +-
 .../hdds/scm/container/MockNodeManager.java |  5 +-
 .../hdds/scm/node/TestContainerPlacement.java   |  9 +--
 .../hadoop/hdds/scm/node/TestNodeManager.java   | 74 +---
 .../ozone/container/common/TestEndPoint.java| 11 +--
 .../testutils/ReplicationNodeManagerMock.java   |  5 +-
 .../ozone/TestStorageContainerManager.java  |  5 +-
 24 files changed, 63 insertions(+), 305 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 3a78c70..faee5d0 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -35,8 +35,6 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
-.StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
@@ -1072,16 +1070,8 @@ public class ContainerManagerImpl implements 
ContainerManager {
   @Override
   public long getNumKeys(long containerId) {
 ContainerData cData = containerMap.get(containerId);
-return cData.getKeyCount();  }
-
-  /**
-   * Get the container report state to send via HB to SCM.
-   *
-   * @return container report state.
-   */
-  @Override
-  public ReportState getContainerReportState() {
-return containerReportManager.getContainerReportState();
+return cData.getKeyCount();
   }
 
+
 }
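
The shape of the refactor, as the diffstat above suggests: the heartbeat no
longer piggybacks a ReportState, keeping only node-level state, while
container reporting moves onto its own message and schedule. A simplified
sketch with stand-in types; these are not the real generated protobuf
classes:

public class HeartbeatShape {
  record NodeReport(long capacityBytes, long usedBytes) {}

  // After HDDS-79: the heartbeat carries only node state.
  record Heartbeat(String datanodeId, NodeReport nodeReport) {}

  // Container reports travel separately, on their own cadence.
  record ContainerReport(String datanodeId, long[] containerIds) {}

  static Heartbeat buildHeartbeat(String dnId, NodeReport report) {
    return new Heartbeat(dnId, report); // no ReportState field
  }

  static ContainerReport buildContainerReport(String dnId, long[] ids) {
    return new ContainerReport(dnId, ids);
  }
}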

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
index 6c83c66..f1d3f7f 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
@@ -19,15 +19,12 @@ package org.apache.hadoop.ozone.container.common.impl;
 
 import org.apache.commons.lang3.RandomUtils;
 import 

[04/18] hadoop git commit: YARN-8273. Log aggregation does not warn if HDFS quota in target directory is exceeded (grepas via rkanter)

2018-05-23 Thread hanishakoneru
YARN-8273. Log aggregation does not warn if HDFS quota in target directory is 
exceeded (grepas via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b22f56c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b22f56c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b22f56c4

Branch: refs/heads/HDDS-48
Commit: b22f56c4719e63bd4f6edc2a075e0bcdb9442255
Parents: 83f53e5
Author: Robert Kanter 
Authored: Tue May 22 14:24:38 2018 -0700
Committer: Robert Kanter 
Committed: Tue May 22 14:24:38 2018 -0700

--
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  |  4 ++
 .../logaggregation/AggregatedLogFormat.java | 14 +++-
 .../LogAggregationDFSException.java | 45 
 .../LogAggregationFileController.java   |  4 +-
 .../tfile/LogAggregationTFileController.java| 13 +++-
 .../logaggregation/TestContainerLogsUtils.java  |  4 +-
 .../logaggregation/AppLogAggregatorImpl.java| 49 ++---
 .../TestAppLogAggregatorImpl.java   | 75 +---
 .../nodemanager/webapp/TestNMWebServices.java   |  7 +-
 9 files changed, 183 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b22f56c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index db6c11a..a25c524 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -40,6 +40,10 @@
   hadoop-common
   provided
 
+
+  org.apache.hadoop
+  hadoop-hdfs-client
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b22f56c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index af3066e..81d5053 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SecureIOUtils;
 import org.apache.hadoop.io.Writable;
@@ -547,7 +548,7 @@ public class AggregatedLogFormat {
 }
 
 @Override
-public void close() {
+public void close() throws DSQuotaExceededException {
   try {
 if (writer != null) {
   writer.close();
@@ -555,7 +556,16 @@ public class AggregatedLogFormat {
   } catch (Exception e) {
 LOG.warn("Exception closing writer", e);
   } finally {
-IOUtils.cleanupWithLogger(LOG, this.fsDataOStream);
+try {
+  this.fsDataOStream.close();
+} catch (DSQuotaExceededException e) {
+  LOG.error("Exception in closing {}",
+  this.fsDataOStream.getClass(), e);
+  throw e;
+} catch (Throwable e) {
+  LOG.error("Exception in closing {}",
+  this.fsDataOStream.getClass(), e);
+}
   }
 }
   }
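
The design choice in this close(): ordinary stream-close failures keep the
old log-and-continue behavior, but a DSQuotaExceededException is rethrown so
that log aggregation can warn the user that logs were lost to an exhausted
HDFS quota instead of failing silently. A sketch of the pattern with
simplified stand-in exception types (the real ones are in
hadoop-hdfs-client and this patch's LogAggregationDFSException):

import java.io.Closeable;
import java.io.IOException;

public class QuotaAwareClose {
  static class DSQuotaExceededException extends IOException {
    DSQuotaExceededException(String msg) { super(msg); }
  }

  static class LogAggregationDFSException extends Exception {
    LogAggregationDFSException(Throwable cause) { super(cause); }
  }

  static void closeQuietlyExceptQuota(Closeable out)
      throws LogAggregationDFSException {
    try {
      out.close();
    } catch (DSQuotaExceededException e) {
      // Quota exhaustion is actionable for the user: propagate it so
      // the aggregator can surface a warning.
      throw new LogAggregationDFSException(e);
    } catch (IOException e) {
      // Everything else keeps the previous swallow-and-log behavior.
      System.err.println("Exception closing writer: " + e);
    }
  }
}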

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b22f56c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationDFSException.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationDFSException.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationDFSException.java
new file mode 100644
index 000..19953e4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationDFSException.java
@@ -0,0 +1,45 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  
