hadoop git commit: fix build after rebase

2018-07-13 Thread botong
Repository: hadoop
Updated Branches:
  refs/heads/YARN-7402 9c24328be -> 3e1c46077


fix build after rebase


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e1c4607
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e1c4607
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e1c4607

Branch: refs/heads/YARN-7402
Commit: 3e1c460775e7f7b9be635a41f1c09027e2a8d56c
Parents: 9c24328
Author: Botong Huang 
Authored: Fri Jul 13 21:29:19 2018 -0700
Committer: Botong Huang 
Committed: Fri Jul 13 21:29:19 2018 -0700

--
 .../yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java   | 2 +-
 .../globalpolicygenerator/subclustercleaner/SubClusterCleaner.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e1c4607/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
index 88b9f2b..1ae07f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
@@ -22,7 +22,7 @@ import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.commons.lang.time.DurationFormatUtils;
+import org.apache.commons.lang3.time.DurationFormatUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.service.CompositeService;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e1c4607/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
index dad5121..6410a6d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
@@ -21,7 +21,7 @@ package 
org.apache.hadoop.yarn.server.globalpolicygenerator.subclustercleaner;
 import java.util.Date;
 import java.util.Map;
 
-import org.apache.commons.lang.time.DurationFormatUtils;
+import org.apache.commons.lang3.time.DurationFormatUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
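
The fix is purely the package move from commons-lang to commons-lang3; DurationFormatUtils is call-compatible across the two. A minimal standalone sketch of the relocated API (not part of this commit; the duration value is made up):

    import org.apache.commons.lang3.time.DurationFormatUtils;

    public class DurationDemo {
      public static void main(String[] args) {
        long uptimeMs = 90061000L; // 25h 1m 1s
        // hours:minutes:seconds.millis style output
        System.out.println(DurationFormatUtils.formatDurationHMS(uptimeMs));
        // custom pattern: days, hours, minutes
        System.out.println(
            DurationFormatUtils.formatDuration(uptimeMs, "d'd' H'h' m'm'"));
      }
    }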





[29/50] [abbrv] hadoop git commit: Revert "HDDS-242. Introduce NEW_NODE, STALE_NODE and DEAD_NODE event" This reverts commit a47ec5dac4a1cdfec788ce3296b4f610411911ea. There was a spurious file in this commit. Revert to clean it.

2018-07-13 Thread botong
Revert "HDDS-242. Introduce NEW_NODE, STALE_NODE and DEAD_NODE event"
This reverts commit a47ec5dac4a1cdfec788ce3296b4f610411911ea.
There was a spurious file in this commit. Revert to clean it.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5678587
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5678587
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5678587

Branch: refs/heads/YARN-7402
Commit: b56785873a4ec9f6f5617e4252888b23837604e2
Parents: 418cc7f
Author: Anu Engineer 
Authored: Wed Jul 11 12:03:42 2018 -0700
Committer: Anu Engineer 
Committed: Wed Jul 11 12:03:42 2018 -0700

--
 .../scm/container/ContainerReportHandler.java   | 47 --
 .../hadoop/hdds/scm/node/DeadNodeHandler.java   | 42 
 .../hadoop/hdds/scm/node/NewNodeHandler.java| 50 ---
 .../hadoop/hdds/scm/node/NodeReportHandler.java | 42 
 .../hadoop/hdds/scm/node/StaleNodeHandler.java  | 42 
 .../common/src/main/bin/ozone-config.sh | 51 
 6 files changed, 274 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5678587/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
deleted file mode 100644
index 486162e..000
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.hadoop.hdds.scm.node.states.Node2ContainerMap;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-.ContainerReportFromDatanode;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-
-/**
- * Handles container reports from datanode.
- */
-public class ContainerReportHandler implements
-    EventHandler<ContainerReportFromDatanode> {
-
-  private final Mapping containerMapping;
-  private final Node2ContainerMap node2ContainerMap;
-
-  public ContainerReportHandler(Mapping containerMapping,
-Node2ContainerMap node2ContainerMap) {
-this.containerMapping = containerMapping;
-this.node2ContainerMap = node2ContainerMap;
-  }
-
-  @Override
-  public void onMessage(ContainerReportFromDatanode 
containerReportFromDatanode,
-EventPublisher publisher) {
-// TODO: process container report.
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5678587/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
deleted file mode 100644
index 427aef8..000
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,

[45/50] [abbrv] hadoop git commit: YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen

2018-07-13 Thread botong
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bbe70ce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json
new file mode 100644
index 000..2ff879e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json
@@ -0,0 +1,196 @@
+ {
+  "type": "capacityScheduler",
+  "capacity": 100.0,
+  "usedCapacity": 0.0,
+  "maxCapacity": 100.0,
+  "queueName": "root",
+  "queues": {
+"queue": [
+  {
+"type": "capacitySchedulerLeafQueueInfo",
+"capacity": 100.0,
+"usedCapacity": 0.0,
+"maxCapacity": 100.0,
+"absoluteCapacity": 100.0,
+"absoluteMaxCapacity": 100.0,
+"absoluteUsedCapacity": 0.0,
+"numApplications": 484,
+"queueName": "default",
+"state": "RUNNING",
+"resourcesUsed": {
+  "memory": 0,
+  "vCores": 0
+},
+"hideReservationQueues": false,
+"nodeLabels": [
+  "*"
+],
+"numActiveApplications": 484,
+"numPendingApplications": 0,
+"numContainers": 0,
+"maxApplications": 1,
+"maxApplicationsPerUser": 1,
+"userLimit": 100,
+"users": {
+  "user": [
+{
+  "username": "Default",
+  "resourcesUsed": {
+"memory": 0,
+"vCores": 0
+  },
+  "numPendingApplications": 0,
+  "numActiveApplications": 468,
+  "AMResourceUsed": {
+"memory": 30191616,
+"vCores": 468
+  },
+  "userResourceLimit": {
+"memory": 31490048,
+"vCores": 7612
+  }
+}
+  ]
+},
+"userLimitFactor": 1.0,
+"AMResourceLimit": {
+  "memory": 31490048,
+  "vCores": 7612
+},
+"usedAMResource": {
+  "memory": 30388224,
+  "vCores": 532
+},
+"userAMResourceLimit": {
+  "memory": 31490048,
+  "vCores": 7612
+},
+"preemptionDisabled": true
+  },
+  {
+"type": "capacitySchedulerLeafQueueInfo",
+"capacity": 100.0,
+"usedCapacity": 0.0,
+"maxCapacity": 100.0,
+"absoluteCapacity": 100.0,
+"absoluteMaxCapacity": 100.0,
+"absoluteUsedCapacity": 0.0,
+"numApplications": 484,
+"queueName": "default2",
+"state": "RUNNING",
+"resourcesUsed": {
+  "memory": 0,
+  "vCores": 0
+},
+"hideReservationQueues": false,
+"nodeLabels": [
+  "*"
+],
+"numActiveApplications": 484,
+"numPendingApplications": 0,
+"numContainers": 0,
+"maxApplications": 1,
+"maxApplicationsPerUser": 1,
+"userLimit": 100,
+"users": {
+  "user": [
+{
+  "username": "Default",
+  "resourcesUsed": {
+"memory": 0,
+"vCores": 0
+  },
+  "numPendingApplications": 0,
+  "numActiveApplications": 468,
+  "AMResourceUsed": {
+"memory": 30191616,
+"vCores": 468
+  },
+  "userResourceLimit": {
+"memory": 31490048,
+"vCores": 7612
+  }
+}
+  ]
+},
+"userLimitFactor": 1.0,
+"AMResourceLimit": {
+  "memory": 31490048,
+  "vCores": 7612
+},
+"usedAMResource": {
+  "memory": 30388224,
+  "vCores": 532
+},
+"userAMResourceLimit": {
+  "memory": 31490048,
+  "vCores": 7612
+},
+"preemptionDisabled": true
+  }
+]
+  },
+  "health": {
+"lastrun": 1517951638085,
+"operationsInfo": {
+  "entry": {
+"key": 

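The resource above is sample CapacityScheduler REST output used by the policy generator tests. A hedged sketch of pulling fields out of such JSON with Jackson, which Hadoop already ships; the literal below is a trimmed stand-in for the full file:

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class SchedulerInfoPeek {
      public static void main(String[] args) throws Exception {
        String json = "{\"queueName\":\"root\",\"capacity\":100.0,"
            + "\"queues\":{\"queue\":[{\"queueName\":\"default\"}]}}";
        JsonNode root = new ObjectMapper().readTree(json);
        System.out.println(root.get("queueName").asText()); // root
        for (JsonNode q : root.path("queues").path("queue")) {
          System.out.println("leaf queue: " + q.get("queueName").asText());
        }
      }
    }
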
[33/50] [abbrv] hadoop git commit: HADOOP-15316. GenericTestUtils can exceed maxSleepTime. Contributed by Adam Antal.

2018-07-13 Thread botong
HADOOP-15316. GenericTestUtils can exceed maxSleepTime. Contributed by Adam 
Antal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f3f9391
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f3f9391
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f3f9391

Branch: refs/heads/YARN-7402
Commit: 4f3f9391b035d7f7e285c332770c6c1ede9a5a85
Parents: b37074b
Author: Sean Mackrory 
Authored: Thu Jul 12 16:45:07 2018 +0200
Committer: Sean Mackrory 
Committed: Thu Jul 12 17:24:01 2018 +0200

--
 .../src/test/java/org/apache/hadoop/test/GenericTestUtils.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f3f9391/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 3e9da1b..0112894 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -661,7 +661,7 @@ public abstract class GenericTestUtils {
 public Object answer(InvocationOnMock invocation) throws Throwable {
   boolean interrupted = false;
   try {
-Thread.sleep(r.nextInt(maxSleepTime) + minSleepTime);
+Thread.sleep(r.nextInt(maxSleepTime - minSleepTime) + minSleepTime);
   } catch (InterruptedException ie) {
 interrupted = true;
   }
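
The one-line fix follows from Random.nextInt's contract: nextInt(bound) returns a value in [0, bound), so the old expression could sleep up to maxSleepTime + minSleepTime - 1. A standalone illustration (bounds are made up):

    import java.util.Random;

    public class SleepBoundDemo {
      public static void main(String[] args) {
        int minSleepTime = 100, maxSleepTime = 500;
        Random r = new Random();
        // Old: r.nextInt(maxSleepTime) + minSleepTime        -> range [100, 599]
        // New: r.nextInt(maxSleepTime - minSleepTime) + minSleepTime
        //                                                    -> range [100, 499]
        long sample = r.nextInt(maxSleepTime - minSleepTime) + minSleepTime;
        System.out.println("sampled sleep = " + sample + " ms");
      }
    }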





[32/50] [abbrv] hadoop git commit: HADOOP-15349. S3Guard DDB retryBackoff to be more informative on limits exceeded. Contributed by Gabor Bota.

2018-07-13 Thread botong
HADOOP-15349. S3Guard DDB retryBackoff to be more informative on limits 
exceeded. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a08812a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a08812a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a08812a1

Branch: refs/heads/YARN-7402
Commit: a08812a1b10df059b26f6a216e6339490298ba28
Parents: 4f3f939
Author: Sean Mackrory 
Authored: Thu Jul 12 16:46:02 2018 +0200
Committer: Sean Mackrory 
Committed: Thu Jul 12 17:24:01 2018 +0200

--
 .../org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a08812a1/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
index 116827d..43849b1 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
@@ -655,7 +655,8 @@ public class DynamoDBMetadataStore implements MetadataStore 
{
   retryCount, 0, true);
   if (action.action == RetryPolicy.RetryAction.RetryDecision.FAIL) {
 throw new IOException(
-String.format("Max retries exceeded (%d) for DynamoDB",
+String.format("Max retries exceeded (%d) for DynamoDB. This may be"
++ " because write threshold of DynamoDB is set too low.",
 retryCount));
   } else {
 LOG.debug("Sleeping {} msec before next retry", action.delayMillis);
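
For context, the surrounding retryBackoff logic is a bounded-retry loop. A self-contained sketch of that shape using the new message; the RetryPolicy plumbing is simplified away and the backoff constant is illustrative, not from the patch:

    import java.io.IOException;

    public class RetryBackoffSketch {
      interface Op { void run() throws Exception; }

      static void withRetries(Op op, int maxRetries)
          throws IOException, InterruptedException {
        for (int retryCount = 0; ; retryCount++) {
          try {
            op.run();
            return;
          } catch (Exception e) {
            if (retryCount >= maxRetries) {
              throw new IOException(String.format(
                  "Max retries exceeded (%d) for DynamoDB. This may be"
                      + " because write threshold of DynamoDB is set too low.",
                  retryCount), e);
            }
            Thread.sleep(100L << retryCount); // exponential backoff
          }
        }
      }
    }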





[43/50] [abbrv] hadoop git commit: HADOOP-15531. Use commons-text instead of commons-lang in some classes to fix deprecation warnings. Contributed by Takanobu Asanuma.

2018-07-13 Thread botong
HADOOP-15531. Use commons-text instead of commons-lang in some classes to fix 
deprecation warnings. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88625f5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88625f5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88625f5c

Branch: refs/heads/YARN-7402
Commit: 88625f5cd90766136a9ebd76a8d84b45a37e6c99
Parents: 17118f4
Author: Akira Ajisaka 
Authored: Fri Jul 13 11:42:12 2018 -0400
Committer: Akira Ajisaka 
Committed: Fri Jul 13 11:42:12 2018 -0400

--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml   |  4 
 hadoop-common-project/hadoop-common/pom.xml   |  5 +
 .../org/apache/hadoop/conf/ReconfigurationServlet.java|  2 +-
 .../hdfs/qjournal/server/GetJournalEditServlet.java   |  2 +-
 .../hadoop/hdfs/server/diskbalancer/command/Command.java  |  6 +++---
 .../hdfs/server/diskbalancer/command/PlanCommand.java |  4 ++--
 .../hdfs/server/diskbalancer/command/ReportCommand.java   | 10 +-
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../java/org/apache/hadoop/hdfs/tools/CacheAdmin.java |  2 +-
 .../java/org/apache/hadoop/hdfs/TestDecommission.java |  4 ++--
 .../java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java   |  4 ++--
 .../apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java   |  2 +-
 .../apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java |  2 +-
 .../apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java |  2 +-
 .../apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java  |  2 +-
 hadoop-project/pom.xml|  5 +
 .../java/org/apache/hadoop/yarn/client/cli/TopCLI.java|  3 ++-
 .../src/main/java/org/apache/hadoop/yarn/state/Graph.java |  2 +-
 .../org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java  |  2 +-
 .../org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java |  2 +-
 .../java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java |  2 +-
 .../java/org/apache/hadoop/yarn/webapp/view/TextView.java |  2 +-
 .../apache/hadoop/yarn/server/webapp/AppAttemptBlock.java |  2 +-
 .../org/apache/hadoop/yarn/server/webapp/AppBlock.java|  2 +-
 .../org/apache/hadoop/yarn/server/webapp/AppsBlock.java   |  2 +-
 .../resourcemanager/webapp/FairSchedulerAppsBlock.java|  2 +-
 .../server/resourcemanager/webapp/RMAppAttemptBlock.java  |  2 +-
 .../yarn/server/resourcemanager/webapp/RMAppBlock.java|  2 +-
 .../yarn/server/resourcemanager/webapp/RMAppsBlock.java   |  2 +-
 .../hadoop/yarn/server/router/webapp/AppsBlock.java   |  4 ++--
 30 files changed, 52 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88625f5c/hadoop-client-modules/hadoop-client-minicluster/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 490281a..ea8d680 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -171,6 +171,10 @@
           <artifactId>commons-lang3</artifactId>
         </exclusion>
         <exclusion>
+          <groupId>org.apache.commons</groupId>
+          <artifactId>commons-text</artifactId>
+        </exclusion>
+        <exclusion>
           <groupId>commons-logging</groupId>
           <artifactId>commons-logging</artifactId>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88625f5c/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 67a5a54..42554da 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -172,6 +172,11 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-text</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-api</artifactId>
       <scope>compile</scope>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88625f5c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
index c5bdf4e..ef4eac6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.conf;
 
-import org.apache.commons.lang3.StringEscapeUtils;
+import org.apache.commons.text.StringEscapeUtils;
 
 import 
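
The replacement API lives in org.apache.commons.text and is a drop-in for these call sites. A minimal standalone sketch, not from the patch:

    import org.apache.commons.text.StringEscapeUtils;

    public class EscapeDemo {
      public static void main(String[] args) {
        String raw = "<b>x & y</b>";
        // prints &lt;b&gt;x &amp; y&lt;/b&gt;
        System.out.println(StringEscapeUtils.escapeHtml4(raw));
        // prints the control characters as \t and \n escapes
        System.out.println(StringEscapeUtils.escapeJava("tab\tnewline\n"));
      }
    }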

[17/50] [abbrv] hadoop git commit: YARN-8473. Containers being launched as app tears down can leave containers in NEW state. Contributed by Jason Lowe.

2018-07-13 Thread botong
YARN-8473. Containers being launched as app tears down can leave containers in 
NEW state. Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/705e2c1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/705e2c1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/705e2c1f

Branch: refs/heads/YARN-7402
Commit: 705e2c1f7cba51496b0d019ecedffbe5fb55c28b
Parents: ca8b80b
Author: Sunil G 
Authored: Tue Jul 10 20:11:47 2018 +0530
Committer: Sunil G 
Committed: Tue Jul 10 20:11:47 2018 +0530

--
 .../application/ApplicationImpl.java| 36 ++---
 .../application/TestApplication.java| 53 
 2 files changed, 71 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/705e2c1f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
index 39be7a7..6d84fb2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
@@ -211,6 +211,9 @@ public class ApplicationImpl implements Application {
   private static final ContainerDoneTransition CONTAINER_DONE_TRANSITION =
   new ContainerDoneTransition();
 
+  private static final InitContainerTransition INIT_CONTAINER_TRANSITION =
+  new InitContainerTransition();
+
   private static StateMachineFactory<ApplicationImpl, ApplicationState,
   ApplicationEventType, ApplicationEvent> stateMachineFactory =
   new StateMachineFactory<ApplicationImpl, ApplicationState,
   ApplicationEventType, ApplicationEvent>(ApplicationState.NEW)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/705e2c1f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java
index c8f28e2..cbe19ff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java
@@ -360,35 +360,66 @@ public class TestApplication {
 }
   }
 
-//TODO Re-work after Application transitions are changed.
-//  @Test
+  @Test
   @SuppressWarnings("unchecked")
-  public void testStartContainerAfterAppFinished() {
+  public void testStartContainerAfterAppRunning() {
 WrappedApplication wa = null;
 try {
-  wa = new WrappedApplication(5, 314159265358979L, "yak", 3);
+  wa = new WrappedApplication(5, 314159265358979L, "yak", 4);
   wa.initApplication();
-  wa.initContainer(-1);
+  wa.initContainer(0);
   assertEquals(ApplicationState.INITING, wa.app.getApplicationState());
   wa.applicationInited();
   assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState());
 
-  reset(wa.localizerBus);
-  wa.containerFinished(0);
-  wa.containerFinished(1);
-  wa.containerFinished(2);
   assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState());
-  assertEquals(0, wa.app.getContainers().size());
+  assertEquals(1, wa.app.getContainers().size());
 
   wa.appFinished();
+  verify(wa.containerBus).handle(
+  argThat(new ContainerKillMatcher(wa.containers.get(0)
+  .getContainerId(;
+  assertEquals(ApplicationState.FINISHING_CONTAINERS_WAIT,
+  wa.app.getApplicationState());
+
+  wa.initContainer(1);
+  verify(wa.containerBus).handle(
+ 
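
The visible hunk factors InitContainerTransition into a shared constant so the same handling can be registered for more application states, and the updated test starts a container while the app is tearing down. For readers unfamiliar with yarn-common's declarative state machines, a stripped-down sketch of the StateMachineFactory API, assuming it behaves as on trunk (the enums and the hook body are illustrative):

    import org.apache.hadoop.yarn.state.StateMachine;
    import org.apache.hadoop.yarn.state.StateMachineFactory;

    public class MiniAppStateMachine {
      enum MyState { NEW, RUNNING, FINISHED }
      enum MyEvent { INIT, FINISH }
      static class App { }

      private static final StateMachineFactory<App, MyState, MyEvent, MyEvent>
          FACTORY = new StateMachineFactory<App, MyState, MyEvent, MyEvent>(
              MyState.NEW)
          .addTransition(MyState.NEW, MyState.RUNNING, MyEvent.INIT,
              (app, event) -> System.out.println("init handled"))
          .addTransition(MyState.RUNNING, MyState.FINISHED, MyEvent.FINISH)
          .installTopology();

      public static void main(String[] args) throws Exception {
        StateMachine<MyState, MyEvent, MyEvent> sm = FACTORY.make(new App());
        sm.doTransition(MyEvent.INIT, MyEvent.INIT);
        System.out.println(sm.getCurrentState()); // RUNNING
      }
    }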

[25/50] [abbrv] hadoop git commit: HDFS-13726. RBF: Fix RBF configuration links. Contributed by Takanobu Asanuma.

2018-07-13 Thread botong
HDFS-13726. RBF: Fix RBF configuration links. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ae13d41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ae13d41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ae13d41

Branch: refs/heads/YARN-7402
Commit: 2ae13d41dcd4f49e6b4ebc099e5f8bb8280b9872
Parents: 52e1bc8
Author: Yiqun Lin 
Authored: Wed Jul 11 22:11:59 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Jul 11 22:11:59 2018 +0800

--
 .../hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae13d41/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
index 70c6226..73e0f4a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
@@ -175,7 +175,7 @@ Deployment
 
 By default, the Router is ready to take requests and monitor the NameNode in 
the local machine.
 It needs to know the State Store endpoint by setting 
`dfs.federation.router.store.driver.class`.
-The rest of the options are documented in 
[hdfs-default.xml](../hadoop-hdfs/hdfs-default.xml).
+The rest of the options are documented in 
[hdfs-rbf-default.xml](../hadoop-hdfs-rbf/hdfs-rbf-default.xml).
 
 Once the Router is configured, it can be started:
 
@@ -290,7 +290,7 @@ Router configuration
 
 
 One can add the configurations for Router-based federation to 
**hdfs-site.xml**.
-The main options are documented in 
[hdfs-default.xml](../hadoop-hdfs/hdfs-default.xml).
+The main options are documented in 
[hdfs-rbf-default.xml](../hadoop-hdfs-rbf/hdfs-rbf-default.xml).
 The configuration values are described in this section.
 
 ### RPC server





[22/50] [abbrv] hadoop git commit: HDDS-242. Introduce NEW_NODE, STALE_NODE and DEAD_NODE event and corresponding event handlers in SCM. Contributed by Nanda Kumar.

2018-07-13 Thread botong
HDDS-242. Introduce NEW_NODE, STALE_NODE and DEAD_NODE event
and corresponding event handlers in SCM.
Contributed by Nanda Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a47ec5da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a47ec5da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a47ec5da

Branch: refs/heads/YARN-7402
Commit: a47ec5dac4a1cdfec788ce3296b4f610411911ea
Parents: 4e59b92
Author: Anu Engineer 
Authored: Tue Jul 10 15:58:47 2018 -0700
Committer: Anu Engineer 
Committed: Tue Jul 10 15:58:47 2018 -0700

--
 .../scm/container/ContainerReportHandler.java   | 47 ++
 .../hadoop/hdds/scm/node/DeadNodeHandler.java   | 42 
 .../hadoop/hdds/scm/node/NewNodeHandler.java| 50 +++
 .../hadoop/hdds/scm/node/NodeReportHandler.java | 42 
 .../hadoop/hdds/scm/node/StaleNodeHandler.java  | 42 
 .../common/src/main/bin/ozone-config.sh | 51 
 6 files changed, 274 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a47ec5da/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
new file mode 100644
index 000..486162e
--- /dev/null
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container;
+
+import org.apache.hadoop.hdds.scm.node.states.Node2ContainerMap;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
+.ContainerReportFromDatanode;
+import org.apache.hadoop.hdds.server.events.EventHandler;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+
+/**
+ * Handles container reports from datanode.
+ */
+public class ContainerReportHandler implements
+    EventHandler<ContainerReportFromDatanode> {
+
+  private final Mapping containerMapping;
+  private final Node2ContainerMap node2ContainerMap;
+
+  public ContainerReportHandler(Mapping containerMapping,
+Node2ContainerMap node2ContainerMap) {
+this.containerMapping = containerMapping;
+this.node2ContainerMap = node2ContainerMap;
+  }
+
+  @Override
+  public void onMessage(ContainerReportFromDatanode 
containerReportFromDatanode,
+EventPublisher publisher) {
+// TODO: process container report.
+  }
+}
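
All five new handlers follow the same shape: implement EventHandler for a payload type and react in onMessage. A stripped-down sketch of the pattern (the String payload and the log line are illustrative, not from the patch):

    import org.apache.hadoop.hdds.server.events.EventHandler;
    import org.apache.hadoop.hdds.server.events.EventPublisher;

    public class NewNodeLogger implements EventHandler<String> {
      @Override
      public void onMessage(String datanodeId, EventPublisher publisher) {
        // React to the event here; follow-up events can go via the publisher.
        System.out.println("new datanode registered: " + datanodeId);
      }
    }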

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a47ec5da/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
new file mode 100644
index 000..427aef8
--- /dev/null
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 

[37/50] [abbrv] hadoop git commit: HDFS-13663. Should throw exception when incorrect block size is set. Contributed by Shweta.

2018-07-13 Thread botong
HDFS-13663. Should throw exception when incorrect block size is set. 
Contributed by Shweta.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87eeb26e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87eeb26e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87eeb26e

Branch: refs/heads/YARN-7402
Commit: 87eeb26e7200fa3be0ca62ebf163985b58ad309e
Parents: 1bc106a
Author: Xiao Chen 
Authored: Thu Jul 12 20:19:14 2018 -0700
Committer: Xiao Chen 
Committed: Thu Jul 12 20:24:11 2018 -0700

--
 .../apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87eeb26e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index 94835e2..34f6c33 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -275,7 +275,9 @@ public class BlockRecoveryWorker {
 }
 // recover() guarantees syncList will have at least one replica with 
RWR
 // or better state.
-assert minLength != Long.MAX_VALUE : "wrong minLength";
+if (minLength == Long.MAX_VALUE) {
+  throw new IOException("Incorrect block size");
+}
 newBlock.setNumBytes(minLength);
 break;
   case RUR:
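
The reason for the change: assert statements are elided at runtime unless the JVM runs with -ea, so the old guard never fired in production. A tiny standalone illustration:

    public class AssertVsException {
      static void checkOld(long minLength) {
        assert minLength != Long.MAX_VALUE : "wrong minLength"; // no-op without -ea
      }

      static void checkNew(long minLength) throws java.io.IOException {
        if (minLength == Long.MAX_VALUE) {
          throw new java.io.IOException("Incorrect block size"); // always enforced
        }
      }

      public static void main(String[] args) throws Exception {
        checkOld(Long.MAX_VALUE); // passes silently when assertions are off
        checkNew(Long.MAX_VALUE); // throws unconditionally
      }
    }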





[50/50] [abbrv] hadoop git commit: YARN-6648. [GPG] Add SubClusterCleaner in Global Policy Generator. (botong)

2018-07-13 Thread botong
YARN-6648. [GPG] Add SubClusterCleaner in Global Policy Generator. (botong)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa3ee34c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa3ee34c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa3ee34c

Branch: refs/heads/YARN-7402
Commit: fa3ee34c7b74889bcbfd2effb999757c73994dd4
Parents: 43b8c2d
Author: Botong Huang 
Authored: Thu Feb 1 14:43:48 2018 -0800
Committer: Botong Huang 
Committed: Fri Jul 13 17:42:58 2018 -0700

--
 .../dev-support/findbugs-exclude.xml|   5 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  18 +++
 .../src/main/resources/yarn-default.xml |  24 
 .../store/impl/MemoryFederationStateStore.java  |  13 ++
 .../utils/FederationStateStoreFacade.java   |  41 ++-
 .../GlobalPolicyGenerator.java  |  92 ++-
 .../subclustercleaner/SubClusterCleaner.java| 109 +
 .../subclustercleaner/package-info.java |  19 +++
 .../TestSubClusterCleaner.java  | 118 +++
 9 files changed, 409 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa3ee34c/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 5cc81e5..406a8b7 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -387,6 +387,11 @@
 
 
   
+  
+
+
+
+  
  
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa3ee34c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 9156c2d..b3a4ccb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3335,6 +3335,24 @@ public class YarnConfiguration extends Configuration {
   public static final boolean DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED =
   false;
 
+  private static final String FEDERATION_GPG_PREFIX =
+  FEDERATION_PREFIX + "gpg.";
+
+  // The number of threads to use for the GPG scheduled executor service
+  public static final String GPG_SCHEDULED_EXECUTOR_THREADS =
+  FEDERATION_GPG_PREFIX + "scheduled.executor.threads";
+  public static final int DEFAULT_GPG_SCHEDULED_EXECUTOR_THREADS = 10;
+
+  // The interval at which the subcluster cleaner runs, -1 means disabled
+  public static final String GPG_SUBCLUSTER_CLEANER_INTERVAL_MS =
+  FEDERATION_GPG_PREFIX + "subcluster.cleaner.interval-ms";
+  public static final long DEFAULT_GPG_SUBCLUSTER_CLEANER_INTERVAL_MS = -1;
+
+  // The expiration time for a subcluster heartbeat, default is 30 minutes
+  public static final String GPG_SUBCLUSTER_EXPIRATION_MS =
+  FEDERATION_GPG_PREFIX + "subcluster.heartbeat.expiration-ms";
+  public static final long DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS = 1800000;
+
   
   // Other Configs
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa3ee34c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 2cc842f..66493f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3533,6 +3533,30 @@
 
   
 
+  <property>
+    <description>
+      The number of threads to use for the GPG scheduled executor service.
+    </description>
+    <name>yarn.federation.gpg.scheduled.executor.threads</name>
+    <value>10</value>
+  </property>
+
+  <property>
+    <description>
+      The interval at which the subcluster cleaner runs, -1 means disabled.
+    </description>
+    <name>yarn.federation.gpg.subcluster.cleaner.interval-ms</name>
+    <value>-1</value>
+  </property>
+
+  <property>
+    <description>
+      The expiration time for a subcluster heartbeat, default is 30 minutes.
+    </description>
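
A hedged sketch of consuming the new keys through Configuration; it uses only the constants added above, and whether SubClusterCleaner reads them exactly this way is not shown in this excerpt:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class GpgConfigPeek {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        long intervalMs = conf.getLong(
            YarnConfiguration.GPG_SUBCLUSTER_CLEANER_INTERVAL_MS,
            YarnConfiguration.DEFAULT_GPG_SUBCLUSTER_CLEANER_INTERVAL_MS);
        System.out.println(intervalMs < 0
            ? "subcluster cleaner disabled"
            : "cleaner runs every " + intervalMs + " ms");
      }
    }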

[28/50] [abbrv] hadoop git commit: HDFS-13729. Fix broken links to RBF documentation. Contributed by Gabor Bota.

2018-07-13 Thread botong
HDFS-13729. Fix broken links to RBF documentation. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/418cc7f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/418cc7f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/418cc7f3

Branch: refs/heads/YARN-7402
Commit: 418cc7f3aeabedc57c94aa9d4c4248c1476ac90e
Parents: 162228e
Author: Akira Ajisaka 
Authored: Wed Jul 11 14:46:43 2018 -0400
Committer: Akira Ajisaka 
Committed: Wed Jul 11 14:46:43 2018 -0400

--
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md| 4 ++--
 .../hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md | 2 +-
 hadoop-project/src/site/markdown/index.md.vm | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/418cc7f3/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 9ed69bf..391b71b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -420,7 +420,7 @@ Runs a HDFS dfsadmin client.
 
 Usage: `hdfs dfsrouter`
 
-Runs the DFS router. See [Router](./HDFSRouterFederation.html#Router) for more 
info.
+Runs the DFS router. See 
[Router](../hadoop-hdfs-rbf/HDFSRouterFederation.html#Router) for more info.
 
 ### `dfsrouteradmin`
 
@@ -449,7 +449,7 @@ Usage:
 | `-nameservice` `disable` `enable` *nameservice* | Disable/enable  a name 
service from the federation. If disabled, requests will not go to that name 
service. |
 | `-getDisabledNameservices` | Get the name services that are disabled in the 
federation. |
 
-The commands for managing Router-based federation. See [Mount table 
management](./HDFSRouterFederation.html#Mount_table_management) for more info.
+The commands for managing Router-based federation. See [Mount table 
management](../hadoop-hdfs-rbf/HDFSRouterFederation.html#Mount_table_management)
 for more info.
 
 ### `diskbalancer`
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/418cc7f3/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md
index 01e7076..b8d5321 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md
@@ -38,7 +38,7 @@ is limited to creating a *read-only image* of a remote 
namespace that implements
 to serve the image. Specifically, reads from a snapshot of a remote namespace 
are
 supported. Adding a remote namespace to an existing/running namenode, 
refreshing the
 remote snapshot, unmounting, and writes are not available in this release. One
-can use [ViewFs](./ViewFs.html) and [RBF](HDFSRouterFederation.html) to
+can use [ViewFs](./ViewFs.html) and 
[RBF](../hadoop-hdfs-rbf/HDFSRouterFederation.html) to
 integrate namespaces with `PROVIDED` storage into an existing deployment.
 
 Creating HDFS Clusters with `PROVIDED` Storage

http://git-wip-us.apache.org/repos/asf/hadoop/blob/418cc7f3/hadoop-project/src/site/markdown/index.md.vm
--
diff --git a/hadoop-project/src/site/markdown/index.md.vm 
b/hadoop-project/src/site/markdown/index.md.vm
index 8b9cfda..438145a 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -225,7 +225,7 @@ cluster for existing HDFS clients.
 
 See [HDFS-10467](https://issues.apache.org/jira/browse/HDFS-10467) and the
 HDFS Router-based Federation
-[documentation](./hadoop-project-dist/hadoop-hdfs/HDFSRouterFederation.html) 
for
+[documentation](./hadoop-project-dist/hadoop-hdfs-rbf/HDFSRouterFederation.html)
 for
 more details.
 
 API-based configuration of Capacity Scheduler queue configuration





[21/50] [abbrv] hadoop git commit: HDDS-208. ozone createVolume command ignores the first character of the volume name argument. Contributed by Lokesh Jain.

2018-07-13 Thread botong
HDDS-208. ozone createVolume command ignores the first character of the volume 
name argument. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e59b927
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e59b927
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e59b927

Branch: refs/heads/YARN-7402
Commit: 4e59b9278463e4f8ccce7100d4582e896154beb8
Parents: 5d0f01e
Author: Xiaoyu Yao 
Authored: Tue Jul 10 14:07:23 2018 -0700
Committer: Xiaoyu Yao 
Committed: Tue Jul 10 14:07:23 2018 -0700

--
 .../hadoop/ozone/ozShell/TestOzoneShell.java| 26 +---
 .../web/ozShell/volume/CreateVolumeHandler.java | 10 
 2 files changed, 28 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e59b927/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index 5082870..a4b30f0 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -38,6 +38,7 @@ import java.util.Random;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
+import com.google.common.base.Strings;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
@@ -203,13 +204,32 @@ public class TestOzoneShell {
   public void testCreateVolume() throws Exception {
 LOG.info("Running testCreateVolume");
 String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+testCreateVolume(volumeName, "");
+volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+testCreateVolume("/" + volumeName, "");
+testCreateVolume("/", "Volume name is required to create a volume");
+testCreateVolume("/vol/123",
+"Illegal argument: Bucket or Volume name has an unsupported character 
: /");
+  }
+
+  private void testCreateVolume(String volumeName, String errorMsg) throws 
Exception {
+err.reset();
 String userName = "bilbo";
 String[] args = new String[] {"-createVolume", url + "/" + volumeName,
 "-user", userName, "-root"};
 
-assertEquals(0, ToolRunner.run(shell, args));
-OzoneVolume volumeInfo = client.getVolumeDetails(volumeName);
-assertEquals(volumeName, volumeInfo.getName());
+if (Strings.isNullOrEmpty(errorMsg)) {
+  assertEquals(0, ToolRunner.run(shell, args));
+} else {
+  assertEquals(1, ToolRunner.run(shell, args));
+  assertTrue(err.toString().contains(errorMsg));
+  return;
+}
+
+String truncatedVolumeName =
+volumeName.substring(volumeName.lastIndexOf('/') + 1);
+OzoneVolume volumeInfo = client.getVolumeDetails(truncatedVolumeName);
+assertEquals(truncatedVolumeName, volumeInfo.getName());
 assertEquals(userName, volumeInfo.getOwner());
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e59b927/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
index 74fdbb0..0057282 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
@@ -60,15 +60,15 @@ public class CreateVolumeHandler extends Handler {
 
 String ozoneURIString = cmd.getOptionValue(Shell.CREATE_VOLUME);
 URI ozoneURI = verifyURI(ozoneURIString);
-if (ozoneURI.getPath().isEmpty()) {
+
+// we need to skip the slash in the URI path
+// getPath returns /volumeName needs to remove the initial slash.
+volumeName = ozoneURI.getPath().replaceAll("^/+", "");
+if (volumeName.isEmpty()) {
   throw new OzoneClientException(
   "Volume name is required to create a volume");
 }
 
-// we need to skip the slash in the URI path
-// getPath returns /volumeName needs to remove the first slash.
-volumeName = ozoneURI.getPath().substring(1);
-
 if (cmd.hasOption(Shell.VERBOSE)) {
   System.out.printf("Volume name 
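
A plausible minimal reproduction of the bug and the fix: substring(1) unconditionally drops the first character, which corrupts the name whenever getPath() returns it without a leading slash, while the regex strips only leading slashes:

    public class SlashStripDemo {
      public static void main(String[] args) {
        String noSlash = "volume123";   // the path form that broke
        String oneSlash = "/volume123";
        System.out.println(noSlash.substring(1));           // olume123 (bug)
        System.out.println(noSlash.replaceAll("^/+", ""));  // volume123
        System.out.println(oneSlash.replaceAll("^/+", "")); // volume123
      }
    }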

[39/50] [abbrv] hadoop git commit: HDDS-238. Add Node2Pipeline Map in SCM to track ratis/standalone pipelines. Contributed by Mukul Kumar Singh.

2018-07-13 Thread botong
HDDS-238. Add Node2Pipeline Map in SCM to track ratis/standalone pipelines. 
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f3f7222
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f3f7222
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f3f7222

Branch: refs/heads/YARN-7402
Commit: 3f3f72221ffd11cc6bfa0e010e3c5b0e14911102
Parents: f89e265
Author: Xiaoyu Yao 
Authored: Thu Jul 12 22:02:57 2018 -0700
Committer: Xiaoyu Yao 
Committed: Thu Jul 12 22:14:03 2018 -0700

--
 .../container/common/helpers/ContainerInfo.java |  11 ++
 .../hdds/scm/container/ContainerMapping.java|  11 +-
 .../scm/container/ContainerStateManager.java|   6 +
 .../scm/container/states/ContainerStateMap.java |  36 +-
 .../hdds/scm/pipelines/Node2PipelineMap.java| 121 +++
 .../hdds/scm/pipelines/PipelineManager.java |  22 ++--
 .../hdds/scm/pipelines/PipelineSelector.java|  24 +++-
 .../scm/pipelines/ratis/RatisManagerImpl.java   |  11 +-
 .../standalone/StandaloneManagerImpl.java   |   7 +-
 .../hdds/scm/pipeline/TestNode2PipelineMap.java | 117 ++
 10 files changed, 343 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f3f7222/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
index 9593717..4074b21 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
@@ -456,4 +456,15 @@ public class ContainerInfo implements 
Comparator<ContainerInfo>,
   replicationFactor, replicationType);
 }
   }
+
+  /**
+   * Check if a container is in open state, this will check if the
+   * container is either open or allocated or creating. Any containers in
+   * these states is managed as an open container by SCM.
+   */
+  public boolean isContainerOpen() {
+return state == HddsProtos.LifeCycleState.ALLOCATED ||
+state == HddsProtos.LifeCycleState.CREATING ||
+state == HddsProtos.LifeCycleState.OPEN;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f3f7222/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index abad32c..26f4d86 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -477,7 +477,7 @@ public class ContainerMapping implements Mapping {
 List<StorageContainerDatanodeProtocolProtos.ContainerInfo>
 containerInfos = reports.getReportsList();
 
- for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState :
+for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState :
 containerInfos) {
   byte[] dbKey = Longs.toByteArray(datanodeState.getContainerID());
   lock.lock();
@@ -498,7 +498,9 @@ public class ContainerMapping implements Mapping {
   containerStore.put(dbKey, newState.toByteArray());
 
   // If the container is closed, then state is already written to SCM
-  Pipeline pipeline = 
pipelineSelector.getPipeline(newState.getPipelineName(), 
newState.getReplicationType());
+  Pipeline pipeline =
+  pipelineSelector.getPipeline(newState.getPipelineName(),
+  newState.getReplicationType());
   if(pipeline == null) {
 pipeline = pipelineSelector
 .getReplicationPipeline(newState.getReplicationType(),
@@ -713,4 +715,9 @@ public class ContainerMapping implements Mapping {
   public MetadataStore getContainerStore() {
 return containerStore;
   }
+
+  @VisibleForTesting
+  public PipelineSelector getPipelineSelector() {
+return pipelineSelector;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f3f7222/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
--
diff --git 

[41/50] [abbrv] hadoop git commit: HDDS-232. Parallel unit test execution for HDDS/Ozone. Contributed by Arpit Agarwal.

2018-07-13 Thread botong
HDDS-232. Parallel unit test execution for HDDS/Ozone. Contributed by Arpit 
Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1850720
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1850720
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1850720

Branch: refs/heads/YARN-7402
Commit: d18507209e268aa5be0d3e56cec23de24107e7d9
Parents: 1fe5b93
Author: Nanda kumar 
Authored: Fri Jul 13 19:50:52 2018 +0530
Committer: Nanda kumar 
Committed: Fri Jul 13 19:50:52 2018 +0530

--
 .../common/report/TestReportPublisher.java  |  2 +-
 hadoop-hdds/pom.xml | 49 
 hadoop-ozone/pom.xml| 49 
 3 files changed, 99 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1850720/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
--
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
index 026e7aa..d4db55b 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
@@ -111,7 +111,7 @@ public class TestReportPublisher {
 publisher.init(dummyContext, executorService);
 Thread.sleep(150);
 Assert.assertEquals(1, ((DummyReportPublisher) publisher).getReportCount);
-Thread.sleep(150);
+Thread.sleep(100);
 Assert.assertEquals(2, ((DummyReportPublisher) publisher).getReportCount);
 executorService.shutdown();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1850720/hadoop-hdds/pom.xml
--
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 573803b..09fac33 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -116,4 +116,53 @@
   
 
   
+
+  <profiles>
+    <profile>
+      <id>parallel-tests</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-maven-plugins</artifactId>
+            <executions>
+              <execution>
+                <id>parallel-tests-createdir</id>
+                <goals>
+                  <goal>parallel-tests-createdir</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-surefire-plugin</artifactId>
+            <configuration>
+              <forkCount>${testsThreadCount}</forkCount>
+              <reuseForks>false</reuseForks>
+              <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
+              <systemPropertyVariables>
+                <testsThreadCount>${testsThreadCount}</testsThreadCount>
+                <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
+                <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
+                <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
+                <test.build.shared.data>${test.build.data}</test.build.shared.data>
+                <test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
+              </systemPropertyVariables>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
 </project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1850720/hadoop-ozone/pom.xml
--
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index b655088..e82a3d8 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -178,4 +178,53 @@
   
 
   
+
+  <profiles>
+    <profile>
+      <id>parallel-tests</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-maven-plugins</artifactId>
+            <executions>
+              <execution>
+                <id>parallel-tests-createdir</id>
+                <goals>
+                  <goal>parallel-tests-createdir</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-surefire-plugin</artifactId>
+            <configuration>
+              <forkCount>${testsThreadCount}</forkCount>
+              <reuseForks>false</reuseForks>
+              <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
+              <systemPropertyVariables>
+                <testsThreadCount>${testsThreadCount}</testsThreadCount>
+                <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
+                <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
+                <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
+                <test.build.shared.data>${test.build.data}</test.build.shared.data>
+                <test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
[34/50] [abbrv] hadoop git commit: HDDS-228. Add the ReplicaMaps to ContainerStateManager. Contributed by Ajay Kumar.

2018-07-13 Thread botong
HDDS-228. Add the ReplicaMaps to ContainerStateManager.
Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ee90efe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ee90efe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ee90efe

Branch: refs/heads/YARN-7402
Commit: 5ee90efed385db4bf235816145b30a0f691fc91b
Parents: a08812a
Author: Anu Engineer 
Authored: Thu Jul 12 10:43:24 2018 -0700
Committer: Anu Engineer 
Committed: Thu Jul 12 10:43:24 2018 -0700

--
 .../scm/container/ContainerStateManager.java| 34 
 .../scm/container/states/ContainerStateMap.java | 86 
 .../container/TestContainerStateManager.java| 79 ++
 3 files changed, 199 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ee90efe/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index 870ab1d..223deac 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm.container;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
@@ -488,4 +489,37 @@ public class ContainerStateManager implements Closeable {
   public void close() throws IOException {
   }
 
+  /**
+   * Returns the latest list of DataNodes where replica for given containerId
+   * exist. Throws an SCMException if no entry is found for given containerId.
+   *
+   * @param containerID
+   * @return Set<DatanodeDetails>
+   */
+  public Set<DatanodeDetails> getContainerReplicas(ContainerID containerID)
+  throws SCMException {
+return containers.getContainerReplicas(containerID);
+  }
+
+  /**
+   * Add a container Replica for given DataNode.
+   *
+   * @param containerID
+   * @param dn
+   */
+  public void addContainerReplica(ContainerID containerID, DatanodeDetails dn) 
{
+containers.addContainerReplica(containerID, dn);
+  }
+
+  /**
+   * Remove a container Replica for given DataNode.
+   *
+   * @param containerID
+   * @param dn
+   * @return True if dataNode is removed successfully else false.
+   */
+  public boolean removeContainerReplica(ContainerID containerID,
+  DatanodeDetails dn) throws SCMException {
+return containers.removeContainerReplica(containerID, dn);
+  }
 }
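
The backing map lives in ContainerStateMap (next file); as a rough,
self-contained sketch of the bookkeeping these three methods delegate to,
with simplified stand-in types rather than the real HDDS ones:

  import java.util.Collections;
  import java.util.Map;
  import java.util.NoSuchElementException;
  import java.util.Set;
  import java.util.UUID;
  import java.util.concurrent.ConcurrentHashMap;

  // Sketch: containerId -> set of datanodes currently holding a replica.
  class ReplicaMapSketch {
    private final Map<Long, Set<UUID>> replicas = new ConcurrentHashMap<>();

    void addReplica(long containerId, UUID datanode) {
      replicas.computeIfAbsent(containerId, k -> ConcurrentHashMap.newKeySet())
          .add(datanode);
    }

    Set<UUID> getReplicas(long containerId) {
      Set<UUID> s = replicas.get(containerId);
      if (s == null) {
        // Per the javadoc above, the real code throws SCMException here.
        throw new NoSuchElementException("No replicas for " + containerId);
      }
      return Collections.unmodifiableSet(s);
    }

    boolean removeReplica(long containerId, UUID datanode) {
      Set<UUID> s = replicas.get(containerId);
      return s != null && s.remove(datanode);
    }
  }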

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ee90efe/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
index c23b1fd..1c92861 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
@@ -18,13 +18,18 @@
 
 package org.apache.hadoop.hdds.scm.container.states;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import java.util.HashSet;
+import java.util.Set;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -83,6 +88,8 @@ public class ContainerStateMap {
   private final ContainerAttribute<ReplicationType> typeMap;
 
   private final Map<ContainerID, ContainerInfo> containerMap;
+  // Map to hold replicas of given container.
+  

[15/50] [abbrv] hadoop git commit: Merge remote-tracking branch 'apache/trunk' into HDDS-48

2018-07-13 Thread botong
Merge remote-tracking branch 'apache/trunk' into HDDS-48


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9bd5bef2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9bd5bef2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9bd5bef2

Branch: refs/heads/YARN-7402
Commit: 9bd5bef297b036b19f7be0c42c5477808ef8c070
Parents: 3584baf 2403231
Author: Arpit Agarwal 
Authored: Mon Jul 9 13:22:58 2018 -0700
Committer: Arpit Agarwal 
Committed: Mon Jul 9 13:22:58 2018 -0700

--
 .../hadoop-common/src/main/conf/hadoop-env.sh   |   6 +-
 .../src/main/conf/hadoop-metrics2.properties|   2 +-
 .../crypto/key/kms/KMSClientProvider.java   |   4 +-
 .../src/main/conf/kms-log4j.properties  |   4 +-
 .../src/test/resources/log4j.properties |   4 +-
 hadoop-hdds/framework/pom.xml   |   5 +
 .../hadoop/hdds/server/events/EventQueue.java   | 108 --
 .../hadoop/hdds/server/events/EventWatcher.java |  43 +-
 .../hdds/server/events/EventWatcherMetrics.java |  79 ++
 .../server/events/SingleThreadExecutor.java |  35 +++--
 .../hdds/server/events/TestEventQueue.java  |  35 +
 .../hdds/server/events/TestEventWatcher.java| 107 --
 .../hadoop/yarn/client/AMRMClientUtils.java |  91 
 .../hadoop/yarn/server/AMRMClientRelayer.java   |   9 +-
 .../yarn/server/uam/UnmanagedAMPoolManager.java |  16 ++
 .../server/uam/UnmanagedApplicationManager.java |  40 ++---
 .../yarn/server/MockResourceManagerFacade.java  |  13 +-
 .../amrmproxy/FederationInterceptor.java| 146 ---
 .../amrmproxy/BaseAMRMProxyTest.java|   2 +
 .../amrmproxy/TestFederationInterceptor.java|  17 +++
 20 files changed, 515 insertions(+), 251 deletions(-)
--






[30/50] [abbrv] hadoop git commit: HDDS-242. Introduce NEW_NODE, STALE_NODE and DEAD_NODE event and corresponding event handlers in SCM. Contributed by Nanda Kumar.

2018-07-13 Thread botong
HDDS-242. Introduce NEW_NODE, STALE_NODE and DEAD_NODE event
and corresponding event handlers in SCM.
Contributed by Nanda Kumar.

Recommitting after making sure that patch is clean.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/632aca57
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/632aca57
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/632aca57

Branch: refs/heads/YARN-7402
Commit: 632aca5793d391c741c0bce3d2e70ae6e03fe306
Parents: b567858
Author: Anu Engineer 
Authored: Wed Jul 11 12:08:50 2018 -0700
Committer: Anu Engineer 
Committed: Wed Jul 11 12:08:50 2018 -0700

--
 .../container/CloseContainerEventHandler.java   |  7 ++-
 .../hdds/scm/container/ContainerMapping.java|  5 --
 .../scm/container/ContainerReportHandler.java   | 47 ++
 .../hadoop/hdds/scm/container/Mapping.java  |  9 +---
 .../scm/container/closer/ContainerCloser.java   |  1 -
 .../hadoop/hdds/scm/events/SCMEvents.java   | 22 +
 .../hadoop/hdds/scm/node/DatanodeInfo.java  | 11 +
 .../hadoop/hdds/scm/node/DeadNodeHandler.java   | 42 
 .../hadoop/hdds/scm/node/NewNodeHandler.java| 50 +++
 .../hadoop/hdds/scm/node/NodeManager.java   |  4 +-
 .../hadoop/hdds/scm/node/NodeReportHandler.java | 42 
 .../hadoop/hdds/scm/node/NodeStateManager.java  | 32 +++-
 .../hadoop/hdds/scm/node/SCMNodeManager.java| 24 ++---
 .../hadoop/hdds/scm/node/StaleNodeHandler.java  | 42 
 .../server/SCMDatanodeHeartbeatDispatcher.java  | 20 ++--
 .../scm/server/SCMDatanodeProtocolServer.java   | 18 ++-
 .../scm/server/StorageContainerManager.java | 51 +++-
 .../hdds/scm/container/MockNodeManager.java |  9 
 .../TestCloseContainerEventHandler.java |  2 +
 .../hdds/scm/node/TestContainerPlacement.java   | 12 -
 .../hadoop/hdds/scm/node/TestNodeManager.java   | 11 -
 .../TestSCMDatanodeHeartbeatDispatcher.java |  8 ++-
 .../testutils/ReplicationNodeManagerMock.java   |  7 +++
 23 files changed, 417 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/632aca57/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
index f1053d5..859e5d5 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
@@ -25,9 +25,12 @@ import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
+import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
+
 /**
 * In case of a node failure, volume failure, volume out of space, node
  * out of space etc, CLOSE_CONTAINER will be triggered.
@@ -73,9 +76,11 @@ public class CloseContainerEventHandler implements 
EventHandler<ContainerID> {
 if (info.getState() == HddsProtos.LifeCycleState.OPEN) {
   for (DatanodeDetails datanode :
   containerWithPipeline.getPipeline().getMachines()) {
-
containerManager.getNodeManager().addDatanodeCommand(datanode.getUuid(),
+CommandForDatanode closeContainerCommand = new CommandForDatanode<>(
+datanode.getUuid(),
 new CloseContainerCommand(containerID.getId(),
 info.getReplicationType()));
+publisher.fireEvent(DATANODE_COMMAND, closeContainerCommand);
   }
   try {
 // Finalize event will make sure the state of the container transitions
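
The substance of the change: instead of reaching into NodeManager directly,
the handler fires a typed DATANODE_COMMAND event and lets whichever component
subscribed deliver the command. A toy publish/subscribe sketch of that shape
(simplified stand-ins, not the actual hdds.server.events API):

  import java.util.ArrayList;
  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;

  public class EventQueueSketch {
    interface Handler<P> { void onMessage(P payload); }

    private final Map<String, List<Handler<Object>>> handlers = new HashMap<>();

    @SuppressWarnings("unchecked")
    <P> void addHandler(String event, Handler<P> handler) {
      handlers.computeIfAbsent(event, k -> new ArrayList<>())
          .add((Handler<Object>) handler);
    }

    void fireEvent(String event, Object payload) {
      // The real EventQueue dispatches to handlers asynchronously.
      for (Handler<Object> h : handlers.getOrDefault(event, new ArrayList<>())) {
        h.onMessage(payload);
      }
    }

    public static void main(String[] args) {
      EventQueueSketch queue = new EventQueueSketch();
      queue.addHandler("DATANODE_COMMAND",
          (String cmd) -> System.out.println("deliver to datanode: " + cmd));
      queue.fireEvent("DATANODE_COMMAND", "closeContainer(42)");
    }
  }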

http://git-wip-us.apache.org/repos/asf/hadoop/blob/632aca57/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index e25c5b4..abad32c 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ 

[49/50] [abbrv] hadoop git commit: YARN-3660. [GPG] Federation Global Policy Generator (service hook only). (Contributed by Botong Huang via curino)

2018-07-13 Thread botong
YARN-3660. [GPG] Federation Global Policy Generator (service hook only). 
(Contributed by Botong Huang via curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43b8c2da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43b8c2da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43b8c2da

Branch: refs/heads/YARN-7402
Commit: 43b8c2daa0dd71c9b9934ad4b5086a81fae1e58a
Parents: 103f2ee
Author: Carlo Curino 
Authored: Thu Jan 18 17:21:06 2018 -0800
Committer: Botong Huang 
Committed: Fri Jul 13 17:42:58 2018 -0700

--
 hadoop-project/pom.xml  |   6 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn|   5 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd|  55 +---
 .../hadoop-yarn/conf/yarn-env.sh|  12 ++
 .../pom.xml |  98 +
 .../globalpolicygenerator/GPGContext.java   |  31 +
 .../globalpolicygenerator/GPGContextImpl.java   |  41 ++
 .../GlobalPolicyGenerator.java  | 136 +++
 .../globalpolicygenerator/package-info.java |  19 +++
 .../TestGlobalPolicyGenerator.java  |  38 ++
 .../hadoop-yarn/hadoop-yarn-server/pom.xml  |   1 +
 hadoop-yarn-project/pom.xml |   4 +
 12 files changed, 424 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43b8c2da/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 387a3da..ede6af4 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -446,6 +446,12 @@
 
   
 org.apache.hadoop
+hadoop-yarn-server-globalpolicygenerator
+${project.version}
+  
+
+  
+org.apache.hadoop
 hadoop-yarn-services-core
 ${hadoop.version}
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43b8c2da/hadoop-yarn-project/hadoop-yarn/bin/yarn
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index 69afe6f..8061859 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -39,6 +39,7 @@ function hadoop_usage
   hadoop_add_subcommand "container" client "prints container(s) report"
   hadoop_add_subcommand "daemonlog" admin "get/set the log level for each 
daemon"
   hadoop_add_subcommand "envvars" client "display computed Hadoop environment 
variables"
+  hadoop_add_subcommand "globalpolicygenerator" daemon "run the Global Policy 
Generator"
   hadoop_add_subcommand "jar " client "run a jar file"
   hadoop_add_subcommand "logs" client "dump container logs"
   hadoop_add_subcommand "node" admin "prints node report(s)"
@@ -103,6 +104,10 @@ ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"
   echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
   exit 0
 ;;
+globalpolicygenerator)
+  HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+  
HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.globalpolicygenerator.GlobalPolicyGenerator'
+;;
 jar)
   HADOOP_CLASSNAME=org.apache.hadoop.util.RunJar
 ;;
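
Since the new subcommand declares daemonization support, the service can
presumably be started and stopped like the other YARN daemons once the jar is
on the classpath:

  yarn --daemon start globalpolicygenerator
  yarn --daemon stop globalpolicygenerator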

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43b8c2da/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
index e1ac112..bebfd71 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
@@ -134,6 +134,10 @@ if "%1" == "--loglevel" (
 set 
CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-router\target\classes
   )
 
+  if exist 
%HADOOP_YARN_HOME%\yarn-server\yarn-server-globalpolicygenerator\target\classes 
(
+set 
CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-globalpolicygenerator\target\classes
+  )
+
   if exist %HADOOP_YARN_HOME%\build\test\classes (
 set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\test\classes
   )
@@ -155,7 +159,7 @@ if "%1" == "--loglevel" (
 
   set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar 
^
  application applicationattempt container node queue logs daemonlog 
historyserver ^
- timelineserver timelinereader router classpath
+ timelineserver timelinereader router globalpolicygenerator classpath
   for %%i in ( %yarncommands% ) do (
 if %yarn-command% == %%i set yarncommand=true
   )
@@ -259,7 +263,13 @@ goto :eof
 :router
   set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\router-config\log4j.properties
   set CLASS=org.apache.hadoop.yarn.server.router.Router
- 

[38/50] [abbrv] hadoop git commit: HDDS-187. Command status publisher for datanode. Contributed by Ajay Kumar.

2018-07-13 Thread botong
HDDS-187. Command status publisher for datanode.
Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f89e2659
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f89e2659
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f89e2659

Branch: refs/heads/YARN-7402
Commit: f89e265905f39c8e51263a3946a8b8e6ab4ebad9
Parents: 87eeb26
Author: Anu Engineer 
Authored: Thu Jul 12 21:34:32 2018 -0700
Committer: Anu Engineer 
Committed: Thu Jul 12 21:35:12 2018 -0700

--
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |   8 +
 .../org/apache/hadoop/hdds/HddsIdFactory.java   |  53 ++
 .../common/src/main/resources/ozone-default.xml |   9 +
 .../apache/hadoop/utils/TestHddsIdFactory.java  |  77 +
 .../report/CommandStatusReportPublisher.java|  71 
 .../common/report/ReportPublisher.java  |   9 +
 .../common/report/ReportPublisherFactory.java   |   4 +
 .../statemachine/DatanodeStateMachine.java  |   2 +
 .../common/statemachine/StateContext.java   |  70 
 .../CloseContainerCommandHandler.java   |   5 +-
 .../commandhandler/CommandHandler.java  |  11 ++
 .../DeleteBlocksCommandHandler.java | 166 ++-
 .../ReplicateContainerCommandHandler.java   |   7 +-
 .../commands/CloseContainerCommand.java |  36 ++--
 .../ozone/protocol/commands/CommandStatus.java  | 141 
 .../protocol/commands/DeleteBlocksCommand.java  |  13 +-
 .../commands/ReplicateContainerCommand.java |  20 ++-
 .../protocol/commands/ReregisterCommand.java|  10 ++
 .../ozone/protocol/commands/SCMCommand.java |  19 +++
 .../StorageContainerDatanodeProtocol.proto  |  21 +++
 .../ozone/container/common/ScmTestMock.java |  33 +++-
 .../common/report/TestReportPublisher.java  |  75 -
 .../hadoop/hdds/scm/events/SCMEvents.java   |  57 ---
 .../server/SCMDatanodeHeartbeatDispatcher.java  |  23 ++-
 .../TestSCMDatanodeHeartbeatDispatcher.java |  25 ++-
 .../ozone/container/common/TestEndPoint.java| 111 -
 26 files changed, 935 insertions(+), 141 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f89e2659/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index dec2c1c..8b449fb 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -17,7 +17,15 @@
  */
 package org.apache.hadoop.hdds;
 
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+/**
+ * Config class for HDDS.
+ */
 public final class HddsConfigKeys {
   private HddsConfigKeys() {
   }
+  public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL =
+  "hdds.command.status.report.interval";
+  public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT =
+  ScmConfigKeys.OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT;
 }
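
A hedged sketch of reading this key on the datanode side; getTimeDuration is
the stock Configuration accessor for interval-style settings, though whether
HDDS-187 uses exactly this call is not visible in the hunk:

  import java.util.concurrent.TimeUnit;
  import org.apache.hadoop.conf.Configuration;

  public class ReportIntervalSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // 60 s is a stand-in default; the real one comes from ScmConfigKeys.
      long intervalMs = conf.getTimeDuration(
          "hdds.command.status.report.interval",
          60_000, TimeUnit.MILLISECONDS);
      System.out.println("report interval = " + intervalMs + " ms");
    }
  }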

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f89e2659/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java
new file mode 100644
index 000..b244b8c
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds;
+
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * HDDS Id generator.
+ */
+public final class HddsIdFactory {
+  private HddsIdFactory() {
+  }
+
+  private static final AtomicLong 

[18/50] [abbrv] hadoop git commit: HADOOP-15541. [s3a] Shouldn't try to drain stream before aborting connection in case of timeout.

2018-07-13 Thread botong
HADOOP-15541. [s3a] Shouldn't try to drain stream before aborting
connection in case of timeout.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d503f65b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d503f65b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d503f65b

Branch: refs/heads/YARN-7402
Commit: d503f65b6689b19278ec2a0cf9da5a8762539de8
Parents: 705e2c1
Author: Sean Mackrory 
Authored: Thu Jul 5 13:52:00 2018 -0600
Committer: Sean Mackrory 
Committed: Tue Jul 10 17:52:57 2018 +0200

--
 .../apache/hadoop/fs/s3a/S3AInputStream.java| 24 +---
 1 file changed, 16 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d503f65b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
index 440739d..68f98e4 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
@@ -36,6 +36,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.EOFException;
 import java.io.IOException;
+import java.net.SocketTimeoutException;
 
 import static org.apache.commons.lang3.StringUtils.isNotEmpty;
 
@@ -155,11 +156,11 @@ public class S3AInputStream extends FSInputStream 
implements CanSetReadahead {
* @throws IOException on any failure to open the object
*/
   @Retries.OnceTranslated
-  private synchronized void reopen(String reason, long targetPos, long length)
-  throws IOException {
+  private synchronized void reopen(String reason, long targetPos, long length,
+  boolean forceAbort) throws IOException {
 
 if (wrappedStream != null) {
-  closeStream("reopen(" + reason + ")", contentRangeFinish, false);
+  closeStream("reopen(" + reason + ")", contentRangeFinish, forceAbort);
 }
 
 contentRangeFinish = calculateRequestLimit(inputPolicy, targetPos,
@@ -324,7 +325,7 @@ public class S3AInputStream extends FSInputStream 
implements CanSetReadahead {
 
   //re-open at specific location if needed
   if (wrappedStream == null) {
-reopen("read from new offset", targetPos, len);
+reopen("read from new offset", targetPos, len, false);
   }
 });
   }
@@ -367,8 +368,11 @@ public class S3AInputStream extends FSInputStream 
implements CanSetReadahead {
 b = wrappedStream.read();
   } catch (EOFException e) {
 return -1;
+  } catch (SocketTimeoutException e) {
+onReadFailure(e, 1, true);
+b = wrappedStream.read();
   } catch (IOException e) {
-onReadFailure(e, 1);
+onReadFailure(e, 1, false);
 b = wrappedStream.read();
   }
   return b;
@@ -393,12 +397,13 @@ public class S3AInputStream extends FSInputStream 
implements CanSetReadahead {
* @throws IOException any exception thrown on the re-open attempt.
*/
   @Retries.OnceTranslated
-  private void onReadFailure(IOException ioe, int length) throws IOException {
+  private void onReadFailure(IOException ioe, int length, boolean forceAbort)
+  throws IOException {
 
 LOG.info("Got exception while trying to read from stream {}" +
 " trying to recover: " + ioe, uri);
 streamStatistics.readException();
-reopen("failure recovery", pos, length);
+reopen("failure recovery", pos, length, forceAbort);
   }
 
   /**
@@ -446,8 +451,11 @@ public class S3AInputStream extends FSInputStream 
implements CanSetReadahead {
   } catch (EOFException e) {
 // the base implementation swallows EOFs.
 return -1;
+  } catch (SocketTimeoutException e) {
+onReadFailure(e, len, true);
+bytes = wrappedStream.read(buf, off, len);
   } catch (IOException e) {
-onReadFailure(e, len);
+onReadFailure(e, len, false);
 bytes= wrappedStream.read(buf, off, len);
   }
   return bytes;
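
The point of the new flag: draining a socket that has already timed out tends
to block until another timeout, so the recovery path for
SocketTimeoutException must abort the connection rather than close it
politely. A condensed, hypothetical rendering of the pattern (abstract reopen
stub, not the real S3A internals):

  import java.io.IOException;
  import java.io.InputStream;
  import java.net.SocketTimeoutException;

  abstract class TimeoutAwareRead {
    protected InputStream wrapped;

    // forceAbort=true: kill the HTTP connection; false: drain and reuse it.
    protected abstract void reopen(String reason, boolean forceAbort)
        throws IOException;

    int readOnce() throws IOException {
      try {
        return wrapped.read();
      } catch (SocketTimeoutException e) {
        reopen("timeout recovery", true);   // abort: the socket is already dead
        return wrapped.read();
      } catch (IOException e) {
        reopen("failure recovery", false);  // close normally, keep the pool warm
        return wrapped.read();
      }
    }
  }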





[46/50] [abbrv] hadoop git commit: YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen

2018-07-13 Thread botong
YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bbe70ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bbe70ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bbe70ce

Branch: refs/heads/YARN-7402
Commit: 0bbe70ced1a3a895473436e5f7d328e373b1d4ca
Parents: fa3ee34
Author: Botong Huang 
Authored: Fri Mar 23 17:07:10 2018 -0700
Committer: Botong Huang 
Committed: Fri Jul 13 17:42:58 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  36 +-
 .../src/main/resources/yarn-default.xml |  40 +++
 .../utils/FederationStateStoreFacade.java   |  13 +
 .../pom.xml |  18 +
 .../globalpolicygenerator/GPGContext.java   |   4 +
 .../globalpolicygenerator/GPGContextImpl.java   |  10 +
 .../globalpolicygenerator/GPGPolicyFacade.java  | 220 
 .../server/globalpolicygenerator/GPGUtils.java  |  80 +
 .../GlobalPolicyGenerator.java  |  17 +
 .../policygenerator/GlobalPolicy.java   |  76 +
 .../policygenerator/NoOpGlobalPolicy.java   |  36 ++
 .../policygenerator/PolicyGenerator.java| 261 ++
 .../UniformWeightedLocalityGlobalPolicy.java|  71 
 .../policygenerator/package-info.java   |  24 ++
 .../TestGPGPolicyFacade.java| 202 +++
 .../policygenerator/TestPolicyGenerator.java| 338 +++
 .../src/test/resources/schedulerInfo1.json  | 134 
 .../src/test/resources/schedulerInfo2.json  | 196 +++
 18 files changed, 1775 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bbe70ce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index b3a4ccb..fe7cb8f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3335,7 +3335,7 @@ public class YarnConfiguration extends Configuration {
   public static final boolean DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED =
   false;
 
-  private static final String FEDERATION_GPG_PREFIX =
+  public static final String FEDERATION_GPG_PREFIX =
   FEDERATION_PREFIX + "gpg.";
 
   // The number of threads to use for the GPG scheduled executor service
@@ -3353,6 +3353,40 @@ public class YarnConfiguration extends Configuration {
   FEDERATION_GPG_PREFIX + "subcluster.heartbeat.expiration-ms";
   public static final long DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS = 180;
 
+  public static final String FEDERATION_GPG_POLICY_PREFIX =
+  FEDERATION_GPG_PREFIX + "policy.generator.";
+
+  /** The interval at which the policy generator runs, default is one hour. */
+  public static final String GPG_POLICY_GENERATOR_INTERVAL_MS =
+  FEDERATION_GPG_POLICY_PREFIX + "interval-ms";
+  public static final long DEFAULT_GPG_POLICY_GENERATOR_INTERVAL_MS = -1;
+
+  /**
+   * The configured policy generator class, runs NoOpGlobalPolicy by
+   * default.
+   */
+  public static final String GPG_GLOBAL_POLICY_CLASS =
+  FEDERATION_GPG_POLICY_PREFIX + "class";
+  public static final String DEFAULT_GPG_GLOBAL_POLICY_CLASS =
+  "org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator."
+  + "NoOpGlobalPolicy";
+
+  /**
+   * Whether or not the policy generator is running in read only (won't modify
+   * policies), default is false.
+   */
+  public static final String GPG_POLICY_GENERATOR_READONLY =
+  FEDERATION_GPG_POLICY_PREFIX + "readonly";
+  public static final boolean DEFAULT_GPG_POLICY_GENERATOR_READONLY =
+  false;
+
+  /**
+   * Which sub-clusters the policy generator should blacklist.
+   */
+  public static final String GPG_POLICY_GENERATOR_BLACKLIST =
+  FEDERATION_GPG_POLICY_PREFIX + "blacklist";
+
+
   
   // Other Configs
   

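For illustration, consuming these knobs follows the usual Configuration
pattern; a small hypothetical reader (the real wiring is in the GPG classes
listed above):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  public class GpgConfigSketch {
    public static void main(String[] args) {
      Configuration conf = new YarnConfiguration();
      long intervalMs = conf.getLong(
          YarnConfiguration.GPG_POLICY_GENERATOR_INTERVAL_MS,
          YarnConfiguration.DEFAULT_GPG_POLICY_GENERATOR_INTERVAL_MS);
      boolean readOnly = conf.getBoolean(
          YarnConfiguration.GPG_POLICY_GENERATOR_READONLY,
          YarnConfiguration.DEFAULT_GPG_POLICY_GENERATOR_READONLY);
      // -1, the default, presumably disables the periodic run entirely.
      System.out.println("interval=" + intervalMs + "ms readOnly=" + readOnly);
    }
  }
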
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bbe70ce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 

[19/50] [abbrv] hadoop git commit: YARN-8502. Use path strings consistently for webservice endpoints in RMWebServices. Contributed by Szilard Nemeth.

2018-07-13 Thread botong
YARN-8502. Use path strings consistently for webservice endpoints in 
RMWebServices. Contributed by Szilard Nemeth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82ac3aa6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82ac3aa6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82ac3aa6

Branch: refs/heads/YARN-7402
Commit: 82ac3aa6d0a83235cfac2805a444dd26efe5f9ce
Parents: d503f65
Author: Giovanni Matteo Fumarola 
Authored: Tue Jul 10 10:36:17 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Tue Jul 10 10:36:17 2018 -0700

--
 .../hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java  | 3 +++
 .../yarn/server/resourcemanager/webapp/RMWebServices.java  | 6 +++---
 2 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82ac3aa6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
index 29ae81b..9822878 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
@@ -42,6 +42,9 @@ public final class RMWSConsts {
   /** Path for {@code RMWebServiceProtocol#getSchedulerInfo}. */
   public static final String SCHEDULER = "/scheduler";
 
+  /** Path for {@code RMWebServices#updateSchedulerConfiguration}. */
+  public static final String SCHEDULER_CONF = "/scheduler-conf";
+
   /** Path for {@code RMWebServiceProtocol#dumpSchedulerLogs}. */
   public static final String SCHEDULER_LOGS = "/scheduler/logs";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82ac3aa6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 864653c..15b58d7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -955,7 +955,7 @@ public class RMWebServices extends WebServices implements 
RMWebServiceProtocol {
   }
 
   @GET
-  @Path("/apps/{appid}/appattempts/{appattemptid}/containers/{containerid}")
+  @Path(RMWSConsts.GET_CONTAINER)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
   MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Override
@@ -969,7 +969,7 @@ public class RMWebServices extends WebServices implements 
RMWebServiceProtocol {
   }
 
   @GET
-  @Path("/apps/{appid}/state")
+  @Path(RMWSConsts.APPS_APPID_STATE)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
   MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Override
@@ -2422,7 +2422,7 @@ public class RMWebServices extends WebServices implements 
RMWebServiceProtocol {
   }
 
   @PUT
-  @Path("/scheduler-conf")
+  @Path(RMWSConsts.SCHEDULER_CONF)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
   MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })





[27/50] [abbrv] hadoop git commit: HDFS-13723. Occasional "Should be different group" error in TestRefreshUserMappings#testGroupMappingRefresh. Contributed by Siyao Meng.

2018-07-13 Thread botong
HDFS-13723. Occasional "Should be different group" error in 
TestRefreshUserMappings#testGroupMappingRefresh. Contributed by Siyao Meng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/162228e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/162228e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/162228e8

Branch: refs/heads/YARN-7402
Commit: 162228e8db937d4bdb9cf61d15ed555f1c96368f
Parents: d36ed94
Author: Wei-Chiu Chuang 
Authored: Wed Jul 11 10:02:08 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Wed Jul 11 10:02:08 2018 -0700

--
 .../java/org/apache/hadoop/security/Groups.java  |  5 -
 .../hadoop/security/TestRefreshUserMappings.java | 19 +--
 2 files changed, 17 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/162228e8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
index ad09865..63ec9a5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
@@ -73,7 +73,8 @@ import org.slf4j.LoggerFactory;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 public class Groups {
-  private static final Logger LOG = LoggerFactory.getLogger(Groups.class);
+  @VisibleForTesting
+  static final Logger LOG = LoggerFactory.getLogger(Groups.class);
   
   private final GroupMappingServiceProvider impl;
 
@@ -308,6 +309,7 @@ public class Groups {
  */
 @Override
 public List load(String user) throws Exception {
+  LOG.debug("GroupCacheLoader - load.");
   TraceScope scope = null;
   Tracer tracer = Tracer.curThreadTracer();
   if (tracer != null) {
@@ -346,6 +348,7 @@ public class Groups {
 public ListenableFuture<List<String>> reload(final String key,
  List<String> oldValue)
 throws Exception {
+  LOG.debug("GroupCacheLoader - reload (async).");
   if (!reloadGroupsInBackground) {
 return super.reload(key, oldValue);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/162228e8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
index f511eb1..0e7dfc3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
@@ -45,6 +45,8 @@ import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.slf4j.event.Level;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -93,6 +95,8 @@ public class TestRefreshUserMappings {
 FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
 cluster = new MiniDFSCluster.Builder(config).build();
 cluster.waitActive();
+
+GenericTestUtils.setLogLevel(Groups.LOG, Level.DEBUG);
   }
 
   @After
@@ -114,21 +118,24 @@ public class TestRefreshUserMappings {
 String [] args =  new String[]{"-refreshUserToGroupsMappings"};
 Groups groups = Groups.getUserToGroupsMappingService(config);
 String user = UserGroupInformation.getCurrentUser().getUserName();
-System.out.println("first attempt:");
+
+System.out.println("First attempt:");
 List g1 = groups.getGroups(user);
 String [] str_groups = new String [g1.size()];
 g1.toArray(str_groups);
 System.out.println(Arrays.toString(str_groups));
 
-System.out.println("second attempt, should be same:");
+System.out.println("Second attempt, should be the same:");
 List g2 = groups.getGroups(user);
 g2.toArray(str_groups);
 System.out.println(Arrays.toString(str_groups));
 for(int i=0; i g3 = groups.getGroups(user);
 g3.toArray(str_groups);
 System.out.println(Arrays.toString(str_groups));
@@ -137,9 +144,9 @@ public class TestRefreshUserMappings {
  

[42/50] [abbrv] hadoop git commit: YARN-8515. container-executor can crash with SIGPIPE after nodemanager restart. Contributed by Jim Brennan

2018-07-13 Thread botong
YARN-8515. container-executor can crash with SIGPIPE after nodemanager restart. 
Contributed by Jim Brennan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17118f44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17118f44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17118f44

Branch: refs/heads/YARN-7402
Commit: 17118f446c2387aa796849da8b69a845d9d307d3
Parents: d185072
Author: Jason Lowe 
Authored: Fri Jul 13 10:05:25 2018 -0500
Committer: Jason Lowe 
Committed: Fri Jul 13 10:05:25 2018 -0500

--
 .../src/main/native/container-executor/impl/main.c | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17118f44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index 2099ace..6ab522f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -31,6 +31,7 @@
 #include 
 #include 
 #include 
+#include <signal.h>
 
 static void display_usage(FILE *stream) {
   fprintf(stream,
@@ -112,6 +113,11 @@ static void open_log_files() {
   if (ERRORFILE == NULL) {
 ERRORFILE = stderr;
   }
+
+  // There may be a process reading from stdout/stderr, and if it
+  // exits, we will crash on a SIGPIPE when we try to write to them.
+  // By ignoring SIGPIPE, we can handle the EPIPE instead of crashing.
+  signal(SIGPIPE, SIG_IGN);
 }
 
 /* Flushes and closes log files */





[20/50] [abbrv] hadoop git commit: HDFS-13722. HDFS Native Client Fails Compilation on Ubuntu 18.04 (contributed by Jack Bearden)

2018-07-13 Thread botong
HDFS-13722. HDFS Native Client Fails Compilation on Ubuntu 18.04 (contributed 
by Jack Bearden)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d0f01e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d0f01e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d0f01e1

Branch: refs/heads/YARN-7402
Commit: 5d0f01e1fe988616d53120bad0cb69a825a4dde0
Parents: 82ac3aa
Author: Allen Wittenauer 
Authored: Tue Jul 10 12:17:44 2018 -0700
Committer: Allen Wittenauer 
Committed: Tue Jul 10 12:17:44 2018 -0700

--
 .../src/main/native/libhdfspp/lib/rpc/request.cc   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d0f01e1/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.cc
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.cc
index 9157476..2de26fd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.cc
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.cc
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-
+#include 
 #include "request.h"
 #include "rpc_engine.h"
 #include "sasl_protocol.h"





[35/50] [abbrv] hadoop git commit: HDDS-234. Add SCM node report handler. Contributed by Ajay Kumar.

2018-07-13 Thread botong
HDDS-234. Add SCM node report handler.
Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/556d9b36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/556d9b36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/556d9b36

Branch: refs/heads/YARN-7402
Commit: 556d9b36be4b0b759646b8f6030c9e693b97bdb8
Parents: 5ee90ef
Author: Anu Engineer 
Authored: Thu Jul 12 12:09:31 2018 -0700
Committer: Anu Engineer 
Committed: Thu Jul 12 12:09:31 2018 -0700

--
 .../hadoop/hdds/scm/node/NodeManager.java   |  9 ++
 .../hadoop/hdds/scm/node/NodeReportHandler.java | 19 +++-
 .../hadoop/hdds/scm/node/SCMNodeManager.java| 11 +++
 .../hdds/scm/container/MockNodeManager.java | 11 +++
 .../hdds/scm/node/TestNodeReportHandler.java| 95 
 .../testutils/ReplicationNodeManagerMock.java   | 10 +++
 6 files changed, 152 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/556d9b36/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 5e2969d..deb1628 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdds.scm.node;
 
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
@@ -138,4 +139,12 @@ public interface NodeManager extends 
StorageContainerNodeProtocol,
* @param command
*/
   void addDatanodeCommand(UUID dnId, SCMCommand command);
+
+  /**
+   * Process node report.
+   *
+   * @param dnUuid
+   * @param nodeReport
+   */
+  void processNodeReport(UUID dnUuid, NodeReportProto nodeReport);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/556d9b36/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
index aa78d53..331bfed 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
@@ -7,7 +7,7 @@
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,25 +18,38 @@
 
 package org.apache.hadoop.hdds.scm.node;
 
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
 .NodeReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Handles Node Reports from datanode.
  */
 public class NodeReportHandler implements EventHandler<NodeReportFromDatanode> {
 
+  private static final Logger LOGGER = LoggerFactory
+  .getLogger(NodeReportHandler.class);
   private final NodeManager nodeManager;
 
   public NodeReportHandler(NodeManager nodeManager) {
+Preconditions.checkNotNull(nodeManager);
 this.nodeManager = nodeManager;
   }
 
   @Override
   public void onMessage(NodeReportFromDatanode nodeReportFromDatanode,
-EventPublisher publisher) {
-//TODO: process node report.
+  EventPublisher publisher) {
+Preconditions.checkNotNull(nodeReportFromDatanode);
+DatanodeDetails dn = nodeReportFromDatanode.getDatanodeDetails();
+Preconditions.checkNotNull(dn, "NodeReport is "
++ "missing DatanodeDetails.");
+LOGGER.trace("Processing node report for dn: {}", dn);
+nodeManager
+.processNodeReport(dn.getUuid(), nodeReportFromDatanode.getReport());
   }
 }
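
The handler stays deliberately thin: null-check the payload and hand the
report to NodeManager, which owns the per-node bookkeeping. A toy version of
that split, with stand-in types instead of the protobuf ones:

  import java.util.Map;
  import java.util.Objects;
  import java.util.UUID;
  import java.util.concurrent.ConcurrentHashMap;

  class NodeReportSketch {
    static final class NodeReport {
      final UUID datanode;
      final long capacityBytes;
      final long usedBytes;
      NodeReport(UUID datanode, long capacityBytes, long usedBytes) {
        this.datanode = datanode;
        this.capacityBytes = capacityBytes;
        this.usedBytes = usedBytes;
      }
    }

    static final class NodeManagerSketch {
      private final Map<UUID, NodeReport> latest = new ConcurrentHashMap<>();
      void processNodeReport(UUID dn, NodeReport report) {
        latest.put(dn, report);   // keep only the most recent report per node
      }
    }

    static final class HandlerSketch {
      private final NodeManagerSketch nodeManager;
      HandlerSketch(NodeManagerSketch nm) {
        this.nodeManager = Objects.requireNonNull(nm);
      }
      void onMessage(NodeReport report) {
        Objects.requireNonNull(report, "report");
        Objects.requireNonNull(report.datanode, "report is missing its datanode");
        nodeManager.processNodeReport(report.datanode, report);
      }
    }
  }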


[48/50] [abbrv] hadoop git commit: YARN-7402. [GPG] Fix potential connection leak in GPGUtils. Contributed by Giovanni Matteo Fumarola.

2018-07-13 Thread botong
YARN-7402. [GPG] Fix potential connection leak in GPGUtils. Contributed by 
Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a70835e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a70835e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a70835e

Branch: refs/heads/YARN-7402
Commit: 8a70835ecb3c55ca6f78fc5b658131829f01657a
Parents: 0bbe70c
Author: Botong Huang 
Authored: Wed May 23 12:45:32 2018 -0700
Committer: Botong Huang 
Committed: Fri Jul 13 17:42:58 2018 -0700

--
 .../server/globalpolicygenerator/GPGUtils.java  | 31 +---
 1 file changed, 20 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a70835e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
index 429bec4..31cee1c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
@@ -18,21 +18,22 @@
 
 package org.apache.hadoop.yarn.server.globalpolicygenerator;
 
+import static javax.servlet.http.HttpServletResponse.SC_OK;
+
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 
-import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.core.MediaType;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
 
 import com.sun.jersey.api.client.Client;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.WebResource;
-import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
-import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
 
 /**
  * GPGUtils contains utility functions for the GPG.
@@ -53,15 +54,23 @@ public final class GPGUtils {
 T obj = null;
 
 WebResource webResource = client.resource(webAddr);
-ClientResponse response = webResource.path("ws/v1/cluster").path(path)
-.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
-if (response.getStatus() == HttpServletResponse.SC_OK) {
-  obj = response.getEntity(returnType);
-} else {
-  throw new YarnRuntimeException("Bad response from remote web service: "
-  + response.getStatus());
+ClientResponse response = null;
+try {
+  response = webResource.path("ws/v1/cluster").path(path)
+  .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+  if (response.getStatus() == SC_OK) {
+obj = response.getEntity(returnType);
+  } else {
+throw new YarnRuntimeException(
+"Bad response from remote web service: " + response.getStatus());
+  }
+  return obj;
+} finally {
+  if (response != null) {
+response.close();
+  }
+  client.destroy();
 }
-return obj;
   }
 
   /**
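
A note on the design choice: the patch uses an explicit finally because it
must also destroy the Client, and the Jersey 1.x response type is closed by
convention rather than through try-with-resources. With an AutoCloseable
response type, the same always-close discipline would collapse to something
like this hypothetical sketch:

  import java.io.IOException;

  // Hypothetical AutoCloseable stand-ins, not the Jersey 1.x API.
  interface HttpResponseSketch extends AutoCloseable {
    int status();
    <T> T entity(Class<T> type);
    @Override
    void close();
  }

  class GetOnce {
    static <T> T invoke(HttpResponseSketch response, Class<T> type)
        throws IOException {
      try (HttpResponseSketch r = response) {  // closed on every path
        if (r.status() != 200) {
          throw new IOException("Bad response from remote web service: "
              + r.status());
        }
        return r.entity(type);
      }
    }
  }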





[40/50] [abbrv] hadoop git commit: HDDS-253. SCMBlockDeletingService should publish events for delete blocks to EventQueue. Contributed by Lokesh Jain.

2018-07-13 Thread botong
HDDS-253. SCMBlockDeletingService should publish events for delete blocks to 
EventQueue. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fe5b938
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fe5b938
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fe5b938

Branch: refs/heads/YARN-7402
Commit: 1fe5b938435ab49e40cffa66f4dd16ddf1592405
Parents: 3f3f722
Author: Nanda kumar 
Authored: Fri Jul 13 17:18:42 2018 +0530
Committer: Nanda kumar 
Committed: Fri Jul 13 17:18:42 2018 +0530

--
 .../apache/hadoop/hdds/scm/block/BlockManagerImpl.java | 10 ++
 .../hadoop/hdds/scm/block/SCMBlockDeletingService.java | 13 +
 .../hdds/scm/server/StorageContainerManager.java   |  2 +-
 .../apache/hadoop/hdds/scm/block/TestBlockManager.java |  2 +-
 .../apache/hadoop/ozone/scm/TestContainerSQLCli.java   |  3 +--
 5 files changed, 18 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fe5b938/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 953f71e..6825ca4 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.hdds.client.BlockID;
@@ -87,10 +88,12 @@ public class BlockManagerImpl implements BlockManager, 
BlockmanagerMXBean {
* @param conf - configuration.
* @param nodeManager - node manager.
* @param containerManager - container manager.
+   * @param eventPublisher - event publisher.
* @throws IOException
*/
   public BlockManagerImpl(final Configuration conf,
-  final NodeManager nodeManager, final Mapping containerManager)
+  final NodeManager nodeManager, final Mapping containerManager,
+  EventPublisher eventPublisher)
   throws IOException {
 this.nodeManager = nodeManager;
 this.containerManager = containerManager;
@@ -120,9 +123,8 @@ public class BlockManagerImpl implements BlockManager, 
BlockmanagerMXBean {
 OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
 TimeUnit.MILLISECONDS);
 blockDeletingService =
-new SCMBlockDeletingService(
-deletedBlockLog, containerManager, nodeManager, svcInterval,
-serviceTimeout, conf);
+new SCMBlockDeletingService(deletedBlockLog, containerManager,
+nodeManager, eventPublisher, svcInterval, serviceTimeout, conf);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fe5b938/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
index 2c555e0..6f65fdd 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
@@ -20,11 +20,14 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.container.Mapping;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
 import org.apache.hadoop.util.Time;
 import 

[10/50] [abbrv] hadoop git commit: HADOOP-15568. fix some typos in the .sh comments. Contributed by Steve Loughran.

2018-07-13 Thread botong
HADOOP-15568. fix some typos in the .sh comments. Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a08ddfa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a08ddfa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a08ddfa

Branch: refs/heads/YARN-7402
Commit: 4a08ddfa68a405bfd97ffd96fafc1e3d48d20d7e
Parents: ea9b608
Author: Akira Ajisaka 
Authored: Mon Jul 9 15:43:38 2018 -0400
Committer: Akira Ajisaka 
Committed: Mon Jul 9 15:43:38 2018 -0400

--
 .../hadoop-common/src/main/conf/hadoop-env.sh  | 6 +++---
 .../hadoop-common/src/main/conf/hadoop-metrics2.properties | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a08ddfa/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh 
b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
index 3826f67..6db085a 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
@@ -88,7 +88,7 @@
 # Extra Java runtime options for all Hadoop commands. We don't support
 # IPv6 yet/still, so by default the preference is set to IPv4.
 # export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
-# For Kerberos debugging, an extended option set logs more invormation
+# For Kerberos debugging, an extended option set logs more information
 # export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true 
-Dsun.security.krb5.debug=true -Dsun.security.spnego.debug"
 
 # Some parts of the shell code may do special things dependent upon
@@ -120,9 +120,9 @@ esac
 #
 # By default, Apache Hadoop overrides Java's CLASSPATH
 # environment variable.  It is configured such
-# that it sarts out blank with new entries added after passing
+# that it starts out blank with new entries added after passing
 # a series of checks (file/dir exists, not already listed aka
-# de-deduplication).  During de-depulication, wildcards and/or
+# de-deduplication).  During de-deduplication, wildcards and/or
 # directories are *NOT* expanded to keep it simple. Therefore,
 # if the computed classpath has two specific mentions of
 # awesome-methods-1.0.jar, only the first one added will be seen.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a08ddfa/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
index 16fdcf0..f061313 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
+++ 
b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
@@ -47,7 +47,7 @@
 #*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
 
 # Tag values to use for the ganglia prefix. If not defined no tags are used.
-# If '*' all tags are used. If specifiying multiple tags separate them with 
+# If '*' all tags are used. If specifying multiple tags separate them with
 # commas. Note that the last segment of the property name is the context name.
 #
 # A typical use of tags is separating the metrics by the HDFS rpc port





[23/50] [abbrv] hadoop git commit: YARN-8512. ATSv2 entities are not published to HBase from second attempt onwards. Contributed by Rohith Sharma K S.

2018-07-13 Thread botong
YARN-8512. ATSv2 entities are not published to HBase from second attempt 
onwards. Contributed by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f1d3d0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f1d3d0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f1d3d0e

Branch: refs/heads/YARN-7402
Commit: 7f1d3d0e9dbe328fae0d43421665e0b6907b33fe
Parents: a47ec5d
Author: Sunil G 
Authored: Wed Jul 11 12:26:32 2018 +0530
Committer: Sunil G 
Committed: Wed Jul 11 12:26:32 2018 +0530

--
 .../containermanager/ContainerManagerImpl.java  |  69 
 .../application/ApplicationImpl.java|   7 +-
 .../BaseContainerManagerTest.java   |  25 +
 .../TestContainerManagerRecovery.java   | 106 +--
 4 files changed, 180 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f1d3d0e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 3470910..ad63720 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -1102,24 +1102,8 @@ public class ContainerManagerImpl extends 
CompositeService implements
   // Create the application
   // populate the flow context from the launch context if the timeline
   // service v.2 is enabled
-  FlowContext flowContext = null;
-  if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) {
-String flowName = launchContext.getEnvironment()
-.get(TimelineUtils.FLOW_NAME_TAG_PREFIX);
-String flowVersion = launchContext.getEnvironment()
-.get(TimelineUtils.FLOW_VERSION_TAG_PREFIX);
-String flowRunIdStr = launchContext.getEnvironment()
-.get(TimelineUtils.FLOW_RUN_ID_TAG_PREFIX);
-long flowRunId = 0L;
-if (flowRunIdStr != null && !flowRunIdStr.isEmpty()) {
-  flowRunId = Long.parseLong(flowRunIdStr);
-}
-flowContext = new FlowContext(flowName, flowVersion, flowRunId);
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Flow context: " + flowContext
-  + " created for an application " + applicationID);
-}
-  }
+  FlowContext flowContext =
+  getFlowContext(launchContext, applicationID);
 
   Application application =
   new ApplicationImpl(dispatcher, user, flowContext,
@@ -1138,6 +1122,31 @@ public class ContainerManagerImpl extends 
CompositeService implements
 dispatcher.getEventHandler().handle(new ApplicationInitEvent(
 applicationID, appAcls, logAggregationContext));
   }
+} else if (containerTokenIdentifier.getContainerType()
+== ContainerType.APPLICATION_MASTER) {
+  FlowContext flowContext =
+  getFlowContext(launchContext, applicationID);
+  if (flowContext != null) {
+ApplicationImpl application =
+(ApplicationImpl) context.getApplications().get(applicationID);
+
+// update flowContext reference in ApplicationImpl
+application.setFlowContext(flowContext);
+
+// Required to update state store for recovery.
+context.getNMStateStore().storeApplication(applicationID,
+buildAppProto(applicationID, user, credentials,
+container.getLaunchContext().getApplicationACLs(),
+containerTokenIdentifier.getLogAggregationContext(),
+flowContext));
+
+LOG.info(
+"Updated application reference with flowContext " + flowContext
++ " for app " + applicationID);
+  } else {
+LOG.info("TimelineService V2.0 is not enabled. Skipping updating "
++ "flowContext for application " + applicationID);
+ 
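The refactoring above collapses the removed inline block into a private helper. Reconstructed from the deleted lines, getFlowContext plausibly reads as follows (parameter types assumed from the call sites; a sketch, not the committed file):

    private FlowContext getFlowContext(ContainerLaunchContext launchContext,
        ApplicationId applicationID) {
      FlowContext flowContext = null;
      if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) {
        // Flow identity is stamped into the AM launch environment by the RM.
        String flowName = launchContext.getEnvironment()
            .get(TimelineUtils.FLOW_NAME_TAG_PREFIX);
        String flowVersion = launchContext.getEnvironment()
            .get(TimelineUtils.FLOW_VERSION_TAG_PREFIX);
        String flowRunIdStr = launchContext.getEnvironment()
            .get(TimelineUtils.FLOW_RUN_ID_TAG_PREFIX);
        long flowRunId = 0L;
        if (flowRunIdStr != null && !flowRunIdStr.isEmpty()) {
          flowRunId = Long.parseLong(flowRunIdStr);
        }
        flowContext = new FlowContext(flowName, flowVersion, flowRunId);
        if (LOG.isDebugEnabled()) {
          LOG.debug("Flow context: " + flowContext
              + " created for an application " + applicationID);
        }
      }
      return flowContext;
    }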

[31/50] [abbrv] hadoop git commit: HDFS-12837. Intermittent failure in TestReencryptionWithKMS.

2018-07-13 Thread botong
HDFS-12837. Intermittent failure in TestReencryptionWithKMS.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b37074be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b37074be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b37074be

Branch: refs/heads/YARN-7402
Commit: b37074be5ab35c238e18bb9c3b89db6d7f8d0986
Parents: 632aca5
Author: Xiao Chen 
Authored: Wed Jul 11 20:54:37 2018 -0700
Committer: Xiao Chen 
Committed: Wed Jul 11 21:03:19 2018 -0700

--
 .../server/namenode/ReencryptionHandler.java|  4 +-
 .../hdfs/server/namenode/TestReencryption.java  | 61 +++-
 2 files changed, 37 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37074be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
index 5b52c82..b92fe9f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
@@ -616,7 +616,9 @@ public class ReencryptionHandler implements Runnable {
   while (shouldPauseForTesting) {
 LOG.info("Sleeping in the re-encrypt handler for unit test.");
 synchronized (reencryptionHandler) {
-  reencryptionHandler.wait(3);
+  if (shouldPauseForTesting) {
+reencryptionHandler.wait(3);
+  }
 }
 LOG.info("Continuing re-encrypt handler after pausing.");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37074be/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
index 5409f0d..5d34d3c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
@@ -68,6 +68,7 @@ import static 
org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -207,8 +208,7 @@ public class TestReencryption {
 ZoneReencryptionStatus zs = it.next();
 assertEquals(zone.toString(), zs.getZoneName());
 assertEquals(ZoneReencryptionStatus.State.Completed, zs.getState());
-assertTrue(zs.getCompletionTime() > 0);
-assertTrue(zs.getCompletionTime() > zs.getSubmissionTime());
+verifyZoneCompletionTime(zs);
 assertNotEquals(fei0.getEzKeyVersionName(), zs.getEzKeyVersionName());
 assertEquals(fei1.getEzKeyVersionName(), zs.getEzKeyVersionName());
 assertEquals(10, zs.getFilesReencrypted());
@@ -600,14 +600,27 @@ public class TestReencryption {
 final ZoneReencryptionStatus zs = it.next();
 assertEquals(zone.toString(), zs.getZoneName());
 assertEquals(ZoneReencryptionStatus.State.Completed, zs.getState());
-assertTrue(zs.getCompletionTime() > 0);
-assertTrue(zs.getCompletionTime() > zs.getSubmissionTime());
+verifyZoneCompletionTime(zs);
 if (fei != null) {
   assertNotEquals(fei.getEzKeyVersionName(), zs.getEzKeyVersionName());
 }
 assertEquals(expectedFiles, zs.getFilesReencrypted());
   }
 
+  /**
+   * Verify the zone status' completion time is larger than 0, and is no less
+   * than submission time.
+   */
+  private void verifyZoneCompletionTime(final ZoneReencryptionStatus zs) {
+assertNotNull(zs);
+assertTrue("Completion time should be positive. " + zs.getCompletionTime(),
+zs.getCompletionTime() > 0);
+assertTrue("Completion time " + zs.getCompletionTime()
++ " should be no less than submission time "
++ zs.getSubmissionTime(),
+zs.getCompletionTime() >= zs.getSubmissionTime());
+  }
+
   @Test
   public void testReencryptLoadedFromFsimage() throws Exception {
 /*
@@ -1476,7 
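The ReencryptionHandler change above is the guarded-wait idiom: re-check the predicate while holding the monitor, so a flag flipped between the loop test and the wait() cannot strand the thread for a full timeout. A minimal self-contained illustration (not project code; the 30-second timeout is arbitrary):

    public class PauseGate {
      private volatile boolean shouldPause = true;

      public void awaitWhilePaused() throws InterruptedException {
        while (shouldPause) {
          synchronized (this) {
            // Re-check under the monitor: resume() may have cleared the flag
            // and notified after the while-test but before we acquired the
            // lock. Without this inner check we would sleep out the timeout.
            if (shouldPause) {
              wait(30000);
            }
          }
        }
      }

      public synchronized void resume() {
        shouldPause = false;
        notifyAll();
      }
    }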

[26/50] [abbrv] hadoop git commit: HADOOP-15594. Exclude commons-lang3 from hadoop-client-minicluster. Contributed by Takanobu Asanuma.

2018-07-13 Thread botong
HADOOP-15594. Exclude commons-lang3 from hadoop-client-minicluster. Contributed 
by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d36ed94e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d36ed94e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d36ed94e

Branch: refs/heads/YARN-7402
Commit: d36ed94ee06945fe9122970b196968fd1c997dcc
Parents: 2ae13d4
Author: Akira Ajisaka 
Authored: Wed Jul 11 10:53:08 2018 -0400
Committer: Akira Ajisaka 
Committed: Wed Jul 11 10:53:08 2018 -0400

--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml | 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36ed94e/hadoop-client-modules/hadoop-client-minicluster/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 6fa24b4..490281a 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -167,6 +167,10 @@
           <artifactId>commons-io</artifactId>
         </exclusion>
         <exclusion>
+          <groupId>org.apache.commons</groupId>
+          <artifactId>commons-lang3</artifactId>
+        </exclusion>
+        <exclusion>
           <groupId>commons-logging</groupId>
           <artifactId>commons-logging</artifactId>
         </exclusion>
@@ -492,6 +496,10 @@
           <artifactId>commons-codec</artifactId>
         </exclusion>
         <exclusion>
+          <groupId>org.apache.commons</groupId>
+          <artifactId>commons-lang3</artifactId>
+        </exclusion>
+        <exclusion>
           <groupId>commons-logging</groupId>
           <artifactId>commons-logging</artifactId>
         </exclusion>





[08/50] [abbrv] hadoop git commit: Merge trunk into HDDS-48

2018-07-13 Thread botong
Merge trunk into HDDS-48


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c275a9a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c275a9a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c275a9a6

Branch: refs/heads/YARN-7402
Commit: c275a9a6a07b2bd889bdba4d05b420027f430b34
Parents: 44e19fc 83cd84b
Author: Bharat Viswanadham 
Authored: Mon Jul 9 12:13:03 2018 -0700
Committer: Bharat Viswanadham 
Committed: Mon Jul 9 12:13:03 2018 -0700

--
 .gitignore  |4 +
 dev-support/bin/ozone-dist-layout-stitching |2 +-
 ...ExcludePrivateAnnotationsStandardDoclet.java |6 +-
 .../hadoop-common/src/main/conf/hadoop-env.sh   |6 +-
 .../org/apache/hadoop/conf/Configuration.java   |  458 +++---
 .../java/org/apache/hadoop/fs/FileContext.java  |9 +-
 .../org/apache/hadoop/fs/LocalDirAllocator.java |7 +-
 .../hadoop-common/src/site/markdown/Metrics.md  |   39 +-
 .../org/apache/hadoop/fs/TestFileContext.java   |   44 +-
 .../apache/hadoop/fs/TestLocalDirAllocator.java |   59 +
 .../src/main/compose/ozone/docker-compose.yaml  |6 +-
 .../src/main/compose/ozone/docker-config|2 +-
 .../src/main/compose/ozoneperf/README.md|4 +-
 .../main/compose/ozoneperf/docker-compose.yaml  |6 +-
 .../src/main/compose/ozoneperf/docker-config|2 +-
 .../scm/client/ContainerOperationClient.java|  117 +-
 hadoop-hdds/common/pom.xml  |   18 +
 .../hadoop/hdds/protocol/DatanodeDetails.java   |   13 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |6 +-
 .../hadoop/hdds/scm/client/ScmClient.java   |   43 +-
 .../container/common/helpers/ContainerInfo.java |  167 ++-
 .../common/helpers/ContainerWithPipeline.java   |  131 ++
 .../StorageContainerLocationProtocol.java   |   18 +-
 ...rLocationProtocolClientSideTranslatorPB.java |   34 +-
 .../org/apache/hadoop/ozone/OzoneConsts.java|   22 +-
 .../apache/hadoop/ozone/audit/AuditAction.java  |   30 +
 .../hadoop/ozone/audit/AuditEventStatus.java|   36 +
 .../apache/hadoop/ozone/audit/AuditLogger.java  |  128 ++
 .../hadoop/ozone/audit/AuditLoggerType.java |   37 +
 .../apache/hadoop/ozone/audit/AuditMarker.java  |   38 +
 .../apache/hadoop/ozone/audit/Auditable.java|   32 +
 .../apache/hadoop/ozone/audit/package-info.java |  123 ++
 .../org/apache/hadoop/ozone/common/Storage.java |6 +-
 ...rLocationProtocolServerSideTranslatorPB.java |   33 +-
 .../main/proto/ScmBlockLocationProtocol.proto   |   10 +-
 .../StorageContainerLocationProtocol.proto  |   34 +-
 hadoop-hdds/common/src/main/proto/hdds.proto|   28 +-
 .../common/src/main/resources/ozone-default.xml |  131 +-
 .../apache/hadoop/ozone/audit/DummyAction.java  |   51 +
 .../apache/hadoop/ozone/audit/DummyEntity.java  |   57 +
 .../ozone/audit/TestOzoneAuditLogger.java   |  147 ++
 .../apache/hadoop/ozone/audit/package-info.java |   23 +
 .../common/src/test/resources/log4j2.properties |   76 +
 .../apache/hadoop/hdds/scm/HddsServerUtil.java  |   11 -
 .../DeleteBlocksCommandHandler.java |   30 +-
 .../protocol/StorageContainerNodeProtocol.java  |4 +-
 .../src/main/resources/webapps/static/ozone.js  |4 +-
 .../webapps/static/templates/config.html|4 +-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |   80 +-
 .../block/DatanodeDeletedBlockTransactions.java |   11 +-
 .../hadoop/hdds/scm/block/DeletedBlockLog.java  |2 +-
 .../container/CloseContainerEventHandler.java   |   35 +-
 .../hdds/scm/container/ContainerMapping.java|  128 +-
 .../scm/container/ContainerStateManager.java|   30 +-
 .../hadoop/hdds/scm/container/Mapping.java  |   26 +-
 .../scm/container/closer/ContainerCloser.java   |   15 +-
 .../scm/container/states/ContainerStateMap.java |   13 +-
 .../hadoop/hdds/scm/events/SCMEvents.java   |   80 ++
 .../hadoop/hdds/scm/events/package-info.java|   23 +
 .../hadoop/hdds/scm/node/CommandQueue.java  |2 +-
 .../hadoop/hdds/scm/node/DatanodeInfo.java  |  109 ++
 .../hdds/scm/node/HeartbeatQueueItem.java   |   98 --
 .../hadoop/hdds/scm/node/NodeManager.java   |   16 +-
 .../hadoop/hdds/scm/node/NodeStateManager.java  |  575 
 .../hadoop/hdds/scm/node/SCMNodeManager.java|  511 +--
 .../node/states/NodeAlreadyExistsException.java |   45 +
 .../hdds/scm/node/states/NodeException.java |   44 +
 .../scm/node/states/NodeNotFoundException.java  |   49 +
 .../hdds/scm/node/states/NodeStateMap.java  |  281 
 .../hdds/scm/pipelines/PipelineManager.java |   27 +-
 .../hdds/scm/pipelines/PipelineSelector.java|   16 +
 .../scm/pipelines/ratis/RatisManagerImpl.java   |1 +
 .../standalone/StandaloneManagerImpl.java   |1 +
 .../hdds/scm/server/SCMBlockProtocolServer.java |2 +-
 

[05/50] [abbrv] hadoop git commit: HADOOP-15591. KMSClientProvider should log KMS DT acquisition at INFO level. Contributed by Kitti Nanasi.

2018-07-13 Thread botong
HADOOP-15591. KMSClientProvider should log KMS DT acquisition at INFO level. 
Contributed by Kitti Nanasi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/def9d94a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/def9d94a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/def9d94a

Branch: refs/heads/YARN-7402
Commit: def9d94a40e1ff71a0dc5a4db1f042e2704cb84d
Parents: 83cd84b
Author: Xiao Chen 
Authored: Mon Jul 9 12:00:32 2018 -0700
Committer: Xiao Chen 
Committed: Mon Jul 9 12:01:52 2018 -0700

--
 .../java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/def9d94a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 7b46075..11815da 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -1036,13 +1036,13 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   public Token run() throws Exception {
 // Not using the cached token here.. Creating a new token here
 // everytime.
-LOG.debug("Getting new token from {}, renewer:{}", url, renewer);
+LOG.info("Getting new token from {}, renewer:{}", url, renewer);
 return authUrl.getDelegationToken(url,
 new DelegationTokenAuthenticatedURL.Token(), renewer, 
doAsUser);
   }
 });
 if (token != null) {
-  LOG.debug("New token received: ({})", token);
+  LOG.info("New token received: ({})", token);
   credentials.addToken(token.getService(), token);
   tokens = new Token[] { token };
 } else {


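The promotion from debug() to info() is a one-word edit because SLF4J's {} placeholders defer message formatting until the level check passes. An illustrative snippet, not project code:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class TokenLogDemo {
      private static final Logger LOG =
          LoggerFactory.getLogger(TokenLogDemo.class);

      void fetch(String url, String renewer) {
        // The message string is only built if INFO is enabled, so raising
        // the level is purely a method-name change at the call site.
        LOG.info("Getting new token from {}, renewer:{}", url, renewer);
      }
    }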



[03/50] [abbrv] hadoop git commit: Only mount non-empty directories for cgroups (miklos.szeg...@cloudera.com via rkanter)

2018-07-13 Thread botong
Only mount non-empty directories for cgroups (miklos.szeg...@cloudera.com via 
rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0838fe83
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0838fe83
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0838fe83

Branch: refs/heads/YARN-7402
Commit: 0838fe833738e04f5e6f6408e97866d77bebbf30
Parents: eecb5ba
Author: Robert Kanter 
Authored: Mon Jul 9 10:37:20 2018 -0700
Committer: Robert Kanter 
Committed: Mon Jul 9 10:37:20 2018 -0700

--
 .../impl/container-executor.c   | 30 +++-
 .../test/test-container-executor.c  | 20 +
 2 files changed, 49 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0838fe83/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index baf0e8b..eff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -2379,6 +2379,28 @@ void chown_dir_contents(const char *dir_path, uid_t uid, 
gid_t gid) {
   free(path_tmp);
 }
 
+int is_empty(char *target_dir) {
+  DIR *dir = NULL;
+  struct dirent *entry = NULL;
+  dir = opendir(target_dir);
+  if (!dir) {
+fprintf(LOGFILE, "Could not open directory %s - %s\n", target_dir,
+strerror(errno));
+return 0;
+  }
+  while ((entry = readdir(dir)) != NULL) {
+if (strcmp(entry->d_name, ".") == 0) {
+  continue;
+}
+if (strcmp(entry->d_name, "..") == 0) {
+  continue;
+}
+fprintf(LOGFILE, "Directory is not empty %s\n", target_dir);
+return 0;
+  }
+  return 1;
+}
+
 /**
  * Mount a cgroup controller at the requested mount point and create
  * a hierarchy for the Hadoop NodeManager to manage.
@@ -2413,7 +2435,13 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
 result = -1;
   } else {
 if (strstr(mount_path, "..") != NULL) {
-  fprintf(LOGFILE, "Unsupported cgroup mount path detected.\n");
+  fprintf(LOGFILE, "Unsupported cgroup mount path detected. %s\n",
+  mount_path);
+  result = INVALID_COMMAND_PROVIDED;
+  goto cleanup;
+}
+if (!is_empty(mount_path)) {
+  fprintf(LOGFILE, "cgroup mount path is not empty. %s\n", mount_path);
   result = INVALID_COMMAND_PROVIDED;
   goto cleanup;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0838fe83/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 3d32883..a199d84 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -1203,6 +1203,23 @@ void test_trim_function() {
   free(trimmed);
 }
 
+void test_is_empty() {
+  printf("\nTesting is_empty function\n");
+  if (is_empty("/")) {
+printf("FAIL: / should not be empty\n");
+exit(1);
+  }
+  if (is_empty("/tmp/2938rf2983hcqnw8ud/noexist")) {
+printf("FAIL: /tmp/2938rf2983hcqnw8ud/noexist should not exist\n");
+exit(1);
+  }
+  mkdir("/tmp/2938rf2983hcqnw8ud/emptydir", S_IRWXU);
+  if (!is_empty("/tmp/2938rf2983hcqnw8ud/emptydir")) {
+printf("FAIL: /tmp/2938rf2983hcqnw8ud/emptydir be empty\n");
+exit(1);
+  }
+}
+
 // This test is expected to be executed either by a regular
 // user or by root. If executed by a regular user it doesn't
 // test all the functions that would depend on changing the
@@ -1264,6 +1281,9 @@ int main(int argc, char **argv) {
 
   printf("\nStarting tests\n");
 
+  
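For readers following along from the Java side, the same emptiness guard expressed with java.nio.file (an illustrative sketch, not part of this commit):

    import java.io.IOException;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.Path;

    final class DirCheck {
      // A directory is safe to mount over only if it exists and contains
      // no entries; "." and ".." are not reported by DirectoryStream.
      static boolean isEmptyDir(Path dir) throws IOException {
        try (DirectoryStream<Path> entries = Files.newDirectoryStream(dir)) {
          return !entries.iterator().hasNext();
        }
      }
    }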

[07/50] [abbrv] hadoop git commit: HDDS-224. Create metrics for Event Watcher. Contributed by Elek, Marton.

2018-07-13 Thread botong
HDDS-224. Create metrics for Event Watcher.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e12d93bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e12d93bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e12d93bf

Branch: refs/heads/YARN-7402
Commit: e12d93bfc1a0efd007bc84758e60b5149c3aa663
Parents: 895845e
Author: Anu Engineer 
Authored: Mon Jul 9 12:02:20 2018 -0700
Committer: Anu Engineer 
Committed: Mon Jul 9 12:10:12 2018 -0700

--
 hadoop-hdds/framework/pom.xml   |   5 +
 .../hadoop/hdds/server/events/EventWatcher.java |  43 +++-
 .../hdds/server/events/EventWatcherMetrics.java |  79 ++
 .../hdds/server/events/TestEventWatcher.java| 107 ---
 4 files changed, 220 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e12d93bf/hadoop-hdds/framework/pom.xml
--
diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml
index a497133..6e1927d 100644
--- a/hadoop-hdds/framework/pom.xml
+++ b/hadoop-hdds/framework/pom.xml
@@ -39,6 +39,11 @@
       <artifactId>hadoop-hdds-common</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e12d93bf/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
--
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
index 19fddde..8c5605a 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
@@ -26,12 +26,17 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
 
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.ozone.lease.Lease;
 import org.apache.hadoop.ozone.lease.LeaseAlreadyExistException;
 import org.apache.hadoop.ozone.lease.LeaseExpiredException;
 import org.apache.hadoop.ozone.lease.LeaseManager;
 import org.apache.hadoop.ozone.lease.LeaseNotFoundException;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.commons.collections.map.HashedMap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -58,18 +63,39 @@ public abstract class EventWatcher
   private final LeaseManager<UUID> leaseManager;
 
+  private final EventWatcherMetrics metrics;
+
+  private final String name;
+
   protected final Map trackedEventsByUUID =
   new ConcurrentHashMap<>();
 
   protected final Set trackedEvents = new HashSet<>();
 
-  public EventWatcher(Event startEvent,
+  private final Map startTrackingTimes = new HashedMap();
+
+  public EventWatcher(String name, Event startEvent,
   Event completionEvent,
   LeaseManager leaseManager) {
 this.startEvent = startEvent;
 this.completionEvent = completionEvent;
 this.leaseManager = leaseManager;
+this.metrics = new EventWatcherMetrics();
+Preconditions.checkNotNull(name);
+if (name.equals("")) {
+  name = getClass().getSimpleName();
+}
+if (name.equals("")) {
+  //for anonymous inner classes
+  name = getClass().getName();
+}
+this.name = name;
+  }
 
+  public EventWatcher(Event startEvent,
+  Event completionEvent,
+  LeaseManager leaseManager) {
+this("", startEvent, completionEvent, leaseManager);
   }
 
   public void start(EventQueue queue) {
@@ -87,11 +113,16 @@ public abstract class EventWatcher

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e12d93bf/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java
--
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java
new file mode 100644
index 000..1db81a9
--- /dev/null
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright 
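Since the constructor diff above is partly mangled by extraction, here is its naming fallback restated in isolation (a sketch, not the committed code): prefer the explicit name, then the simple class name, and for anonymous inner classes, whose getSimpleName() is empty, the fully qualified name.

    static String watcherName(String name, Class<?> watcherClass) {
      if (name == null || name.isEmpty()) {
        name = watcherClass.getSimpleName(); // "" for anonymous inner classes
      }
      if (name.isEmpty()) {
        name = watcherClass.getName();       // always non-empty
      }
      return name;
    }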

[06/50] [abbrv] hadoop git commit: HADOOP-15581. Set default jetty log level to INFO in KMS. Contributed by Kitti Nanasi.

2018-07-13 Thread botong
HADOOP-15581. Set default jetty log level to INFO in KMS. Contributed by Kitti 
Nanasi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/895845e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/895845e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/895845e9

Branch: refs/heads/YARN-7402
Commit: 895845e9b0d7ac49da36b5cf773c6330afe4f3e0
Parents: def9d94
Author: Xiao Chen 
Authored: Mon Jul 9 12:06:25 2018 -0700
Committer: Xiao Chen 
Committed: Mon Jul 9 12:06:50 2018 -0700

--
 .../hadoop-kms/src/main/conf/kms-log4j.properties| 4 +++-
 .../hadoop-kms/src/test/resources/log4j.properties   | 4 +++-
 2 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/895845e9/hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties 
b/hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties
index 04a3cf3..e2afd41 100644
--- a/hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties
+++ b/hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties
@@ -37,4 +37,6 @@ log4j.logger.org.apache.hadoop=INFO
 
log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
 # make zookeeper log level an explicit config, and not changing with 
rootLogger.
 log4j.logger.org.apache.zookeeper=INFO
-log4j.logger.org.apache.curator=INFO
\ No newline at end of file
+log4j.logger.org.apache.curator=INFO
+# make jetty log level an explicit config, and not changing with rootLogger.
+log4j.logger.org.eclipse.jetty=INFO
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/895845e9/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties 
b/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties
index e319af6..b8e6353 100644
--- a/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties
+++ b/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties
@@ -31,4 +31,6 @@ log4j.logger.org.apache.directory.server.core=OFF
 log4j.logger.org.apache.hadoop.util.NativeCodeLoader=OFF
 # make zookeeper log level an explicit config, and not changing with 
rootLogger.
 log4j.logger.org.apache.zookeeper=INFO
-log4j.logger.org.apache.curator=INFO
\ No newline at end of file
+log4j.logger.org.apache.curator=INFO
+# make jetty log level an explicit config, and not changing with rootLogger.
+log4j.logger.org.eclipse.jetty=INFO
\ No newline at end of file





[36/50] [abbrv] hadoop git commit: YARN-8518. test-container-executor test_is_empty() is broken (Jim_Brennan via rkanter)

2018-07-13 Thread botong
YARN-8518. test-container-executor test_is_empty() is broken (Jim_Brennan via 
rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1bc106a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1bc106a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1bc106a7

Branch: refs/heads/YARN-7402
Commit: 1bc106a738a6ce4f7ed025d556bb44c1ede022e3
Parents: 556d9b3
Author: Robert Kanter 
Authored: Thu Jul 12 16:38:46 2018 -0700
Committer: Robert Kanter 
Committed: Thu Jul 12 16:38:46 2018 -0700

--
 .../container-executor/test/test-container-executor.c | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bc106a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index a199d84..5607823 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -1203,19 +1203,23 @@ void test_trim_function() {
   free(trimmed);
 }
 
+int is_empty(char *name);
+
 void test_is_empty() {
   printf("\nTesting is_empty function\n");
   if (is_empty("/")) {
 printf("FAIL: / should not be empty\n");
 exit(1);
   }
-  if (is_empty("/tmp/2938rf2983hcqnw8ud/noexist")) {
-printf("FAIL: /tmp/2938rf2983hcqnw8ud/noexist should not exist\n");
+  char *noexist = TEST_ROOT "/noexist";
+  if (is_empty(noexist)) {
+printf("%s should not exist\n", noexist);
 exit(1);
   }
-  mkdir("/tmp/2938rf2983hcqnw8ud/emptydir", S_IRWXU);
-  if (!is_empty("/tmp/2938rf2983hcqnw8ud/emptydir")) {
-printf("FAIL: /tmp/2938rf2983hcqnw8ud/emptydir be empty\n");
+  char *emptydir = TEST_ROOT "/emptydir";
+  mkdir(emptydir, S_IRWXU);
+  if (!is_empty(emptydir)) {
+printf("FAIL: %s should be empty\n", emptydir);
 exit(1);
   }
 }





[11/50] [abbrv] hadoop git commit: HDDS-224. Create metrics for Event Watcher. Contributed by Elek, Marton.

2018-07-13 Thread botong
HDDS-224. Create metrics for Event Watcher.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb5e2258
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb5e2258
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb5e2258

Branch: refs/heads/YARN-7402
Commit: cb5e225868a069d6d16244b462ebada44465dce8
Parents: 4a08ddf
Author: Anu Engineer 
Authored: Mon Jul 9 12:52:39 2018 -0700
Committer: Anu Engineer 
Committed: Mon Jul 9 13:02:40 2018 -0700

--
 .../hadoop/hdds/server/events/EventQueue.java   | 108 +++
 .../server/events/SingleThreadExecutor.java |  35 --
 .../hdds/server/events/TestEventQueue.java  |  35 +-
 3 files changed, 91 insertions(+), 87 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5e2258/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
--
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
index 44d85f5..7e29223 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
@@ -18,7 +18,11 @@
 package org.apache.hadoop.hdds.server.events;
 
 import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+
+import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -42,6 +46,8 @@ public class EventQueue implements EventPublisher, 
AutoCloseable {
   private static final Logger LOG =
   LoggerFactory.getLogger(EventQueue.class);
 
+  private static final String EXECUTOR_NAME_SEPARATOR = "For";
+
   private final Map<Event, Map<EventExecutor, List<EventHandler>>> executors =
       new HashMap<>();
 
@@ -51,38 +57,74 @@ public class EventQueue implements EventPublisher, 
AutoCloseable {
 
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
       EVENT_TYPE event, EventHandler<PAYLOAD> handler) {
-
-this.addHandler(event, new SingleThreadExecutor<>(
-event.getName()), handler);
+this.addHandler(event, handler, generateHandlerName(handler));
   }
 
+  /**
+   * Add a new handler to the event queue.
+   * <p>
+   * By default a separate single-thread executor will be dedicated to
+   * delivering the events to the registered event handler.
+   *
+   * @param event        Triggering event.
+   * @param handler      Handler of event (will be called from a separate
+   *                     thread)
+   * @param handlerName  The name of handler (should be unique together with
+   *                     the event name)
+   * @param <PAYLOAD>    The type of the event payload.
+   * @param <EVENT_TYPE> The type of the event identifier.
+   */
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
-      EVENT_TYPE event,
-      EventExecutor<PAYLOAD> executor,
-      EventHandler<PAYLOAD> handler) {
+      EVENT_TYPE event, EventHandler<PAYLOAD> handler, String handlerName) {
+validateEvent(event);
+Preconditions.checkNotNull(handler, "Handler name should not be null.");
+String executorName =
+StringUtils.camelize(event.getName()) + EXECUTOR_NAME_SEPARATOR
++ handlerName;
+this.addHandler(event, new SingleThreadExecutor<>(executorName), handler);
+  }
 
-executors.putIfAbsent(event, new HashMap<>());
-executors.get(event).putIfAbsent(executor, new ArrayList<>());
+  private <EVENT_TYPE extends Event<?>> void validateEvent(EVENT_TYPE event) {
+Preconditions
+.checkArgument(!event.getName().contains(EXECUTOR_NAME_SEPARATOR),
+"Event name should not contain " + EXECUTOR_NAME_SEPARATOR
++ " string.");
 
-executors.get(event)
-.get(executor)
-.add(handler);
+  }
+
+  private <PAYLOAD> String generateHandlerName(EventHandler<PAYLOAD> handler) {
+if (!"".equals(handler.getClass().getSimpleName())) {
+  return handler.getClass().getSimpleName();
+} else {
+  return handler.getClass().getName();
+}
   }
 
   /**
-   * Creates one executor with multiple event handlers.
+   * Add event handler with custom executor.
+   *
+   * @param event    Triggering event.
+   * @param executor The executor implementation to deliver events from
+   *                 separate threads. Please keep in mind that
+   *                 registering metrics is the responsibility of the
+   *                 caller.
+   * @param handler  Handler of event (will be called from a separate
+   *                 thread)
+   * @param <PAYLOAD>    The type of the event payload.
+   * @param <EVENT_TYPE> The type of the event identifier.
*/
-  public void addHandlerGroup(String 
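A hedged usage sketch of the two-argument addHandler documented above. TypedEvent and fireEvent are assumed from the same hdds events package, not shown in this diff; the event name and payload are invented:

    import org.apache.hadoop.hdds.server.events.EventQueue;
    import org.apache.hadoop.hdds.server.events.TypedEvent;

    public class EventQueueDemo {
      public static void main(String[] args) {
        EventQueue queue = new EventQueue();
        TypedEvent<String> demo = new TypedEvent<>(String.class, "Demo");

        // The handler runs on a dedicated single-thread executor; per the
        // naming scheme above it is named "DemoFor" + the handler class name.
        queue.addHandler(demo, (payload, publisher) -> {
          System.out.println("got " + payload);
        });

        queue.fireEvent(demo, "hello");
      }
    }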

[09/50] [abbrv] hadoop git commit: YARN-7899. [AMRMProxy] Stateful FederationInterceptor for pending requests. Contributed by Botong Huang.

2018-07-13 Thread botong
YARN-7899. [AMRMProxy] Stateful FederationInterceptor for pending requests. 
Contributed by Botong Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea9b6082
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea9b6082
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea9b6082

Branch: refs/heads/YARN-7402
Commit: ea9b608237e7f2cf9b1e36b0f78c9674ec84096f
Parents: e12d93b
Author: Giovanni Matteo Fumarola 
Authored: Mon Jul 9 12:27:36 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Mon Jul 9 12:27:36 2018 -0700

--
 .../hadoop/yarn/client/AMRMClientUtils.java |  91 
 .../hadoop/yarn/server/AMRMClientRelayer.java   |   9 +-
 .../yarn/server/uam/UnmanagedAMPoolManager.java |  16 ++
 .../server/uam/UnmanagedApplicationManager.java |  40 ++---
 .../yarn/server/MockResourceManagerFacade.java  |  13 +-
 .../amrmproxy/FederationInterceptor.java| 146 ---
 .../amrmproxy/BaseAMRMProxyTest.java|   2 +
 .../amrmproxy/TestFederationInterceptor.java|  17 +++
 8 files changed, 192 insertions(+), 142 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea9b6082/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java
index 387e399..5d4ab4a6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java
@@ -36,19 +36,9 @@ import org.apache.hadoop.security.SaslRpcServer;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
-import 
org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
-import 
org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
-import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import 
org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
-import 
org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException;
-import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -68,87 +58,6 @@ public final class AMRMClientUtils {
   }
 
   /**
-   * Handle ApplicationNotRegistered exception and re-register.
-   *
-   * @param appId application Id
-   * @param rmProxy RM proxy instance
-   * @param registerRequest the AM re-register request
-   * @throws YarnException if re-register fails
-   */
-  public static void handleNotRegisteredExceptionAndReRegister(
-  ApplicationId appId, ApplicationMasterProtocol rmProxy,
-  RegisterApplicationMasterRequest registerRequest) throws YarnException {
-LOG.info("App attempt {} not registered, most likely due to RM failover. "
-+ " Trying to re-register.", appId);
-try {
-  rmProxy.registerApplicationMaster(registerRequest);
-} catch (Exception e) {
-  if (e instanceof InvalidApplicationMasterRequestException
-  && e.getMessage().contains(APP_ALREADY_REGISTERED_MESSAGE)) {
-LOG.info("Concurrent thread successfully registered, moving on.");
-  } else {
-LOG.error("Error trying to re-register AM", e);
-throw new YarnException(e);
-  }
-}
-  }
-
-  /**
-   * Helper method for client calling ApplicationMasterProtocol.allocate that
-   * handles re-register if RM fails over.
-   *
-   * @param request allocate request
-   * @param rmProxy RM proxy
-   * @param registerRequest the register request for re-register
-   * @param appId application id
-   * @return allocate response
-   * @throws YarnException if RM call fails
-   * @throws IOException if RM call fails
-   */
-  public static AllocateResponse allocateWithReRegister(AllocateRequest 
request,
-  
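For reference, the control flow the deleted helpers implemented, condensed into a plausible reconstruction (signature and exception types taken from the removed Javadoc and imports; the committed body is truncated above, so treat this as a sketch, not the exact code):

    public static AllocateResponse allocateWithReRegister(
        AllocateRequest request, ApplicationMasterProtocol rmProxy,
        RegisterApplicationMasterRequest registerRequest, ApplicationId appId)
        throws YarnException, IOException {
      try {
        return rmProxy.allocate(request);
      } catch (ApplicationMasterNotRegisteredException e) {
        // RM fail-over dropped the registration: re-register, then retry.
        handleNotRegisteredExceptionAndReRegister(appId, rmProxy,
            registerRequest);
        return rmProxy.allocate(request);
      }
    }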

[04/50] [abbrv] hadoop git commit: YARN-8506. Make GetApplicationsRequestPBImpl thread safe. (wangda)

2018-07-13 Thread botong
YARN-8506. Make GetApplicationsRequestPBImpl thread safe. (wangda)

Change-Id: If304567abb77a01b686d82c769bdf50728484163


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83cd84b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83cd84b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83cd84b7

Branch: refs/heads/YARN-7402
Commit: 83cd84b70bac7b613eb4b2901d5ffe40098692eb
Parents: 0838fe8
Author: Wangda Tan 
Authored: Mon Jul 9 11:30:08 2018 -0700
Committer: Wangda Tan 
Committed: Mon Jul 9 11:30:08 2018 -0700

--
 .../impl/pb/GetApplicationsRequestPBImpl.java   | 44 ++--
 1 file changed, 22 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83cd84b7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
index a6abb99..4c5fee0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
@@ -65,7 +65,7 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
 viaProto = true;
   }
 
-  public GetApplicationsRequestProto getProto() {
+  public synchronized GetApplicationsRequestProto getProto() {
 mergeLocalToProto();
 proto = viaProto ? proto : builder.build();
 viaProto = true;
@@ -175,13 +175,13 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
   }
 
   @Override
-  public Set<String> getApplicationTypes() {
+  public synchronized Set<String> getApplicationTypes() {
 initApplicationTypes();
 return this.applicationTypes;
   }
 
   @Override
-  public void setApplicationTypes(Set<String> applicationTypes) {
+  public synchronized void setApplicationTypes(Set<String> applicationTypes) {
 maybeInitBuilder();
 if (applicationTypes == null)
   builder.clearApplicationTypes();
@@ -198,13 +198,13 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
   }
 
   @Override
-  public Set<String> getApplicationTags() {
+  public synchronized Set<String> getApplicationTags() {
 initApplicationTags();
 return this.applicationTags;
   }
 
   @Override
-  public void setApplicationTags(Set<String> tags) {
+  public synchronized void setApplicationTags(Set<String> tags) {
 maybeInitBuilder();
 if (tags == null || tags.isEmpty()) {
   builder.clearApplicationTags();
@@ -219,7 +219,7 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
   }
 
   @Override
-  public EnumSet<YarnApplicationState> getApplicationStates() {
+  public synchronized EnumSet<YarnApplicationState> getApplicationStates() {
 initApplicationStates();
 return this.applicationStates;
   }
@@ -233,12 +233,12 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
   }
 
   @Override
-  public ApplicationsRequestScope getScope() {
+  public synchronized ApplicationsRequestScope getScope() {
 initScope();
 return this.scope;
   }
 
-  public void setScope(ApplicationsRequestScope scope) {
+  public synchronized void setScope(ApplicationsRequestScope scope) {
 maybeInitBuilder();
 if (scope == null) {
   builder.clearScope();
@@ -247,7 +247,7 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
   }
 
   @Override
-  public void setApplicationStates(EnumSet<YarnApplicationState>
applicationStates) {
+  public synchronized void setApplicationStates(EnumSet<YarnApplicationState>
applicationStates) {
 maybeInitBuilder();
 if (applicationStates == null) {
   builder.clearApplicationStates();
@@ -256,7 +256,7 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
   }
 
   @Override
-  public void setApplicationStates(Set<String> applicationStates) {
+  public synchronized void setApplicationStates(Set<String> applicationStates)
{
 EnumSet<YarnApplicationState> appStates = null;
 for (YarnApplicationState state : YarnApplicationState.values()) {
   if (applicationStates.contains(
@@ -272,12 +272,12 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
   }
 
   @Override
-  public Set<String> getUsers() {
+  public synchronized Set<String> getUsers() {
 initUsers();
 return this.users;
   }
 
-  public void setUsers(Set<String> users) {
+  public synchronized 
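Why every accessor needs the lock: PBImpl classes lazily merge a mutable builder into an immutable proto, and an unsynchronized reader can trigger builder.build() while a writer is still mutating the builder. The getProto() shown earlier in this diff, annotated (the comments are editorial; the code is from the patch):

    public synchronized GetApplicationsRequestProto getProto() {
      mergeLocalToProto();                        // flush local fields
      proto = viaProto ? proto : builder.build(); // freeze builder state
      viaProto = true;                            // reads now served by proto
      return proto;
    }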

[02/50] [abbrv] hadoop git commit: HDDS-213. Single lock to synchronize KeyValueContainer#update.

2018-07-13 Thread botong
HDDS-213. Single lock to synchronize KeyValueContainer#update.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44e19fc7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44e19fc7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44e19fc7

Branch: refs/heads/YARN-7402
Commit: 44e19fc7f70b5c19f2b626fe247aea5d51ada51c
Parents: cb9574a
Author: Hanisha Koneru 
Authored: Mon Jul 9 09:33:09 2018 -0700
Committer: Hanisha Koneru 
Committed: Mon Jul 9 09:33:09 2018 -0700

--
 .../container/common/impl/ContainerData.java|  28 +++--
 .../common/impl/ContainerDataYaml.java  |  10 +-
 .../container/keyvalue/KeyValueContainer.java   | 124 +++
 .../container/ozoneimpl/ContainerReader.java|  37 +++---
 4 files changed, 87 insertions(+), 112 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44e19fc7/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index 0d217e4..54b186b 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -182,12 +182,14 @@ public class ContainerData {
   }
 
   /**
-   * Adds metadata.
+   * Add/Update metadata.
+   * We should hold the container lock before updating the metadata as this
+   * will be persisted on disk. Unless, we are reconstructing ContainerData
+   * from protoBuf or from on disk .container file in which case lock is not
+   * required.
*/
-  public void addMetadata(String key, String value) throws IOException {
-synchronized (this.metadata) {
-  metadata.put(key, value);
-}
+  public void addMetadata(String key, String value) {
+metadata.put(key, value);
   }
 
   /**
@@ -195,9 +197,19 @@ public class ContainerData {
* @return metadata
*/
   public Map getMetadata() {
-synchronized (this.metadata) {
-  return Collections.unmodifiableMap(this.metadata);
-}
+return Collections.unmodifiableMap(this.metadata);
+  }
+
+  /**
+   * Set metadata.
+   * We should hold the container lock before updating the metadata as this
+   * will be persisted on disk. Unless, we are reconstructing ContainerData
+   * from protoBuf or from on disk .container file in which case lock is not
+   * required.
+   */
+  public void setMetadata(Map metadataMap) {
+metadata.clear();
+metadata.putAll(metadataMap);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44e19fc7/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
index 70d1615..90af24f 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
@@ -200,15 +200,7 @@ public final class ContainerDataYaml {
 OzoneConsts.METADATA_PATH));
 kvData.setChunksPath((String) nodes.get(OzoneConsts.CHUNKS_PATH));
 Map meta = (Map) nodes.get(OzoneConsts.METADATA);
-meta.forEach((key, val) -> {
-  try {
-kvData.addMetadata(key, val);
-  } catch (IOException e) {
-throw new IllegalStateException("Unexpected " +
-"Key Value Pair " + "(" + key + "," + val +")in the metadata " 
+
-"for containerId " + (long) nodes.get("containerId"));
-  }
-});
+kvData.setMetadata(meta);
 String state = (String) nodes.get(OzoneConsts.STATE);
 switch (state) {
 case "OPEN":

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44e19fc7/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
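A sketch of the caller-side contract the new Javadoc states. The writeLock()/writeUnlock()/update() names are assumed from the HDDS Container interface, and the keys and values are invented; only addMetadata() appears in this diff:

    container.writeLock();
    try {
      // Metadata is persisted into the .container file, so mutate it only
      // while holding the container write lock, per the contract above.
      containerData.addMetadata("VOLUME", volumeName);
      containerData.addMetadata("OWNER", ownerName);
      container.update(containerData.getMetadata(), true);
    } finally {
      container.writeUnlock();
    }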
 

[14/50] [abbrv] hadoop git commit: HDDS-48. Fix branch after merging from trunk.

2018-07-13 Thread botong
HDDS-48. Fix branch after merging from trunk.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3584baf2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3584baf2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3584baf2

Branch: refs/heads/YARN-7402
Commit: 3584baf2642816a453402a717a05d16754a6ac52
Parents: c275a9a
Author: Bharat Viswanadham 
Authored: Mon Jul 9 12:30:59 2018 -0700
Committer: Arpit Agarwal 
Committed: Mon Jul 9 13:22:30 2018 -0700

--
 .../commandhandler/TestBlockDeletion.java   | 32 +++-
 .../org/apache/hadoop/ozone/scm/TestSCMCli.java |  4 +--
 2 files changed, 19 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3584baf2/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index 62059ec..c60c6c4 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -34,9 +34,10 @@ import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
-import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -46,6 +47,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.utils.MetadataStore;
 import org.junit.Assert;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import java.io.File;
@@ -56,10 +58,11 @@ import java.util.function.Consumer;
 
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
 
+@Ignore("Need to be fixed according to ContainerIO")
 public class TestBlockDeletion {
   private static OzoneConfiguration conf = null;
   private static ObjectStore store;
-  private static ContainerManagerImpl dnContainerManager = null;
+  private static ContainerSet dnContainerManager = null;
   private static StorageContainerManager scm = null;
   private static OzoneManager om = null;
   private static Set<Long> containerIdsWithDeletedBlocks;
@@ -85,9 +88,8 @@ public class TestBlockDeletion {
 MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
 cluster.waitForClusterToBeReady();
 store = OzoneClientFactory.getRpcClient(conf).getObjectStore();
-dnContainerManager =
-(ContainerManagerImpl) cluster.getHddsDatanodes().get(0)
-.getDatanodeStateMachine().getContainer().getContainerManager();
+dnContainerManager = cluster.getHddsDatanodes().get(0)
+.getDatanodeStateMachine().getContainer().getContainerSet();
 om = cluster.getOzoneManager();
 scm = cluster.getStorageContainerManager();
 containerIdsWithDeletedBlocks = new HashSet<>();
@@ -148,8 +150,8 @@ public class TestBlockDeletion {
 Assert.assertEquals(
 scm.getContainerInfo(containerId).getDeleteTransactionId(), 0);
   }
-  Assert.assertEquals(dnContainerManager.readContainer(containerId)
-  .getDeleteTransactionId(),
+  Assert.assertEquals(dnContainerManager.getContainer(containerId)
+  .getContainerData().getDeleteTransactionId(),
   scm.getContainerInfo(containerId).getDeleteTransactionId());
 }
   }
@@ -159,9 +161,9 @@ public class TestBlockDeletion {
   throws IOException {
 return performOperationOnKeyContainers((blockID) -> {
   try {
-MetadataStore db = KeyUtils.getDB(
-dnContainerManager.getContainerMap().get(blockID.getContainerID()),
-conf);
+MetadataStore db = 
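
For readers tracking the API change above: a condensed sketch of the lookup
pattern the test migrates to, assembled from the hunks in this message
(cluster, scm and containerId are assumed from the surrounding test):

    // Old: ((ContainerManagerImpl) ...getContainerManager()).readContainer(id)
    // New: the datanode state machine exposes a ContainerSet, and the
    // per-container metadata now lives behind getContainerData().
    ContainerSet containerSet = cluster.getHddsDatanodes().get(0)
        .getDatanodeStateMachine().getContainer().getContainerSet();
    long dnDeleteTxId = containerSet.getContainer(containerId)
        .getContainerData().getDeleteTransactionId();
    Assert.assertEquals(dnDeleteTxId,
        scm.getContainerInfo(containerId).getDeleteTransactionId());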

[13/50] [abbrv] hadoop git commit: HDDS-240. Implement metrics for EventQueue. Contributed by Elek, Marton.

2018-07-13 Thread botong
HDDS-240. Implement metrics for EventQueue.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2403231c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2403231c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2403231c

Branch: refs/heads/YARN-7402
Commit: 2403231c8c3685ba08cd6bdf715d281cae611e45
Parents: 3c0a66a
Author: Anu Engineer 
Authored: Mon Jul 9 13:04:44 2018 -0700
Committer: Anu Engineer 
Committed: Mon Jul 9 13:04:44 2018 -0700

--
 .../hadoop/hdds/server/events/EventQueue.java   | 108 +++
 .../server/events/SingleThreadExecutor.java |  35 --
 .../hdds/server/events/TestEventQueue.java  |  35 +-
 3 files changed, 91 insertions(+), 87 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2403231c/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
--
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
index 44d85f5..7e29223 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
@@ -18,7 +18,11 @@
 package org.apache.hadoop.hdds.server.events;
 
 import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+
+import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -42,6 +46,8 @@ public class EventQueue implements EventPublisher, 
AutoCloseable {
   private static final Logger LOG =
   LoggerFactory.getLogger(EventQueue.class);
 
+  private static final String EXECUTOR_NAME_SEPARATOR = "For";
+
   private final Map<Event, Map<EventExecutor, List<EventHandler>>> executors =
       new HashMap<>();
 
@@ -51,38 +57,74 @@ public class EventQueue implements EventPublisher, 
AutoCloseable {
 
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
       EVENT_TYPE event, EventHandler<PAYLOAD> handler) {
-
-this.addHandler(event, new SingleThreadExecutor<>(
-event.getName()), handler);
+this.addHandler(event, handler, generateHandlerName(handler));
   }
 
+  /**
+   * Add new handler to the event queue.
+   * <p>
+   * By default a separate single-thread executor will be dedicated to
+   * deliver the events to the registered event handler.
+   *
+   * @param event        Triggering event.
+   * @param handler      Handler of event (will be called from a separate
+   *                     thread)
+   * @param handlerName  The name of handler (should be unique together with
+   *                     the event name)
+   * @param <PAYLOAD>    The type of the event payload.
+   * @param <EVENT_TYPE> The type of the event identifier.
+   */
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
-      EVENT_TYPE event,
-      EventExecutor<PAYLOAD> executor,
-      EventHandler<PAYLOAD> handler) {
+      EVENT_TYPE event, EventHandler<PAYLOAD> handler, String handlerName) {
+validateEvent(event);
+Preconditions.checkNotNull(handler, "Handler name should not be null.");
+String executorName =
+StringUtils.camelize(event.getName()) + EXECUTOR_NAME_SEPARATOR
++ handlerName;
+this.addHandler(event, new SingleThreadExecutor<>(executorName), handler);
+  }
 
-executors.putIfAbsent(event, new HashMap<>());
-executors.get(event).putIfAbsent(executor, new ArrayList<>());
+  private <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void validateEvent(EVENT_TYPE event) {
+Preconditions
+.checkArgument(!event.getName().contains(EXECUTOR_NAME_SEPARATOR),
+"Event name should not contain " + EXECUTOR_NAME_SEPARATOR
++ " string.");
 
-executors.get(event)
-.get(executor)
-.add(handler);
+  }
+
+  private <PAYLOAD> String generateHandlerName(EventHandler<PAYLOAD> handler) {
+if (!"".equals(handler.getClass().getSimpleName())) {
+  return handler.getClass().getSimpleName();
+} else {
+  return handler.getClass().getName();
+}
   }
 
   /**
-   * Creates one executor with multiple event handlers.
+   * Add event handler with custom executor.
+   *
+   * @param event    Triggering event.
+   * @param executor The executor implementation to deliver events from
+   *                 separate threads. Please keep in mind that
+   *                 registering metrics is the responsibility of the
+   *                 caller.
+   * @param handler  Handler of event (will be called from a separate
+   *                 thread)
+   * @param <PAYLOAD>    The type of the event payload.
+   * @param <EVENT_TYPE> The type of the event identifier.
*/
-  public void addHandlerGroup(String 
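
As a usage illustration of the registration API introduced above: a minimal
sketch, not from the patch, assuming the TypedEvent helper and the
lambda-friendly EventHandler.onMessage(payload, publisher) interface from the
same package. With the default addHandler, a dedicated SingleThreadExecutor
is created whose metrics name combines the event name, "For", and the
handler's class name.

    TypedEvent<String> sampleEvent =
        new TypedEvent<>(String.class, "SampleEvent");

    EventQueue queue = new EventQueue();
    // Registers the handler; the executor name must stay unique, which is
    // why validateEvent rejects event names containing the "For" separator.
    queue.addHandler(sampleEvent,
        (payload, publisher) -> System.out.println("received: " + payload));

    queue.fireEvent(sampleEvent, "hello");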

[01/50] [abbrv] hadoop git commit: HDFS-13719. Docs around dfs.image.transfer.timeout are misleading. Contributed by Kitti Nansi. [Forced Update!]

2018-07-13 Thread botong
Repository: hadoop
Updated Branches:
  refs/heads/YARN-7402 262ca7f16 -> 9c24328be (forced update)


HDFS-13719. Docs around dfs.image.transfer.timeout are misleading. Contributed 
by Kitti Nansi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eecb5baa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eecb5baa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eecb5baa

Branch: refs/heads/YARN-7402
Commit: eecb5ba54599aeae758abd4007e55e5b531f
Parents: 43f7fe8
Author: Andrew Wang 
Authored: Mon Jul 9 15:17:21 2018 +0200
Committer: Andrew Wang 
Committed: Mon Jul 9 15:17:21 2018 +0200

--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 13 +
 1 file changed, 5 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eecb5baa/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 6dd2d92..384cedf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1289,11 +1289,10 @@
   <name>dfs.image.transfer.timeout</name>
   <value>60000</value>
   <description>
-Socket timeout for image transfer in milliseconds. This timeout and 
the related
-dfs.image.transfer.bandwidthPerSec parameter should be configured such
-that normal image transfer can complete successfully.
-This timeout prevents client hangs when the sender fails during
-image transfer. This is socket timeout during image transfer.
+Socket timeout for the HttpURLConnection instance used in the image
+transfer. This is measured in milliseconds.
+This timeout prevents client hangs if the connection is idle
+for this configured timeout, during image transfer.
   
 
 
@@ -1304,9 +1303,7 @@
 Maximum bandwidth used for regular image transfers (instead of
 bootstrapping the standby namenode), in bytes per second.
 This can help keep normal namenode operations responsive during
-checkpointing. The maximum bandwidth and timeout in
-dfs.image.transfer.timeout should be set such that normal image
-transfers can complete successfully.
+checkpointing.
 A default value of 0 indicates that throttling is disabled.
 The maximum bandwidth used for bootstrapping standby namenode is
 configured with dfs.image.transfer-bootstrap-standby.bandwidthPerSec.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
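
Since the two properties are now documented independently, a small sketch of
setting them programmatically (key names are from the diff, 60000 ms is the
stock default; the snippet itself is illustrative only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class ImageTransferConfDemo {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Socket (idle) timeout of the HttpURLConnection used for the
        // image transfer, in milliseconds.
        conf.setInt("dfs.image.transfer.timeout", 60000);
        // Bandwidth cap for regular image transfers, bytes/second;
        // 0 disables throttling and no longer needs to match the timeout.
        conf.setLong("dfs.image.transfer.bandwidthPerSec", 0L);
        System.out.println(conf.get("dfs.image.transfer.timeout"));
      }
    }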



[12/50] [abbrv] hadoop git commit: Revert "HDDS-224. Create metrics for Event Watcher."

2018-07-13 Thread botong
Revert "HDDS-224. Create metrics for Event Watcher."

This reverts commit cb5e225868a069d6d16244b462ebada44465dce8.
The JIRA number is wrong, reverting to fix it.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c0a66ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c0a66ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c0a66ab

Branch: refs/heads/YARN-7402
Commit: 3c0a66abe632277e89fccd8dced9e71ca5d87df0
Parents: cb5e225
Author: Anu Engineer 
Authored: Mon Jul 9 13:03:57 2018 -0700
Committer: Anu Engineer 
Committed: Mon Jul 9 13:03:57 2018 -0700

--
 .../hadoop/hdds/server/events/EventQueue.java   | 108 ---
 .../server/events/SingleThreadExecutor.java |  35 ++
 .../hdds/server/events/TestEventQueue.java  |  35 +-
 3 files changed, 87 insertions(+), 91 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c0a66ab/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
--
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
index 7e29223..44d85f5 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
@@ -18,11 +18,7 @@
 package org.apache.hadoop.hdds.server.events;
 
 import com.google.common.annotations.VisibleForTesting;
-
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-
-import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -46,8 +42,6 @@ public class EventQueue implements EventPublisher, 
AutoCloseable {
   private static final Logger LOG =
   LoggerFactory.getLogger(EventQueue.class);
 
-  private static final String EXECUTOR_NAME_SEPARATOR = "For";
-
   private final Map<Event, Map<EventExecutor, List<EventHandler>>> executors =
       new HashMap<>();
 
@@ -57,73 +51,37 @@ public class EventQueue implements EventPublisher, 
AutoCloseable {
 
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
       EVENT_TYPE event, EventHandler<PAYLOAD> handler) {
-this.addHandler(event, handler, generateHandlerName(handler));
-  }
-
-  /**
-   * Add new handler to the event queue.
-   * <p>
-   * By default a separate single-thread executor will be dedicated to
-   * deliver the events to the registered event handler.
-   *
-   * @param event        Triggering event.
-   * @param handler      Handler of event (will be called from a separate
-   *                     thread)
-   * @param handlerName  The name of handler (should be unique together with
-   *                     the event name)
-   * @param <PAYLOAD>    The type of the event payload.
-   * @param <EVENT_TYPE> The type of the event identifier.
-   */
-  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
-      EVENT_TYPE event, EventHandler<PAYLOAD> handler, String handlerName) {
-validateEvent(event);
-Preconditions.checkNotNull(handler, "Handler name should not be null.");
-String executorName =
-StringUtils.camelize(event.getName()) + EXECUTOR_NAME_SEPARATOR
-+ handlerName;
-this.addHandler(event, new SingleThreadExecutor<>(executorName), handler);
-  }
-
-  private <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void validateEvent(EVENT_TYPE event) {
-Preconditions
-.checkArgument(!event.getName().contains(EXECUTOR_NAME_SEPARATOR),
-"Event name should not contain " + EXECUTOR_NAME_SEPARATOR
-+ " string.");
 
+this.addHandler(event, new SingleThreadExecutor<>(
+event.getName()), handler);
   }
 
-  private <PAYLOAD> String generateHandlerName(EventHandler<PAYLOAD> handler) {
-if (!"".equals(handler.getClass().getSimpleName())) {
-  return handler.getClass().getSimpleName();
-} else {
-  return handler.getClass().getName();
-}
-  }
-
-  /**
-   * Add event handler with custom executor.
-   *
-   * @param event    Triggering event.
-   * @param executor The executor implementation to deliver events from
-   *                 separate threads. Please keep in mind that
-   *                 registering metrics is the responsibility of the
-   *                 caller.
-   * @param handler  Handler of event (will be called from a separate
-   *                 thread)
-   * @param <PAYLOAD>    The type of the event payload.
-   * @param <EVENT_TYPE> The type of the event identifier.
-   */
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
-      EVENT_TYPE event, EventExecutor<PAYLOAD> executor,
+      EVENT_TYPE event,
+      EventExecutor<PAYLOAD> executor,
       EventHandler<PAYLOAD> handler) {
-validateEvent(event);
+
 executors.putIfAbsent(event, new HashMap<>());
 

hadoop git commit: HDDS-210. Make "-file" argument optional for ozone getKey command. Contributed by Lokesh Jain.

2018-07-13 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 88625f5cd -> 103f2eeb5


HDDS-210. Make "-file" argument optional for ozone getKey command. Contributed 
by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/103f2eeb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/103f2eeb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/103f2eeb

Branch: refs/heads/trunk
Commit: 103f2eeb57dbadd9abbbc25a05bb7c79b48fdc17
Parents: 88625f5
Author: Xiaoyu Yao 
Authored: Fri Jul 13 11:44:24 2018 -0700
Committer: Xiaoyu Yao 
Committed: Fri Jul 13 11:45:02 2018 -0700

--
 .../org/apache/hadoop/ozone/ozShell/TestOzoneShell.java | 12 
 .../hadoop/ozone/web/ozShell/keys/GetKeyHandler.java|  9 ++---
 2 files changed, 18 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/103f2eeb/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index a4b30f0..000d530 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -705,6 +705,18 @@ public class TestOzoneShell {
   randFile.read(dataBytes);
 }
 assertEquals(dataStr, DFSUtil.bytes2String(dataBytes));
+
+tmpPath = baseDir.getAbsolutePath() + File.separatorChar + keyName;
+args = new String[] {"-getKey",
+url + "/" + volumeName + "/" + bucketName + "/" + keyName, "-file",
+baseDir.getAbsolutePath()};
+assertEquals(0, ToolRunner.run(shell, args));
+
+dataBytes = new byte[dataStr.length()];
+try (FileInputStream randFile = new FileInputStream(new File(tmpPath))) {
+  randFile.read(dataBytes);
+}
+assertEquals(dataStr, DFSUtil.bytes2String(dataBytes));
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/103f2eeb/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
index 34620b4..2d059e0 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
@@ -98,11 +98,14 @@ public class GetKeyHandler extends Handler {
 Path dataFilePath = Paths.get(fileName);
 File dataFile = new File(fileName);
 
+if (dataFile.exists() && dataFile.isDirectory()) {
+  dataFile = new File(fileName, keyName);
+}
 
 if (dataFile.exists()) {
-  throw new OzoneClientException(fileName +
- "exists. Download will overwrite an " +
- "existing file. Aborting.");
+  throw new OzoneClientException(
+  fileName + "exists. Download will overwrite an "
+  + "existing file. Aborting.");
 }
 
 OzoneVolume vol = client.getObjectStore().getVolume(volumeName);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
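
Condensed, the new destination resolution in GetKeyHandler reads roughly as
follows (a sketch of the hunk above, with fileName and keyName as in the
handler):

    File dataFile = new File(fileName);
    // "-file" may now name a directory; the key name then becomes the
    // file name inside it, which is what makes the argument optional.
    if (dataFile.exists() && dataFile.isDirectory()) {
      dataFile = new File(fileName, keyName);
    }
    // Refuse to overwrite an existing file.
    if (dataFile.exists()) {
      throw new OzoneClientException(fileName
          + " exists. Download will overwrite an existing file. Aborting.");
    }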



hadoop git commit: HADOOP-15531. Use commons-text instead of commons-lang in some classes to fix deprecation warnings. Contributed by Takanobu Asanuma.

2018-07-13 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 17118f446 -> 88625f5cd


HADOOP-15531. Use commons-text instead of commons-lang in some classes to fix 
deprecation warnings. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88625f5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88625f5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88625f5c

Branch: refs/heads/trunk
Commit: 88625f5cd90766136a9ebd76a8d84b45a37e6c99
Parents: 17118f4
Author: Akira Ajisaka 
Authored: Fri Jul 13 11:42:12 2018 -0400
Committer: Akira Ajisaka 
Committed: Fri Jul 13 11:42:12 2018 -0400

--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml   |  4 
 hadoop-common-project/hadoop-common/pom.xml   |  5 +
 .../org/apache/hadoop/conf/ReconfigurationServlet.java|  2 +-
 .../hdfs/qjournal/server/GetJournalEditServlet.java   |  2 +-
 .../hadoop/hdfs/server/diskbalancer/command/Command.java  |  6 +++---
 .../hdfs/server/diskbalancer/command/PlanCommand.java |  4 ++--
 .../hdfs/server/diskbalancer/command/ReportCommand.java   | 10 +-
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../java/org/apache/hadoop/hdfs/tools/CacheAdmin.java |  2 +-
 .../java/org/apache/hadoop/hdfs/TestDecommission.java |  4 ++--
 .../java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java   |  4 ++--
 .../apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java   |  2 +-
 .../apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java |  2 +-
 .../apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java |  2 +-
 .../apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java  |  2 +-
 hadoop-project/pom.xml|  5 +
 .../java/org/apache/hadoop/yarn/client/cli/TopCLI.java|  3 ++-
 .../src/main/java/org/apache/hadoop/yarn/state/Graph.java |  2 +-
 .../org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java  |  2 +-
 .../org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java |  2 +-
 .../java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java |  2 +-
 .../java/org/apache/hadoop/yarn/webapp/view/TextView.java |  2 +-
 .../apache/hadoop/yarn/server/webapp/AppAttemptBlock.java |  2 +-
 .../org/apache/hadoop/yarn/server/webapp/AppBlock.java|  2 +-
 .../org/apache/hadoop/yarn/server/webapp/AppsBlock.java   |  2 +-
 .../resourcemanager/webapp/FairSchedulerAppsBlock.java|  2 +-
 .../server/resourcemanager/webapp/RMAppAttemptBlock.java  |  2 +-
 .../yarn/server/resourcemanager/webapp/RMAppBlock.java|  2 +-
 .../yarn/server/resourcemanager/webapp/RMAppsBlock.java   |  2 +-
 .../hadoop/yarn/server/router/webapp/AppsBlock.java   |  4 ++--
 30 files changed, 52 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88625f5c/hadoop-client-modules/hadoop-client-minicluster/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 490281a..ea8d680 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -171,6 +171,10 @@
           <artifactId>commons-lang3</artifactId>
         </exclusion>
         <exclusion>
+          <groupId>org.apache.commons</groupId>
+          <artifactId>commons-text</artifactId>
+        </exclusion>
+        <exclusion>
           <groupId>commons-logging</groupId>
           <artifactId>commons-logging</artifactId>
         </exclusion>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88625f5c/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 67a5a54..42554da 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -172,6 +172,11 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-text</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-api</artifactId>
       <scope>compile</scope>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88625f5c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
index c5bdf4e..ef4eac6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.conf;
 
-import org.apache.commons.lang3.StringEscapeUtils;
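
The per-file change is mechanical; a minimal before/after sketch (escapeHtml4
is available in both libraries, the commons-lang3 copy is merely deprecated):

    // Before: import org.apache.commons.lang3.StringEscapeUtils;  (deprecated)
    // After:
    import org.apache.commons.text.StringEscapeUtils;

    public class EscapeDemo {
      public static void main(String[] args) {
        // Same static call, new home:
        System.out.println(StringEscapeUtils.escapeHtml4("<b>a & b</b>"));
        // prints: &lt;b&gt;a &amp; b&lt;/b&gt;
      }
    }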

hadoop git commit: YARN-8515. container-executor can crash with SIGPIPE after nodemanager restart. Contributed by Jim Brennan

2018-07-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 42c768654 -> c23744d61


YARN-8515. container-executor can crash with SIGPIPE after nodemanager restart. 
Contributed by Jim Brennan

(cherry picked from commit 17118f446c2387aa796849da8b69a845d9d307d3)

Conflicts:

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c23744d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c23744d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c23744d6

Branch: refs/heads/branch-2.8
Commit: c23744d61f7400e1648556844e6dae4ade761293
Parents: 42c7686
Author: Jason Lowe 
Authored: Fri Jul 13 10:05:25 2018 -0500
Committer: Jason Lowe 
Committed: Fri Jul 13 10:20:46 2018 -0500

--
 .../src/main/native/container-executor/impl/main.c | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23744d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index 62ae3d8..2bdcdef 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -28,6 +28,7 @@
 #include 
 #include 
 #include 
+#include <signal.h>
 #include 
 
 #define CONF_FILENAME "container-executor.cfg"
@@ -120,6 +121,11 @@ static void open_log_files() {
   if (ERRORFILE == NULL) {
 ERRORFILE = stderr;
   }
+
+  // There may be a process reading from stdout/stderr, and if it
+  // exits, we will crash on a SIGPIPE when we try to write to them.
+  // By ignoring SIGPIPE, we can handle the EPIPE instead of crashing.
+  signal(SIGPIPE, SIG_IGN);
 }
 
 /* Flushes and closes log files */


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
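
A note on the failure mode for readers skimming the archive: with SIGPIPE at
its default disposition, a write to a pipe whose reader has exited kills the
whole process; after signal(SIGPIPE, SIG_IGN) the write instead fails with
errno set to EPIPE, which the logging code can tolerate. The same one-line
fix is cherry-picked to the other active branches in the messages that
follow.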



hadoop git commit: YARN-8515. container-executor can crash with SIGPIPE after nodemanager restart. Contributed by Jim Brennan

2018-07-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 8d14f9b07 -> 402c799f5


YARN-8515. container-executor can crash with SIGPIPE after nodemanager restart. 
Contributed by Jim Brennan

(cherry picked from commit 17118f446c2387aa796849da8b69a845d9d307d3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/402c799f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/402c799f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/402c799f

Branch: refs/heads/branch-2.9
Commit: 402c799f5e904068448ce41788891e935b9bfdd3
Parents: 8d14f9b
Author: Jason Lowe 
Authored: Fri Jul 13 10:05:25 2018 -0500
Committer: Jason Lowe 
Committed: Fri Jul 13 10:17:07 2018 -0500

--
 .../src/main/native/container-executor/impl/main.c | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/402c799f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index 6e32825..b913f16 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -27,6 +27,7 @@
 #include 
 #include 
 #include 
+#include <signal.h>
 
 static void display_usage(FILE *stream) {
   fprintf(stream,
@@ -103,6 +104,11 @@ static void open_log_files() {
   if (ERRORFILE == NULL) {
 ERRORFILE = stderr;
   }
+
+  // There may be a process reading from stdout/stderr, and if it
+  // exits, we will crash on a SIGPIPE when we try to write to them.
+  // By ignoring SIGPIPE, we can handle the EPIPE instead of crashing.
+  signal(SIGPIPE, SIG_IGN);
 }
 
 /* Flushes and closes log files */


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8515. container-executor can crash with SIGPIPE after nodemanager restart. Contributed by Jim Brennan

2018-07-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ff46db2ea -> 0e6efe06e


YARN-8515. container-executor can crash with SIGPIPE after nodemanager restart. 
Contributed by Jim Brennan

(cherry picked from commit 17118f446c2387aa796849da8b69a845d9d307d3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e6efe06
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e6efe06
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e6efe06

Branch: refs/heads/branch-2
Commit: 0e6efe06ea2dd4029633c9fd98f5fc81049c7db8
Parents: ff46db2
Author: Jason Lowe 
Authored: Fri Jul 13 10:05:25 2018 -0500
Committer: Jason Lowe 
Committed: Fri Jul 13 10:11:57 2018 -0500

--
 .../src/main/native/container-executor/impl/main.c | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e6efe06/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index a1b5ebc..26bd54a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -28,6 +28,7 @@
 #include 
 #include 
 #include 
+#include <signal.h>
 
 static void display_usage(FILE *stream) {
   fprintf(stream,
@@ -104,6 +105,11 @@ static void open_log_files() {
   if (ERRORFILE == NULL) {
 ERRORFILE = stderr;
   }
+
+  // There may be a process reading from stdout/stderr, and if it
+  // exits, we will crash on a SIGPIPE when we try to write to them.
+  // By ignoring SIGPIPE, we can handle the EPIPE instead of crashing.
+  signal(SIGPIPE, SIG_IGN);
 }
 
 /* Flushes and closes log files */


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8515. container-executor can crash with SIGPIPE after nodemanager restart. Contributed by Jim Brennan

2018-07-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 883df537d -> 1ae35834a


YARN-8515. container-executor can crash with SIGPIPE after nodemanager restart. 
Contributed by Jim Brennan

(cherry picked from commit 17118f446c2387aa796849da8b69a845d9d307d3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ae35834
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ae35834
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ae35834

Branch: refs/heads/branch-3.0
Commit: 1ae35834a28258d4a3d37173c47ec4764e8a3467
Parents: 883df53
Author: Jason Lowe 
Authored: Fri Jul 13 10:05:25 2018 -0500
Committer: Jason Lowe 
Committed: Fri Jul 13 10:08:21 2018 -0500

--
 .../src/main/native/container-executor/impl/main.c | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ae35834/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index 5c327cf..dafb3d0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -30,6 +30,7 @@
 #include 
 #include 
 #include 
+#include <signal.h>
 
 static void display_usage(FILE *stream) {
   fprintf(stream,
@@ -106,6 +107,11 @@ static void open_log_files() {
   if (ERRORFILE == NULL) {
 ERRORFILE = stderr;
   }
+
+  // There may be a process reading from stdout/stderr, and if it
+  // exits, we will crash on a SIGPIPE when we try to write to them.
+  // By ignoring SIGPIPE, we can handle the EPIPE instead of crashing.
+  signal(SIGPIPE, SIG_IGN);
 }
 
 /* Flushes and closes log files */


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8515. container-executor can crash with SIGPIPE after nodemanager restart. Contributed by Jim Brennan

2018-07-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 d5d987550 -> 7cbb9597c


YARN-8515. container-executor can crash with SIGPIPE after nodemanager restart. 
Contributed by Jim Brennan

(cherry picked from commit 17118f446c2387aa796849da8b69a845d9d307d3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7cbb9597
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7cbb9597
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7cbb9597

Branch: refs/heads/branch-3.1
Commit: 7cbb9597c43d0e4270a64d28b5521941ce940a1a
Parents: d5d9875
Author: Jason Lowe 
Authored: Fri Jul 13 10:05:25 2018 -0500
Committer: Jason Lowe 
Committed: Fri Jul 13 10:06:38 2018 -0500

--
 .../src/main/native/container-executor/impl/main.c | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cbb9597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index c54fd3e..3d7b19a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -31,6 +31,7 @@
 #include 
 #include 
 #include 
+#include <signal.h>
 
 static void display_usage(FILE *stream) {
   fprintf(stream,
@@ -112,6 +113,11 @@ static void open_log_files() {
   if (ERRORFILE == NULL) {
 ERRORFILE = stderr;
   }
+
+  // There may be a process reading from stdout/stderr, and if it
+  // exits, we will crash on a SIGPIPE when we try to write to them.
+  // By ignoring SIGPIPE, we can handle the EPIPE instead of crashing.
+  signal(SIGPIPE, SIG_IGN);
 }
 
 /* Flushes and closes log files */


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8515. container-executor can crash with SIGPIPE after nodemanager restart. Contributed by Jim Brennan

2018-07-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk d18507209 -> 17118f446


YARN-8515. container-executor can crash with SIGPIPE after nodemanager restart. 
Contributed by Jim Brennan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17118f44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17118f44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17118f44

Branch: refs/heads/trunk
Commit: 17118f446c2387aa796849da8b69a845d9d307d3
Parents: d185072
Author: Jason Lowe 
Authored: Fri Jul 13 10:05:25 2018 -0500
Committer: Jason Lowe 
Committed: Fri Jul 13 10:05:25 2018 -0500

--
 .../src/main/native/container-executor/impl/main.c | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17118f44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index 2099ace..6ab522f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -31,6 +31,7 @@
 #include 
 #include 
 #include 
+#include <signal.h>
 
 static void display_usage(FILE *stream) {
   fprintf(stream,
@@ -112,6 +113,11 @@ static void open_log_files() {
   if (ERRORFILE == NULL) {
 ERRORFILE = stderr;
   }
+
+  // There may be a process reading from stdout/stderr, and if it
+  // exits, we will crash on a SIGPIPE when we try to write to them.
+  // By ignoring SIGPIPE, we can handle the EPIPE instead of crashing.
+  signal(SIGPIPE, SIG_IGN);
 }
 
 /* Flushes and closes log files */


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-232. Parallel unit test execution for HDDS/Ozone. Contributed by Arpit Agarwal.

2018-07-13 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1fe5b9384 -> d18507209


HDDS-232. Parallel unit test execution for HDDS/Ozone. Contributed by Arpit 
Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1850720
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1850720
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1850720

Branch: refs/heads/trunk
Commit: d18507209e268aa5be0d3e56cec23de24107e7d9
Parents: 1fe5b93
Author: Nanda kumar 
Authored: Fri Jul 13 19:50:52 2018 +0530
Committer: Nanda kumar 
Committed: Fri Jul 13 19:50:52 2018 +0530

--
 .../common/report/TestReportPublisher.java  |  2 +-
 hadoop-hdds/pom.xml | 49 
 hadoop-ozone/pom.xml| 49 
 3 files changed, 99 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1850720/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
--
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
index 026e7aa..d4db55b 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
@@ -111,7 +111,7 @@ public class TestReportPublisher {
 publisher.init(dummyContext, executorService);
 Thread.sleep(150);
 Assert.assertEquals(1, ((DummyReportPublisher) publisher).getReportCount);
-Thread.sleep(150);
+Thread.sleep(100);
 Assert.assertEquals(2, ((DummyReportPublisher) publisher).getReportCount);
 executorService.shutdown();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1850720/hadoop-hdds/pom.xml
--
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 573803b..09fac33 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -116,4 +116,53 @@
   
 
   
+
+  <profiles>
+    <profile>
+      <id>parallel-tests</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-maven-plugins</artifactId>
+            <executions>
+              <execution>
+                <id>parallel-tests-createdir</id>
+                <goals>
+                  <goal>parallel-tests-createdir</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-surefire-plugin</artifactId>
+            <configuration>
+              <forkCount>${testsThreadCount}</forkCount>
+              <reuseForks>false</reuseForks>
+              <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
+              <systemPropertyVariables>
+                <testsThreadCount>${testsThreadCount}</testsThreadCount>
+                <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
+                <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
+                <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
+
+                <!-- This is intentionally the same directory for all JUnit -->
+                <!-- forks, for use in the very rare situation that -->
+                <!-- concurrent tests need to coordinate, such as using lock -->
+                <!-- files. -->
+                <test.build.shared.data>${test.build.data}</test.build.shared.data>
+
+                <!-- Due to a Maven quirk, setting this to just -->
+                <!-- surefire.forkNumber won't do the parameter substitution. -->
+                <!-- Putting a prefix in front of it like "fork-" makes it -->
+                <!-- work. -->
+                <test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
+              </systemPropertyVariables>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
 </project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1850720/hadoop-ozone/pom.xml
--
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index b655088..e82a3d8 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -178,4 +178,53 @@
   
 
   
+
+  <profiles>
+    <profile>
+      <id>parallel-tests</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-maven-plugins</artifactId>
+            <executions>
+              <execution>
+                <id>parallel-tests-createdir</id>
+                <goals>
+                  <goal>parallel-tests-createdir</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-surefire-plugin</artifactId>
+            <configuration>
+              <forkCount>${testsThreadCount}</forkCount>
+              <reuseForks>false</reuseForks>
+              <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
+              <systemPropertyVariables>
+                <testsThreadCount>${testsThreadCount}</testsThreadCount>
+                <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
+                <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
+                <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
+
+                <!-- This is intentionally the same directory for all JUnit -->
+                <!-- forks, for use in the very rare situation that -->
+                <!-- concurrent tests need to coordinate, such as using lock -->
+                <!-- files. -->
+                <test.build.shared.data>${test.build.data}</test.build.shared.data>
+
+
+
+ 
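
Usage note, not from the commit itself: as with Hadoop's existing
parallel-tests profiles, the intended invocation is something like
mvn test -Pparallel-tests -DtestsThreadCount=4, after which each surefire
fork gets its own test.build.data, test.build.dir and hadoop.tmp.dir so
concurrently running mini-cluster tests do not collide on disk.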

hadoop git commit: HDDS-253. SCMBlockDeletingService should publish events for delete blocks to EventQueue. Contributed by Lokesh Jain.

2018-07-13 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3f3f72221 -> 1fe5b9384


HDDS-253. SCMBlockDeletingService should publish events for delete blocks to 
EventQueue. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fe5b938
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fe5b938
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fe5b938

Branch: refs/heads/trunk
Commit: 1fe5b938435ab49e40cffa66f4dd16ddf1592405
Parents: 3f3f722
Author: Nanda kumar 
Authored: Fri Jul 13 17:18:42 2018 +0530
Committer: Nanda kumar 
Committed: Fri Jul 13 17:18:42 2018 +0530

--
 .../apache/hadoop/hdds/scm/block/BlockManagerImpl.java | 10 ++
 .../hadoop/hdds/scm/block/SCMBlockDeletingService.java | 13 +
 .../hdds/scm/server/StorageContainerManager.java   |  2 +-
 .../apache/hadoop/hdds/scm/block/TestBlockManager.java |  2 +-
 .../apache/hadoop/ozone/scm/TestContainerSQLCli.java   |  3 +--
 5 files changed, 18 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fe5b938/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 953f71e..6825ca4 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.hdds.client.BlockID;
@@ -87,10 +88,12 @@ public class BlockManagerImpl implements BlockManager, 
BlockmanagerMXBean {
* @param conf - configuration.
* @param nodeManager - node manager.
* @param containerManager - container manager.
+   * @param eventPublisher - event publisher.
* @throws IOException
*/
   public BlockManagerImpl(final Configuration conf,
-  final NodeManager nodeManager, final Mapping containerManager)
+  final NodeManager nodeManager, final Mapping containerManager,
+  EventPublisher eventPublisher)
   throws IOException {
 this.nodeManager = nodeManager;
 this.containerManager = containerManager;
@@ -120,9 +123,8 @@ public class BlockManagerImpl implements BlockManager, 
BlockmanagerMXBean {
 OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
 TimeUnit.MILLISECONDS);
 blockDeletingService =
-new SCMBlockDeletingService(
-deletedBlockLog, containerManager, nodeManager, svcInterval,
-serviceTimeout, conf);
+new SCMBlockDeletingService(deletedBlockLog, containerManager,
+nodeManager, eventPublisher, svcInterval, serviceTimeout, conf);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fe5b938/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
index 2c555e0..6f65fdd 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
@@ -20,11 +20,14 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.container.Mapping;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;