hadoop git commit: YARN-8153. Guaranteed containers always stay in SCHEDULED on NM after restart. Contributed by Yang Wang.

2018-04-12 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 7be71ec55 -> 84531ad9b


YARN-8153. Guaranteed containers always stay in SCHEDULED on NM after restart. 
Contributed by Yang Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84531ad9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84531ad9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84531ad9

Branch: refs/heads/branch-3.1
Commit: 84531ad9b6c30a30bf1470c970ff493a0e28bd32
Parents: 7be71ec
Author: Weiwei Yang 
Authored: Fri Apr 13 13:17:37 2018 +0800
Committer: Weiwei Yang 
Committed: Fri Apr 13 13:29:43 2018 +0800

--
 .../scheduler/ContainerScheduler.java   |  7 ++-
 .../TestContainerManagerRecovery.java   | 51 
 2 files changed, 56 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84531ad9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
index d9b713f..57368ab 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
@@ -501,8 +501,11 @@ public class ContainerScheduler extends AbstractService 
implements
 
   private void startContainer(Container container) {
 LOG.info("Starting container [" + container.getContainerId()+ "]");
-runningContainers.put(container.getContainerId(), container);
-this.utilizationTracker.addContainerResources(container);
+// Skip to put into runningContainers and addUtilization when recover
+if (!runningContainers.containsKey(container.getContainerId())) {
+  runningContainers.put(container.getContainerId(), container);
+  this.utilizationTracker.addContainerResources(container);
+}
 if (container.getContainerTokenIdentifier().getExecutionType() ==
 ExecutionType.OPPORTUNISTIC) {
   this.metrics.startOpportunisticContainer(container.getResource());
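
Why the guard matters: after an NM restart, the recovery path has already registered a still-running container in runningContainers (and added its utilization) before startContainer runs again, so unconditionally re-adding it double-counted utilization and left guaranteed containers stuck in SCHEDULED. A minimal standalone sketch of the idempotent-start idea, with illustrative names rather than the NodeManager's real classes:

import java.util.HashMap;
import java.util.Map;

public class RecoveryIdempotenceSketch {
  private final Map<String, String> runningContainers = new HashMap<>();
  private long trackedMB = 0; // stands in for the utilization tracker

  // Recovery path: a recovered container is registered before start.
  void recoverContainer(String id) {
    runningContainers.put(id, id);
    trackedMB += 1024; // assume 1024 MB per container
  }

  // Start path, guarded the same way as the patch above.
  void startContainer(String id) {
    if (!runningContainers.containsKey(id)) {
      runningContainers.put(id, id);
      trackedMB += 1024;
    }
    // container launch and metrics updates would follow here
  }

  public static void main(String[] args) {
    RecoveryIdempotenceSketch s = new RecoveryIdempotenceSketch();
    s.recoverContainer("c1"); // NM restart recovers a running container
    s.startContainer("c1");   // without the guard this would double-count
    System.out.println("tracked=" + s.trackedMB); // prints 1024, not 2048
  }
}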

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84531ad9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
index cad835c..bf8b500 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
@@ -67,6 +67,7 @@ import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
 import org.apache.hadoop.yarn.api.records.LogAggregationContext;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -91,6 +92,7 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Ap
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import 
org.apache.hadoo

hadoop git commit: YARN-8153. Guaranteed containers always stay in SCHEDULED on NM after restart. Contributed by Yang Wang.

2018-04-12 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 375654c36 -> 226bedc02


YARN-8153. Guaranteed containers always stay in SCHEDULED on NM after restart. 
Contributed by Yang Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/226bedc0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/226bedc0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/226bedc0

Branch: refs/heads/trunk
Commit: 226bedc0239ba23f3ca0c40dac6aab3777d3ada6
Parents: 375654c
Author: Weiwei Yang 
Authored: Fri Apr 13 13:17:37 2018 +0800
Committer: Weiwei Yang 
Committed: Fri Apr 13 13:17:37 2018 +0800

--
 .../scheduler/ContainerScheduler.java   |  7 ++-
 .../TestContainerManagerRecovery.java   | 51 
 2 files changed, 56 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/226bedc0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
index d9b713f..57368ab 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
@@ -501,8 +501,11 @@ public class ContainerScheduler extends AbstractService 
implements
 
   private void startContainer(Container container) {
 LOG.info("Starting container [" + container.getContainerId()+ "]");
-runningContainers.put(container.getContainerId(), container);
-this.utilizationTracker.addContainerResources(container);
+// Skip to put into runningContainers and addUtilization when recover
+if (!runningContainers.containsKey(container.getContainerId())) {
+  runningContainers.put(container.getContainerId(), container);
+  this.utilizationTracker.addContainerResources(container);
+}
 if (container.getContainerTokenIdentifier().getExecutionType() ==
 ExecutionType.OPPORTUNISTIC) {
   this.metrics.startOpportunisticContainer(container.getResource());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/226bedc0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
index cad835c..bf8b500 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
@@ -67,6 +67,7 @@ import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
 import org.apache.hadoop.yarn.api.records.LogAggregationContext;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -91,6 +92,7 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Ap
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import 
org.apache.hadoop.yarn.ser

hadoop git commit: YARN-8154. Fix missing titles in PlacementConstraints document. Contributed by Weiwei Yang.

2018-04-12 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 b652e915f -> 7be71ec55


YARN-8154. Fix missing titles in PlacementConstraints document. Contributed by 
Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7be71ec5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7be71ec5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7be71ec5

Branch: refs/heads/branch-3.1
Commit: 7be71ec55bf523bd3b9cfed03190adf128b417a1
Parents: b652e91
Author: Weiwei Yang 
Authored: Fri Apr 13 13:06:47 2018 +0800
Committer: Weiwei Yang 
Committed: Fri Apr 13 13:10:05 2018 +0800

--
 .../src/site/markdown/PlacementConstraints.md.vm | 15 +--
 1 file changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7be71ec5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
index 6af62e7..cb34c3f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
@@ -12,6 +12,9 @@
   limitations under the License. See accompanying LICENSE file.
 -->
 
+#set ( $H3 = '###' )
+#set ( $H4 = '####' )
+
 Placement Constraints
 =
 
@@ -35,7 +38,7 @@ Quick Guide
 
 We first describe how to enable scheduling with placement constraints and then 
provide examples of how to experiment with this feature using the distributed 
shell, an application that allows to run a given shell command on a set of 
containers.
 
-### Enabling placement constraints
+$H3 Enabling placement constraints
 
 To enable placement constraints, the following property has to be set to 
`placement-processor` or `scheduler` in **conf/yarn-site.xml**:
 
@@ -51,7 +54,7 @@ We now give more details about each of the three placement 
constraint handlers:
 
 The `placement-processor` handler supports a wider range of constraints and 
can allow more containers to be placed, especially when applications have 
demanding constraints or the cluster is highly-utilized (due to considering 
multiple containers at a time). However, if respecting task priority within an 
application is important for the user and the capacity scheduler is used, then 
the `scheduler` handler should be used instead.
 
-### Experimenting with placement constraints using distributed shell
+$H3 Experimenting with placement constraints using distributed shell
 
 Users can experiment with placement constraints by using the distributed shell 
application through the following command:
 
@@ -89,18 +92,18 @@ The above encodes two constraints:
 Defining Placement Constraints
 --
 
-### Allocation tags
+$H3 Allocation tags
 
 Allocation tags are string tags that an application can associate with (groups 
of) its containers. Tags are used to identify components of applications. For 
example, an HBase Master allocation can be tagged with "hbase-m", and Region 
Servers with "hbase-rs". Other examples are "latency-critical" to refer to the 
more general demands of the allocation, or "app_0041" to denote the job ID. 
Allocation tags play a key role in constraints, as they allow to refer to 
multiple allocations that share a common tag.
 
 Note that instead of using the `ResourceRequest` object to define allocation 
tags, we use the new `SchedulingRequest` object. This has many similarities 
with the `ResourceRequest`, but better separates the sizing of the requested 
allocations (number and size of allocations, priority, execution type, etc.), 
and the constraints dictating how these allocations should be placed (resource 
name, relaxed locality). Applications can still use `ResourceRequest` objects, 
but in order to define allocation tags and constraints, they need to use the 
`SchedulingRequest` object. Within a single `AllocateRequest`, an application 
should use either the `ResourceRequest` or the `SchedulingRequest` objects, but 
not both of them.
 
-#### Differences between node labels, node attributes and allocation tags
+$H4 Differences between node labels, node attributes and allocation tags
 
 The difference between allocation tags and node labels or node attributes 
(YARN-3409), is that allocation tags are attached to allocations and not to 
nodes. When an allocation gets allocated to a node by the scheduler, the set of 
tags of that allocation are automatically added to the node for the duration of 
the allocation. Hence, a nod
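
The excerpt above shows the two pieces an application needs: a constraint handler enabled on the RM, and tags plus constraints submitted through a SchedulingRequest. Below is a sketch of an anti-affinity request; the builder and helper names are recalled from the Hadoop 3.1 APIs rather than taken from the truncated excerpt, so treat them as assumptions to verify:

import java.util.Collections;
import org.apache.hadoop.yarn.api.records.ExecutionType;
import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceSizing;
import org.apache.hadoop.yarn.api.records.SchedulingRequest;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

public class AntiAffinitySketch {
  // Prerequisite (assumed property name, set in yarn-site.xml):
  // yarn.resourcemanager.placement-constraints.handler = placement-processor
  public static SchedulingRequest hbaseMasterRequest() {
    // No two allocations tagged "hbase-m" may land on the same node.
    PlacementConstraint antiAffinity = PlacementConstraints
        .targetNotIn(PlacementConstraints.NODE, allocationTag("hbase-m"))
        .build();
    return SchedulingRequest.newBuilder()
        .allocationRequestId(1)
        .priority(Priority.newInstance(0))
        .executionType(ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
        .allocationTags(Collections.singleton("hbase-m"))
        .resourceSizing(ResourceSizing.newInstance(3, Resource.newInstance(2048, 2)))
        .placementConstraintExpression(antiAffinity)
        .build();
  }
}

As the excerpt notes, such a SchedulingRequest should not be mixed with ResourceRequest objects in the same AllocateRequest.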

hadoop git commit: YARN-8154. Fix missing titles in PlacementConstraints document. Contributed by Weiwei Yang.

2018-04-12 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/trunk ec1e8c1a8 -> 375654c36


YARN-8154. Fix missing titles in PlacementConstraints document. Contributed by 
Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/375654c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/375654c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/375654c3

Branch: refs/heads/trunk
Commit: 375654c36a8bfa4337c9011fcd86737462dfa61e
Parents: ec1e8c1
Author: Weiwei Yang 
Authored: Fri Apr 13 13:06:47 2018 +0800
Committer: Weiwei Yang 
Committed: Fri Apr 13 13:06:47 2018 +0800

--
 .../src/site/markdown/PlacementConstraints.md.vm | 15 +--
 1 file changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/375654c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
index 6af62e7..cb34c3f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
@@ -12,6 +12,9 @@
   limitations under the License. See accompanying LICENSE file.
 -->
 
+#set ( $H3 = '###' )
+#set ( $H4 = '####' )
+
 Placement Constraints
 =
 
@@ -35,7 +38,7 @@ Quick Guide
 
 We first describe how to enable scheduling with placement constraints and then 
provide examples of how to experiment with this feature using the distributed 
shell, an application that allows to run a given shell command on a set of 
containers.
 
-### Enabling placement constraints
+$H3 Enabling placement constraints
 
 To enable placement constraints, the following property has to be set to 
`placement-processor` or `scheduler` in **conf/yarn-site.xml**:
 
@@ -51,7 +54,7 @@ We now give more details about each of the three placement 
constraint handlers:
 
 The `placement-processor` handler supports a wider range of constraints and 
can allow more containers to be placed, especially when applications have 
demanding constraints or the cluster is highly-utilized (due to considering 
multiple containers at a time). However, if respecting task priority within an 
application is important for the user and the capacity scheduler is used, then 
the `scheduler` handler should be used instead.
 
-### Experimenting with placement constraints using distributed shell
+$H3 Experimenting with placement constraints using distributed shell
 
 Users can experiment with placement constraints by using the distributed shell 
application through the following command:
 
@@ -89,18 +92,18 @@ The above encodes two constraints:
 Defining Placement Constraints
 --
 
-### Allocation tags
+$H3 Allocation tags
 
 Allocation tags are string tags that an application can associate with (groups 
of) its containers. Tags are used to identify components of applications. For 
example, an HBase Master allocation can be tagged with "hbase-m", and Region 
Servers with "hbase-rs". Other examples are "latency-critical" to refer to the 
more general demands of the allocation, or "app_0041" to denote the job ID. 
Allocation tags play a key role in constraints, as they allow to refer to 
multiple allocations that share a common tag.
 
 Note that instead of using the `ResourceRequest` object to define allocation 
tags, we use the new `SchedulingRequest` object. This has many similarities 
with the `ResourceRequest`, but better separates the sizing of the requested 
allocations (number and size of allocations, priority, execution type, etc.), 
and the constraints dictating how these allocations should be placed (resource 
name, relaxed locality). Applications can still use `ResourceRequest` objects, 
but in order to define allocation tags and constraints, they need to use the 
`SchedulingRequest` object. Within a single `AllocateRequest`, an application 
should use either the `ResourceRequest` or the `SchedulingRequest` objects, but 
not both of them.
 
-#### Differences between node labels, node attributes and allocation tags
+$H4 Differences between node labels, node attributes and allocation tags
 
 The difference between allocation tags and node labels or node attributes 
(YARN-3409), is that allocation tags are attached to allocations and not to 
nodes. When an allocation gets allocated to a node by the scheduler, the set of 
tags of that allocation are automatically added to the node for the duration of 
the allocation. Hence, a node inherits

hadoop git commit: HADOOP-15379. Make IrqHandler.bind() public. Contributed by Ajay Kumar

2018-04-12 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 5531c31f1 -> b652e915f


HADOOP-15379. Make IrqHandler.bind() public. Contributed by Ajay Kumar

(cherry picked from commit ec1e8c1a8ca3d01b82ca82d73ba1132b6625d659)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b652e915
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b652e915
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b652e915

Branch: refs/heads/branch-3.1
Commit: b652e915fbd92b6969fffd8e177d9966abb4223b
Parents: 5531c31
Author: Bharat Viswanadham 
Authored: Thu Apr 12 21:51:20 2018 -0700
Committer: Bharat Viswanadham 
Committed: Thu Apr 12 21:55:40 2018 -0700

--
 .../main/java/org/apache/hadoop/service/launcher/IrqHandler.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b652e915/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java
index 30bb91c..17aa963 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java
@@ -86,7 +86,7 @@ public final class IrqHandler implements SignalHandler {
* Bind to the interrupt handler.
* @throws IllegalArgumentException if the exception could not be set
*/
-  void bind() {
+  public void bind() {
 Preconditions.checkState(signal == null, "Handler already bound");
 try {
   signal = new Signal(name);


hadoop git commit: HADOOP-15379. Make IrqHandler.bind() public. Contributed by Ajay Kumar

2018-04-12 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9b0b9f2af -> ec1e8c1a8


HADOOP-15379. Make IrqHandler.bind() public. Contributed by Ajay Kumar


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec1e8c1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec1e8c1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec1e8c1a

Branch: refs/heads/trunk
Commit: ec1e8c1a8ca3d01b82ca82d73ba1132b6625d659
Parents: 9b0b9f2
Author: Bharat Viswanadham 
Authored: Thu Apr 12 21:51:20 2018 -0700
Committer: Bharat Viswanadham 
Committed: Thu Apr 12 21:51:20 2018 -0700

--
 .../main/java/org/apache/hadoop/service/launcher/IrqHandler.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec1e8c1a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java
index 30bb91c..17aa963 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java
@@ -86,7 +86,7 @@ public final class IrqHandler implements SignalHandler {
* Bind to the interrupt handler.
* @throws IllegalArgumentException if the exception could not be set
*/
-  void bind() {
+  public void bind() {
 Preconditions.checkState(signal == null, "Handler already bound");
 try {
   signal = new Signal(name);


hadoop git commit: HDFS-13436. Fix javadoc of package-info.java

2018-04-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 ce7ebbe2c -> 64105868e


HDFS-13436. Fix javadoc of package-info.java

(cherry picked from commit 9b0b9f2af2f6827d7430f995d3203c4cb7ef7e48)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64105868
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64105868
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64105868

Branch: refs/heads/branch-3.0
Commit: 64105868e9b9bfa474eff21b87775f92fe5a0a91
Parents: ce7ebbe
Author: Akira Ajisaka 
Authored: Fri Apr 13 13:23:44 2018 +0900
Committer: Akira Ajisaka 
Committed: Fri Apr 13 13:25:19 2018 +0900

--
 .../hadoop/hdfs/protocol/datatransfer/package-info.java  | 8 
 .../hdfs/server/diskbalancer/connectors/package-info.java| 3 +--
 .../hdfs/server/diskbalancer/datamodel/package-info.java | 4 ++--
 .../apache/hadoop/hdfs/server/diskbalancer/package-info.java | 2 +-
 .../hdfs/server/diskbalancer/planner/package-info.java   | 2 +-
 5 files changed, 9 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64105868/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
index a13c7d8..13c0c59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
@@ -15,10 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-@InterfaceStability.Evolving
-package org.apache.hadoop.hdfs.protocol.datatransfer;
-import org.apache.hadoop.classification.InterfaceStability;
 
 /**
  * This package contains classes related to hdfs data transfer protocol.
- */
\ No newline at end of file
+ */
+@InterfaceStability.Evolving
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64105868/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
index b4b4437..f118c2e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
@@ -15,8 +15,6 @@
  * the License.
  */
 
-package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
-
 /**
  * Connectors package is a set of logical connectors that connect
  * to various data sources to read the hadoop cluster information.
@@ -35,3 +33,4 @@ package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
  * we can crate dataNodes on the fly and attach to this connector and
  * ask the diskBalancer Cluster to read data from this source.
  */
+package org.apache.hadoop.hdfs.server.diskbalancer.connectors;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64105868/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/package-info.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/package-info.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/package-info.java
index f72e283..212da55 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/package-info.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/package-info.java
@@ -16,7 +16,6 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hdfs.server.diskbalancer.datamodel;
 /**
  * Disk Balancer Data Model is the Data Model for the cluster that
  * Disk Balancer is working against. This information is read
@@ -28,4 +27,5 @@ package org.apache.hadoop.hdfs.server.diskbalancer.datamodel;
  * DiskBalancerDataNodes is a collecti
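
Every hunk in this commit applies the same reordering: in a package-info.java the package javadoc must sit directly above the package annotations and the package declaration (with the import the annotation needs after them), otherwise javadoc tooling does not associate the comment with the package; several files were also missing a trailing newline. The corrected shape as a generic sketch, with a hypothetical package name:

/**
 * Package javadoc goes first, immediately above the (optional)
 * package annotations and the package declaration.
 */
@InterfaceStability.Evolving
package org.apache.hadoop.example; // hypothetical package

import org.apache.hadoop.classification.InterfaceStability;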

hadoop git commit: HDFS-13436. Fix javadoc of package-info.java

2018-04-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 a375fe826 -> 5531c31f1


HDFS-13436. Fix javadoc of package-info.java

(cherry picked from commit 9b0b9f2af2f6827d7430f995d3203c4cb7ef7e48)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5531c31f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5531c31f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5531c31f

Branch: refs/heads/branch-3.1
Commit: 5531c31f161cb46cc6c48598e346768a8a7ccc4b
Parents: a375fe8
Author: Akira Ajisaka 
Authored: Fri Apr 13 13:23:44 2018 +0900
Committer: Akira Ajisaka 
Committed: Fri Apr 13 13:24:42 2018 +0900

--
 .../hadoop/hdfs/protocol/datatransfer/package-info.java | 8 
 .../hdfs/server/common/blockaliasmap/package-info.java  | 9 +
 .../hdfs/server/diskbalancer/connectors/package-info.java   | 3 +--
 .../hdfs/server/diskbalancer/datamodel/package-info.java| 4 ++--
 .../hadoop/hdfs/server/diskbalancer/package-info.java   | 2 +-
 .../hdfs/server/diskbalancer/planner/package-info.java  | 2 +-
 6 files changed, 14 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5531c31f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
index a13c7d8..13c0c59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
@@ -15,10 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-@InterfaceStability.Evolving
-package org.apache.hadoop.hdfs.protocol.datatransfer;
-import org.apache.hadoop.classification.InterfaceStability;
 
 /**
  * This package contains classes related to hdfs data transfer protocol.
- */
\ No newline at end of file
+ */
+@InterfaceStability.Evolving
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5531c31f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/package-info.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/package-info.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/package-info.java
index b906791..d088945 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/package-info.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/package-info.java
@@ -15,13 +15,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-package org.apache.hadoop.hdfs.server.common.blockaliasmap;
 
 /**
  * The AliasMap defines mapping of PROVIDED HDFS blocks to data in remote
  * storage systems.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+package org.apache.hadoop.hdfs.server.common.blockaliasmap;
+
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5531c31f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
index b4b4437..f118c2e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
@@ -15,8 +15,6 @@
  * the License.
  */
 
-package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
-
 /**
  * Connectors package is a set of logical connec

hadoop git commit: HDFS-13436. Fix javadoc of package-info.java

2018-04-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 53b3e5947 -> 9b0b9f2af


HDFS-13436. Fix javadoc of package-info.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b0b9f2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b0b9f2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b0b9f2a

Branch: refs/heads/trunk
Commit: 9b0b9f2af2f6827d7430f995d3203c4cb7ef7e48
Parents: 53b3e59
Author: Akira Ajisaka 
Authored: Fri Apr 13 13:23:44 2018 +0900
Committer: Akira Ajisaka 
Committed: Fri Apr 13 13:23:51 2018 +0900

--
 .../hadoop/hdfs/protocol/datatransfer/package-info.java | 8 
 .../hdfs/server/common/blockaliasmap/package-info.java  | 9 +
 .../hdfs/server/diskbalancer/connectors/package-info.java   | 3 +--
 .../hdfs/server/diskbalancer/datamodel/package-info.java| 4 ++--
 .../hadoop/hdfs/server/diskbalancer/package-info.java   | 2 +-
 .../hdfs/server/diskbalancer/planner/package-info.java  | 2 +-
 6 files changed, 14 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b0b9f2a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
index a13c7d8..13c0c59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
@@ -15,10 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-@InterfaceStability.Evolving
-package org.apache.hadoop.hdfs.protocol.datatransfer;
-import org.apache.hadoop.classification.InterfaceStability;
 
 /**
  * This package contains classes related to hdfs data transfer protocol.
- */
\ No newline at end of file
+ */
+@InterfaceStability.Evolving
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b0b9f2a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/package-info.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/package-info.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/package-info.java
index b906791..d088945 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/package-info.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/package-info.java
@@ -15,13 +15,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-package org.apache.hadoop.hdfs.server.common.blockaliasmap;
 
 /**
  * The AliasMap defines mapping of PROVIDED HDFS blocks to data in remote
  * storage systems.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+package org.apache.hadoop.hdfs.server.common.blockaliasmap;
+
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b0b9f2a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
index b4b4437..f118c2e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
@@ -15,8 +15,6 @@
  * the License.
  */
 
-package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
-
 /**
  * Connectors package is a set of logical connectors that connect
  * to various data sources to read the hadoop cluster informa

hadoop git commit: HADOOP-14999. AliyunOSS: provide one asynchronous multi-part based uploading mechanism. Contributed by Genmao Yu.

2018-04-12 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 d416a0c9b -> ce7ebbe2c


HADOOP-14999. AliyunOSS: provide one asynchronous multi-part based uploading 
mechanism. Contributed by Genmao Yu.

(cherry picked from commit 6542d17ea460ec222137c4b275b13daf15d3fca3)
(cherry picked from commit e96c7bf82de1e9fd97df5fb6b763e211ebad5913)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce7ebbe2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce7ebbe2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce7ebbe2

Branch: refs/heads/branch-3.0
Commit: ce7ebbe2ccfb0d054e752f811ba6f1ba4ac360a4
Parents: d416a0c
Author: Sammi Chen 
Authored: Fri Mar 30 20:23:05 2018 +0800
Committer: Sammi Chen 
Committed: Fri Apr 13 10:17:46 2018 +0800

--
 .../aliyun/oss/AliyunCredentialsProvider.java   |   3 +-
 .../aliyun/oss/AliyunOSSBlockOutputStream.java  | 206 +++
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  34 ++-
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 173 
 .../fs/aliyun/oss/AliyunOSSOutputStream.java| 111 --
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java| 115 ---
 .../apache/hadoop/fs/aliyun/oss/Constants.java  |  22 +-
 .../oss/TestAliyunOSSBlockOutputStream.java | 115 +++
 .../fs/aliyun/oss/TestAliyunOSSInputStream.java |  10 +-
 .../aliyun/oss/TestAliyunOSSOutputStream.java   |  91 
 .../contract/TestAliyunOSSContractDistCp.java   |   2 +-
 11 files changed, 544 insertions(+), 338 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce7ebbe2/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
index b46c67a..58c14a9 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
@@ -35,8 +35,7 @@ import static org.apache.hadoop.fs.aliyun.oss.Constants.*;
 public class AliyunCredentialsProvider implements CredentialsProvider {
   private Credentials credentials = null;
 
-  public AliyunCredentialsProvider(Configuration conf)
-  throws IOException {
+  public AliyunCredentialsProvider(Configuration conf) throws IOException {
 String accessKeyId;
 String accessKeySecret;
 String securityToken;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce7ebbe2/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
new file mode 100644
index 000..12d551b
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
@@ -0,0 +1,206 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.aliyun.oss;
+
+import com.aliyun.oss.model.PartETag;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.List;
+import j
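
The email is truncated here, but the shape of AliyunOSSBlockOutputStream is the standard asynchronous multi-part pattern: buffer writes into a local block file, hand each full block to an executor as an upload-part task, and complete the multipart upload from the collected part ETags on close. A minimal sketch of that pattern; PartUploader is a hypothetical stand-in for the OSS store, not the real client API:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class MultiPartPatternSketch {
  interface PartUploader { String uploadPart(int partNumber, byte[] data); }

  private final PartUploader uploader;
  private final ExecutorService pool = Executors.newFixedThreadPool(4);
  private final List<Future<String>> pendingParts = new ArrayList<>();
  private int nextPart = 1;

  MultiPartPatternSketch(PartUploader uploader) { this.uploader = uploader; }

  // Called whenever the buffered block reaches the configured block size.
  void submitBlock(byte[] block) {
    final int partNumber = nextPart++;
    pendingParts.add(pool.submit(() -> uploader.uploadPart(partNumber, block)));
  }

  // close(): wait for all in-flight parts, then complete the upload.
  List<String> close() throws Exception {
    List<String> etags = new ArrayList<>();
    for (Future<String> f : pendingParts) {
      etags.add(f.get()); // propagates any part-upload failure
    }
    pool.shutdown();
    // a real implementation would now call completeMultipartUpload(etags)
    return etags;
  }
}

Because parts upload while later blocks are still being written, writes and uploads overlap, which is the point of the asynchronous mechanism in the commit title.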

hadoop git commit: YARN-7984. Improved YARN service stop/destroy and clean up. Contributed by Billie Rinaldi

2018-04-12 Thread billie
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 035e0f97e -> a375fe826


YARN-7984. Improved YARN service stop/destroy and clean up.
   Contributed by Billie Rinaldi

(cherry picked from commit d553799030a5a64df328319aceb35734d0b2de20)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a375fe82
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a375fe82
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a375fe82

Branch: refs/heads/branch-3.1
Commit: a375fe82631920c98fb1d45b13e3e82108365baf
Parents: 035e0f9
Author: Eric Yang 
Authored: Tue Apr 10 17:40:49 2018 -0400
Committer: Billie Rinaldi 
Committed: Thu Apr 12 18:19:58 2018 -0700

--
 .../hadoop/yarn/service/webapp/ApiServer.java   | 47 +++---
 .../hadoop/yarn/service/ServiceClientTest.java  |  6 ++
 .../hadoop/yarn/service/TestApiServer.java  | 26 ++
 .../yarn/service/client/ServiceClient.java  | 93 +++-
 .../hadoop/yarn/service/ServiceTestUtils.java   | 15 +++-
 .../yarn/service/TestYarnNativeServices.java| 42 -
 6 files changed, 191 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a375fe82/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index 59ee05d..14c77f6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -231,30 +231,40 @@ public class ApiServer {
   e.getCause().getMessage());
 } catch (YarnException | FileNotFoundException e) {
   return formatResponse(Status.NOT_FOUND, e.getMessage());
-} catch (IOException | InterruptedException e) {
+} catch (Exception e) {
   LOG.error("Fail to stop service: {}", e);
   return formatResponse(Status.INTERNAL_SERVER_ERROR, e.getMessage());
 }
   }
 
   private Response stopService(String appName, boolean destroy,
-  final UserGroupInformation ugi) throws IOException,
-  InterruptedException, YarnException, FileNotFoundException {
+  final UserGroupInformation ugi) throws Exception {
int result = ugi.doAs(new PrivilegedExceptionAction<Integer>() {
   @Override
-  public Integer run() throws IOException, YarnException,
-  FileNotFoundException {
+  public Integer run() throws Exception {
 int result = 0;
 ServiceClient sc = getServiceClient();
 sc.init(YARN_CONFIG);
 sc.start();
-result = sc.actionStop(appName, destroy);
-if (result == EXIT_SUCCESS) {
-  LOG.info("Successfully stopped service {}", appName);
+Exception stopException = null;
+try {
+  result = sc.actionStop(appName, destroy);
+  if (result == EXIT_SUCCESS) {
+LOG.info("Successfully stopped service {}", appName);
+  }
+} catch (Exception e) {
+  LOG.info("Got exception stopping service", e);
+  stopException = e;
 }
 if (destroy) {
   result = sc.actionDestroy(appName);
-  LOG.info("Successfully deleted service {}", appName);
+  if (result == EXIT_SUCCESS) {
+LOG.info("Successfully deleted service {}", appName);
+  }
+} else {
+  if (stopException != null) {
+throw stopException;
+  }
 }
 sc.close();
 return result;
@@ -262,8 +272,21 @@ public class ApiServer {
 });
 ServiceStatus serviceStatus = new ServiceStatus();
 if (destroy) {
-  serviceStatus.setDiagnostics("Successfully destroyed service " +
-  appName);
+  if (result == EXIT_SUCCESS) {
+serviceStatus.setDiagnostics("Successfully destroyed service " +
+appName);
+  } else {
+if (result == EXIT_NOT_FOUND) {
+  serviceStatus
+  .setDiagnostics("Service " + appName + " doesn't exist");
+  return formatResponse(Status.BAD_REQUEST, serviceStatus);
+} else {
+  serviceStatus
+  .setDiagnostics("Service " + appName + " error cleaning up " +
+  "registry");
+  return formatResponse(Statu
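
The change above captures a stop failure, still runs the destroy path, and only rethrows when no destroy was requested, so a half-stopped service can still be cleaned up. The same idiom as a standalone sketch (hypothetical Service interface, not the real ServiceClient):

public class StopDestroySketch {
  interface Service {
    void stop() throws Exception;
    void destroy() throws Exception;
  }

  static void stopService(Service svc, boolean destroy) throws Exception {
    Exception stopException = null;
    try {
      svc.stop();
    } catch (Exception e) {
      stopException = e; // remember it, but keep going if we must destroy
    }
    if (destroy) {
      svc.destroy(); // cleanup proceeds even when stop failed
    } else if (stopException != null) {
      throw stopException; // no destroy requested: surface the stop failure
    }
  }
}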

hadoop git commit: MAPREDUCE-7077. Pipe mapreduce job fails with Permission denied for jobTokenPassword. (Akira Ajisaka via wangda)

2018-04-12 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 150085cc6 -> 035e0f97e


MAPREDUCE-7077. Pipe mapreduce job fails with Permission denied for 
jobTokenPassword. (Akira Ajisaka via wangda)

Change-Id: Ie8f01425d58409fa3661f768205b7616128c8aa4


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/035e0f97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/035e0f97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/035e0f97

Branch: refs/heads/branch-3.1
Commit: 035e0f97ea44b0495707949a781d8792dcf6ea6b
Parents: 150085c
Author: Wangda Tan 
Authored: Thu Apr 12 14:33:33 2018 -0700
Committer: Wangda Tan 
Committed: Thu Apr 12 14:33:33 2018 -0700

--
 .../apache/hadoop/mapred/pipes/Application.java  |  5 ++---
 .../hadoop/mapred/pipes/TestPipeApplication.java | 19 ---
 2 files changed, 10 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/035e0f97/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
index 5c8aab9..83d2509 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.TaskAttemptID;
 import org.apache.hadoop.mapred.TaskLog;
-import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
@@ -104,8 +103,8 @@ class Application

http://git-wip-us.apache.org/repos/asf/hadoop/blob/035e0f97/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
index 13597e0..88d8f95 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.IFile.Writer;
-import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.mapred.Counters;
@@ -84,10 +83,10 @@ public class TestPipeApplication {
   public void testRunner() throws Exception {
 
 // clean old password files
-JobConf conf = new JobConf();
-File[] psw = cleanTokenPasswordFile(conf);
+File[] psw = cleanTokenPasswordFile();
 try {
   RecordReader rReader = new 
ReaderPipesMapRunner();
+  JobConf conf = new JobConf();
   conf.set(Submitter.IS_JAVA_RR, "true");
   // for stdour and stderror
 
@@ -163,7 +162,7 @@ public class TestPipeApplication {
 
 TestTaskReporter reporter = new TestTaskReporter();
 
-File[] psw = cleanTokenPasswordFile(conf);
+File[] psw = cleanTokenPasswordFile();
 try {
 
   conf.set(MRJobConfig.TASK_ATTEMPT_ID, taskName);
@@ -248,7 +247,7 @@ public class TestPipeApplication {
 
 JobConf conf = new JobConf();
 
-File[] psw = cleanTokenPasswordFile(conf);
+File[] psw = cleanTokenPasswordFile();
 
 System.setProperty("test.build.data",
 "target/tmp/build/TEST_SUBMITTER_MAPPER/data");
@@ -389,8 +388,8 @@ public class TestPipeApplication {
   @Test
   public void testPipesReduser() throws Exception {
 
+File[] psw = cleanTokenPasswordFile();
 JobConf conf = new JobConf();
-

[2/2] hadoop git commit: YARN-8018. Added support for initiating yarn service upgrade. Contributed by Chandni Singh

2018-04-12 Thread eyang
YARN-8018.  Added support for initiating yarn service upgrade.
Contributed by Chandni Singh

(cherry picked from commit 27d60a16342fd39973d43b61008f54a8815a6237)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/150085cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/150085cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/150085cc

Branch: refs/heads/branch-3.1
Commit: 150085cc643d638655066417f0c1f0340e02e787
Parents: 23179c0
Author: Eric Yang 
Authored: Mon Mar 26 18:46:31 2018 -0400
Committer: Eric Yang 
Committed: Thu Apr 12 17:08:27 2018 -0400

--
 .../yarn/service/client/ApiServiceClient.java   |  18 ++
 .../hadoop/yarn/service/webapp/ApiServer.java   |  24 ++
 .../hadoop/yarn/service/ClientAMProtocol.java   |  12 +
 .../hadoop/yarn/service/ClientAMService.java|  24 ++
 .../hadoop/yarn/service/ServiceEvent.java   |  49 
 .../hadoop/yarn/service/ServiceEventType.java   |  28 +++
 .../hadoop/yarn/service/ServiceManager.java | 225 +++
 .../hadoop/yarn/service/ServiceScheduler.java   |  19 ++
 .../yarn/service/UpgradeComponentsFinder.java   | 162 +
 .../service/api/records/ComponentState.java |   2 +-
 .../yarn/service/api/records/ServiceState.java  |   2 +-
 .../yarn/service/client/ServiceClient.java  | 153 ++---
 .../yarn/service/component/Component.java   |  14 ++
 .../yarn/service/component/ComponentEvent.java  |  12 +
 .../service/component/ComponentEventType.java   |   4 +-
 .../yarn/service/component/ComponentState.java  |   3 +-
 .../yarn/service/conf/YarnServiceConstants.java |   2 +
 .../pb/client/ClientAMProtocolPBClientImpl.java |  26 +++
 .../service/ClientAMProtocolPBServiceImpl.java  |  24 ++
 .../yarn/service/utils/CoreFileSystem.java  |  31 ++-
 .../yarn/service/utils/ServiceApiUtil.java  |  28 ++-
 .../src/main/proto/ClientAMProtocol.proto   |  19 +-
 .../hadoop/yarn/service/ServiceTestUtils.java   |  90 +++-
 .../TestDefaultUpgradeComponentsFinder.java |  63 ++
 .../hadoop/yarn/service/TestServiceManager.java | 156 +
 .../yarn/service/TestYarnNativeServices.java|  76 ---
 .../yarn/service/client/TestServiceClient.java  | 125 +++
 .../yarn/service/utils/TestCoreFileSystem.java  |  46 
 .../hadoop/yarn/client/api/AppAdminClient.java  |  16 ++
 29 files changed, 1374 insertions(+), 79 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/150085cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
index 49702e3..e4a245d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
@@ -469,4 +469,22 @@ public class ApiServiceClient extends AppAdminClient {
 return output;
   }
 
+  @Override
+  public int actionUpgrade(String appName,
+  String fileName) throws IOException, YarnException {
+int result;
+try {
+  Service service =
+  loadAppJsonFromLocalFS(fileName, appName, null, null);
+  service.setState(ServiceState.UPGRADING);
+  String buffer = jsonSerDeser.toJson(service);
+  ClientResponse response = getApiClient()
+  .post(ClientResponse.class, buffer);
+  result = processResponse(response);
+} catch (Exception e) {
+  LOG.error("Failed to upgrade application: ", e);
+  result = EXIT_EXCEPTION_THROWN;
+}
+return result;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/150085cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index e7979b8..59ee05d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/h

[1/2] hadoop git commit: YARN-8018. Added support for initiating yarn service upgrade. Contributed by Chandni Singh

2018-04-12 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 23179c06a -> 150085cc6


http://git-wip-us.apache.org/repos/asf/hadoop/blob/150085cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
index 51a190e..2b40e49 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.yarn.service.api.records.Component;
 import org.apache.hadoop.yarn.service.api.records.Container;
 import org.apache.hadoop.yarn.service.api.records.ContainerState;
 import org.apache.hadoop.yarn.service.client.ServiceClient;
-import org.apache.hadoop.yarn.service.exceptions.SliderException;
+import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
 import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 import org.hamcrest.CoreMatchers;
 import org.junit.After;
@@ -86,7 +86,7 @@ public class TestYarnNativeServices extends ServiceTestUtils {
   @Test (timeout = 20)
   public void testCreateFlexStopDestroyService() throws Exception {
 setupInternal(NUM_NMS);
-ServiceClient client = createClient();
+ServiceClient client = createClient(getConf());
 Service exampleApp = createExampleApplication();
 client.actionCreate(exampleApp);
 SliderFileSystem fileSystem = new SliderFileSystem(getConf());
@@ -143,7 +143,7 @@ public class TestYarnNativeServices extends 
ServiceTestUtils {
   @Test (timeout = 20)
   public void testComponentStartOrder() throws Exception {
 setupInternal(NUM_NMS);
-ServiceClient client = createClient();
+ServiceClient client = createClient(getConf());
 Service exampleApp = new Service();
 exampleApp.setName("teststartorder");
 exampleApp.setVersion("v1");
@@ -177,7 +177,7 @@ public class TestYarnNativeServices extends 
ServiceTestUtils {
 String userB = "userb";
 
 setupInternal(NUM_NMS);
-ServiceClient client = createClient();
+ServiceClient client = createClient(getConf());
 String origBasePath = getConf().get(YARN_SERVICE_BASE_PATH);
 
 Service userAApp = new Service();
@@ -229,7 +229,7 @@ public class TestYarnNativeServices extends 
ServiceTestUtils {
 System.setProperty("user.name", user);
 
 setupInternal(NUM_NMS);
-ServiceClient client = createClient();
+ServiceClient client = createClient(getConf());
 
 Service appA = new Service();
 appA.setName(sameAppName);
@@ -298,7 +298,7 @@ public class TestYarnNativeServices extends 
ServiceTestUtils {
 setConf(conf);
 setupInternal(NUM_NMS);
 
-ServiceClient client = createClient();
+ServiceClient client = createClient(getConf());
 Service exampleApp = createExampleApplication();
 client.actionCreate(exampleApp);
Multimap<String, String> containersBeforeFailure =
@@ -341,6 +341,28 @@ public class TestYarnNativeServices extends 
ServiceTestUtils {
 client.actionDestroy(exampleApp.getName());
   }
 
+  @Test(timeout = 20)
+  public void testUpgradeService() throws Exception {
+setupInternal(NUM_NMS);
+ServiceClient client = createClient(getConf());
+
+Service service = createExampleApplication();
+client.actionCreate(service);
+waitForServiceToBeStarted(client, service);
+
+//upgrade the service
+service.setVersion("v2");
+client.actionUpgrade(service);
+
+//wait for service to be in upgrade state
+waitForServiceToBeInState(client, service, ServiceState.UPGRADING);
+SliderFileSystem fs = new SliderFileSystem(getConf());
+Service fromFs = ServiceApiUtil.loadServiceUpgrade(fs,
+service.getName(), service.getVersion());
+Assert.assertEquals(service.getName(), fromFs.getName());
+Assert.assertEquals(service.getVersion(), fromFs.getVersion());
+  }
+
   // Check containers launched are in dependency order
   // Get all containers into a list and sort based on container launch time 
e.g.
   // compa-c1, compa-c2, compb-c1, compb-c2;
@@ -478,16 +500,7 @@ public class TestYarnNativeServices extends 
ServiceTestUtils {
*/
   private void waitForServiceToBeStable(ServiceClient client,
   Service exampleApp) throws TimeoutException, InterruptedException {
-GenericT
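
To tie the upgrade test above together: actionUpgrade persists the new spec, and ServiceApiUtil.loadServiceUpgrade reads it back for the given service name and version. A compact sketch of that read-back step, assuming only the calls visible in the diff (the class and helper names are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
import org.apache.hadoop.yarn.service.utils.SliderFileSystem;

public class UpgradeSpecReadBack {
  static Service readUpgradedSpec(String name, String version)
      throws Exception {
    // loadServiceUpgrade resolves and deserializes the spec persisted for
    // this (service, version) pair, exactly as testUpgradeService asserts.
    SliderFileSystem fs = new SliderFileSystem(new Configuration());
    return ServiceApiUtil.loadServiceUpgrade(fs, name, version);
  }
}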

hadoop git commit: YARN-8027. Setting hostname of docker container breaks for --net=host in docker 1.13. Contributed by Jim Brennan

2018-04-12 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 3717df89e -> d416a0c9b


YARN-8027. Setting hostname of docker container breaks for --net=host in docker 
1.13. Contributed by Jim Brennan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d416a0c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d416a0c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d416a0c9

Branch: refs/heads/branch-3.0
Commit: d416a0c9b79aff44b71e8b0479eb054f95c4d2e6
Parents: 3717df8
Author: Jason Lowe 
Authored: Thu Apr 12 16:04:20 2018 -0500
Committer: Jason Lowe 
Committed: Thu Apr 12 16:04:20 2018 -0500

--
 .../runtime/DockerLinuxContainerRuntime.java|  8 --
 .../runtime/TestDockerContainerRuntime.java | 27 
 2 files changed, 16 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d416a0c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 5e3e15c..06acbf7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -106,7 +106,8 @@ import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  *   
  * {@code YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME} sets the
  * hostname to be used by the Docker container. If not specified, a
- * hostname will be derived from the container ID.
+ * hostname will be derived from the container ID.  This variable is
+ * ignored if the network is 'host'.
  *   
  *   
  * {@code YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER}
@@ -584,7 +585,10 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 .detachOnRun()
 .setContainerWorkDir(containerWorkDir.toString())
 .setNetworkType(network);
-setHostname(runCommand, containerIdStr, hostname);
+// Only add hostname if network is not host.
+if (!network.equalsIgnoreCase("host")) {
+  setHostname(runCommand, containerIdStr, hostname);
+}
 runCommand.setCapabilities(capabilities);
 
 if(cgroupsRootDirectory != null) {
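
The documentation change above spells out the client-side contract. A minimal sketch of requesting a custom hostname through the container environment; the env var keys appear in the runtime code above, while the "bridge" network, the hostname value, and the class name are illustrative:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;

public class DockerHostnameExample {
  static ContainerLaunchContext dockerLaunchContext() {
    Map<String, String> env = new HashMap<>();
    env.put("YARN_CONTAINER_RUNTIME_TYPE", "docker");
    // Hostname is honored only for non-host networks; docker 1.13 rejects
    // --hostname combined with --net=host, which is what YARN-8027 fixes.
    env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK", "bridge");
    env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME", "worker-0");
    return ContainerLaunchContext.newInstance(
        null, env, null, null, null, null);
  }
}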

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d416a0c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index ff0ec1c..a56dbc8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -160,6 +160,8 @@ public class TestDockerContainerRuntime {
   LOG.info("Could not run id -G command: " + e);
 }
 uidGidPair = uid + ":" + gid;
+// Prevent gid threshold failures for these tests
+conf.setInt(YarnConfiguration.NM_DOCKER_USER_REMAPPING_GID_THRESHOLD, 0);
 
 user = "user";
 appId = "app_id";
@@ -329,7 +331,7 @@ public class TestDockerContainerRuntime {
List<String> dockerCommands = Files.readAllLines(Paths.get
 (dockerCommandFile), Charset.forName("UTF-8"));
 
-int expected = 14;

hadoop git commit: Preparing for 3.0.3 development

2018-04-12 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 43d2ee9de -> 3717df89e


Preparing for 3.0.3 development


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3717df89
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3717df89
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3717df89

Branch: refs/heads/branch-3.0
Commit: 3717df89ee149a5c8f391ba252b4409ae265e257
Parents: 43d2ee9
Author: Lei Xu 
Authored: Thu Apr 12 13:57:46 2018 -0700
Committer: Lei Xu 
Committed: Thu Apr 12 13:57:46 2018 -0700

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-resourceestimator/pom.xml| 2 +-
 hadoop-tools/hadoop-rumen/pom.xml| 4

hadoop git commit: YARN-7936. Add default service AM Xmx. Contributed by Jian He

2018-04-12 Thread billie
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 9693d8b99 -> 23179c06a


YARN-7936. Add default service AM Xmx. Contributed by Jian He

(cherry picked from commit 53b3e594732e7a567dda4e08b9a8af5f87a4472a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23179c06
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23179c06
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23179c06

Branch: refs/heads/branch-3.1
Commit: 23179c06a351c9eac764ba826b50b0b95a3ce873
Parents: 9693d8b
Author: Billie Rinaldi 
Authored: Thu Apr 12 11:35:14 2018 -0700
Committer: Billie Rinaldi 
Committed: Thu Apr 12 12:43:30 2018 -0700

--
 .../apache/hadoop/yarn/service/client/ServiceClient.java| 9 +++--
 .../apache/hadoop/yarn/service/conf/YarnServiceConf.java| 2 ++
 2 files changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23179c06/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 5731e11..d714800 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -667,8 +667,13 @@ public class ServiceClient extends AppAdminClient 
implements SliderExitCodes,
   Path appRootDir, boolean hasSliderAMLog4j) throws BadConfigException {
 JavaCommandLineBuilder CLI = new JavaCommandLineBuilder();
 CLI.forceIPv4().headless();
-CLI.setJVMOpts(YarnServiceConf.get(YarnServiceConf.JVM_OPTS, null,
-app.getConfiguration(), conf));
+String jvmOpts = YarnServiceConf
+.get(YarnServiceConf.JVM_OPTS, "", app.getConfiguration(), conf);
+if (!jvmOpts.contains("-Xmx")) {
+  jvmOpts += DEFAULT_AM_JVM_XMX;
+}
+
+CLI.setJVMOpts(jvmOpts);
 if (hasSliderAMLog4j) {
   CLI.sysprop(SYSPROP_LOG4J_CONFIGURATION, YARN_SERVICE_LOG4J_FILENAME);
   CLI.sysprop(SYSPROP_LOG_DIR, ApplicationConstants.LOG_DIR_EXPANSION_VAR);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23179c06/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
index 21470d4..1506532 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
@@ -36,6 +36,8 @@ public class YarnServiceConf {
   public static final String AM_RESOURCE_MEM = 
"yarn.service.am-resource.memory";
   public static final long DEFAULT_KEY_AM_RESOURCE_MEM = 1024;
 
+  public static final String DEFAULT_AM_JVM_XMX = " -Xmx768m ";
+
   public static final String YARN_QUEUE = "yarn.service.queue";
 
   public static final String API_SERVER_ADDRESS = 
"yarn.service.api-server.address";





hadoop git commit: YARN-7936. Add default service AM Xmx. Contributed by Jian He

2018-04-12 Thread billie
Repository: hadoop
Updated Branches:
  refs/heads/trunk 18844599a -> 53b3e5947


YARN-7936. Add default service AM Xmx. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53b3e594
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53b3e594
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53b3e594

Branch: refs/heads/trunk
Commit: 53b3e594732e7a567dda4e08b9a8af5f87a4472a
Parents: 1884459
Author: Billie Rinaldi 
Authored: Thu Apr 12 11:35:14 2018 -0700
Committer: Billie Rinaldi 
Committed: Thu Apr 12 12:38:00 2018 -0700

--
 .../apache/hadoop/yarn/service/client/ServiceClient.java| 9 +++--
 .../apache/hadoop/yarn/service/conf/YarnServiceConf.java| 2 ++
 2 files changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b3e594/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 4c7b72d..21fb075 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -758,8 +758,13 @@ public class ServiceClient extends AppAdminClient 
implements SliderExitCodes,
   Path appRootDir, boolean hasSliderAMLog4j) throws BadConfigException {
 JavaCommandLineBuilder CLI = new JavaCommandLineBuilder();
 CLI.forceIPv4().headless();
-CLI.setJVMOpts(YarnServiceConf.get(YarnServiceConf.JVM_OPTS, null,
-app.getConfiguration(), conf));
+String jvmOpts = YarnServiceConf
+.get(YarnServiceConf.JVM_OPTS, "", app.getConfiguration(), conf);
+if (!jvmOpts.contains("-Xmx")) {
+  jvmOpts += DEFAULT_AM_JVM_XMX;
+}
+
+CLI.setJVMOpts(jvmOpts);
 if (hasSliderAMLog4j) {
   CLI.sysprop(SYSPROP_LOG4J_CONFIGURATION, YARN_SERVICE_LOG4J_FILENAME);
   CLI.sysprop(SYSPROP_LOG_DIR, ApplicationConstants.LOG_DIR_EXPANSION_VAR);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b3e594/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
index 14c4d15..3dd5a7e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
@@ -38,6 +38,8 @@ public class YarnServiceConf {
   public static final String AM_RESOURCE_MEM = 
"yarn.service.am-resource.memory";
   public static final long DEFAULT_KEY_AM_RESOURCE_MEM = 1024;
 
+  public static final String DEFAULT_AM_JVM_XMX = " -Xmx768m ";
+
   public static final String YARN_QUEUE = "yarn.service.queue";
 
   public static final String API_SERVER_ADDRESS = 
"yarn.service.api-server.address";





hadoop git commit: HDFS-13413. Ozone: ClusterId and DatanodeUuid should be marked mandatory fields in SCMRegisteredCmdResponseProto. Contributed by Shashikant Banerjee.

2018-04-12 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 dd43835b3 -> c36a850af


HDFS-13413. Ozone: ClusterId and DatanodeUuid should be marked mandatory fields 
in SCMRegisteredCmdResponseProto. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c36a850a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c36a850a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c36a850a

Branch: refs/heads/HDFS-7240
Commit: c36a850af5f554f210010e7fb8039953de283746
Parents: dd43835
Author: Nanda kumar 
Authored: Fri Apr 13 00:47:38 2018 +0530
Committer: Nanda kumar 
Committed: Fri Apr 13 00:47:38 2018 +0530

--
 .../common/states/endpoint/RegisterEndpointTask.java | 8 
 .../hadoop/ozone/protocol/commands/RegisteredCommand.java| 6 ++
 .../src/main/proto/StorageContainerDatanodeProtocol.proto| 4 ++--
 3 files changed, 12 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c36a850a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
index de186a7..ca3bef0 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
@@ -17,6 +17,8 @@
 package org.apache.hadoop.ozone.container.common.states.endpoint;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -28,6 +30,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.Future;
 
@@ -98,6 +101,11 @@ public final class RegisterEndpointTask implements
   SCMRegisteredCmdResponseProto response = rpcEndPoint.getEndPoint()
   .register(datanodeDetails.getProtoBufMessage(),
   conf.getStrings(ScmConfigKeys.OZONE_SCM_NAMES));
+  Preconditions.checkState(UUID.fromString(response.getDatanodeUUID())
+  .equals(datanodeDetails.getUuid()),
+  "Unexpected datanode ID in the response.");
+  Preconditions.checkState(!StringUtils.isBlank(response.getClusterID()),
+  "Invalid cluster ID in the response.");
   if (response.hasHostname() && response.hasIpAddress()) {
 datanodeDetails.setHostName(response.getHostname());
 datanodeDetails.setIpAddress(response.getIpAddress());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c36a850a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
index a7e81d8..593b84b 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
@@ -162,13 +162,11 @@ public class RegisteredCommand extends
   Preconditions.checkNotNull(response);
   if (response.hasHostname() && response.hasIpAddress()) {
 return new RegisteredCommand(response.getErrorCode(),
-response.hasDatanodeUUID() ? response.getDatanodeUUID() : "",
-response.hasClusterID() ? response.getClusterID() : "",
+response.getDatanodeUUID(), response.getClusterID(),
 response.getHostname(), response.getIpAddress());
   } else {
 return new RegisteredCommand(response.getErrorCode(),
-response.hasDatanodeUUID() ? response.getDatanodeUUID() : "",
-response.hasClusterID() ? response.getClusterID() : "");
+response.getDatanodeUUID(), response.getClusterID());
   }
 }
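
With both proto fields now mandatory, the datanode can fail fast on a malformed registration response. An illustrative restatement of the checks added in RegisterEndpointTask (class, method, and parameter names here are hypothetical):

import com.google.common.base.Preconditions;
import java.util.UUID;

public class RegistrationResponseCheck {
  static void validate(String respDatanodeUuid, String respClusterId,
      UUID localUuid) {
    // SCM must echo back this datanode's own UUID...
    Preconditions.checkState(
        UUID.fromString(respDatanodeUuid).equals(localUuid),
        "Unexpected datanode ID in the response.");
    // ...and a non-blank cluster ID, since the field can no longer be
    // legitimately absent.
    Preconditions.checkState(
        respClusterId != null && !respClusterId.trim().isEmpty(),
        "Invalid cluster ID in the response.");
  }
}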
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c36a850a/hadoop-hdds/container-service/src/main/proto/Stora

hadoop git commit: YARN-8147. TestClientRMService#testGetApplications sporadically fails. Contributed by Jason Lowe

2018-04-12 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 c01b425ba -> 1548205a8


YARN-8147. TestClientRMService#testGetApplications sporadically fails. 
Contributed by Jason Lowe

(cherry picked from commit 18844599aef42f79d2af4500aa2eee472dda95cb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1548205a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1548205a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1548205a

Branch: refs/heads/branch-2.8
Commit: 1548205a8450680eae6645aff88b4a8cd83228dd
Parents: c01b425
Author: Eric E Payne 
Authored: Thu Apr 12 17:53:57 2018 +
Committer: Eric E Payne 
Committed: Thu Apr 12 18:37:54 2018 +

--
 .../server/resourcemanager/TestClientRMService.java   | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1548205a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 37b0de4..6d32d5c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -723,7 +723,7 @@ public class TestClientRMService {
   }
 
   @Test
-  public void testGetApplications() throws IOException, YarnException {
+  public void testGetApplications() throws Exception {
 /**
  * 1. Submit 3 applications alternately in two queues
  * 2. Test each of the filters
@@ -766,8 +766,12 @@ public class TestClientRMService {
   SubmitApplicationRequest submitRequest = mockSubmitAppRequest(
   appId, appNames[i], queues[i % queues.length],
  new HashSet<String>(tags.subList(0, i + 1)));
+  // make sure each app is submitted at a different time
+  Thread.sleep(1);
   rmService.submitApplication(submitRequest);
-  submitTimeMillis[i] = System.currentTimeMillis();
+  submitTimeMillis[i] = rmService.getApplicationReport(
+  GetApplicationReportRequest.newInstance(appId))
+  .getApplicationReport().getStartTime();
 }
 
 // Test different cases of ClientRMService#getApplications()
@@ -782,19 +786,19 @@ public class TestClientRMService {
 
 // Check start range
 request = GetApplicationsRequest.newInstance();
-request.setStartRange(submitTimeMillis[0], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[0] + 1, System.currentTimeMillis());
 
 // 2 applications are submitted after first timeMills
 assertEquals("Incorrect number of matching start range", 
 2, rmService.getApplications(request).getApplicationList().size());
 
 // 1 application is submitted after the second timeMills
-request.setStartRange(submitTimeMillis[1], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[1] + 1, System.currentTimeMillis());
 assertEquals("Incorrect number of matching start range", 
 1, rmService.getApplications(request).getApplicationList().size());
 
 // no application is submitted after the third timeMills
-request.setStartRange(submitTimeMillis[2], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[2] + 1, System.currentTimeMillis());
 assertEquals("Incorrect number of matching start range", 
 0, rmService.getApplications(request).getApplicationList().size());
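
The essence of the fix, restated outside the test (class and helper names are hypothetical): derive the range boundary from the RM's recorded start time instead of sampling the wall clock a second time after submitApplication(), since the two samples can legally be equal and make the boundary ambiguous:

import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;

public class StartRangeFilter {
  // Builds a filter matching apps that started strictly after appId.
  static GetApplicationsRequest startedAfter(ClientRMService rmService,
      ApplicationId appId) throws Exception {
    long startTime = rmService.getApplicationReport(
        GetApplicationReportRequest.newInstance(appId))
        .getApplicationReport().getStartTime();
    GetApplicationsRequest request = GetApplicationsRequest.newInstance();
    request.setStartRange(startTime + 1, System.currentTimeMillis());
    return request;
  }
}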
 





hadoop git commit: YARN-8147. TestClientRMService#testGetApplications sporadically fails. Contributed by Jason Lowe

2018-04-12 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 0dd6559a5 -> 48023bda1


YARN-8147. TestClientRMService#testGetApplications sporadically fails. 
Contributed by Jason Lowe

(cherry picked from commit 18844599aef42f79d2af4500aa2eee472dda95cb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48023bda
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48023bda
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48023bda

Branch: refs/heads/branch-2.9
Commit: 48023bda10f8fd2e2c48c7c7d0d3a08b664e1942
Parents: 0dd6559
Author: Eric E Payne 
Authored: Thu Apr 12 17:53:57 2018 +
Committer: Eric E Payne 
Committed: Thu Apr 12 18:27:01 2018 +

--
 .../server/resourcemanager/TestClientRMService.java   | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/48023bda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 6946f3c..091bcd9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -757,7 +757,7 @@ public class TestClientRMService {
   }
 
   @Test
-  public void testGetApplications() throws IOException, YarnException {
+  public void testGetApplications() throws Exception {
 /**
  * 1. Submit 3 applications alternately in two queues
  * 2. Test each of the filters
@@ -806,8 +806,12 @@ public class TestClientRMService {
   SubmitApplicationRequest submitRequest = mockSubmitAppRequest(
   appId, appNames[i], queues[i % queues.length],
  new HashSet<String>(tags.subList(0, i + 1)));
+  // make sure each app is submitted at a different time
+  Thread.sleep(1);
   rmService.submitApplication(submitRequest);
-  submitTimeMillis[i] = System.currentTimeMillis();
+  submitTimeMillis[i] = rmService.getApplicationReport(
+  GetApplicationReportRequest.newInstance(appId))
+  .getApplicationReport().getStartTime();
 }
 
 // Test different cases of ClientRMService#getApplications()
@@ -822,19 +826,19 @@ public class TestClientRMService {
 
 // Check start range
 request = GetApplicationsRequest.newInstance();
-request.setStartRange(submitTimeMillis[0], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[0] + 1, System.currentTimeMillis());
 
 // 2 applications are submitted after first timeMills
 assertEquals("Incorrect number of matching start range", 
 2, rmService.getApplications(request).getApplicationList().size());
 
 // 1 application is submitted after the second timeMills
-request.setStartRange(submitTimeMillis[1], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[1] + 1, System.currentTimeMillis());
 assertEquals("Incorrect number of matching start range", 
 1, rmService.getApplications(request).getApplicationList().size());
 
 // no application is submitted after the third timeMills
-request.setStartRange(submitTimeMillis[2], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[2] + 1, System.currentTimeMillis());
 assertEquals("Incorrect number of matching start range", 
 0, rmService.getApplications(request).getApplicationList().size());
 





hadoop git commit: YARN-8147. TestClientRMService#testGetApplications sporadically fails. Contributed by Jason Lowe

2018-04-12 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b0dfb18da -> 7393020cb


YARN-8147. TestClientRMService#testGetApplications sporadically fails. 
Contributed by Jason Lowe

(cherry picked from commit 18844599aef42f79d2af4500aa2eee472dda95cb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7393020c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7393020c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7393020c

Branch: refs/heads/branch-2
Commit: 7393020cb35031a406583015591ceedeb6c97bd4
Parents: b0dfb18
Author: Eric E Payne 
Authored: Thu Apr 12 17:53:57 2018 +
Committer: Eric E Payne 
Committed: Thu Apr 12 18:19:12 2018 +

--
 .../server/resourcemanager/TestClientRMService.java   | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7393020c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 6946f3c..091bcd9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -757,7 +757,7 @@ public class TestClientRMService {
   }
 
   @Test
-  public void testGetApplications() throws IOException, YarnException {
+  public void testGetApplications() throws Exception {
 /**
  * 1. Submit 3 applications alternately in two queues
  * 2. Test each of the filters
@@ -806,8 +806,12 @@ public class TestClientRMService {
   SubmitApplicationRequest submitRequest = mockSubmitAppRequest(
   appId, appNames[i], queues[i % queues.length],
  new HashSet<String>(tags.subList(0, i + 1)));
+  // make sure each app is submitted at a different time
+  Thread.sleep(1);
   rmService.submitApplication(submitRequest);
-  submitTimeMillis[i] = System.currentTimeMillis();
+  submitTimeMillis[i] = rmService.getApplicationReport(
+  GetApplicationReportRequest.newInstance(appId))
+  .getApplicationReport().getStartTime();
 }
 
 // Test different cases of ClientRMService#getApplications()
@@ -822,19 +826,19 @@ public class TestClientRMService {
 
 // Check start range
 request = GetApplicationsRequest.newInstance();
-request.setStartRange(submitTimeMillis[0], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[0] + 1, System.currentTimeMillis());
 
 // 2 applications are submitted after first timeMills
 assertEquals("Incorrect number of matching start range", 
 2, rmService.getApplications(request).getApplicationList().size());
 
 // 1 application is submitted after the second timeMills
-request.setStartRange(submitTimeMillis[1], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[1] + 1, System.currentTimeMillis());
 assertEquals("Incorrect number of matching start range", 
 1, rmService.getApplications(request).getApplicationList().size());
 
 // no application is submitted after the third timeMills
-request.setStartRange(submitTimeMillis[2], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[2] + 1, System.currentTimeMillis());
 assertEquals("Incorrect number of matching start range", 
 0, rmService.getApplications(request).getApplicationList().size());
 





hadoop git commit: YARN-8147. TestClientRMService#testGetApplications sporadically fails. Contributed by Jason Lowe

2018-04-12 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 081ea1ec3 -> 43d2ee9de


YARN-8147. TestClientRMService#testGetApplications sporadically fails. 
Contributed by Jason Lowe

(cherry picked from commit 18844599aef42f79d2af4500aa2eee472dda95cb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43d2ee9d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43d2ee9d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43d2ee9d

Branch: refs/heads/branch-3.0
Commit: 43d2ee9de6e8b8edbfbb080ac7de78e5841cebfc
Parents: 081ea1e
Author: Eric E Payne 
Authored: Thu Apr 12 17:53:57 2018 +
Committer: Eric E Payne 
Committed: Thu Apr 12 18:10:42 2018 +

--
 .../server/resourcemanager/TestClientRMService.java   | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43d2ee9d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 1348b27..68aec27 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -1064,7 +1064,7 @@ public class TestClientRMService {
   }
 
   @Test
-  public void testGetApplications() throws IOException, YarnException {
+  public void testGetApplications() throws Exception {
 /**
  * 1. Submit 3 applications alternately in two queues
  * 2. Test each of the filters
@@ -1113,8 +1113,12 @@ public class TestClientRMService {
   SubmitApplicationRequest submitRequest = mockSubmitAppRequest(
   appId, appNames[i], queues[i % queues.length],
  new HashSet<String>(tags.subList(0, i + 1)));
+  // make sure each app is submitted at a different time
+  Thread.sleep(1);
   rmService.submitApplication(submitRequest);
-  submitTimeMillis[i] = System.currentTimeMillis();
+  submitTimeMillis[i] = rmService.getApplicationReport(
+  GetApplicationReportRequest.newInstance(appId))
+  .getApplicationReport().getStartTime();
 }
 
 // Test different cases of ClientRMService#getApplications()
@@ -1129,19 +1133,19 @@ public class TestClientRMService {
 
 // Check start range
 request = GetApplicationsRequest.newInstance();
-request.setStartRange(submitTimeMillis[0], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[0] + 1, System.currentTimeMillis());
 
 // 2 applications are submitted after first timeMills
 assertEquals("Incorrect number of matching start range", 
 2, rmService.getApplications(request).getApplicationList().size());
 
 // 1 application is submitted after the second timeMills
-request.setStartRange(submitTimeMillis[1], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[1] + 1, System.currentTimeMillis());
 assertEquals("Incorrect number of matching start range", 
 1, rmService.getApplications(request).getApplicationList().size());
 
 // no application is submitted after the third timeMills
-request.setStartRange(submitTimeMillis[2], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[2] + 1, System.currentTimeMillis());
 assertEquals("Incorrect number of matching start range", 
 0, rmService.getApplications(request).getApplicationList().size());
 





hadoop git commit: YARN-8147. TestClientRMService#testGetApplications sporadically fails. Contributed by Jason Lowe

2018-04-12 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 f955d52f3 -> 9693d8b99


YARN-8147. TestClientRMService#testGetApplications sporadically fails. 
Contributed by Jason Lowe

(cherry picked from commit 18844599aef42f79d2af4500aa2eee472dda95cb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9693d8b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9693d8b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9693d8b9

Branch: refs/heads/branch-3.1
Commit: 9693d8b99d7a882e7647f1174fd2728d6a201b9b
Parents: f955d52
Author: Eric E Payne 
Authored: Thu Apr 12 17:53:57 2018 +
Committer: Eric E Payne 
Committed: Thu Apr 12 17:58:41 2018 +

--
 .../server/resourcemanager/TestClientRMService.java   | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9693d8b9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 1c50dd3..d66a866 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -1064,7 +1064,7 @@ public class TestClientRMService {
   }
 
   @Test
-  public void testGetApplications() throws IOException, YarnException {
+  public void testGetApplications() throws Exception {
 /**
  * 1. Submit 3 applications alternately in two queues
  * 2. Test each of the filters
@@ -1113,8 +1113,12 @@ public class TestClientRMService {
   SubmitApplicationRequest submitRequest = mockSubmitAppRequest(
   appId, appNames[i], queues[i % queues.length],
  new HashSet<String>(tags.subList(0, i + 1)));
+  // make sure each app is submitted at a different time
+  Thread.sleep(1);
   rmService.submitApplication(submitRequest);
-  submitTimeMillis[i] = System.currentTimeMillis();
+  submitTimeMillis[i] = rmService.getApplicationReport(
+  GetApplicationReportRequest.newInstance(appId))
+  .getApplicationReport().getStartTime();
 }
 
 // Test different cases of ClientRMService#getApplications()
@@ -1129,19 +1133,19 @@ public class TestClientRMService {
 
 // Check start range
 request = GetApplicationsRequest.newInstance();
-request.setStartRange(submitTimeMillis[0], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[0] + 1, System.currentTimeMillis());
 
 // 2 applications are submitted after first timeMills
 assertEquals("Incorrect number of matching start range", 
 2, rmService.getApplications(request).getApplicationList().size());
 
 // 1 application is submitted after the second timeMills
-request.setStartRange(submitTimeMillis[1], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[1] + 1, System.currentTimeMillis());
 assertEquals("Incorrect number of matching start range", 
 1, rmService.getApplications(request).getApplicationList().size());
 
 // no application is submitted after the third timeMills
-request.setStartRange(submitTimeMillis[2], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[2] + 1, System.currentTimeMillis());
 assertEquals("Incorrect number of matching start range", 
 0, rmService.getApplications(request).getApplicationList().size());
 





hadoop git commit: YARN-8147. TestClientRMService#testGetApplications sporadically fails. Contributed by Jason Lowe

2018-04-12 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/trunk 044341b4e -> 18844599a


YARN-8147. TestClientRMService#testGetApplications sporadically fails. 
Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18844599
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18844599
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18844599

Branch: refs/heads/trunk
Commit: 18844599aef42f79d2af4500aa2eee472dda95cb
Parents: 044341b
Author: Eric E Payne 
Authored: Thu Apr 12 17:53:57 2018 +
Committer: Eric E Payne 
Committed: Thu Apr 12 17:53:57 2018 +

--
 .../server/resourcemanager/TestClientRMService.java   | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18844599/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 1c50dd3..d66a866 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -1064,7 +1064,7 @@ public class TestClientRMService {
   }
 
   @Test
-  public void testGetApplications() throws IOException, YarnException {
+  public void testGetApplications() throws Exception {
 /**
  * 1. Submit 3 applications alternately in two queues
  * 2. Test each of the filters
@@ -1113,8 +1113,12 @@ public class TestClientRMService {
   SubmitApplicationRequest submitRequest = mockSubmitAppRequest(
   appId, appNames[i], queues[i % queues.length],
  new HashSet<String>(tags.subList(0, i + 1)));
+  // make sure each app is submitted at a different time
+  Thread.sleep(1);
   rmService.submitApplication(submitRequest);
-  submitTimeMillis[i] = System.currentTimeMillis();
+  submitTimeMillis[i] = rmService.getApplicationReport(
+  GetApplicationReportRequest.newInstance(appId))
+  .getApplicationReport().getStartTime();
 }
 
 // Test different cases of ClientRMService#getApplications()
@@ -1129,19 +1133,19 @@ public class TestClientRMService {
 
 // Check start range
 request = GetApplicationsRequest.newInstance();
-request.setStartRange(submitTimeMillis[0], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[0] + 1, System.currentTimeMillis());
 
 // 2 applications are submitted after first timeMills
 assertEquals("Incorrect number of matching start range", 
 2, rmService.getApplications(request).getApplicationList().size());
 
 // 1 application is submitted after the second timeMills
-request.setStartRange(submitTimeMillis[1], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[1] + 1, System.currentTimeMillis());
 assertEquals("Incorrect number of matching start range", 
 1, rmService.getApplications(request).getApplicationList().size());
 
 // no application is submitted after the third timeMills
-request.setStartRange(submitTimeMillis[2], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[2] + 1, System.currentTimeMillis());
 assertEquals("Incorrect number of matching start range", 
 0, rmService.getApplications(request).getApplicationList().size());
 





hadoop git commit: YARN-8120. JVM can crash with SIGSEGV when exiting due to custom leveldb logger. Contributed by Jason Lowe.

2018-04-12 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 8048c8fd6 -> c01b425ba


YARN-8120. JVM can crash with SIGSEGV when exiting due to custom leveldb 
logger. Contributed by Jason Lowe.

(cherry picked from commit 6bb128dfb893cf0e4aa2d3ecc65440668a1fc8d7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c01b425b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c01b425b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c01b425b

Branch: refs/heads/branch-2.8
Commit: c01b425ba312d7dc44e59b656391cb2b40b5b979
Parents: 8048c8f
Author: Eric E Payne 
Authored: Thu Apr 12 16:04:23 2018 +
Committer: Eric E Payne 
Committed: Thu Apr 12 17:02:34 2018 +

--
 .../v2/hs/HistoryServerLeveldbStateStoreService.java| 10 --
 .../java/org/apache/hadoop/mapred/ShuffleHandler.java   | 11 ---
 .../recovery/NMLeveldbStateStoreService.java| 11 ---
 .../resourcemanager/recovery/LeveldbRMStateStore.java   | 12 
 4 files changed, 44 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c01b425b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
index 16366b1..b951525 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
@@ -75,7 +75,6 @@ public class HistoryServerLeveldbStateStoreService extends
 Path storeRoot = createStorageDir(getConfig());
 Options options = new Options();
 options.createIfMissing(false);
-options.logger(new LeveldbLogger());
 LOG.info("Using state database at " + storeRoot + " for recovery");
 File dbfile = new File(storeRoot.toString());
 try {
@@ -367,13 +366,4 @@ public class HistoryServerLeveldbStateStoreService extends
 + getCurrentVersion() + ", but loading version " + loadedVersion);
 }
   }
-
-  private static class LeveldbLogger implements Logger {
-private static final Log LOG = LogFactory.getLog(LeveldbLogger.class);
-
-@Override
-public void log(String message) {
-  LOG.info(message);
-}
-  }
 }
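
The crash mechanics in brief: the custom Logger could be invoked via JNI from leveldb's native threads while the JVM was already exiting, and that late callback could SIGSEGV the process. A minimal sketch of the post-fix pattern, opening the state store with no Java logger installed (class and method names are illustrative):

import java.io.File;
import java.io.IOException;
import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;

public class StateStoreOpen {
  static DB openStateStore(File dbFile) throws IOException {
    Options options = new Options();
    options.createIfMissing(false);
    // Deliberately no options.logger(...): leveldb keeps its default
    // native logging and never calls back into Java at shutdown.
    return JniDBFactory.factory.open(dbFile, options);
  }
}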

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c01b425b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index c81f25b..235313c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -95,7 +95,6 @@ import org.fusesource.leveldbjni.JniDBFactory;
 import org.fusesource.leveldbjni.internal.NativeDB;
 import org.iq80.leveldb.DB;
 import org.iq80.leveldb.DBException;
-import org.iq80.leveldb.Logger;
 import org.iq80.leveldb.Options;
 import org.jboss.netty.bootstrap.ServerBootstrap;
 import org.jboss.netty.buffer.ChannelBuffers;
@@ -613,7 +612,6 @@ public class ShuffleHandler extends AuxiliaryService {
   private void startStore(Path recoveryRoot) throws IOException {
 Options options = new Options();
 options.createIfMissing(false);
-options.logger(new LevelDBLogger());
 Path dbPath = new Path(recoveryRoot, STATE_DB_NAME);
 LOG.info("Using state database at " + dbPath + " for recovery");
 File dbfile = new File(dbPath.toString());
@@ -759,15 +757,6 @@ public class ShuffleHandler extends AuxiliaryService {
 }
   }
 
-  private static class LevelDBLogger implements Logger {
-private static final Log LOG = LogFactory.getLog(LevelDBLogger.class);
-
-@Override
-public void log(String messa

hadoop git commit: YARN-8120. JVM can crash with SIGSEGV when exiting due to custom leveldb logger. Contributed by Jason Lowe.

2018-04-12 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 79962d946 -> 0dd6559a5


YARN-8120. JVM can crash with SIGSEGV when exiting due to custom leveldb 
logger. Contributed by Jason Lowe.

(cherry picked from commit 6bb128dfb893cf0e4aa2d3ecc65440668a1fc8d7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0dd6559a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0dd6559a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0dd6559a

Branch: refs/heads/branch-2.9
Commit: 0dd6559a58c7df5af75deda2587e1d41c2356d5b
Parents: 79962d9
Author: Eric E Payne 
Authored: Thu Apr 12 16:04:23 2018 +
Committer: Eric E Payne 
Committed: Thu Apr 12 16:57:33 2018 +

--
 .../v2/hs/HistoryServerLeveldbStateStoreService.java| 10 --
 .../java/org/apache/hadoop/mapred/ShuffleHandler.java   | 11 ---
 .../recovery/NMLeveldbStateStoreService.java| 12 
 .../resourcemanager/recovery/LeveldbRMStateStore.java   | 12 
 4 files changed, 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0dd6559a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
index 16366b1..b951525 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
@@ -75,7 +75,6 @@ public class HistoryServerLeveldbStateStoreService extends
 Path storeRoot = createStorageDir(getConfig());
 Options options = new Options();
 options.createIfMissing(false);
-options.logger(new LeveldbLogger());
 LOG.info("Using state database at " + storeRoot + " for recovery");
 File dbfile = new File(storeRoot.toString());
 try {
@@ -367,13 +366,4 @@ public class HistoryServerLeveldbStateStoreService extends
 + getCurrentVersion() + ", but loading version " + loadedVersion);
 }
   }
-
-  private static class LeveldbLogger implements Logger {
-private static final Log LOG = LogFactory.getLog(LeveldbLogger.class);
-
-@Override
-public void log(String message) {
-  LOG.info(message);
-}
-  }
 }
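
For context on the fix itself: leveldb's native threads can call back into a
registered Java logger while the JVM is tearing down, which is what produced
the SIGSEGV, so the patch simply stops installing one. A minimal sketch of the
resulting open path, assuming only the leveldbjni API already imported in the
diffs above (class and method names in the sketch are illustrative):

    import java.io.File;
    import java.io.IOException;
    import org.fusesource.leveldbjni.JniDBFactory;
    import org.iq80.leveldb.DB;
    import org.iq80.leveldb.Options;

    public class StateStoreOpenSketch {
      // Open an existing recovery database with no custom logger installed,
      // so no Java object can be invoked from native code at JVM exit.
      public static DB openStateStore(File dbFile) throws IOException {
        Options options = new Options();
        options.createIfMissing(false);
        return JniDBFactory.factory.open(dbFile, options);
      }
    }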

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0dd6559a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 47eb8b1..0bab750 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -95,7 +95,6 @@ import org.fusesource.leveldbjni.JniDBFactory;
 import org.fusesource.leveldbjni.internal.NativeDB;
 import org.iq80.leveldb.DB;
 import org.iq80.leveldb.DBException;
-import org.iq80.leveldb.Logger;
 import org.iq80.leveldb.Options;
 import org.jboss.netty.bootstrap.ServerBootstrap;
 import org.jboss.netty.buffer.ChannelBuffers;
@@ -628,7 +627,6 @@ public class ShuffleHandler extends AuxiliaryService {
   private void startStore(Path recoveryRoot) throws IOException {
 Options options = new Options();
 options.createIfMissing(false);
-options.logger(new LevelDBLogger());
 Path dbPath = new Path(recoveryRoot, STATE_DB_NAME);
 LOG.info("Using state database at " + dbPath + " for recovery");
 File dbfile = new File(dbPath.toString());
@@ -774,15 +772,6 @@ public class ShuffleHandler extends AuxiliaryService {
 }
   }
 
-  private static class LevelDBLogger implements Logger {
-private static final Log LOG = LogFactory.getLog(LevelDBLogger.class);
-
-@Override
-public void log(String message) {
-  LOG.info(message);
-}
-  }

hadoop git commit: YARN-8120. JVM can crash with SIGSEGV when exiting due to custom leveldb logger. Contributed by Jason Lowe.

2018-04-12 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 20472bdfd -> b0dfb18da


YARN-8120. JVM can crash with SIGSEGV when exiting due to custom leveldb 
logger. Contributed by Jason Lowe.

(cherry picked from commit 6bb128dfb893cf0e4aa2d3ecc65440668a1fc8d7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0dfb18d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0dfb18d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0dfb18d

Branch: refs/heads/branch-2
Commit: b0dfb18daa8b983aa53b136a4989c9668b25e88d
Parents: 20472bd
Author: Eric E Payne 
Authored: Thu Apr 12 16:04:23 2018 +
Committer: Eric E Payne 
Committed: Thu Apr 12 16:38:50 2018 +

--
 .../v2/hs/HistoryServerLeveldbStateStoreService.java| 10 --
 .../java/org/apache/hadoop/mapred/ShuffleHandler.java   | 11 ---
 .../recovery/NMLeveldbStateStoreService.java| 12 
 .../resourcemanager/recovery/LeveldbRMStateStore.java   | 12 
 4 files changed, 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0dfb18d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
index 16366b1..b951525 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
@@ -75,7 +75,6 @@ public class HistoryServerLeveldbStateStoreService extends
 Path storeRoot = createStorageDir(getConfig());
 Options options = new Options();
 options.createIfMissing(false);
-options.logger(new LeveldbLogger());
 LOG.info("Using state database at " + storeRoot + " for recovery");
 File dbfile = new File(storeRoot.toString());
 try {
@@ -367,13 +366,4 @@ public class HistoryServerLeveldbStateStoreService extends
 + getCurrentVersion() + ", but loading version " + loadedVersion);
 }
   }
-
-  private static class LeveldbLogger implements Logger {
-private static final Log LOG = LogFactory.getLog(LeveldbLogger.class);
-
-@Override
-public void log(String message) {
-  LOG.info(message);
-}
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0dfb18d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 47eb8b1..0bab750 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -95,7 +95,6 @@ import org.fusesource.leveldbjni.JniDBFactory;
 import org.fusesource.leveldbjni.internal.NativeDB;
 import org.iq80.leveldb.DB;
 import org.iq80.leveldb.DBException;
-import org.iq80.leveldb.Logger;
 import org.iq80.leveldb.Options;
 import org.jboss.netty.bootstrap.ServerBootstrap;
 import org.jboss.netty.buffer.ChannelBuffers;
@@ -628,7 +627,6 @@ public class ShuffleHandler extends AuxiliaryService {
   private void startStore(Path recoveryRoot) throws IOException {
 Options options = new Options();
 options.createIfMissing(false);
-options.logger(new LevelDBLogger());
 Path dbPath = new Path(recoveryRoot, STATE_DB_NAME);
 LOG.info("Using state database at " + dbPath + " for recovery");
 File dbfile = new File(dbPath.toString());
@@ -774,15 +772,6 @@ public class ShuffleHandler extends AuxiliaryService {
 }
   }
 
-  private static class LevelDBLogger implements Logger {
-private static final Log LOG = LogFactory.getLog(LevelDBLogger.class);
-
-@Override
-public void log(String message) {
-  LOG.info(message);
-}
-  }

hadoop git commit: YARN-8120. JVM can crash with SIGSEGV when exiting due to custom leveldb logger. Contributed by Jason Lowe.

2018-04-12 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 94daa4f3b -> 081ea1ec3


YARN-8120. JVM can crash with SIGSEGV when exiting due to custom leveldb 
logger. Contributed by Jason Lowe.

(cherry picked from commit 6bb128dfb893cf0e4aa2d3ecc65440668a1fc8d7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/081ea1ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/081ea1ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/081ea1ec

Branch: refs/heads/branch-3.0
Commit: 081ea1ec396ed115236abdc9d8635eff51165d18
Parents: 94daa4f
Author: Eric E Payne 
Authored: Thu Apr 12 16:04:23 2018 +
Committer: Eric E Payne 
Committed: Thu Apr 12 16:22:48 2018 +

--
 .../v2/hs/HistoryServerLeveldbStateStoreService.java| 11 ---
 .../java/org/apache/hadoop/mapred/ShuffleHandler.java   | 12 
 .../recovery/NMLeveldbStateStoreService.java| 12 
 .../resourcemanager/recovery/LeveldbRMStateStore.java   | 12 
 4 files changed, 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/081ea1ec/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
index c8741aa..6d2e407 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
@@ -74,7 +74,6 @@ public class HistoryServerLeveldbStateStoreService extends
 Path storeRoot = createStorageDir(getConfig());
 Options options = new Options();
 options.createIfMissing(false);
-options.logger(new LeveldbLogger());
 LOG.info("Using state database at " + storeRoot + " for recovery");
 File dbfile = new File(storeRoot.toString());
 try {
@@ -366,14 +365,4 @@ public class HistoryServerLeveldbStateStoreService extends
 + getCurrentVersion() + ", but loading version " + loadedVersion);
 }
   }
-
-  private static class LeveldbLogger implements org.iq80.leveldb.Logger {
-private static final Logger LOG =
-LoggerFactory.getLogger(LeveldbLogger.class);
-
-@Override
-public void log(String message) {
-  LOG.info(message);
-}
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/081ea1ec/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index ec992fe..aeda9cc 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -93,7 +93,6 @@ import org.fusesource.leveldbjni.JniDBFactory;
 import org.fusesource.leveldbjni.internal.NativeDB;
 import org.iq80.leveldb.DB;
 import org.iq80.leveldb.DBException;
-import org.iq80.leveldb.Logger;
 import org.iq80.leveldb.Options;
 import org.jboss.netty.bootstrap.ServerBootstrap;
 import org.jboss.netty.buffer.ChannelBuffers;
@@ -628,7 +627,6 @@ public class ShuffleHandler extends AuxiliaryService {
   private void startStore(Path recoveryRoot) throws IOException {
 Options options = new Options();
 options.createIfMissing(false);
-options.logger(new LevelDBLogger());
 Path dbPath = new Path(recoveryRoot, STATE_DB_NAME);
 LOG.info("Using state database at " + dbPath + " for recovery");
 File dbfile = new File(dbPath.toString());
@@ -774,16 +772,6 @@ public class ShuffleHandler extends AuxiliaryService {
 }
   }
 
-  private static class LevelDBLogger implements Logger {
-private static final org.slf4j.Logger LOG =
-LoggerFactory.getLogger(LevelDBLogger.class);
-
-@Override
-public void log(String message) {
-  LOG.info(message);
-}
-  }

hadoop git commit: HDFS-13386. RBF: Wrong date information in list file(-ls) result. Contributed by Dibyendu Karmakar.

2018-04-12 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/trunk b5353c75d -> 044341b4e


HDFS-13386. RBF: Wrong date information in list file(-ls) result. Contributed 
by Dibyendu Karmakar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/044341b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/044341b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/044341b4

Branch: refs/heads/trunk
Commit: 044341b4e4459b23159e94c7ff0601058398fd70
Parents: b5353c7
Author: Inigo Goiri 
Authored: Thu Apr 12 09:30:11 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Apr 12 09:30:11 2018 -0700

--
 .../federation/router/RouterRpcServer.java  | 19 +-
 .../federation/router/TestRouterMountTable.java | 62 +++-
 2 files changed, 79 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/044341b4/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index e6d2f5e..0dc2a69 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -2328,7 +2328,24 @@ public class RouterRpcServer extends AbstractService
*/
  private Map<String, Long> getMountPointDates(String path) {
Map<String, Long> ret = new TreeMap<>();
-// TODO add when we have a Mount Table
+if (subclusterResolver instanceof MountTableResolver) {
+  MountTableResolver mountTable = (MountTableResolver)subclusterResolver;
+  String srcPath;
+  try {
+final List<String> children = subclusterResolver.getMountPoints(path);
+for (String child : children) {
+  if (path.equals(Path.SEPARATOR)) {
+srcPath = Path.SEPARATOR + child;
+  } else {
+srcPath = path + Path.SEPARATOR + child;
+  }
+  MountTable entry = mountTable.getMountPoint(srcPath);
+  ret.put(child, entry.getDateModified());
+}
+  } catch (IOException e) {
+LOG.error("Cannot get mount point: {}", e.getMessage());
+  }
+}
 return ret;
   }
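
The interesting detail in getMountPointDates() is how each child name is
turned into a full mount-table key before the lookup. A self-contained sketch
of that join, with the mount table mocked as a plain map (MountTableResolver
and MountTable are the real classes in the hunk above; everything else here is
illustrative):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeMap;

    public class MountPointDatesSketch {
      private static final String SEPARATOR = "/"; // Path.SEPARATOR

      // mountTable: mount point source path -> modification time
      static Map<String, Long> datesFor(String path,
          Iterable<String> children, Map<String, Long> mountTable) {
        Map<String, Long> ret = new TreeMap<>();
        for (String child : children) {
          // Joining under the root must not produce a double slash.
          String srcPath = path.equals(SEPARATOR)
              ? SEPARATOR + child : path + SEPARATOR + child;
          Long modified = mountTable.get(srcPath);
          if (modified != null) {
            ret.put(child, modified);
          }
        }
        return ret;
      }

      public static void main(String[] args) {
        Map<String, Long> table = new HashMap<>();
        table.put("/tmp", 1523500000000L);
        System.out.println(datesFor("/", Arrays.asList("tmp"), table));
        // -> {tmp=1523500000000}
      }
    }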
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/044341b4/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
index c9e28b1..b33b998 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
@@ -17,25 +17,33 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Collections;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import 
org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext;
 import 
org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.fede

hadoop git commit: Revert "HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the time. Contributed by Jinglun."

2018-04-12 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4571351cc -> b5353c75d


Revert "HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs 
all the time. Contributed by Jinglun."

This reverts commit ac32b3576da4cc463dff85118163ccfff02215fc.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5353c75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5353c75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5353c75

Branch: refs/heads/trunk
Commit: b5353c75d90b9299f04dba255b9e9af5a8cc19eb
Parents: 4571351
Author: Inigo Goiri 
Authored: Thu Apr 12 09:28:23 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Apr 12 09:28:23 2018 -0700

--
 .../ha/RequestHedgingProxyProvider.java |  3 --
 .../ha/TestRequestHedgingProxyProvider.java | 34 
 2 files changed, 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5353c75/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 1c38791..7b9cd64 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -79,9 +79,6 @@ public class RequestHedgingProxyProvider<T> extends
 public Object
 invoke(Object proxy, final Method method, final Object[] args)
 throws Throwable {
-  if (currentUsedProxy != null) {
-return method.invoke(currentUsedProxy.proxy, args);
-  }
   Map<Future<Object>, ProxyInfo<T>> proxyMap = new HashMap<>();
   int numAttempts = 0;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5353c75/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 4b3fdf9..8d6b02d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -43,13 +43,10 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import static org.junit.Assert.assertEquals;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
-import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.mock;
 
 import com.google.common.collect.Lists;
 
@@ -103,37 +100,6 @@ public class TestRequestHedgingProxyProvider {
   }
 
   @Test
-  public void testRequestNNAfterOneSuccess() throws Exception {
-final AtomicInteger count = new AtomicInteger(0);
-final ClientProtocol goodMock = mock(ClientProtocol.class);
-when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
-  @Override
-  public long[] answer(InvocationOnMock invocation) throws Throwable {
-count.incrementAndGet();
-Thread.sleep(1000);
-return new long[]{1};
-  }
-});
-final ClientProtocol badMock = mock(ClientProtocol.class);
-when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
-  @Override
-  public long[] answer(InvocationOnMock invocation) throws Throwable {
-count.incrementAndGet();
-throw new IOException("Bad mock !!");
-  }
-});
-
-RequestHedgingProxyProvider<ClientProtocol> provider =
-new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
-createFactory(badMock, goodMock, goodMock, badMock));
-ClientProtocol proxy = provider.getProxy().proxy;
-proxy.getStats();
-assertEquals(2, count.get());
-proxy.getStats();
-assertEquals(3, count.get());
-  }
-
-  @Test
   public void testHedgingWhenOneIsSlow() throws Exception {
 final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
 Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
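
The surviving tests pin the provider's base contract: invoke every configured
namenode concurrently and take the first successful response, while the
deleted testRequestNNAfterOneSuccess had pinned the now-reverted shortcut of
reusing the winning proxy. A generic sketch of that hedging contract in plain
java.util.concurrent (an illustration of the idea, not the HDFS dynamic-proxy
implementation):

    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class HedgingSketch {
      // Invoke every target concurrently; the first successful result wins.
      static <T> T hedge(List<Callable<T>> targets) throws Exception {
        if (targets.isEmpty()) {
          throw new IllegalArgumentException("no targets to hedge");
        }
        ExecutorService pool = Executors.newFixedThreadPool(targets.size());
        ExecutorCompletionService<T> completed =
            new ExecutorCompletionService<>(pool);
        try {
          for (Callable<T> target : targets) {
            completed.submit(target);
          }
          Exception last = null;
          for (int i = 0; i < targets.size(); i++) {
            try {
              return completed.take().get(); // first success short-circuits
            } catch (Exception e) {
              last = e; // this hedge failed; keep waiting for the others
            }
          }
          throw last; // every target failed
        } finally {
          pool.shutdownNow(); // cancel the losing calls
        }
      }
    }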



hadoop git commit: YARN-8120. JVM can crash with SIGSEGV when exiting due to custom leveldb logger. Contributed by Jason Lowe.

2018-04-12 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 671e407ce -> f955d52f3


YARN-8120. JVM can crash with SIGSEGV when exiting due to custom leveldb 
logger. Contributed by Jason Lowe.

(cherry picked from commit 6bb128dfb893cf0e4aa2d3ecc65440668a1fc8d7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f955d52f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f955d52f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f955d52f

Branch: refs/heads/branch-3.1
Commit: f955d52f39c1ecb4aa53cc4f4d18652b4914d8ee
Parents: 671e407
Author: Eric E Payne 
Authored: Thu Apr 12 16:04:23 2018 +
Committer: Eric E Payne 
Committed: Thu Apr 12 16:11:57 2018 +

--
 .../v2/hs/HistoryServerLeveldbStateStoreService.java| 11 ---
 .../java/org/apache/hadoop/mapred/ShuffleHandler.java   | 12 
 .../recovery/NMLeveldbStateStoreService.java| 12 
 .../resourcemanager/recovery/LeveldbRMStateStore.java   | 12 
 4 files changed, 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f955d52f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
index c8741aa..6d2e407 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
@@ -74,7 +74,6 @@ public class HistoryServerLeveldbStateStoreService extends
 Path storeRoot = createStorageDir(getConfig());
 Options options = new Options();
 options.createIfMissing(false);
-options.logger(new LeveldbLogger());
 LOG.info("Using state database at " + storeRoot + " for recovery");
 File dbfile = new File(storeRoot.toString());
 try {
@@ -366,14 +365,4 @@ public class HistoryServerLeveldbStateStoreService extends
 + getCurrentVersion() + ", but loading version " + loadedVersion);
 }
   }
-
-  private static class LeveldbLogger implements org.iq80.leveldb.Logger {
-private static final Logger LOG =
-LoggerFactory.getLogger(LeveldbLogger.class);
-
-@Override
-public void log(String message) {
-  LOG.info(message);
-}
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f955d52f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index ec992fe..aeda9cc 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -93,7 +93,6 @@ import org.fusesource.leveldbjni.JniDBFactory;
 import org.fusesource.leveldbjni.internal.NativeDB;
 import org.iq80.leveldb.DB;
 import org.iq80.leveldb.DBException;
-import org.iq80.leveldb.Logger;
 import org.iq80.leveldb.Options;
 import org.jboss.netty.bootstrap.ServerBootstrap;
 import org.jboss.netty.buffer.ChannelBuffers;
@@ -628,7 +627,6 @@ public class ShuffleHandler extends AuxiliaryService {
   private void startStore(Path recoveryRoot) throws IOException {
 Options options = new Options();
 options.createIfMissing(false);
-options.logger(new LevelDBLogger());
 Path dbPath = new Path(recoveryRoot, STATE_DB_NAME);
 LOG.info("Using state database at " + dbPath + " for recovery");
 File dbfile = new File(dbPath.toString());
@@ -774,16 +772,6 @@ public class ShuffleHandler extends AuxiliaryService {
 }
   }
 
-  private static class LevelDBLogger implements Logger {
-private static final org.slf4j.Logger LOG =
-LoggerFactory.getLogger(LevelDBLogger.class);
-
-@Override
-public void log(String message) {
-  LOG.info(message);
-}
-  }

hadoop git commit: MAPREDUCE-7069. Add ability to specify user environment variables individually. Contributed by Jim Brennan

2018-04-12 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6bb128dfb -> 4571351cc


MAPREDUCE-7069. Add ability to specify user environment variables individually. 
Contributed by Jim Brennan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4571351c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4571351c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4571351c

Branch: refs/heads/trunk
Commit: 4571351cccf6d4977469d3d623cf045b06a5f5f0
Parents: 6bb128d
Author: Jason Lowe 
Authored: Thu Apr 12 11:04:22 2018 -0500
Committer: Jason Lowe 
Committed: Thu Apr 12 11:12:46 2018 -0500

--
 .../apache/hadoop/mapred/MapReduceChildJVM.java |  73 +-
 .../v2/app/job/impl/TaskAttemptImpl.java|   8 +-
 .../v2/app/job/impl/TestMapReduceChildJVM.java  |  24 +++-
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |  10 ++
 .../java/org/apache/hadoop/mapred/JobConf.java  |  18 +++
 .../src/main/resources/mapred-default.xml   |  61 +++--
 .../src/site/markdown/MapReduceTutorial.md  |   6 +
 .../org/apache/hadoop/mapred/YARNRunner.java|  11 +-
 .../apache/hadoop/mapred/TestYARNRunner.java|  26 +++-
 .../java/org/apache/hadoop/yarn/util/Apps.java  | 115 +---
 .../org/apache/hadoop/yarn/util/TestApps.java   | 136 +++
 11 files changed, 407 insertions(+), 81 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4571351c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
index 936dc5a..d305f9f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.mapred;
 
 import java.net.InetSocketAddress;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Vector;
@@ -28,7 +27,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.TaskLog.LogName;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
@@ -42,50 +40,53 @@ public class MapReduceChildJVM {
 filter.toString();
   }
 
-  private static String getChildEnv(JobConf jobConf, boolean isMap) {
+  private static String getChildEnvProp(JobConf jobConf, boolean isMap) {
 if (isMap) {
-  return jobConf.get(JobConf.MAPRED_MAP_TASK_ENV,
-  jobConf.get(JobConf.MAPRED_TASK_ENV));
+  return JobConf.MAPRED_MAP_TASK_ENV;
 }
-return jobConf.get(JobConf.MAPRED_REDUCE_TASK_ENV,
-jobConf.get(JobConf.MAPRED_TASK_ENV));
+return JobConf.MAPRED_REDUCE_TASK_ENV;
+  }
+
+  private static String getChildEnvDefaultValue(JobConf jobConf) {
+// There is no default value for these - use the fallback value instead.
+return jobConf.get(JobConf.MAPRED_TASK_ENV);
   }
 
   public static void setVMEnv(Map<String, String> environment,
   Task task) {
 
 JobConf conf = task.conf;
-// Add the env variables passed by the user
-String mapredChildEnv = getChildEnv(conf, task.isMapTask());
-MRApps.setEnvFromInputString(environment, mapredChildEnv, conf);
-
-// Set logging level in the environment.
-// This is so that, if the child forks another "bin/hadoop" (common in
-// streaming) it will have the correct loglevel.
-environment.put(
-"HADOOP_ROOT_LOGGER", 
-MRApps.getChildLogLevel(conf, task.isMapTask()) + ",console");
-
-// TODO: The following is useful for instance in streaming tasks. Should be
-// set in ApplicationMaster's env by the RM.
-String hadoopClientOpts = System.getenv("HADOOP_CLIENT_OPTS");
-if (hadoopClientOpts == null) {
-  hadoopClientOpts = "";
-} else {
-  hadoopClientOpts = hadoopClientOpts + " ";
+boolean isMap = task.isMapTask();
+
+// Remove these before adding the user variables to prevent
+// MRApps.setEnvFromInputProperty() from appending to them.
+String hadoopRootLoggerKey = "HADOOP_ROOT_LOGGER";
+String hadoopClie
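
The net effect of the change: instead of packing every variable into one
comma-separated mapreduce.{map,reduce}.env value, a job can now set one
property per variable, so a value may itself contain commas. A small sketch of
both forms, assuming the per-variable keys take the form
mapreduce.map.env.VARNAME per the feature summary (the variable names are
illustrative; the base key is the JobConf constant shown in the diff):

    import org.apache.hadoop.conf.Configuration;

    public class TaskEnvSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        // Old form: one comma-separated property for all map-task variables.
        conf.set("mapreduce.map.env", "TZ=UTC,TMPDIR=/tmp/map");
        // New per-variable form: the variable name is appended to the key,
        // so the value can safely contain commas.
        conf.set("mapreduce.map.env.EXTRA_PATH", "/opt/a,/opt/b");
        System.out.println(conf.get("mapreduce.map.env.EXTRA_PATH"));
      }
    }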

hadoop git commit: YARN-8120. JVM can crash with SIGSEGV when exiting due to custom leveldb logger. Contributed by Jason Lowe.

2018-04-12 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/trunk d272056fc -> 6bb128dfb


YARN-8120. JVM can crash with SIGSEGV when exiting due to custom leveldb 
logger. Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bb128df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bb128df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bb128df

Branch: refs/heads/trunk
Commit: 6bb128dfb893cf0e4aa2d3ecc65440668a1fc8d7
Parents: d272056
Author: Eric E Payne 
Authored: Thu Apr 12 16:04:23 2018 +
Committer: Eric E Payne 
Committed: Thu Apr 12 16:04:23 2018 +

--
 .../v2/hs/HistoryServerLeveldbStateStoreService.java| 11 ---
 .../java/org/apache/hadoop/mapred/ShuffleHandler.java   | 12 
 .../recovery/NMLeveldbStateStoreService.java| 12 
 .../resourcemanager/recovery/LeveldbRMStateStore.java   | 12 
 4 files changed, 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bb128df/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
index c8741aa..6d2e407 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
@@ -74,7 +74,6 @@ public class HistoryServerLeveldbStateStoreService extends
 Path storeRoot = createStorageDir(getConfig());
 Options options = new Options();
 options.createIfMissing(false);
-options.logger(new LeveldbLogger());
 LOG.info("Using state database at " + storeRoot + " for recovery");
 File dbfile = new File(storeRoot.toString());
 try {
@@ -366,14 +365,4 @@ public class HistoryServerLeveldbStateStoreService extends
 + getCurrentVersion() + ", but loading version " + loadedVersion);
 }
   }
-
-  private static class LeveldbLogger implements org.iq80.leveldb.Logger {
-private static final Logger LOG =
-LoggerFactory.getLogger(LeveldbLogger.class);
-
-@Override
-public void log(String message) {
-  LOG.info(message);
-}
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bb128df/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index ec992fe..aeda9cc 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -93,7 +93,6 @@ import org.fusesource.leveldbjni.JniDBFactory;
 import org.fusesource.leveldbjni.internal.NativeDB;
 import org.iq80.leveldb.DB;
 import org.iq80.leveldb.DBException;
-import org.iq80.leveldb.Logger;
 import org.iq80.leveldb.Options;
 import org.jboss.netty.bootstrap.ServerBootstrap;
 import org.jboss.netty.buffer.ChannelBuffers;
@@ -628,7 +627,6 @@ public class ShuffleHandler extends AuxiliaryService {
   private void startStore(Path recoveryRoot) throws IOException {
 Options options = new Options();
 options.createIfMissing(false);
-options.logger(new LevelDBLogger());
 Path dbPath = new Path(recoveryRoot, STATE_DB_NAME);
 LOG.info("Using state database at " + dbPath + " for recovery");
 File dbfile = new File(dbPath.toString());
@@ -774,16 +772,6 @@ public class ShuffleHandler extends AuxiliaryService {
 }
   }
 
-  private static class LevelDBLogger implements Logger {
-private static final org.slf4j.Logger LOG =
-LoggerFactory.getLogger(LevelDBLogger.class);
-
-@Override
-public void log(String message) {
-  LOG.info(message);
-}
-  }

hadoop git commit: HDFS-13414. Ozone: Update existing Ozone documentation according to the recent changes. Contributed by Elek, Marton.

2018-04-12 Thread msingh
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 40398d357 -> dd43835b3


HDFS-13414. Ozone: Update existing Ozone documentation according to the recent 
changes. Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd43835b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd43835b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd43835b

Branch: refs/heads/HDFS-7240
Commit: dd43835b3644aab7266718213e6323f38b8ea1bb
Parents: 40398d3
Author: Mukul Kumar Singh 
Authored: Thu Apr 12 21:21:44 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Thu Apr 12 21:21:44 2018 +0530

--
 .../src/main/site/markdown/OzoneCommandShell.md | 38 ++---
 .../site/markdown/OzoneGettingStarted.md.vm | 59 ++--
 .../src/main/site/markdown/OzoneRest.md | 32 +--
 3 files changed, 78 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd43835b/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md 
b/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md
index a274a22..fc63742 100644
--- a/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md
+++ b/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md
@@ -25,10 +25,10 @@ The Ozone commands take the following format.
  -root`
 
 The *port* specified in command should match the port mentioned in the config
-property `dfs.datanode.http.address`. This property can be set in 
`hdfs-site.xml`.
-The default value for the port is `9864` and is used in below commands.
+property `hdds.rest.http-address`. This property can be set in 
`ozone-site.xml`.
+The default value for the port is `9880` and is used in below commands.
 
-The *--root* option is a command line short cut that allows *ozone oz*
+The *-root* option is a command line short cut that allows *ozone oz*
 commands to be run as the user that started the cluster. This is useful to
 indicate that you want the commands to be run as some admin user. The only
 reason for this option is that it makes the life of a lazy developer more
@@ -44,37 +44,37 @@ ozone cluster.
 
 Volumes can be created only by Admins. Here is an example of creating a volume.
 
-* `ozone oz -createVolume http://localhost:9864/hive -user bilbo -quota
+* `ozone oz -createVolume http://localhost:9880/hive -user bilbo -quota
 100TB -root`
 
 The above command creates a volume called `hive` owned by user `bilbo`. The
-`--root` option allows the command to be executed as user `hdfs` which is an
+`-root` option allows the command to be executed as user `hdfs` which is an
 admin in the cluster.
 
 ### Update Volume
 
 Updates information like ownership and quota on an existing volume.
 
-* `ozone oz  -updateVolume  http://localhost:9864/hive -quota 500TB -root`
+* `ozone oz  -updateVolume  http://localhost:9880/hive -quota 500TB -root`
 
 The above command changes the volume quota of hive from 100TB to 500TB.
 
 ### Delete Volume
 Deletes a Volume if it is empty.
 
-* `ozone oz -deleteVolume http://localhost:9864/hive -root`
+* `ozone oz -deleteVolume http://localhost:9880/hive -root`
 
 
 ### Info Volume
 Info volume command allows the owner or the administrator of the cluster to 
read meta-data about a specific volume.
 
-* `ozone oz -infoVolume http://localhost:9864/hive -root`
+* `ozone oz -infoVolume http://localhost:9880/hive -root`
 
 ### List Volumes
 
 List volume command can be used by administrator to list volumes of any user. 
It can also be used by a user to list volumes owned by him.
 
-* `ozone oz -listVolume http://localhost:9864/ -user bilbo -root`
+* `ozone oz -listVolume http://localhost:9880/ -user bilbo -root`
 
 The above command lists all volumes owned by user bilbo.
 
@@ -89,7 +89,7 @@ Following examples assume that these commands are run by the 
owner of the volume
 
 Create bucket call allows the owner of a volume to create a bucket.
 
-* `ozone oz -createBucket http://localhost:9864/hive/january`
+* `ozone oz -createBucket http://localhost:9880/hive/january`
 
 This call creates a bucket called `january` in the volume called `hive`. If
 the volume does not exist, then this call will fail.
@@ -98,23 +98,23 @@ the volume does not exist, then this call will fail.
 ### Update Bucket
 Updates bucket meta-data, like ACLs.
 
-* `ozone oz -updateBucket http://localhost:9864/hive/january  -addAcl
+* `ozone oz -updateBucket http://localhost:9880/hive/january  -addAcl
 user:spark:rw`
 
 ### Delete Bucket
 Deletes a bucket if it is empty.
 
-* `ozone oz -deleteBucket http://localhost:9864/hive/january`
+* `ozone oz -deleteBucket http://localhost:9880/hive/january`
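
As the updated page states, the REST port now comes from the
`hdds.rest.http-address` property in `ozone-site.xml` rather than the datanode
HTTP address, with 9880 as the default. A minimal sketch of resolving it in
client code (property name and default port are from the doc change above;
treating the Ozone config as a plain Hadoop Configuration, and the fallback
host of 0.0.0.0, are assumptions of this sketch):

    import org.apache.hadoop.conf.Configuration;

    public class OzoneRestPortSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.addResource("ozone-site.xml"); // where the doc says to set it
        // Fall back to the documented default port when the property is unset.
        String rest = conf.get("hdds.rest.http-address", "0.0.0.0:9880");
        System.out.println("Ozone REST endpoint: " + rest);
      }
    }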

hadoop git commit: Revert "HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the time. Contributed by Jinglun."

2018-04-12 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f055a53b4 -> 20472bdfd


Revert "HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs 
all the time. Contributed by Jinglun."

This reverts commit 7e692425d538454abf69b07f6e8fd686a1171ac8.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20472bdf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20472bdf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20472bdf

Branch: refs/heads/branch-2
Commit: 20472bdfd2e8f084e106a8c732498e1601018d0c
Parents: f055a53
Author: Inigo Goiri 
Authored: Thu Apr 12 08:44:17 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Apr 12 08:44:17 2018 -0700

--
 .../ha/RequestHedgingProxyProvider.java |  3 --
 .../ha/TestRequestHedgingProxyProvider.java | 34 
 2 files changed, 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20472bdf/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index f34adce..010e9e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -78,9 +78,6 @@ public class RequestHedgingProxyProvider<T> extends
 public Object
 invoke(Object proxy, final Method method, final Object[] args)
 throws Throwable {
-  if (currentUsedProxy != null) {
-return method.invoke(currentUsedProxy.proxy, args);
-  }
   Map<Future<Object>, ProxyInfo<T>> proxyMap = new HashMap<>();
   int numAttempts = 0;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/20472bdf/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index a8a5c6e..65fbbf8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -42,13 +42,10 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import static org.junit.Assert.assertEquals;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
-import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.mock;
 
 import com.google.common.collect.Lists;
 
@@ -102,37 +99,6 @@ public class TestRequestHedgingProxyProvider {
   }
 
   @Test
-  public void testRequestNNAfterOneSuccess() throws Exception {
-final AtomicInteger count = new AtomicInteger(0);
-final ClientProtocol goodMock = mock(ClientProtocol.class);
-when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
-  @Override
-  public long[] answer(InvocationOnMock invocation) throws Throwable {
-count.incrementAndGet();
-Thread.sleep(1000);
-return new long[]{1};
-  }
-});
-final ClientProtocol badMock = mock(ClientProtocol.class);
-when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
-  @Override
-  public long[] answer(InvocationOnMock invocation) throws Throwable {
-count.incrementAndGet();
-throw new IOException("Bad mock !!");
-  }
-});
-
-RequestHedgingProxyProvider<ClientProtocol> provider =
-new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
-createFactory(badMock, goodMock, goodMock, badMock));
-ClientProtocol proxy = provider.getProxy().proxy;
-proxy.getStats();
-assertEquals(2, count.get());
-proxy.getStats();
-assertEquals(3, count.get());
-  }
-
-  @Test
   public void testHedgingWhenOneIsSlow() throws Exception {
 final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
 Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {



hadoop git commit: Revert "HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the time. Contributed by Jinglun."

2018-04-12 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 5f4762d95 -> 671e407ce


Revert "HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs 
all the time. Contributed by Jinglun."

This reverts commit 8c9dab978e2bdcbd6558c9e12b177c5bdca04393.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/671e407c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/671e407c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/671e407c

Branch: refs/heads/branch-3.1
Commit: 671e407ced6821b4acf070959b8d0cddce348d3a
Parents: 5f4762d
Author: Inigo Goiri 
Authored: Thu Apr 12 08:43:12 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Apr 12 08:43:12 2018 -0700

--
 .../ha/RequestHedgingProxyProvider.java |  3 --
 .../ha/TestRequestHedgingProxyProvider.java | 34 
 2 files changed, 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/671e407c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 1c38791..7b9cd64 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -79,9 +79,6 @@ public class RequestHedgingProxyProvider<T> extends
 public Object
 invoke(Object proxy, final Method method, final Object[] args)
 throws Throwable {
-  if (currentUsedProxy != null) {
-return method.invoke(currentUsedProxy.proxy, args);
-  }
   Map<Future<Object>, ProxyInfo<T>> proxyMap = new HashMap<>();
   int numAttempts = 0;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/671e407c/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 4b3fdf9..8d6b02d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -43,13 +43,10 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import static org.junit.Assert.assertEquals;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
-import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.mock;
 
 import com.google.common.collect.Lists;
 
@@ -103,37 +100,6 @@ public class TestRequestHedgingProxyProvider {
   }
 
   @Test
-  public void testRequestNNAfterOneSuccess() throws Exception {
-final AtomicInteger count = new AtomicInteger(0);
-final ClientProtocol goodMock = mock(ClientProtocol.class);
-when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
-  @Override
-  public long[] answer(InvocationOnMock invocation) throws Throwable {
-count.incrementAndGet();
-Thread.sleep(1000);
-return new long[]{1};
-  }
-});
-final ClientProtocol badMock = mock(ClientProtocol.class);
-when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
-  @Override
-  public long[] answer(InvocationOnMock invocation) throws Throwable {
-count.incrementAndGet();
-throw new IOException("Bad mock !!");
-  }
-});
-
-RequestHedgingProxyProvider<ClientProtocol> provider =
-new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
-createFactory(badMock, goodMock, goodMock, badMock));
-ClientProtocol proxy = provider.getProxy().proxy;
-proxy.getStats();
-assertEquals(2, count.get());
-proxy.getStats();
-assertEquals(3, count.get());
-  }
-
-  @Test
   public void testHedgingWhenOneIsSlow() throws Exception {
 final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
 Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {



hadoop git commit: Revert "HDFS-13386. RBF: Wrong date information in list file(-ls) result. Contributed by Dibyendu Karmakar."

2018-04-12 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7ed8511ad -> d272056fc


Revert "HDFS-13386. RBF: Wrong date information in list file(-ls) result. 
Contributed by Dibyendu Karmakar."

This reverts commit 18de6f2042b70f9f0d7a2620c60de022768a7b13.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d272056f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d272056f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d272056f

Branch: refs/heads/trunk
Commit: d272056fcb23f6a9252b19d349acd718d7837079
Parents: 7ed8511
Author: Inigo Goiri 
Authored: Thu Apr 12 08:42:19 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Apr 12 08:42:19 2018 -0700

--
 .../federation/router/RouterRpcServer.java  | 19 +-
 .../federation/router/TestRouterMountTable.java | 62 +---
 2 files changed, 2 insertions(+), 79 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d272056f/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 0dc2a69..e6d2f5e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -2328,24 +2328,7 @@ public class RouterRpcServer extends AbstractService
*/
  private Map<String, Long> getMountPointDates(String path) {
Map<String, Long> ret = new TreeMap<>();
-if (subclusterResolver instanceof MountTableResolver) {
-  MountTableResolver mountTable = (MountTableResolver)subclusterResolver;
-  String srcPath;
-  try {
-final List<String> children = subclusterResolver.getMountPoints(path);
-for (String child : children) {
-  if (path.equals(Path.SEPARATOR)) {
-srcPath = Path.SEPARATOR + child;
-  } else {
-srcPath = path + Path.SEPARATOR + child;
-  }
-  MountTable entry = mountTable.getMountPoint(srcPath);
-  ret.put(child, entry.getDateModified());
-}
-  } catch (IOException e) {
-LOG.error("Cannot get mount point: {}", e.getMessage());
-  }
-}
+// TODO add when we have a Mount Table
 return ret;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d272056f/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
index b33b998..c9e28b1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
@@ -17,33 +17,25 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Collections;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import 
org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext;
 import 
org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
-import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;

hadoop git commit: HDFS-13426. Fix javadoc in FsDatasetAsyncDiskService#removeVolume. Contributed by Shashikant Banerjee.

2018-04-12 Thread msingh
Repository: hadoop
Updated Branches:
  refs/heads/trunk b8597858b -> 7ed8511ad


HDFS-13426. Fix javadoc in FsDatasetAsyncDiskService#removeVolume. Contributed 
by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ed8511a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ed8511a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ed8511a

Branch: refs/heads/trunk
Commit: 7ed8511ad8daff19f765e78e4dca07cdebc2c2b2
Parents: b859785
Author: Mukul Kumar Singh 
Authored: Thu Apr 12 20:12:31 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Thu Apr 12 20:12:31 2018 +0530

--
 .../server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ed8511a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
index 9174cb0..4929b5e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
@@ -137,7 +138,7 @@ class FsDatasetAsyncDiskService {
 
   /**
    * Stops AsyncDiskService for a volume.
-   * @param volume the root of the volume.
+   * @param storageId id of {@link StorageDirectory}.
    */
   synchronized void removeVolume(String storageId) {
     if (executors == null) {

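The corrected @param reflects that removeVolume() looks a volume up by the storage ID of its StorageDirectory rather than by a volume root path. A minimal sketch of that shape (the executors map and error message here are illustrative, not the actual FsDatasetAsyncDiskService internals):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;

// Sketch: one executor per volume, keyed by storage ID, so removal
// naturally takes a storageId rather than the volume's root directory.
class AsyncDiskServiceSketch {
  private final Map<String, ExecutorService> executors = new HashMap<>();

  /**
   * Stops AsyncDiskService for a volume.
   * @param storageId id of the volume's StorageDirectory.
   */
  synchronized void removeVolume(String storageId) {
    ExecutorService executor = executors.get(storageId);
    if (executor == null) {
      throw new RuntimeException(
          "Can not find volume with storageId " + storageId + " to remove.");
    }
    executor.shutdown();          // stop accepting new tasks for this volume
    executors.remove(storageId);  // forget the mapping
  }
}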

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7931. [atsv2 read acls] Include domain table creation as part of schema creator. (Vrushali C via Haibo Chen)

2018-04-12 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 113af12cf -> b8597858b


YARN-7931. [atsv2 read acls] Include domain table creation as part of schema 
creator. (Vrushali C via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8597858
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8597858
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8597858

Branch: refs/heads/trunk
Commit: b8597858b17e40a99611e3a384cdd241293af83f
Parents: 113af12
Author: Haibo Chen 
Authored: Thu Apr 12 06:38:30 2018 -0700
Committer: Haibo Chen 
Committed: Thu Apr 12 06:38:30 2018 -0700

--
 .../storage/TimelineSchemaCreator.java  |  10 ++
 .../storage/domain/DomainTableRW.java   |  92 ++
 .../storage/domain/package-info.java|  28 +++
 .../storage/domain/DomainColumn.java| 111 
 .../storage/domain/DomainColumnFamily.java  |  52 ++
 .../storage/domain/DomainRowKey.java| 179 +++
 .../storage/domain/DomainTable.java |  45 +
 .../storage/domain/package-info.java|  28 +++
 .../storage/common/TestRowKeys.java |  32 
 9 files changed, 577 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8597858/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
index e9e4770..37ed50c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableR
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTableRW;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTableRW;
 import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.domain.DomainTableRW;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
@@ -357,6 +358,15 @@ public final class TimelineSchemaCreator {
           throw e;
         }
       }
+      try {
+        new DomainTableRW().createTable(admin, hbaseConf);
+      } catch (IOException e) {
+        if (skipExisting) {
+          LOG.warn("Skip and continue on: " + e.getMessage());
+        } else {
+          throw e;
+        }
+      }
     } finally {
       if (conn != null) {
         conn.close();

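The added block repeats the skip-existing idiom used for the other timeline tables: attempt creation, and on IOException either warn-and-continue or rethrow, depending on the skipExisting flag. A self-contained sketch of that idiom (TableCreator and createAll are illustrative names, not the HBase client API):

import java.io.IOException;
import java.util.List;

// Illustrative sketch of the skip-existing table creation idiom.
interface TableCreator {
  void createTable() throws IOException;  // throws if the table exists
}

class SchemaCreatorSketch {
  static void createAll(List<TableCreator> tables, boolean skipExisting)
      throws IOException {
    for (TableCreator table : tables) {
      try {
        table.createTable();
      } catch (IOException e) {
        if (skipExisting) {
          // Table probably exists already; note it and move on.
          System.out.println("Skip and continue on: " + e.getMessage());
        } else {
          throw e;  // fail fast when skipping is not requested
        }
      }
    }
  }
}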
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8597858/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/domain/DomainTableRW.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/domain/DomainTableRW.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/domain/DomainTableRW.java
new file mode 100644
index 000..1d58e40
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/domain/DomainTableRW.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under 

hadoop git commit: HADOOP-14999. AliyunOSS: provide one asynchronous multi-part based uploading mechanism. Contributed by Genmao Yu.

2018-04-12 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 41e0999b3 -> 79962d946


HADOOP-14999. AliyunOSS: provide one asynchronous multi-part based uploading 
mechanism. Contributed by Genmao Yu.

(cherry picked from commit a7de3cfa712087b3a8476f9ad83c3b1118fa5394)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79962d94
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79962d94
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79962d94

Branch: refs/heads/branch-2.9
Commit: 79962d946eb4090b1df543086d8a379ac270aa22
Parents: 41e0999
Author: Sammi Chen 
Authored: Tue Apr 10 16:45:53 2018 +0800
Committer: Sammi Chen 
Committed: Thu Apr 12 19:03:43 2018 +0800

--
 .../aliyun/oss/AliyunCredentialsProvider.java   |   3 +-
 .../aliyun/oss/AliyunOSSBlockOutputStream.java  | 213 +++
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  28 ++-
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 167 ---
 .../fs/aliyun/oss/AliyunOSSOutputStream.java| 111 --
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java| 117 +++---
 .../apache/hadoop/fs/aliyun/oss/Constants.java  |  23 +-
 .../oss/TestAliyunOSSBlockOutputStream.java | 115 ++
 .../fs/aliyun/oss/TestAliyunOSSInputStream.java |  10 +-
 .../aliyun/oss/TestAliyunOSSOutputStream.java   |  91 
 .../contract/TestAliyunOSSContractDistCp.java   |   2 +-
 11 files changed, 547 insertions(+), 333 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79962d94/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
--
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
index b46c67a..58c14a9 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
@@ -35,8 +35,7 @@ import static org.apache.hadoop.fs.aliyun.oss.Constants.*;
 public class AliyunCredentialsProvider implements CredentialsProvider {
   private Credentials credentials = null;
 
-  public AliyunCredentialsProvider(Configuration conf)
-      throws IOException {
+  public AliyunCredentialsProvider(Configuration conf) throws IOException {
     String accessKeyId;
     String accessKeySecret;
     String securityToken;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79962d94/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
--
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
new file mode 100644
index 000..2d9a13b
--- /dev/null
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.aliyun.oss;
+
+import com.aliyun.oss.model.PartETag;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;

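The new stream buffers writes into local block files and hands each completed block to a background pool as one part of a multipart upload; close() then waits for every part before committing. A heavily reduced sketch of that flow with Guava's listenable futures (uploadPart and completeUpload are hypothetical stand-ins for the real OSS store calls):

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;

// Reduced sketch of asynchronous multi-part upload: every finished
// local block goes to a thread pool, and close() gathers all part
// results before committing the multipart upload.
class BlockOutputStreamSketch {
  private final ListeningExecutorService executor =
      MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
  private final List<ListenableFuture<String>> partFutures = new ArrayList<>();
  private int partNumber = 0;

  void submitBlock(final File block) {
    final int part = ++partNumber;
    // Upload off the write path so the caller can keep writing new blocks.
    Callable<String> task = () -> uploadPart(block, part);
    partFutures.add(executor.submit(task));
  }

  void close() throws InterruptedException, ExecutionException {
    List<String> eTags = new ArrayList<>();
    for (ListenableFuture<String> future : partFutures) {
      eTags.add(future.get());   // wait for every in-flight part
    }
    completeUpload(eTags);       // commit the multipart upload
    executor.shutdown();
  }

  // Hypothetical stand-ins for the real OSS store calls.
  private String uploadPart(File block, int part) { return "etag-" + part; }
  private void completeUpload(List<String> eTags) { }
}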
hadoop git commit: HDFS-7101. Potential null dereference in DFSck#doWork(). Contributed by skrho and Ted Yu.

2018-04-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 ef42c2fda -> 41e0999b3


HDFS-7101. Potential null dereference in DFSck#doWork(). Contributed by skrho 
and Ted Yu.

(cherry picked from commit 113af12cfb240ea9a7189bb2701693466eb8e993)
(cherry picked from commit f055a53b43322a8d80a4aa362ec2a71d446334e7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41e0999b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41e0999b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41e0999b

Branch: refs/heads/branch-2.9
Commit: 41e0999b3f967ac17e0e5fe692c0bcb7e03d8cab
Parents: ef42c2f
Author: Akira Ajisaka 
Authored: Thu Apr 12 17:47:37 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Apr 12 17:49:07 2018 +0900

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41e0999b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index 199f459..0c69b6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -350,7 +350,7 @@ public class DFSck extends Configured implements Tool {
     BufferedReader input = new BufferedReader(new InputStreamReader(
         stream, "UTF-8"));
     String line = null;
-    String lastLine = null;
+    String lastLine = NamenodeFsck.CORRUPT_STATUS;
     int errCode = -1;
     try {
       while ((line = input.readLine()) != null) {

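The one-line fix matters when fsck returns an empty stream: lastLine stayed null through the loop, and the exit-code check after it dereferenced null. Seeding it with NamenodeFsck.CORRUPT_STATUS routes the empty case to the error path instead. A minimal sketch of the pattern (literal status strings stand in for the NamenodeFsck constants, and the branch structure mirrors but simplifies the real doWork logic):

import java.io.BufferedReader;
import java.io.IOException;

// Sketch: derive an exit code from the last line of fsck output.
// Seeding lastLine with a non-null "corrupt" marker means an empty
// stream falls through to the error branch instead of throwing NPE.
class FsckExitCodeSketch {
  static final String HEALTHY_STATUS = "is HEALTHY";
  static final String CORRUPT_STATUS = "is CORRUPT";

  static int exitCode(BufferedReader input) throws IOException {
    String line;
    String lastLine = CORRUPT_STATUS;  // was null before the fix
    while ((line = input.readLine()) != null) {
      lastLine = line;
    }
    if (lastLine.endsWith(HEALTHY_STATUS)) {
      return 0;                        // healthy filesystem
    } else if (lastLine.endsWith(CORRUPT_STATUS)) {
      return 1;                        // corrupt blocks found
    }
    return -1;                         // anything else is an error
  }
}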

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-7101. Potential null dereference in DFSck#doWork(). Contributed by skrho and Ted Yu.

2018-04-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a48deb155 -> f055a53b4


HDFS-7101. Potential null dereference in DFSck#doWork(). Contributed by skrho 
and Ted Yu.

(cherry picked from commit 113af12cfb240ea9a7189bb2701693466eb8e993)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f055a53b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f055a53b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f055a53b

Branch: refs/heads/branch-2
Commit: f055a53b43322a8d80a4aa362ec2a71d446334e7
Parents: a48deb1
Author: Akira Ajisaka 
Authored: Thu Apr 12 17:47:37 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Apr 12 17:48:45 2018 +0900

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f055a53b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index 199f459..0c69b6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -350,7 +350,7 @@ public class DFSck extends Configured implements Tool {
     BufferedReader input = new BufferedReader(new InputStreamReader(
         stream, "UTF-8"));
     String line = null;
-    String lastLine = null;
+    String lastLine = NamenodeFsck.CORRUPT_STATUS;
     int errCode = -1;
     try {
       while ((line = input.readLine()) != null) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-7101. Potential null dereference in DFSck#doWork(). Contributed by skrho and Ted Yu.

2018-04-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 832852ce4 -> 113af12cf


HDFS-7101. Potential null dereference in DFSck#doWork(). Contributed by skrho 
and Ted Yu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/113af12c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/113af12c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/113af12c

Branch: refs/heads/trunk
Commit: 113af12cfb240ea9a7189bb2701693466eb8e993
Parents: 832852c
Author: Akira Ajisaka 
Authored: Thu Apr 12 17:47:37 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Apr 12 17:47:37 2018 +0900

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/113af12c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index 96fca24..10b0012 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -354,7 +354,7 @@ public class DFSck extends Configured implements Tool {
     BufferedReader input = new BufferedReader(new InputStreamReader(
         stream, "UTF-8"));
     String line = null;
-    String lastLine = null;
+    String lastLine = NamenodeFsck.CORRUPT_STATUS;
     int errCode = -1;
     try {
       while ((line = input.readLine()) != null) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-7101. Potential null dereference in DFSck#doWork(). Contributed by skrho and Ted Yu.

2018-04-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 2d82a21b8 -> 5f4762d95


HDFS-7101. Potential null dereference in DFSck#doWork(). Contributed by skrho 
and Ted Yu.

(cherry picked from commit 113af12cfb240ea9a7189bb2701693466eb8e993)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f4762d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f4762d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f4762d9

Branch: refs/heads/branch-3.1
Commit: 5f4762d9505f31e2d7cb6ce702b97f779308f53f
Parents: 2d82a21
Author: Akira Ajisaka 
Authored: Thu Apr 12 17:47:37 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Apr 12 17:48:14 2018 +0900

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f4762d9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index 96fca24..10b0012 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -354,7 +354,7 @@ public class DFSck extends Configured implements Tool {
     BufferedReader input = new BufferedReader(new InputStreamReader(
         stream, "UTF-8"));
     String line = null;
-    String lastLine = null;
+    String lastLine = NamenodeFsck.CORRUPT_STATUS;
     int errCode = -1;
     try {
       while ((line = input.readLine()) != null) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15350. [JDK10] Update maven plugin tools to fix compile error in hadoop-maven-plugins module

2018-04-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk c7cd362af -> 832852ce4


HADOOP-15350. [JDK10] Update maven plugin tools to fix compile error in 
hadoop-maven-plugins module

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/832852ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/832852ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/832852ce

Branch: refs/heads/trunk
Commit: 832852ce4ff00d4aa698e89e1df39e5bf0df78b9
Parents: c7cd362
Author: Takanobu Asanuma 
Authored: Thu Apr 12 17:19:35 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Apr 12 17:19:35 2018 +0900

--
 hadoop-maven-plugins/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/832852ce/hadoop-maven-plugins/pom.xml
--
diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml
index bd347d6..b31d158 100644
--- a/hadoop-maven-plugins/pom.xml
+++ b/hadoop-maven-plugins/pom.xml
@@ -27,7 +27,7 @@
   Apache Hadoop Maven Plugins
   
 3.0
-3.4
+3.5.1
   
   
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13425. Ozone: Clean-up of ozone related change from hadoop-common-project. Contributed by Lokesh Jain.

2018-04-12 Thread msingh
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 ea85801ce -> 40398d357


HDFS-13425. Ozone: Clean-up of ozone related change from hadoop-common-project. 
Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40398d35
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40398d35
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40398d35

Branch: refs/heads/HDFS-7240
Commit: 40398d357b97ce26d0b347ad7d78df3188eab44a
Parents: ea85801
Author: Mukul Kumar Singh 
Authored: Thu Apr 12 13:46:52 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Thu Apr 12 13:46:52 2018 +0530

--
 .../java/org/apache/hadoop/fs/FileUtil.java |  67 +--
 .../main/java/org/apache/hadoop/ipc/RPC.java|   1 +
 .../main/java/org/apache/hadoop/util/Time.java  |   9 --
 .../hadoop/util/concurrent/HadoopExecutors.java |  10 --
 .../org/apache/hadoop/hdds/scm/TestArchive.java | 114 ---
 .../replication/ContainerSupervisor.java|  11 +-
 6 files changed, 12 insertions(+), 200 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40398d35/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 0e349d3..8743be5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -38,7 +38,6 @@ import java.nio.file.FileSystems;
 import java.nio.file.Files;
 import java.util.ArrayList;
 import java.util.Enumeration;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ExecutionException;
@@ -48,19 +47,14 @@ import java.util.concurrent.Future;
 import java.util.jar.Attributes;
 import java.util.jar.JarOutputStream;
 import java.util.jar.Manifest;
-import java.util.zip.CRC32;
-import java.util.zip.CheckedOutputStream;
 import java.util.zip.GZIPInputStream;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipFile;
 import java.util.zip.ZipInputStream;
-import java.util.zip.ZipOutputStream;
 
-import com.google.common.base.Preconditions;
 import org.apache.commons.collections.map.CaseInsensitiveMap;
 import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
-import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -75,7 +69,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * A collection of file-processing util methods.
+ * A collection of file-processing util methods
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
@@ -613,65 +607,6 @@ public class FileUtil {
   }
 
   /**
-   * creates zip archieve of the source dir and writes a zip file.
-   *
-   * @param sourceDir - The directory to zip.
-   * @param archiveName - The destination file, the parent directory is assumed
-   * to exist.
-   * @return Checksum of the Archive.
-   * @throws IOException - Throws if zipFileName already exists or if the
-   * sourceDir does not exist.
-   */
-  public static Long zip(File sourceDir, File archiveName) throws IOException {
-    Preconditions.checkNotNull(sourceDir, "source directory cannot be null");
-    Preconditions.checkState(sourceDir.exists(), "source directory must " +
-        "exist");
-
-    Preconditions.checkNotNull(archiveName, "Destination file cannot be null");
-    Preconditions.checkNotNull(archiveName.getParent(), "Destination " +
-        "directory cannot be null");
-    Preconditions.checkState(new File(archiveName.getParent()).exists(),
-        "Destination directory must exist");
-    Preconditions.checkState(!archiveName.exists(), "Destination file " +
-        "already exists. Refusing to overwrite existing file.");
-
-    CheckedOutputStream checksum;
-    try (FileOutputStream outputStream =
-         new FileOutputStream(archiveName)) {
-      checksum = new CheckedOutputStream(outputStream, new CRC32());
-      byte[] data = new byte[BUFFER_SIZE];
-      try (ZipOutputStream out =
-           new ZipOutputStream(new BufferedOutputStream(checksum))) {
-
-        Iterator<File> fileIter = FileUtils.iterateFiles(sourceDir, null, true);
-        while (fileIter.hasNext()) {
-          File file = fileIter.next();
-          LOG.debug("Compressing file : " + file.getPath());
-
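For reference, the zip helper removed above threads a CheckedOutputStream under the ZipOutputStream so it can hand the archive's CRC32 back to the caller. A compact, self-contained sketch of that checksum-while-zipping pattern (a flat file array and a fixed buffer size instead of the original's recursive directory walk):

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.zip.CRC32;
import java.util.zip.CheckedOutputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

// Sketch: zip a set of files and return the archive's CRC32, computed
// by placing a CheckedOutputStream under the ZipOutputStream.
final class ZipWithChecksum {
  static long zip(File[] files, File archive) throws IOException {
    CheckedOutputStream checksum;
    try (FileOutputStream fos = new FileOutputStream(archive)) {
      checksum = new CheckedOutputStream(fos, new CRC32());
      try (ZipOutputStream out =
               new ZipOutputStream(new BufferedOutputStream(checksum))) {
        byte[] data = new byte[4096];
        for (File file : files) {
          out.putNextEntry(new ZipEntry(file.getName()));
          try (FileInputStream in = new FileInputStream(file)) {
            int count;
            while ((count = in.read(data)) != -1) {
              out.write(data, 0, count);  // bytes flow through the checksum
            }
          }
          out.closeEntry();
        }
      }
    }
    return checksum.getChecksum().getValue();  // CRC32 of the written archive
  }
}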