[hadoop] branch trunk updated: HDDS-918. Expose SCMMXBean as a MetricsSource. Contributed by Siddharth Wagle.

2019-03-22 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 43e421a  HDDS-918. Expose SCMMXBean as a MetricsSource. Contributed by 
Siddharth Wagle.
43e421a is described below

commit 43e421afef48ef8130184882607d9eea36eb1367
Author: Bharat Viswanadham 
AuthorDate: Fri Mar 22 16:39:39 2019 -0700

HDDS-918. Expose SCMMXBean as a MetricsSource. Contributed by Siddharth 
Wagle.
---
 .../hdds/scm/container/ContainerManager.java   |  8 ++
 .../hdds/scm/container/ContainerStateManager.java  | 60 +--
 .../hdds/scm/container/SCMContainerManager.java| 10 +++
 .../hdds/scm/server/SCMContainerMetrics.java   | 87 ++
 .../hdds/scm/server/StorageContainerManager.java   | 18 -
 .../hdds/scm/server/TestSCMContainerMetrics.java   | 81 
 6 files changed, 237 insertions(+), 27 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
index 717d58d..02b1353 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
@@ -57,6 +57,14 @@ public interface ContainerManager extends Closeable {
   List getContainers(HddsProtos.LifeCycleState state);
 
   /**
+   * Returns the number of containers in the given
+   *  {@link org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState}.
+   *
+   * @return Number of containers
+   */
+  Integer getContainerCountByState(HddsProtos.LifeCycleState state);
+
+  /**
* Returns the ContainerInfo from the container ID.
*
* @param containerID - ID of container.
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index 423ac78..4af8678 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -17,41 +17,41 @@
 
 package org.apache.hadoop.hdds.scm.container;
 
-import com.google.common.base.Preconditions;
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
+.FAILED_TO_CHANGE_CONTAINER_STATE;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.container.states.ContainerState;
 import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.ozone.common.statemachine
 .InvalidStateTransitionException;
 import org.apache.hadoop.ozone.common.statemachine.StateMachine;
 import org.apache.hadoop.util.Time;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicLong;
-
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-.FAILED_TO_CHANGE_CONTAINER_STATE;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.AtomicLongMap;
 
 /**
  * A container state manager keeps 

[hadoop] branch trunk updated: Revert "HDDS-1310. In datanode once a container becomes unhealthy, datanode restart fails. Contributed by Sandeep Nemuri."

2019-03-22 Thread ajay
This is an automated email from the ASF dual-hosted git repository.

ajay pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 771ea6b  Revert "HDDS-1310. In datanode once a container becomes 
unhealthy, datanode restart fails. Contributed by  Sandeep Nemuri."
771ea6b is described below

commit 771ea6b5e788c0baf9fbc1e5de08e2dc21747c2a
Author: Ajay Kumar 
AuthorDate: Fri Mar 22 16:20:44 2019 -0700

Revert "HDDS-1310. In datanode once a container becomes unhealthy, datanode 
restart fails. Contributed by  Sandeep Nemuri."

This reverts commit efad5717ec1facbbe8a5a2c7adcaa47d5c1592ac.
---
 .../hadoop/ozone/container/keyvalue/KeyValueContainer.java  |  3 ---
 .../ozone/container/keyvalue/TestKeyValueContainer.java | 13 -
 2 files changed, 16 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 26b0ce1..47af110 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -623,9 +623,6 @@ public class KeyValueContainer implements 
Container {
 case CLOSED:
   state = ContainerReplicaProto.State.CLOSED;
   break;
-case UNHEALTHY:
-  state = ContainerReplicaProto.State.UNHEALTHY;
-  break;
 default:
   throw new StorageContainerException("Invalid Container state found: " +
   containerData.getContainerID(), INVALID_CONTAINER_STATE);
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 1aa7361..c7c08b0 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -333,19 +333,6 @@ public class TestKeyValueContainer {
   }
 
   @Test
-  public void testReportOfUnhealthyContainer() throws Exception {
-keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-Assert.assertNotNull(keyValueContainer.getContainerReport());
-keyValueContainer.markContainerUnhealthy();
-File containerFile = keyValueContainer.getContainerFile();
-keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
-.readContainerFile(containerFile);
-assertEquals(ContainerProtos.ContainerDataProto.State.UNHEALTHY,
-keyValueContainerData.getState());
-Assert.assertNotNull(keyValueContainer.getContainerReport());
-  }
-
-  @Test
   public void testUpdateContainer() throws IOException {
 keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
 Map metadata = new HashMap<>();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1310. In datanode once a container becomes unhealthy, datanode restart fails. Contributed by Sandeep Nemuri.

2019-03-22 Thread ajay
This is an automated email from the ASF dual-hosted git repository.

ajay pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new efad571  HDDS-1310. In datanode once a container becomes unhealthy, 
datanode restart fails. Contributed by  Sandeep Nemuri.
efad571 is described below

commit efad5717ec1facbbe8a5a2c7adcaa47d5c1592ac
Author: Ajay Kumar 
AuthorDate: Fri Mar 22 15:53:12 2019 -0700

HDDS-1310. In datanode once a container becomes unhealthy, datanode restart 
fails. Contributed by  Sandeep Nemuri.
---
 .../hadoop/ozone/container/keyvalue/KeyValueContainer.java  |  3 +++
 .../ozone/container/keyvalue/TestKeyValueContainer.java | 13 +
 2 files changed, 16 insertions(+)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 47af110..26b0ce1 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -623,6 +623,9 @@ public class KeyValueContainer implements 
Container {
 case CLOSED:
   state = ContainerReplicaProto.State.CLOSED;
   break;
+case UNHEALTHY:
+  state = ContainerReplicaProto.State.UNHEALTHY;
+  break;
 default:
   throw new StorageContainerException("Invalid Container state found: " +
   containerData.getContainerID(), INVALID_CONTAINER_STATE);
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index c7c08b0..1aa7361 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -333,6 +333,19 @@ public class TestKeyValueContainer {
   }
 
   @Test
+  public void testReportOfUnhealthyContainer() throws Exception {
+keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
+Assert.assertNotNull(keyValueContainer.getContainerReport());
+keyValueContainer.markContainerUnhealthy();
+File containerFile = keyValueContainer.getContainerFile();
+keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
+.readContainerFile(containerFile);
+assertEquals(ContainerProtos.ContainerDataProto.State.UNHEALTHY,
+keyValueContainerData.getState());
+Assert.assertNotNull(keyValueContainer.getContainerReport());
+  }
+
+  @Test
   public void testUpdateContainer() throws IOException {
 keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
 Map metadata = new HashMap<>();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4 updated: HDDS-1302. Fix SCM CLI does not list container with id 1.

2019-03-22 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 6b3e0b3  HDDS-1302. Fix SCM CLI does not list container with id 1.
6b3e0b3 is described below

commit 6b3e0b3edce70fb008282a8ae1ea9ee48297f087
Author: Vivek Ratnavel Subramanian 
AuthorDate: Fri Mar 22 15:15:01 2019 -0700

HDDS-1302. Fix SCM CLI does not list container with id 1.

(cherry picked from commit 73f7b04e2b8f9a4b06a1e5b5c62eadd074555205)
---
 .../org/apache/hadoop/hdds/scm/container/SCMContainerManager.java  | 2 +-
 .../org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java | 7 ++-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index 2615289..374772d 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -190,7 +190,7 @@ public class SCMContainerManager implements 
ContainerManager {
   Collections.sort(containersIds);
 
   return containersIds.stream()
-  .filter(id -> id.getId() >= startId)
+  .filter(id -> id.getId() > startId)
   .limit(count)
   .map(id -> {
 try {
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 1024fa3..5196603 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -296,8 +296,13 @@ public class SCMClientProtocolServer implements
 auditMap.put("startContainerID", String.valueOf(startContainerID));
 auditMap.put("count", String.valueOf(count));
 try {
+  // To allow startContainerID to take the value "0",
+  // "null" is assigned, so that it's handled in the
+  // scm.getContainerManager().listContainer method
+  final ContainerID containerId = startContainerID != 0 ? ContainerID
+  .valueof(startContainerID) : null;
   return scm.getContainerManager().
-  listContainer(ContainerID.valueof(startContainerID), count);
+  listContainer(containerId, count);
 } catch (Exception ex) {
   auditSuccess = false;
   AUDIT.logReadFailure(


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1302. Fix SCM CLI does not list container with id 1.

2019-03-22 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 73f7b04  HDDS-1302. Fix SCM CLI does not list container with id 1.
73f7b04 is described below

commit 73f7b04e2b8f9a4b06a1e5b5c62eadd074555205
Author: Vivek Ratnavel Subramanian 
AuthorDate: Fri Mar 22 15:15:01 2019 -0700

HDDS-1302. Fix SCM CLI does not list container with id 1.
---
 .../org/apache/hadoop/hdds/scm/container/SCMContainerManager.java  | 2 +-
 .../org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java | 7 ++-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index 6dd1949..728ac52 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -200,7 +200,7 @@ public class SCMContainerManager implements 
ContainerManager {
   Collections.sort(containersIds);
 
   return containersIds.stream()
-  .filter(id -> id.getId() >= startId)
+  .filter(id -> id.getId() > startId)
   .limit(count)
   .map(id -> {
 try {
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index e85da54..8330f70 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -294,8 +294,13 @@ public class SCMClientProtocolServer implements
 auditMap.put("startContainerID", String.valueOf(startContainerID));
 auditMap.put("count", String.valueOf(count));
 try {
+  // To allow startContainerID to take the value "0",
+  // "null" is assigned, so that it's handled in the
+  // scm.getContainerManager().listContainer method
+  final ContainerID containerId = startContainerID != 0 ? ContainerID
+  .valueof(startContainerID) : null;
   return scm.getContainerManager().
-  listContainer(ContainerID.valueof(startContainerID), count);
+  listContainer(containerId, count);
 } catch (Exception ex) {
   auditSuccess = false;
   AUDIT.logReadFailure(


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4 updated: HDDS-1291. Set OmKeyArgs#refreshPipeline flag properly to avoid reading from stale pipeline. Contributed by Xiaoyu Yao. (#639)

2019-03-22 Thread ajay
This is an automated email from the ASF dual-hosted git repository.

ajay pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 9b989c4  HDDS-1291. Set OmKeyArgs#refreshPipeline flag properly to 
avoid reading from stale pipeline. Contributed by Xiaoyu Yao. (#639)
9b989c4 is described below

commit 9b989c49c9c919f91a8c1ecbfec63a7d511b465a
Author: Xiaoyu Yao 
AuthorDate: Fri Mar 22 15:08:28 2019 -0700

HDDS-1291. Set OmKeyArgs#refreshPipeline flag properly to avoid reading 
from stale pipeline. Contributed by Xiaoyu Yao. (#639)


(cherry picked from commit dea6f2a065271d2e37eda05bbcb98fb1bb5ed2a6)
---
 .../main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java   | 2 ++
 .../apache/hadoop/ozone/TestStorageContainerManagerHelper.java| 1 +
 .../test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java   | 1 +
 .../ozone/client/rpc/TestCloseContainerHandlingByClient.java  | 7 +++
 .../hadoop/ozone/client/rpc/TestFailureHandlingByClient.java  | 7 +++
 .../apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java | 1 +
 .../hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java   | 5 +++--
 .../java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java  | 2 +-
 .../apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java  | 1 +
 .../common/statemachine/commandhandler/TestBlockDeletion.java | 4 +++-
 .../statemachine/commandhandler/TestCloseContainerByPipeline.java | 8 +---
 .../statemachine/commandhandler/TestCloseContainerHandler.java| 4 +++-
 .../statemachine/commandhandler/TestDeleteContainerHandler.java   | 4 +++-
 .../org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java   | 1 +
 .../java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java| 2 ++
 .../hadoop/ozone/web/storage/DistributedStorageHandler.java   | 3 +++
 .../hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java   | 2 ++
 17 files changed, 46 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 6723bfe..d3c8f68 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -664,6 +664,7 @@ public class RpcClient implements ClientProtocol {
 .setVolumeName(volumeName)
 .setBucketName(bucketName)
 .setKeyName(keyName)
+.setRefreshPipeline(true)
 .build();
 OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
 LengthInputStream lengthInputStream =
@@ -739,6 +740,7 @@ public class RpcClient implements ClientProtocol {
 .setVolumeName(volumeName)
 .setBucketName(bucketName)
 .setKeyName(keyName)
+.setRefreshPipeline(true)
 .build();
 OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
 
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index c52490f..5b55119 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -107,6 +107,7 @@ public class TestStorageContainerManagerHelper {
   .setVolumeName(volume)
   .setBucketName(bucket)
   .setKeyName(key)
+  .setRefreshPipeline(true)
   .build();
   OmKeyInfo location = cluster.getOzoneManager()
   .lookupKey(arg);
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
index 78023ef..623b11d 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
@@ -117,6 +117,7 @@ public class TestBCSID {
 OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).
 setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
 .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName("ratis")
+.setRefreshPipeline(true)
 .build();
 OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
 List keyLocationInfos =
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
 

[hadoop] branch trunk updated: HDDS-1291. Set OmKeyArgs#refreshPipeline flag properly to avoid reading from stale pipeline. Contributed by Xiaoyu Yao. (#639)

2019-03-22 Thread ajay
This is an automated email from the ASF dual-hosted git repository.

ajay pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new dea6f2a  HDDS-1291. Set OmKeyArgs#refreshPipeline flag properly to 
avoid reading from stale pipeline. Contributed by Xiaoyu Yao. (#639)
dea6f2a is described below

commit dea6f2a065271d2e37eda05bbcb98fb1bb5ed2a6
Author: Xiaoyu Yao 
AuthorDate: Fri Mar 22 15:08:28 2019 -0700

HDDS-1291. Set OmKeyArgs#refreshPipeline flag properly to avoid reading 
from stale pipeline. Contributed by Xiaoyu Yao. (#639)
---
 .../main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java   | 2 ++
 .../apache/hadoop/ozone/TestStorageContainerManagerHelper.java| 1 +
 .../test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java   | 1 +
 .../ozone/client/rpc/TestCloseContainerHandlingByClient.java  | 7 +++
 .../hadoop/ozone/client/rpc/TestFailureHandlingByClient.java  | 7 +++
 .../apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java | 1 +
 .../hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java   | 5 +++--
 .../java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java  | 2 +-
 .../apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java  | 1 +
 .../common/statemachine/commandhandler/TestBlockDeletion.java | 4 +++-
 .../statemachine/commandhandler/TestCloseContainerByPipeline.java | 8 +---
 .../statemachine/commandhandler/TestCloseContainerHandler.java| 4 +++-
 .../statemachine/commandhandler/TestDeleteContainerHandler.java   | 4 +++-
 .../org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java   | 1 +
 .../java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java| 2 ++
 .../hadoop/ozone/web/storage/DistributedStorageHandler.java   | 3 +++
 .../hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java   | 2 ++
 17 files changed, 46 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index d8fa468..5bc6d15 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -664,6 +664,7 @@ public class RpcClient implements ClientProtocol {
 .setVolumeName(volumeName)
 .setBucketName(bucketName)
 .setKeyName(keyName)
+.setRefreshPipeline(true)
 .build();
 OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
 LengthInputStream lengthInputStream =
@@ -739,6 +740,7 @@ public class RpcClient implements ClientProtocol {
 .setVolumeName(volumeName)
 .setBucketName(bucketName)
 .setKeyName(keyName)
+.setRefreshPipeline(true)
 .build();
 OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
 
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index c52490f..5b55119 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -107,6 +107,7 @@ public class TestStorageContainerManagerHelper {
   .setVolumeName(volume)
   .setBucketName(bucket)
   .setKeyName(key)
+  .setRefreshPipeline(true)
   .build();
   OmKeyInfo location = cluster.getOzoneManager()
   .lookupKey(arg);
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
index 78023ef..623b11d 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
@@ -117,6 +117,7 @@ public class TestBCSID {
 OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).
 setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
 .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName("ratis")
+.setRefreshPipeline(true)
 .build();
 OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
 List keyLocationInfos =
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index 396351d..3124e77 100644
--- 

[hadoop] branch trunk updated: HDDS-1205. Refactor ReplicationManager to handle QUASI_CLOSED containers. Contributed by Nanda kumar. (#620)

2019-03-22 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f854a89  HDDS-1205. Refactor ReplicationManager to handle QUASI_CLOSED 
containers. Contributed by Nanda kumar. (#620)
f854a89 is described below

commit f854a89190bd2453ccb1bfaa123d63d546e913cd
Author: Arpit Agarwal 
AuthorDate: Fri Mar 22 14:36:29 2019 -0700

HDDS-1205. Refactor ReplicationManager to handle QUASI_CLOSED containers. 
Contributed by Nanda kumar. (#620)
---
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |  12 +
 .../common/src/main/resources/ozone-default.xml|  20 +
 .../hdds/scm/container/ContainerManager.java   |   8 +
 .../hdds/scm/container/ReplicationManager.java | 748 +
 .../hdds/scm/container/SCMContainerManager.java|  10 +
 .../scm/container/states/ContainerStateMap.java|   9 +-
 .../java/org/apache/hadoop/hdds/scm/TestUtils.java |  53 +-
 .../scm/container/TestContainerReportHandler.java  |   8 +-
 .../scm/container/TestContainerReportHelper.java   |  40 --
 .../TestIncrementalContainerReportHandler.java |   6 +-
 .../hdds/scm/container/TestReplicationManager.java | 625 +
 11 files changed, 1485 insertions(+), 54 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 4e197d3..3b45b89 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -348,6 +348,18 @@ public final class ScmConfigKeys {
   public static final String HDDS_SCM_WATCHER_TIMEOUT_DEFAULT =
   "10m";
 
+  public static final String HDDS_SCM_REPLICATION_THREAD_INTERVAL =
+  "hdds.scm.replication.thread.interval";
+
+  public static final String HDDS_SCM_REPLICATION_THREAD_INTERVAL_DEFAULT =
+  "5m";
+
+  public static final String HDDS_SCM_REPLICATION_EVENT_TIMEOUT =
+  "hdds.scm.replication.event.timeout";
+
+  public static final String HDDS_SCM_REPLICATION_EVENT_TIMEOUT_DEFAULT =
+  "10m";
+
   public static final String
   HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY =
   "hdds.scm.http.kerberos.principal";
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 462a07b..9fd4ef3 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2357,4 +2357,24 @@
   Request to flush the OM DB before taking checkpoint snapshot.
 
   
+  
+hdds.scm.replication.thread.interval
+5m
+OZONE, SCM
+
+  There is a replication monitor thread running inside SCM which
+  takes care of replicating the containers in the cluster. This
+  property is used to configure the interval in which that thread
+  runs.
+
+  
+  
+hdds.scm.replication.event.timeout
+10m
+OZONE, SCM
+
+  Timeout for the container replication/deletion commands sent
+  to datanodes. After this timeout the command will be retried.
+
+  
 
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
index b2fe4b4..717d58d 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
@@ -34,6 +34,14 @@ import java.util.Set;
  */
 public interface ContainerManager extends Closeable {
 
+
+  /**
+   * Returns all the container Ids managed by ContainerManager.
+   *
+   * @return Set of ContainerID
+   */
+  Set getContainerIDs();
+
   /**
* Returns all the containers managed by ContainerManager.
*
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
new file mode 100644
index 000..97c600b
--- /dev/null
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
@@ -0,0 +1,748 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * 

[hadoop] branch trunk updated: YARN-9404. TestApplicationLifetimeMonitor#testApplicationLifetimeMonitor fails intermittent. Contributed by Prabhu Joseph.

2019-03-22 Thread gifuma
This is an automated email from the ASF dual-hosted git repository.

gifuma pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 509b20b  YARN-9404. 
TestApplicationLifetimeMonitor#testApplicationLifetimeMonitor fails 
intermittent. Contributed by Prabhu Joseph.
509b20b is described below

commit 509b20b292465ea0c8a2a0908995421e29e71da4
Author: Giovanni Matteo Fumarola 
AuthorDate: Fri Mar 22 11:45:39 2019 -0700

YARN-9404. TestApplicationLifetimeMonitor#testApplicationLifetimeMonitor 
fails intermittent. Contributed by Prabhu Joseph.
---
 .../resourcemanager/rmapp/TestApplicationLifetimeMonitor.java  | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
index 26499a7..8d40936 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
@@ -204,13 +204,12 @@ public class TestApplicationLifetimeMonitor {
 // app4 submitted exceeding queue max lifetime,
 // so killed after queue max lifetime.
 rm.waitForState(app4.getApplicationId(), RMAppState.KILLED);
-long totalTimeRun =
-(app4.getFinishTime() - app4.getSubmitTime()) / 1000;
+long totalTimeRun = app4.getFinishTime() - app4.getSubmitTime();
 Assert.assertTrue("Application killed before lifetime value",
-totalTimeRun > maxLifetime);
+totalTimeRun > (maxLifetime * 1000));
 Assert.assertTrue(
 "Application killed before lifetime value " + totalTimeRun,
-totalTimeRun < maxLifetime + 10L);
+totalTimeRun < ((maxLifetime + 10L) * 1000));
   }
 } finally {
   stopRM(rm);


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch ozone-0.4 updated: HDDS-1323. Ignore unit test TestFailureHandlingByClient. Contributed by Supratim Deka.

2019-03-22 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 6d73e7b  HDDS-1323. Ignore unit test TestFailureHandlingByClient. 
Contributed by Supratim Deka.
6d73e7b is described below

commit 6d73e7bdf432fadf5a46dad8a249ded656b3a1aa
Author: Arpit Agarwal 
AuthorDate: Fri Mar 22 11:29:56 2019 -0700

HDDS-1323. Ignore unit test TestFailureHandlingByClient. Contributed by 
Supratim Deka.

(cherry picked from commit 1d389ecb24482c2c4b41df898e8f9bc937cc524d)
---
 .../apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java| 3 +++
 1 file changed, 3 insertions(+)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index 092c56f..aaf238b 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.junit.Assert;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import java.io.IOException;
@@ -51,7 +52,9 @@ import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTER
 
 /**
  * Tests Close Container Exception handling by Ozone Client.
+ * XXX Disabled [HDDS-1323]
  */
+@Ignore
 public class TestFailureHandlingByClient {
 
   private MiniOzoneCluster cluster;


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1323. Ignore unit test TestFailureHandlingByClient. Contributed by Supratim Deka.

2019-03-22 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1d389ec  HDDS-1323. Ignore unit test TestFailureHandlingByClient. 
Contributed by Supratim Deka.
1d389ec is described below

commit 1d389ecb24482c2c4b41df898e8f9bc937cc524d
Author: Arpit Agarwal 
AuthorDate: Fri Mar 22 11:29:56 2019 -0700

HDDS-1323. Ignore unit test TestFailureHandlingByClient. Contributed by 
Supratim Deka.
---
 .../apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java| 3 +++
 1 file changed, 3 insertions(+)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index 092c56f..aaf238b 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.junit.Assert;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import java.io.IOException;
@@ -51,7 +52,9 @@ import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTER
 
 /**
  * Tests Close Container Exception handling by Ozone Client.
+ * XXX Disabled [HDDS-1323]
  */
+@Ignore
 public class TestFailureHandlingByClient {
 
   private MiniOzoneCluster cluster;


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-16201: S3AFileSystem#innerMkdirs builds needless lists (#636)

2019-03-22 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ae2eb2d  HADOOP-16201: S3AFileSystem#innerMkdirs builds needless lists 
(#636)
ae2eb2d is described below

commit ae2eb2dd4261e5fef964384efd168d9867eb94af
Author: Lokesh Jain 
AuthorDate: Fri Mar 22 17:12:00 2019 +0530

HADOOP-16201: S3AFileSystem#innerMkdirs builds needless lists (#636)
---
 .../main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java| 12 
 1 file changed, 12 deletions(-)

diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 1f560d0..f4c3d50 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -2295,10 +2295,6 @@ public class S3AFileSystem extends FileSystem implements 
StreamCapabilities,
 LOG.debug("Making directory: {}", f);
 entryPoint(INVOCATION_MKDIRS);
 FileStatus fileStatus;
-List metadataStoreDirs = null;
-if (hasMetadataStore()) {
-  metadataStoreDirs = new ArrayList<>();
-}
 
 try {
   fileStatus = getFileStatus(f);
@@ -2311,9 +2307,6 @@ public class S3AFileSystem extends FileSystem implements 
StreamCapabilities,
 } catch (FileNotFoundException e) {
   // Walk path to root, ensuring closest ancestor is a directory, not file
   Path fPart = f.getParent();
-  if (metadataStoreDirs != null) {
-metadataStoreDirs.add(f);
-  }
   while (fPart != null) {
 try {
   fileStatus = getFileStatus(fPart);
@@ -2327,11 +2320,6 @@ public class S3AFileSystem extends FileSystem implements 
StreamCapabilities,
   }
 } catch (FileNotFoundException fnfe) {
   instrumentation.errorIgnored();
-  // We create all missing directories in MetadataStore; it does not
-  // infer directories exist by prefix like S3.
-  if (metadataStoreDirs != null) {
-metadataStoreDirs.add(fPart);
-  }
 }
 fPart = fPart.getParent();
   }


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9358. Add javadoc to new methods introduced in FSQueueMetrics with YARN-9322 (Contributed by Zoltan Siegl via Daniel Templeton)

2019-03-22 Thread templedf
This is an automated email from the ASF dual-hosted git repository.

templedf pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ce5eb9c  YARN-9358. Add javadoc to new methods introduced in 
FSQueueMetrics with YARN-9322 (Contributed by Zoltan Siegl via Daniel Templeton)
ce5eb9c is described below

commit ce5eb9cb2e04baf2e94fdc7dcdb57d0404cf6e76
Author: Zoltan Siegl 
AuthorDate: Fri Mar 22 11:23:50 2019 +0100

YARN-9358. Add javadoc to new methods introduced in FSQueueMetrics with 
YARN-9322
(Contributed by Zoltan Siegl via Daniel Templeton)

Change-Id: I92d52c0ca630e71afb26b2b7587cbdbe79254a05
---
 .../scheduler/fair/FSQueueMetrics.java | 69 +-
 1 file changed, 67 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
index d0ddd42..5fa84f0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
@@ -52,6 +52,15 @@ public class FSQueueMetrics extends QueueMetrics {
   private final FSQueueMetricsForCustomResources customResources;
   private String schedulingPolicy;
 
+  /**
+   * Constructor for {@link FairScheduler} queue metrics data object.
+   *
+   * @param ms the MetricSystem to register with
+   * @param queueName the queue name
+   * @param parent the parent {@link Queue}
+   * @param enableUserMetrics store metrics on user level
+   * @param conf the {@link Configuration} object to build buckets upon
+   */
   FSQueueMetrics(MetricsSystem ms, String queueName, Queue parent,
   boolean enableUserMetrics, Configuration conf) {
 super(ms, queueName, parent, enableUserMetrics, conf);
@@ -72,6 +81,11 @@ public class FSQueueMetrics extends QueueMetrics {
 return fairShareVCores.value();
   }
 
+  /**
+   * Get instantaneous fair share of the queue.
+   *
+   * @return the returned {@link Resource} also contains custom resource types
+   */
   public Resource getFairShare() {
 if (customResources != null) {
   return Resource.newInstance(fairShareMB.value(),
@@ -82,6 +96,12 @@ public class FSQueueMetrics extends QueueMetrics {
 (int) fairShareVCores.value());
   }
 
+  /**
+   * Set instantaneous fair share of the queue.
+   *
+   * @param resource the passed {@link Resource} object may also contain custom
+   * resource types
+   */
   public void setFairShare(Resource resource) {
 fairShareMB.set(resource.getMemorySize());
 fairShareVCores.set(resource.getVirtualCores());
@@ -98,6 +118,11 @@ public class FSQueueMetrics extends QueueMetrics {
 return steadyFairShareVCores.value();
   }
 
+  /**
+   * Get steady fair share for queue.
+   *
+   * @return the returned {@link Resource} also contains custom resource types
+   */
   public Resource getSteadyFairShare() {
 if (customResources != null) {
   return Resource.newInstance(steadyFairShareMB.value(),
@@ -108,6 +133,12 @@ public class FSQueueMetrics extends QueueMetrics {
 (int) steadyFairShareVCores.value());
   }
 
+  /**
+   * Set steady fair share for queue.
+   *
+   * @param resource the passed {@link Resource} object may also contain custom
+   * resource types
+   */
   public void setSteadyFairShare(Resource resource) {
 steadyFairShareMB.set(resource.getMemorySize());
 steadyFairShareVCores.set(resource.getVirtualCores());
@@ -124,6 +155,11 @@ public class FSQueueMetrics extends QueueMetrics {
 return minShareVCores.value();
   }
 
+  /**
+   * Get minimum required resource share for queue.
+   *
+   * @return the returned {@link Resource} also contains custom resource types
+   */
   public Resource getMinShare() {
 if (customResources != null) {
   return Resource.newInstance(minShareMB.value(),
@@ -134,6 +170,12 @@ public class FSQueueMetrics extends QueueMetrics {
 (int) minShareVCores.value());
   }
 
+  /**
+   * Set minimum required resource share for queue.
+   *
+   * @param resource the passed {@link Resource} object may also contain custom
+   * resource types
+   */
   public void setMinShare(Resource resource) {
 minShareMB.set(resource.getMemorySize());
 minShareVCores.set(resource.getVirtualCores());
@@ -150,6 +192,11 @@ public class FSQueueMetrics 

[hadoop] branch trunk updated: HADOOP-16147. Allow CopyListing sequence file keys and values to be more easily customized.

2019-03-22 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new faba359  HADOOP-16147. Allow CopyListing sequence file keys and values 
to be more easily customized.
faba359 is described below

commit faba3591d32f2e4808c2faeb9472348d52619c8a
Author: Andrew Olson 
AuthorDate: Fri Mar 22 10:35:30 2019 +

HADOOP-16147. Allow CopyListing sequence file keys and values to be more 
easily customized.

Author:Andrew Olson
---
 .../java/org/apache/hadoop/tools/CopyListing.java  | 23 ++
 .../org/apache/hadoop/tools/SimpleCopyListing.java |  4 ++--
 2 files changed, 25 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
index e018b0b..6f8aa34 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
@@ -249,6 +249,29 @@ public abstract class CopyListing extends Configured {
   }
 
   /**
+   * Returns the key for an entry in the copy listing sequence file.
+   * @param sourcePathRoot the root source path for determining the relative
+   *   target path
+   * @param fileStatus the copy listing file status
+   * @return the key for the sequence file entry
+   */
+  protected Text getFileListingKey(Path sourcePathRoot,
+  CopyListingFileStatus fileStatus) {
+return new Text(DistCpUtils.getRelativePath(sourcePathRoot,
+fileStatus.getPath()));
+  }
+
+  /**
+   * Returns the value for an entry in the copy listing sequence file.
+   * @param fileStatus the copy listing file status
+   * @return the value for the sequence file entry
+   */
+  protected CopyListingFileStatus getFileListingValue(
+  CopyListingFileStatus fileStatus) {
+return fileStatus;
+  }
+
+  /**
* Public Factory method with which the appropriate CopyListing 
implementation may be retrieved.
* @param configuration The input configuration.
* @param credentials Credentials object on which the FS delegation tokens 
are cached
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
index a908e12..7e5a26a 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
@@ -718,8 +718,8 @@ public class SimpleCopyListing extends CopyListing {
   return;
 }
 
-fileListWriter.append(new Text(DistCpUtils.getRelativePath(sourcePathRoot,
-fileStatus.getPath())), fileStatus);
+fileListWriter.append(getFileListingKey(sourcePathRoot, fileStatus),
+getFileListingValue(fileStatus));
 fileListWriter.sync();
 
 if (!fileStatus.isDirectory()) {


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HADOOP-16147. Allow CopyListing sequence file keys and values to be more easily customized.

2019-03-22 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new ade3af6  HADOOP-16147. Allow CopyListing sequence file keys and values 
to be more easily customized.
ade3af6 is described below

commit ade3af6ef233319b0a8efe14d212059a86fe8c23
Author: Andrew Olson 
AuthorDate: Fri Mar 22 10:36:34 2019 +

HADOOP-16147. Allow CopyListing sequence file keys and values to be more 
easily customized.

Author:Andrew Olson
(cherry picked from commit faba3591d32f2e4808c2faeb9472348d52619c8a)
---
 .../java/org/apache/hadoop/tools/CopyListing.java  | 23 ++
 .../org/apache/hadoop/tools/SimpleCopyListing.java |  4 ++--
 2 files changed, 25 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
index e018b0b..6f8aa34 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
@@ -249,6 +249,29 @@ public abstract class CopyListing extends Configured {
   }
 
   /**
+   * Returns the key for an entry in the copy listing sequence file.
+   * @param sourcePathRoot the root source path for determining the relative
+   *   target path
+   * @param fileStatus the copy listing file status
+   * @return the key for the sequence file entry
+   */
+  protected Text getFileListingKey(Path sourcePathRoot,
+  CopyListingFileStatus fileStatus) {
+return new Text(DistCpUtils.getRelativePath(sourcePathRoot,
+fileStatus.getPath()));
+  }
+
+  /**
+   * Returns the value for an entry in the copy listing sequence file.
+   * @param fileStatus the copy listing file status
+   * @return the value for the sequence file entry
+   */
+  protected CopyListingFileStatus getFileListingValue(
+  CopyListingFileStatus fileStatus) {
+return fileStatus;
+  }
+
+  /**
* Public Factory method with which the appropriate CopyListing 
implementation may be retrieved.
* @param configuration The input configuration.
* @param credentials Credentials object on which the FS delegation tokens 
are cached
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
index a908e12..7e5a26a 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
@@ -718,8 +718,8 @@ public class SimpleCopyListing extends CopyListing {
   return;
 }
 
-fileListWriter.append(new Text(DistCpUtils.getRelativePath(sourcePathRoot,
-fileStatus.getPath())), fileStatus);
+fileListWriter.append(getFileListingKey(sourcePathRoot, fileStatus),
+getFileListingValue(fileStatus));
 fileListWriter.sync();
 
 if (!fileStatus.isDirectory()) {


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-16181. HadoopExecutors shutdown Cleanup.

2019-03-22 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d18d085  HADOOP-16181. HadoopExecutors shutdown Cleanup.
d18d085 is described below

commit d18d0859ebfc46a18fd9140b42fb95f1da96380e
Author: David Mollitor 
AuthorDate: Fri Mar 22 10:29:27 2019 +

HADOOP-16181. HadoopExecutors shutdown Cleanup.

Author:David Mollitor 
---
 .../hadoop/util/concurrent/HadoopExecutors.java| 58 ++
 1 file changed, 38 insertions(+), 20 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
index 7a04c30..7c09d93 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
@@ -92,33 +92,51 @@ public final class HadoopExecutors {
   }
 
   /**
-   * Helper routine to shutdown a executorService.
+   * Helper routine to shutdown a {@link ExecutorService}. Will wait up to a
+   * certain timeout for the ExecutorService to gracefully shutdown. If the
+   * ExecutorService did not shutdown and there are still tasks unfinished 
after
+   * the timeout period, the ExecutorService will be notified to forcibly shut
+   * down. Another timeout period will be waited before giving up. So, at most,
+   * a shutdown will be allowed to wait up to twice the timeout value before
+   * giving up.
*
-   * @param executorService - executorService
-   * @param logger  - Logger
-   * @param timeout - Timeout
-   * @param unit- TimeUnits, generally seconds.
+   * @param executorService ExecutorService to shutdown
+   * @param logger Logger
+   * @param timeout the maximum time to wait
+   * @param unit the time unit of the timeout argument
*/
   public static void shutdown(ExecutorService executorService, Logger logger,
   long timeout, TimeUnit unit) {
+
+if (executorService == null) {
+  return;
+}
+
 try {
-  if (executorService != null) {
-executorService.shutdown();
-try {
-  if (!executorService.awaitTermination(timeout, unit)) {
-executorService.shutdownNow();
-  }
-
-  if (!executorService.awaitTermination(timeout, unit)) {
-logger.error("Unable to shutdown properly.");
-  }
-} catch (InterruptedException e) {
-  logger.error("Error attempting to shutdown.", e);
-  executorService.shutdownNow();
-}
+  executorService.shutdown();
+
+  logger.info(
+  "Gracefully shutting down executor service. Waiting max {} {}",
+  timeout, unit);
+  if (!executorService.awaitTermination(timeout, unit)) {
+logger.info(
+"Executor service has not shutdown yet. Forcing. "
++ "Will wait up to an additional {} {} for shutdown",
+timeout, unit);
+executorService.shutdownNow();
+  }
+  if (executorService.awaitTermination(timeout, unit)) {
+logger.info("Succesfully shutdown executor service");
+  } else {
+logger.error("Unable to shutdown executor service after timeout {} {}",
+(2 * timeout), unit);
   }
+} catch (InterruptedException e) {
+  logger.error("Interrupted while attempting to shutdown", e);
+  executorService.shutdownNow();
 } catch (Exception e) {
-  logger.error("Error during shutdown: ", e);
+  logger.warn("Exception closing executor service {}", e.getMessage());
+  logger.debug("Exception closing executor service", e);
   throw e;
 }
   }


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HADOOP-16181. HadoopExecutors shutdown Cleanup.

2019-03-22 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 397b63a  HADOOP-16181. HadoopExecutors shutdown Cleanup.
397b63a is described below

commit 397b63ad0b160b3ea29aa7392202ff42cce46b35
Author: David Mollitor 
AuthorDate: Fri Mar 22 10:30:21 2019 +

HADOOP-16181. HadoopExecutors shutdown Cleanup.

Author:David Mollitor 
(cherry picked from commit d18d0859ebfc46a18fd9140b42fb95f1da96380e)
---
 .../hadoop/util/concurrent/HadoopExecutors.java| 58 ++
 1 file changed, 38 insertions(+), 20 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
index 7a04c30..7c09d93 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
@@ -92,33 +92,51 @@ public final class HadoopExecutors {
   }
 
   /**
-   * Helper routine to shutdown a executorService.
+   * Helper routine to shutdown a {@link ExecutorService}. Will wait up to a
+   * certain timeout for the ExecutorService to gracefully shutdown. If the
+   * ExecutorService did not shutdown and there are still tasks unfinished 
after
+   * the timeout period, the ExecutorService will be notified to forcibly shut
+   * down. Another timeout period will be waited before giving up. So, at most,
+   * a shutdown will be allowed to wait up to twice the timeout value before
+   * giving up.
*
-   * @param executorService - executorService
-   * @param logger  - Logger
-   * @param timeout - Timeout
-   * @param unit- TimeUnits, generally seconds.
+   * @param executorService ExecutorService to shutdown
+   * @param logger Logger
+   * @param timeout the maximum time to wait
+   * @param unit the time unit of the timeout argument
*/
   public static void shutdown(ExecutorService executorService, Logger logger,
   long timeout, TimeUnit unit) {
+
+if (executorService == null) {
+  return;
+}
+
 try {
-  if (executorService != null) {
-executorService.shutdown();
-try {
-  if (!executorService.awaitTermination(timeout, unit)) {
-executorService.shutdownNow();
-  }
-
-  if (!executorService.awaitTermination(timeout, unit)) {
-logger.error("Unable to shutdown properly.");
-  }
-} catch (InterruptedException e) {
-  logger.error("Error attempting to shutdown.", e);
-  executorService.shutdownNow();
-}
+  executorService.shutdown();
+
+  logger.info(
+  "Gracefully shutting down executor service. Waiting max {} {}",
+  timeout, unit);
+  if (!executorService.awaitTermination(timeout, unit)) {
+logger.info(
+"Executor service has not shutdown yet. Forcing. "
++ "Will wait up to an additional {} {} for shutdown",
+timeout, unit);
+executorService.shutdownNow();
+  }
+  if (executorService.awaitTermination(timeout, unit)) {
+logger.info("Succesfully shutdown executor service");
+  } else {
+logger.error("Unable to shutdown executor service after timeout {} {}",
+(2 * timeout), unit);
   }
+} catch (InterruptedException e) {
+  logger.error("Interrupted while attempting to shutdown", e);
+  executorService.shutdownNow();
 } catch (Exception e) {
-  logger.error("Error during shutdown: ", e);
+  logger.warn("Exception closing executor service {}", e.getMessage());
+  logger.debug("Exception closing executor service", e);
   throw e;
 }
   }


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-16196. Path Parameterize Comparable.

2019-03-22 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 246ab77  HADOOP-16196. Path Parameterize Comparable.
246ab77 is described below

commit 246ab77f281614b2206bfb1dae70a8dec62147a7
Author: David Mollitor 
AuthorDate: Fri Mar 22 10:26:24 2019 +

HADOOP-16196. Path Parameterize Comparable.

Author:David Mollitor 
---
 .../src/main/java/org/apache/hadoop/fs/Path.java| 10 +-
 .../hadoop/mapreduce/task/reduce/MergeManagerImpl.java  | 13 ++---
 2 files changed, 11 insertions(+), 12 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
index 7672c99..c5d7bfc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
@@ -40,7 +40,8 @@ import org.apache.hadoop.conf.Configuration;
 @Stringable
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class Path implements Comparable, Serializable, ObjectInputValidation {
+public class Path
+implements Comparable, Serializable, ObjectInputValidation {
 
   /**
* The directory separator, a slash.
@@ -490,11 +491,10 @@ public class Path implements Comparable, Serializable, 
ObjectInputValidation {
   }
 
   @Override
-  public int compareTo(Object o) {
-Path that = (Path)o;
-return this.uri.compareTo(that.uri);
+  public int compareTo(Path o) {
+return this.uri.compareTo(o.uri);
   }
-  
+
   /**
* Returns the number of elements in this path.
* @return the number of elements in this path
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
index f01b24c..58c2686 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
@@ -860,16 +860,15 @@ public class MergeManagerImpl implements 
MergeManager<K, V> {
 }
 
 @Override
-public int compareTo(Object obj) {
-  if(obj instanceof CompressAwarePath) {
+public int compareTo(Path obj) {
+  if (obj instanceof CompressAwarePath) {
 CompressAwarePath compPath = (CompressAwarePath) obj;
-if(this.compressedSize < compPath.getCompressedSize()) {
-  return -1;
-} else if (this.getCompressedSize() > compPath.getCompressedSize()) {
-  return 1;
-}
+int c = Long.compare(this.compressedSize, compPath.compressedSize);
 // Not returning 0 here so that objects with the same size (but
 // different paths) are still added to the TreeSet.
+if (c != 0) {
+  return c;
+}
   }
   return super.compareTo(obj);
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HADOOP-16196. Path Parameterize Comparable.

2019-03-22 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 9a449ac  HADOOP-16196. Path Parameterize Comparable.
9a449ac is described below

commit 9a449ac0758a87ff7028b04afcc226f800571e39
Author: David Mollitor 
AuthorDate: Fri Mar 22 10:27:17 2019 +

HADOOP-16196. Path Parameterize Comparable.

Author: David Mollitor 

(cherry picked from commit 246ab77f281614b2206bfb1dae70a8dec62147a7)
---
 .../src/main/java/org/apache/hadoop/fs/Path.java| 10 +-
 .../hadoop/mapreduce/task/reduce/MergeManagerImpl.java  | 13 ++---
 2 files changed, 11 insertions(+), 12 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
index b6244d6..b48a351 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
@@ -40,7 +40,8 @@ import org.apache.hadoop.conf.Configuration;
 @Stringable
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class Path implements Comparable, Serializable, ObjectInputValidation {
+public class Path
+implements Comparable<Path>, Serializable, ObjectInputValidation {
 
   /**
* The directory separator, a slash.
@@ -490,11 +491,10 @@ public class Path implements Comparable, Serializable, 
ObjectInputValidation {
   }
 
   @Override
-  public int compareTo(Object o) {
-Path that = (Path)o;
-return this.uri.compareTo(that.uri);
+  public int compareTo(Path o) {
+return this.uri.compareTo(o.uri);
   }
-  
+
   /**
* Returns the number of elements in this path.
* @return the number of elements in this path
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
index f01b24c..58c2686 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
@@ -860,16 +860,15 @@ public class MergeManagerImpl implements 
MergeManager<K, V> {
 }
 
 @Override
-public int compareTo(Object obj) {
-  if(obj instanceof CompressAwarePath) {
+public int compareTo(Path obj) {
+  if (obj instanceof CompressAwarePath) {
 CompressAwarePath compPath = (CompressAwarePath) obj;
-if(this.compressedSize < compPath.getCompressedSize()) {
-  return -1;
-} else if (this.getCompressedSize() > compPath.getCompressedSize()) {
-  return 1;
-}
+int c = Long.compare(this.compressedSize, compPath.compressedSize);
 // Not returning 0 here so that objects with the same size (but
 // different paths) are still added to the TreeSet.
+if (c != 0) {
+  return c;
+}
   }
   return super.compareTo(obj);
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org