hadoop git commit: HADOOP-15609. Retry KMS calls when SSLHandshakeException occurs. Contributed by Kitti Nanasi.
Repository: hadoop Updated Branches: refs/heads/branch-3.0 aab32c8cd -> 7f64a6886 HADOOP-15609. Retry KMS calls when SSLHandshakeException occurs. Contributed by Kitti Nanasi. (cherry picked from commit 81d59506e539673edde12e19c0df5c2edd9d02ad) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f64a688 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f64a688 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f64a688 Branch: refs/heads/branch-3.0 Commit: 7f64a6886ffd60e3ca4007fdd02c0ed19b988d49 Parents: aab32c8 Author: Xiao Chen Authored: Tue Jul 24 21:45:14 2018 -0700 Committer: Xiao Chen Committed: Tue Jul 24 21:46:31 2018 -0700 -- .../key/kms/LoadBalancingKMSClientProvider.java | 17 - .../kms/TestLoadBalancingKMSClientProvider.java | 79 2 files changed, 92 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f64a688/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java index 42cd47d..9677b0d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java @@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms; import java.io.IOException; import java.io.InterruptedIOException; +import java.net.ConnectException; import java.security.GeneralSecurityException; import java.security.NoSuchAlgorithmException; import java.util.Arrays; @@ -27,6 +28,8 @@ import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; +import javax.net.ssl.SSLHandshakeException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension; @@ -115,7 +118,6 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements if (providers.length == 0) { throw new IOException("No providers configured !"); } -IOException ex = null; int numFailovers = 0; for (int i = 0;; i++, numFailovers++) { KMSClientProvider provider = providers[(currPos + i) % providers.length]; @@ -130,8 +132,15 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements } catch (IOException ioe) { LOG.warn("KMS provider at [{}] threw an IOException: ", provider.getKMSUrl(), ioe); -ex = ioe; - +// SSLHandshakeException can occur here because of lost connection +// with the KMS server, creating a ConnectException from it, +// so that the FailoverOnNetworkExceptionRetry policy will retry +if (ioe instanceof SSLHandshakeException) { + Exception cause = ioe; + ioe = new ConnectException("SSLHandshakeException: " + + cause.getMessage()); + ioe.initCause(cause); +} RetryAction action = null; try { action = retryPolicy.shouldRetry(ioe, 0, numFailovers, false); @@ -153,7 +162,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements CommonConfigurationKeysPublic. 
KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, providers.length), providers.length); - throw ex; + throw ioe; } if (((numFailovers + 1) % providers.length) == 0) { // Sleep only after we try all the providers for every cycle. http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f64a688/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java -- diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java index bd68dca..4e7aed9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java @@ -18,6 +18,7 @@ package
hadoop git commit: HADOOP-15609. Retry KMS calls when SSLHandshakeException occurs. Contributed by Kitti Nanasi.
Repository: hadoop Updated Branches: refs/heads/branch-3.1 00c476abd -> 40c06b389 HADOOP-15609. Retry KMS calls when SSLHandshakeException occurs. Contributed by Kitti Nanasi. (cherry picked from commit 81d59506e539673edde12e19c0df5c2edd9d02ad) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40c06b38 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40c06b38 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40c06b38 Branch: refs/heads/branch-3.1 Commit: 40c06b389a15eeef655f108b422993ef12a1fb5b Parents: 00c476a Author: Xiao Chen Authored: Tue Jul 24 21:45:14 2018 -0700 Committer: Xiao Chen Committed: Tue Jul 24 21:46:21 2018 -0700 -- .../key/kms/LoadBalancingKMSClientProvider.java | 17 - .../kms/TestLoadBalancingKMSClientProvider.java | 79 2 files changed, 92 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c06b38/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java index 42cd47d..9677b0d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java @@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms; import java.io.IOException; import java.io.InterruptedIOException; +import java.net.ConnectException; import java.security.GeneralSecurityException; import java.security.NoSuchAlgorithmException; import java.util.Arrays; @@ -27,6 +28,8 @@ import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; +import javax.net.ssl.SSLHandshakeException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension; @@ -115,7 +118,6 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements if (providers.length == 0) { throw new IOException("No providers configured !"); } -IOException ex = null; int numFailovers = 0; for (int i = 0;; i++, numFailovers++) { KMSClientProvider provider = providers[(currPos + i) % providers.length]; @@ -130,8 +132,15 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements } catch (IOException ioe) { LOG.warn("KMS provider at [{}] threw an IOException: ", provider.getKMSUrl(), ioe); -ex = ioe; - +// SSLHandshakeException can occur here because of lost connection +// with the KMS server, creating a ConnectException from it, +// so that the FailoverOnNetworkExceptionRetry policy will retry +if (ioe instanceof SSLHandshakeException) { + Exception cause = ioe; + ioe = new ConnectException("SSLHandshakeException: " + + cause.getMessage()); + ioe.initCause(cause); +} RetryAction action = null; try { action = retryPolicy.shouldRetry(ioe, 0, numFailovers, false); @@ -153,7 +162,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements CommonConfigurationKeysPublic. 
KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, providers.length), providers.length); - throw ex; + throw ioe; } if (((numFailovers + 1) % providers.length) == 0) { // Sleep only after we try all the providers for every cycle. http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c06b38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java -- diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java index bd68dca..4e7aed9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java @@ -18,6 +18,7 @@ package
hadoop git commit: HADOOP-15609. Retry KMS calls when SSLHandshakeException occurs. Contributed by Kitti Nanasi.
Repository: hadoop Updated Branches: refs/heads/trunk 26864471c -> 81d59506e HADOOP-15609. Retry KMS calls when SSLHandshakeException occurs. Contributed by Kitti Nanasi. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81d59506 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81d59506 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81d59506 Branch: refs/heads/trunk Commit: 81d59506e539673edde12e19c0df5c2edd9d02ad Parents: 2686447 Author: Xiao Chen Authored: Tue Jul 24 21:45:14 2018 -0700 Committer: Xiao Chen Committed: Tue Jul 24 21:45:43 2018 -0700 -- .../key/kms/LoadBalancingKMSClientProvider.java | 17 - .../kms/TestLoadBalancingKMSClientProvider.java | 79 2 files changed, 92 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/81d59506/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java index 1ac3fd3..23cdc50 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java @@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms; import java.io.IOException; import java.io.InterruptedIOException; +import java.net.ConnectException; import java.security.GeneralSecurityException; import java.security.NoSuchAlgorithmException; import java.util.Arrays; @@ -27,6 +28,8 @@ import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; +import javax.net.ssl.SSLHandshakeException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension; @@ -115,7 +118,6 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements if (providers.length == 0) { throw new IOException("No providers configured !"); } -IOException ex = null; int numFailovers = 0; for (int i = 0;; i++, numFailovers++) { KMSClientProvider provider = providers[(currPos + i) % providers.length]; @@ -130,8 +132,15 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements } catch (IOException ioe) { LOG.warn("KMS provider at [{}] threw an IOException: ", provider.getKMSUrl(), ioe); -ex = ioe; - +// SSLHandshakeException can occur here because of lost connection +// with the KMS server, creating a ConnectException from it, +// so that the FailoverOnNetworkExceptionRetry policy will retry +if (ioe instanceof SSLHandshakeException) { + Exception cause = ioe; + ioe = new ConnectException("SSLHandshakeException: " + + cause.getMessage()); + ioe.initCause(cause); +} RetryAction action = null; try { action = retryPolicy.shouldRetry(ioe, 0, numFailovers, false); @@ -153,7 +162,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements CommonConfigurationKeysPublic. KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, providers.length), providers.length); - throw ex; + throw ioe; } if (((numFailovers + 1) % providers.length) == 0) { // Sleep only after we try all the providers for every cycle. 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81d59506/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java -- diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java index bd68dca..4e7aed9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java @@ -18,6 +18,7 @@ package org.apache.hadoop.crypto.key.kms; import static
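The heart of this change is the exception translation in the catch block above. A minimal standalone sketch of the same pattern (the class and method names below are illustrative, not part of Hadoop):

import java.io.IOException;
import java.net.ConnectException;
import javax.net.ssl.SSLHandshakeException;

public final class SslToConnectExceptionSketch {

  // An SSLHandshakeException (typically a connection lost during the TLS
  // handshake with the KMS) is rewrapped as a ConnectException so that a
  // retry policy which only fails over on network exceptions still retries;
  // the original exception is kept as the cause.
  static IOException translate(IOException ioe) {
    if (ioe instanceof SSLHandshakeException) {
      ConnectException ce =
          new ConnectException("SSLHandshakeException: " + ioe.getMessage());
      ce.initCause(ioe);
      return ce;
    }
    return ioe;
  }

  public static void main(String[] args) {
    IOException wrapped =
        translate(new SSLHandshakeException("Remote host closed connection"));
    System.out.println(wrapped.getClass().getSimpleName()
        + " caused by " + wrapped.getCause().getClass().getSimpleName());
  }
}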
hadoop git commit: HDFS-13761. Add toString Method to AclFeature Class. Contributed by Shweta.
Repository: hadoop
Updated Branches: refs/heads/trunk 849c45db1 -> 26864471c

HDFS-13761. Add toString Method to AclFeature Class. Contributed by Shweta.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/26864471
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/26864471
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/26864471

Branch: refs/heads/trunk
Commit: 26864471c24bf389ab8fc913decc3d069404688b
Parents: 849c45d
Author: Xiao Chen
Authored: Tue Jul 24 21:42:00 2018 -0700
Committer: Xiao Chen
Committed: Tue Jul 24 21:42:47 2018 -0700
--
 .../java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java | 5 +
 1 file changed, 5 insertions(+)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/26864471/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java
index 97d4759..6d546f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java
@@ -73,6 +73,11 @@ public class AclFeature implements INode.Feature, ReferenceCounter {
   }

   @Override
+  public String toString() {
+    return "AclFeature : " + Integer.toHexString(hashCode()) + " Size of entries : " + entries.length;
+  }
+
+  @Override
   public int hashCode() {
     return Arrays.hashCode(entries);
   }
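For reference, a toString of the shape added above yields output like the following stand-in class produces (an illustration only; a plain int[] stands in for the real packed ACL entries):

import java.util.Arrays;

class AclFeatureToStringSketch {
  private final int[] entries;

  AclFeatureToStringSketch(int[] entries) {
    this.entries = entries;
  }

  @Override
  public int hashCode() {
    return Arrays.hashCode(entries);
  }

  // Mirrors the new AclFeature#toString: identity hash plus entry count,
  // which is what shows up in NameNode debug logs.
  @Override
  public String toString() {
    return "AclFeature : " + Integer.toHexString(hashCode())
        + " Size of entries : " + entries.length;
  }

  public static void main(String[] args) {
    System.out.println(new AclFeatureToStringSketch(new int[] {1, 2, 3}));
  }
}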
[25/50] hadoop git commit: HDDS-239. Add PipelineStateManager to track pipeline state transition. Contributed by Mukul Kumar Singh.
HDDS-239. Add PipelineStateManager to track pipeline state transition. Contributed by Mukul Kumar Singh. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9be25e34 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9be25e34 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9be25e34 Branch: refs/heads/HADOOP-15461 Commit: 9be25e347683d26e0575458c7f470c76fd4d951b Parents: d2acf8d Author: Xiaoyu Yao Authored: Fri Jul 20 14:22:02 2018 -0700 Committer: Xiaoyu Yao Committed: Fri Jul 20 14:22:02 2018 -0700 -- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 5 + .../scm/container/common/helpers/Pipeline.java | 7 + .../common/src/main/resources/ozone-default.xml | 12 ++ .../hdds/scm/container/ContainerMapping.java| 4 + .../hdds/scm/exceptions/SCMException.java | 1 + .../hdds/scm/pipelines/PipelineManager.java | 64 +++--- .../hdds/scm/pipelines/PipelineSelector.java| 212 --- .../scm/pipelines/ratis/RatisManagerImpl.java | 33 +-- .../standalone/StandaloneManagerImpl.java | 21 +- .../hdds/scm/pipeline/TestNode2PipelineMap.java | 14 ++ 10 files changed, 273 insertions(+), 100 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/9be25e34/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 71184cf..6e940ad 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -236,6 +236,11 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s"; + public static final String OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT = + "ozone.scm.pipeline.creation.lease.timeout"; + + public static final String + OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT_DEFAULT = "60s"; public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY = "ozone.scm.block.deletion.max.retry"; http://git-wip-us.apache.org/repos/asf/hadoop/blob/9be25e34/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java index c5794f4..534c9fd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java @@ -214,6 +214,13 @@ public class Pipeline { } /** + * Update the State of the pipeline. + */ + public void setLifeCycleState(HddsProtos.LifeCycleState nextState) { + lifeCycleState = nextState; + } + + /** * Gets the pipeline Name. * * @return - Name of the pipeline http://git-wip-us.apache.org/repos/asf/hadoop/blob/9be25e34/hadoop-hdds/common/src/main/resources/ozone-default.xml -- diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 5a1d26a..69a382a 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -1085,5 +1085,17 @@ executed since last report. 
Unit could be defined with postfix (ns,ms,s,m,h,d) + +ozone.scm.pipeline.creation.lease.timeout +60s +OZONE, SCM, PIPELINE + + Pipeline creation timeout in milliseconds to be used by SCM. When + BEGIN_CREATE event happens the pipeline is moved from ALLOCATED to + CREATING state, SCM will now wait for the configured amount of time + to get COMPLETE_CREATE event if it doesn't receive it will move the + pipeline to DELETING. + + \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/9be25e34/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java -- diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java index 26f4d86..f07d22b 100644 ---
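The new ozone.scm.pipeline.creation.lease.timeout property above holds a duration such as "60s". A minimal sketch of reading a duration key like this through Hadoop's Configuration (illustrative only, not the actual PipelineSelector code; the 60-second default mirrors the value above):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public final class PipelineLeaseTimeoutSketch {
  // Key from the ScmConfigKeys change above; the default mirrors "60s".
  static final String KEY = "ozone.scm.pipeline.creation.lease.timeout";

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Values such as "60s" or "5m" are parsed into the requested unit.
    long timeoutMs =
        conf.getTimeDuration(KEY, 60_000L, TimeUnit.MILLISECONDS);
    System.out.println("Pipeline creation lease timeout: " + timeoutMs + " ms");
  }
}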
[44/50] hadoop git commit: YARN-8541. RM startup failure on recovery after user deletion. Contributed by Bibin A Chundatt.
YARN-8541. RM startup failure on recovery after user deletion. Contributed by Bibin A Chundatt. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e673dd1d Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e673dd1d Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e673dd1d Branch: refs/heads/HADOOP-15461 Commit: e673dd1d4d78b66e7b6705ec6dc3679d2347d704 Parents: cd0b9f1 Author: bibinchundatt Authored: Tue Jul 24 18:36:49 2018 +0530 Committer: bibinchundatt Committed: Tue Jul 24 23:26:59 2018 +0530 -- .../server/resourcemanager/RMAppManager.java| 48 ++-- .../placement/PlacementManager.java | 9 .../TestWorkPreservingRMRestart.java| 48 .../placement/TestPlacementManager.java | 20 4 files changed, 80 insertions(+), 45 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/e673dd1d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java index 3e64cfc..7011aaa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java @@ -364,17 +364,9 @@ public class RMAppManager implements EventHandler, ApplicationSubmissionContext submissionContext, long submitTime, String user, boolean isRecovery, long startTime) throws YarnException { -ApplicationPlacementContext placementContext = null; -try { - placementContext = placeApplication(rmContext, submissionContext, user); -} catch (YarnException e) { - String msg = - "Failed to place application " + submissionContext.getApplicationId() - + " to queue and specified " + "queue is invalid : " - + submissionContext.getQueue(); - LOG.error(msg, e); - throw e; -} +ApplicationPlacementContext placementContext = +placeApplication(rmContext.getQueuePlacementManager(), +submissionContext, user, isRecovery); // We only replace the queue when it's a new application if (!isRecovery) { @@ -789,23 +781,31 @@ public class RMAppManager implements EventHandler, } @VisibleForTesting - ApplicationPlacementContext placeApplication(RMContext rmContext, - ApplicationSubmissionContext context, String user) throws YarnException { + ApplicationPlacementContext placeApplication( + PlacementManager placementManager, ApplicationSubmissionContext context, + String user, boolean isRecovery) throws YarnException { ApplicationPlacementContext placementContext = null; -PlacementManager placementManager = rmContext.getQueuePlacementManager(); - if (placementManager != null) { - placementContext = placementManager.placeApplication(context, user); -} else{ - if ( context.getQueue() == null || context.getQueue().isEmpty()) { -final String msg = "Queue Placement Manager is not set. 
Cannot place " -+ "application : " + context.getApplicationId() + " to queue and " -+ "specified queue is invalid " + context.getQueue(); -LOG.error(msg); -throw new YarnException(msg); + try { +placementContext = placementManager.placeApplication(context, user); + } catch (YarnException e) { +// Placement could also fail if the user doesn't exist in system +// skip if the user is not found during recovery. +if (isRecovery) { + LOG.warn("PlaceApplication failed,skipping on recovery of rm"); + return placementContext; +} +throw e; } } - +if (placementContext == null && (context.getQueue() == null) || context +.getQueue().isEmpty()) { + String msg = "Failed to place application " + context.getApplicationId() + + " to queue and specified " + "queue is invalid : " + context + .getQueue(); + LOG.error(msg); + throw new YarnException(msg); +} return placementContext; }
[14/50] hadoop git commit: YARN-8528. Final states in ContainerAllocation might be modified externally causing unexpected allocation results. Contributed by Xintong Song.
YARN-8528. Final states in ContainerAllocation might be modified externally causing unexpected allocation results. Contributed by Xintong Song. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbf20264 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbf20264 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbf20264 Branch: refs/heads/HADOOP-15461 Commit: cbf20264838f536382a9d8c4cd2144faf6875c3a Parents: 7b25fb9 Author: Weiwei Yang Authored: Fri Jul 20 22:32:11 2018 +0800 Committer: Weiwei Yang Committed: Fri Jul 20 22:34:06 2018 +0800 -- .../capacity/allocator/ContainerAllocation.java | 2 +- .../allocator/RegularContainerAllocator.java| 10 ++-- .../capacity/TestCapacityScheduler.java | 48 3 files changed, 54 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbf20264/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java index f408508..b9b9bcf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java @@ -56,7 +56,7 @@ public class ContainerAllocation { RMContainer containerToBeUnreserved; private Resource resourceToBeAllocated = Resources.none(); - AllocationState state; + private AllocationState state; NodeType containerNodeType = NodeType.NODE_LOCAL; NodeType requestLocalityType = null; http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbf20264/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java index 99a5b84..8f49b41 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java @@ -263,7 +263,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator { reservedContainer, schedulingMode, resourceLimits); if 
(null == reservedContainer) { - if (result.state == AllocationState.PRIORITY_SKIPPED) { + if (result.getAllocationState() == AllocationState.PRIORITY_SKIPPED) { // Don't count 'skipped nodes' as a scheduling opportunity! application.subtractSchedulingOpportunity(schedulerKey); } @@ -487,8 +487,8 @@ public class RegularContainerAllocator extends AbstractContainerAllocator { // When a returned allocation is LOCALITY_SKIPPED, since we're in // off-switch request now, we will skip this app w.r.t priorities - if (allocation.state == AllocationState.LOCALITY_SKIPPED) { -allocation.state = AllocationState.APP_SKIPPED; + if (allocation.getAllocationState() == AllocationState.LOCALITY_SKIPPED) { +allocation = ContainerAllocation.APP_SKIPPED; } allocation.requestLocalityType = requestLocalityType; @@ -836,8 +836,8 @@ public class RegularContainerAllocator extends AbstractContainerAllocator { result = tryAllocateOnNode(clusterResource, node, schedulingMode,
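The bug fixed here is a shared-mutable-state hazard: assigning to the state field of a returned ContainerAllocation could silently mutate one of the shared "final state" constants, corrupting later allocations. A small generic sketch of the hazard and of the fix (a hypothetical Result class, not the YARN one):

public final class SharedResultMutationSketch {
  enum State { LOCALITY_SKIPPED, APP_SKIPPED }

  static final class Result {
    // Shared, reusable result instance, analogous to ContainerAllocation's
    // predefined results.
    static final Result APP_SKIPPED = new Result(State.APP_SKIPPED);

    private State state;
    Result(State state) { this.state = state; }
    State getState() { return state; }
    void setState(State s) { state = s; }  // dangerous on a shared instance
  }

  public static void main(String[] args) {
    // Buggy pattern: mutating the object we were handed also changes it for
    // every other holder of the same reference.
    Result shared = new Result(State.LOCALITY_SKIPPED);
    Result allocation = shared;
    allocation.setState(State.APP_SKIPPED);
    System.out.println("shared is now " + shared.getState()); // APP_SKIPPED

    // Fixed pattern: keep the field private and swap the local reference to
    // a dedicated constant instead of mutating in place.
    Result other = new Result(State.LOCALITY_SKIPPED);
    Result allocation2 = Result.APP_SKIPPED; // replace, don't mutate
    System.out.println(other.getState() + " / " + allocation2.getState());
  }
}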
[47/50] hadoop git commit: HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica (Contributed by BELUGA BEHR via Daniel Templeton)
HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica (Contributed by BELUGA BEHR via Daniel Templeton) Change-Id: I965d1cfa642ad24296038b83e3d5c9983545267d Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/849c45db Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/849c45db Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/849c45db Branch: refs/heads/HADOOP-15461 Commit: 849c45db187224095b13fe297a4d7377fbb9d2cd Parents: 6bec03c Author: Daniel Templeton Authored: Tue Jul 24 15:34:19 2018 -0700 Committer: Daniel Templeton Committed: Tue Jul 24 16:05:27 2018 -0700 -- .../java/org/apache/hadoop/fs/CreateFlag.java | 9 ++- .../org/apache/hadoop/hdfs/AddBlockFlag.java| 11 ++- .../org/apache/hadoop/hdfs/DFSOutputStream.java | 3 + .../hadoop/hdfs/DistributedFileSystem.java | 11 +++ .../src/main/proto/ClientNamenodeProtocol.proto | 1 + .../BlockPlacementPolicyDefault.java| 4 +- .../hdfs/server/namenode/FSDirWriteFileOp.java | 30 +--- .../server/namenode/TestFSDirWriteFileOp.java | 79 8 files changed, 134 insertions(+), 14 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/849c45db/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java index 383d65a..c3e088b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java @@ -116,7 +116,14 @@ public enum CreateFlag { * Enforce the file to be a replicated file, no matter what its parent * directory's replication or erasure coding policy is. */ - SHOULD_REPLICATE((short) 0x80); + SHOULD_REPLICATE((short) 0x80), + + /** + * Advise that the first block replica NOT take into account DataNode + * locality. The first block replica should be placed randomly within the + * cluster. Subsequent block replicas should follow DataNode locality rules. + */ + IGNORE_CLIENT_LOCALITY((short) 0x100); private final short mode; http://git-wip-us.apache.org/repos/asf/hadoop/blob/849c45db/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java index 6a0805b..b0686d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java @@ -36,7 +36,16 @@ public enum AddBlockFlag { * * @see CreateFlag#NO_LOCAL_WRITE */ - NO_LOCAL_WRITE((short) 0x01); + NO_LOCAL_WRITE((short) 0x01), + + /** + * Advise that the first block replica NOT take into account DataNode + * locality. The first block replica should be placed randomly within the + * cluster. Subsequent block replicas should follow DataNode locality rules. 
+ * + * @see CreateFlag#IGNORE_CLIENT_LOCALITY + */ + IGNORE_CLIENT_LOCALITY((short) 0x02); private final short mode; http://git-wip-us.apache.org/repos/asf/hadoop/blob/849c45db/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index 9734752..e977054 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -201,6 +201,9 @@ public class DFSOutputStream extends FSOutputSummer if (flag.contains(CreateFlag.NO_LOCAL_WRITE)) { this.addBlockFlags.add(AddBlockFlag.NO_LOCAL_WRITE); } +if (flag.contains(CreateFlag.IGNORE_CLIENT_LOCALITY)) { + this.addBlockFlags.add(AddBlockFlag.IGNORE_CLIENT_LOCALITY); +} if (progress != null) { DFSClient.LOG.debug("Set non-null progress callback on DFSOutputStream " +"{}", src);
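With this change, a client that does not want its first block replica pinned to the local DataNode can pass CreateFlag.IGNORE_CLIENT_LOCALITY at create time. A rough usage sketch against the generic FileSystem API (the path, buffer, replication, and block-size values are placeholders, and it assumes fs.defaultFS points at an HDFS cluster):

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public final class IgnoreClientLocalitySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // CREATE plus IGNORE_CLIENT_LOCALITY: the first block replica may be
    // placed anywhere in the cluster instead of on the writer's DataNode.
    EnumSet<CreateFlag> flags =
        EnumSet.of(CreateFlag.CREATE, CreateFlag.IGNORE_CLIENT_LOCALITY);

    try (FSDataOutputStream out = fs.create(
        new Path("/tmp/example.dat"),      // placeholder path
        FsPermission.getFileDefault(),
        flags,
        4096,                              // buffer size
        (short) 3,                         // replication
        128L * 1024 * 1024,                // block size
        null)) {                           // no progress callback
      out.writeBytes("hello");
    }
  }
}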
[18/50] hadoop git commit: YARN-6964. Fair scheduler misuses Resources operations. (Daniel Templeton and Szilard Nemeth via Haibo Chen)
YARN-6964. Fair scheduler misuses Resources operations. (Daniel Templeton and Szilard Nemeth via Haibo Chen) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a6bb840 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a6bb840 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a6bb840 Branch: refs/heads/HADOOP-15461 Commit: 8a6bb8409c2dc695c0ffc70df0528d7f8bd5d795 Parents: 5c19ee3 Author: Haibo Chen Authored: Fri Jul 20 10:46:28 2018 -0700 Committer: Haibo Chen Committed: Fri Jul 20 10:46:28 2018 -0700 -- .../hadoop/yarn/util/resource/Resources.java| 20 +- .../scheduler/SchedulerApplicationAttempt.java | 11 +-- .../allocator/RegularContainerAllocator.java| 4 +- .../scheduler/fair/AllocationConfiguration.java | 2 +- .../scheduler/fair/FSAppAttempt.java| 3 +- .../scheduler/fair/FSLeafQueue.java | 9 +-- .../fair/policies/FairSharePolicy.java | 76 ++-- .../TestSchedulerApplicationAttempt.java| 58 +++ .../scheduler/fair/FakeSchedulable.java | 22 +++--- 9 files changed, 140 insertions(+), 65 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a6bb840/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java index db0f980..8636577 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java @@ -242,7 +242,7 @@ public class Resources { public static boolean isNone(Resource other) { return NONE.equals(other); } - + public static Resource unbounded() { return UNBOUNDED; } @@ -300,8 +300,9 @@ public class Resources { } /** - * Subtract rhs from lhs and reset any negative - * values to zero. + * Subtract {@code rhs} from {@code lhs} and reset any negative values to + * zero. This call will modify {@code lhs}. + * * @param lhs {@link Resource} to subtract from * @param rhs {@link Resource} to subtract * @return the value of lhs after subtraction @@ -317,6 +318,19 @@ public class Resources { return lhs; } + /** + * Subtract {@code rhs} from {@code lhs} and reset any negative values to + * zero. This call will operate on a copy of {@code lhs}, leaving {@code lhs} + * unmodified. 
+ * + * @param lhs {@link Resource} to subtract from + * @param rhs {@link Resource} to subtract + * @return the value of lhs after subtraction + */ + public static Resource subtractNonNegative(Resource lhs, Resource rhs) { +return subtractFromNonNegative(clone(lhs), rhs); + } + public static Resource negate(Resource resource) { return subtract(NONE, resource); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a6bb840/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java index 1225af1..dd6d38f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java @@ -1280,8 +1280,7 @@ public class SchedulerApplicationAttempt implements SchedulableEntity { } @Private - public boolean hasPendingResourceRequest(ResourceCalculator rc, - String nodePartition, Resource cluster, + public boolean hasPendingResourceRequest(String nodePartition, SchedulingMode schedulingMode) { // We need to consider unconfirmed allocations if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) { @@
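The distinction introduced above matters for callers: subtractFromNonNegative modifies its lhs argument in place, while the new subtractNonNegative works on a clone and leaves lhs untouched. A short usage sketch assuming the post-patch Resources API (the values are arbitrary):

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public final class SubtractNonNegativeSketch {
  public static void main(String[] args) {
    Resource total = Resources.createResource(8192, 8);
    Resource used = Resources.createResource(2048, 2);

    // Copying variant: 'total' keeps its original value.
    Resource available = Resources.subtractNonNegative(total, used);
    System.out.println("available = " + available + ", total = " + total);

    // In-place variant: 'total' itself is reduced (and clamped at zero).
    Resources.subtractFromNonNegative(total, used);
    System.out.println("total after in-place subtract = " + total);
  }
}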
[30/50] hadoop git commit: HDDS-181. CloseContainer should commit all pending open Keys on a datanode. Contributed by Shashikant Banerjee.
HDDS-181. CloseContainer should commit all pending open Keys on a datanode. Contributed by Shashikant Banerjee. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbe2f622 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbe2f622 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbe2f622 Branch: refs/heads/HADOOP-15461 Commit: bbe2f6225ea500651de04c064f7b847be18e5b66 Parents: 9fa9e30 Author: Mukul Kumar Singh Authored: Mon Jul 23 09:12:47 2018 +0530 Committer: Mukul Kumar Singh Committed: Mon Jul 23 09:13:03 2018 +0530 -- .../ozone/container/common/helpers/KeyData.java | 20 +- .../common/impl/OpenContainerBlockMap.java | 167 .../container/keyvalue/KeyValueHandler.java | 69 - .../common/impl/TestCloseContainerHandler.java | 260 +++ 4 files changed, 504 insertions(+), 12 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbe2f622/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java index 129e4a8..b63332f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java @@ -25,6 +25,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; import java.util.TreeMap; +import java.util.ArrayList; /** * Helper class to convert Protobuf to Java classes. @@ -131,7 +132,25 @@ public class KeyData { } /** + * Adds chinkInfo to the list + */ + public void addChunk(ContainerProtos.ChunkInfo chunkInfo) { +if (chunks == null) { + chunks = new ArrayList<>(); +} +chunks.add(chunkInfo); + } + + /** + * removes the chunk. + */ + public void removeChunk(ContainerProtos.ChunkInfo chunkInfo) { +chunks.remove(chunkInfo); + } + + /** * Returns container ID. + * * @return long. */ public long getContainerID() { @@ -170,5 +189,4 @@ public class KeyData { public long getSize() { return chunks.parallelStream().mapToLong(e->e.getLen()).sum(); } - } http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbe2f622/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java new file mode 100644 index 000..ab5f861 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.container.common.impl; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.ozone.container.common.helpers.KeyData; + +import java.io.IOException; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; + +/** + * This class will maintain list of open keys per container when closeContainer + * command comes, it should autocommit all open keys of a open container before + * marking the container as closed. + */ +public class OpenContainerBlockMap { + + /** + * TODO : We may construct the openBlockMap
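Conceptually, the OpenContainerBlockMap added here tracks the keys that are still open per container, so that a close-container command can commit them before the container is marked closed. A simplified sketch of that bookkeeping with plain Java collections (not the Ozone classes):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public final class OpenKeysPerContainerSketch {
  // containerId -> keys that have been written but not yet committed.
  private final Map<Long, List<String>> openKeys = new ConcurrentHashMap<>();

  void recordOpenKey(long containerId, String key) {
    openKeys.computeIfAbsent(containerId,
        id -> Collections.synchronizedList(new ArrayList<>())).add(key);
  }

  // On a close-container command, commit everything pending, then forget it.
  void closeContainer(long containerId) {
    List<String> pending = openKeys.remove(containerId);
    if (pending != null) {
      for (String key : pending) {
        System.out.println("committing pending key " + key + " before close");
      }
    }
  }

  public static void main(String[] args) {
    OpenKeysPerContainerSketch map = new OpenKeysPerContainerSketch();
    map.recordOpenKey(1L, "volume/bucket/key-1");
    map.recordOpenKey(1L, "volume/bucket/key-2");
    map.closeContainer(1L);
  }
}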
[08/50] hadoop git commit: HADOOP-15547. WASB: improve listStatus performance. Contributed by Thomas Marquardt.
HADOOP-15547/ WASB: improve listStatus performance. Contributed by Thomas Marquardt. (cherry picked from commit 749fff577ed9afb4ef8a54b8948f74be083cc620) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45d9568a Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45d9568a Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45d9568a Branch: refs/heads/HADOOP-15461 Commit: 45d9568aaaf532a6da11bd7c1844ff81bf66bab1 Parents: 5836e0a Author: Steve Loughran Authored: Thu Jul 19 12:31:19 2018 -0700 Committer: Steve Loughran Committed: Thu Jul 19 12:31:19 2018 -0700 -- .../dev-support/findbugs-exclude.xml| 10 + hadoop-tools/hadoop-azure/pom.xml | 12 + .../fs/azure/AzureNativeFileSystemStore.java| 182 - .../apache/hadoop/fs/azure/FileMetadata.java| 77 ++-- .../hadoop/fs/azure/NativeAzureFileSystem.java | 376 --- .../hadoop/fs/azure/NativeFileSystemStore.java | 15 +- .../apache/hadoop/fs/azure/PartialListing.java | 61 --- .../hadoop/fs/azure/ITestListPerformance.java | 196 ++ 8 files changed, 514 insertions(+), 415 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/45d9568a/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml -- diff --git a/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml b/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml index cde1734..38de35e 100644 --- a/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml +++ b/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml @@ -47,4 +47,14 @@ + + + + + + http://git-wip-us.apache.org/repos/asf/hadoop/blob/45d9568a/hadoop-tools/hadoop-azure/pom.xml -- diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml index 44b67a0..52b5b72 100644 --- a/hadoop-tools/hadoop-azure/pom.xml +++ b/hadoop-tools/hadoop-azure/pom.xml @@ -43,6 +43,8 @@ unset 7200 + 10 + 1000 @@ -298,6 +300,8 @@ ${fs.azure.scale.test.huge.filesize} ${fs.azure.scale.test.huge.partitionsize} ${fs.azure.scale.test.timeout} + ${fs.azure.scale.test.list.performance.threads} + ${fs.azure.scale.test.list.performance.files} **/Test*.java @@ -326,6 +330,8 @@ ${fs.azure.scale.test.huge.filesize} ${fs.azure.scale.test.huge.partitionsize} ${fs.azure.scale.test.timeout} + ${fs.azure.scale.test.list.performance.threads} + ${fs.azure.scale.test.list.performance.files} **/TestRollingWindowAverage*.java @@ -367,6 +373,8 @@ ${fs.azure.scale.test.huge.filesize} ${fs.azure.scale.test.huge.partitionsize} ${fs.azure.scale.test.timeout} + ${fs.azure.scale.test.list.performance.threads} + ${fs.azure.scale.test.list.performance.files} @@ -412,6 +420,8 @@ ${fs.azure.scale.test.huge.filesize} ${fs.azure.scale.test.huge.partitionsize} ${fs.azure.scale.test.timeout} + ${fs.azure.scale.test.list.performance.threads} + ${fs.azure.scale.test.list.performance.files} **/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java @@ -454,6 +464,8 @@ ${fs.azure.scale.test.enabled} ${fs.azure.scale.test.huge.filesize} ${fs.azure.scale.test.timeout} + ${fs.azure.scale.test.list.performance.threads} + ${fs.azure.scale.test.list.performance.files} ${fs.azure.scale.test.timeout} false http://git-wip-us.apache.org/repos/asf/hadoop/blob/45d9568a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java -- diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java index 
197ab22..d2f9ca6 100644 ---
[36/50] hadoop git commit: HDFS-13583. RBF: Router admin clrQuota is not synchronized with nameservice. Contributed by Dibyendu Karmakar.
HDFS-13583. RBF: Router admin clrQuota is not synchronized with nameservice. Contributed by Dibyendu Karmakar. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17a87977 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17a87977 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17a87977 Branch: refs/heads/HADOOP-15461 Commit: 17a87977f29ced49724f561a68565217c8cb4e94 Parents: 8688a0c Author: Yiqun Lin Authored: Tue Jul 24 11:15:47 2018 +0800 Committer: Yiqun Lin Committed: Tue Jul 24 11:15:47 2018 +0800 -- .../hdfs/server/federation/router/Quota.java| 9 ++- .../federation/router/RouterAdminServer.java| 8 -- .../federation/router/RouterQuotaManager.java | 4 +-- .../router/RouterQuotaUpdateService.java| 2 +- .../federation/router/RouterQuotaUsage.java | 4 +-- .../federation/store/records/MountTable.java| 4 +-- .../store/records/impl/pb/MountTablePBImpl.java | 4 +-- .../hdfs/tools/federation/RouterAdmin.java | 8 +++--- .../federation/router/TestRouterAdmin.java | 8 ++ .../federation/router/TestRouterAdminCLI.java | 16 +--- .../federation/router/TestRouterQuota.java | 26 +--- .../router/TestRouterQuotaManager.java | 20 +++ .../store/records/TestMountTable.java | 4 +-- 13 files changed, 82 insertions(+), 35 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/17a87977/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java index 75d3e04..846ccd1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java @@ -162,6 +162,8 @@ public class Quota { private QuotaUsage aggregateQuota(Map results) { long nsCount = 0; long ssCount = 0; +long nsQuota = HdfsConstants.QUOTA_RESET; +long ssQuota = HdfsConstants.QUOTA_RESET; boolean hasQuotaUnSet = false; for (Map.Entry entry : results.entrySet()) { @@ -173,6 +175,8 @@ public class Quota { if (usage.getQuota() == -1 && usage.getSpaceQuota() == -1) { hasQuotaUnSet = true; } +nsQuota = usage.getQuota(); +ssQuota = usage.getSpaceQuota(); nsCount += usage.getFileAndDirectoryCount(); ssCount += usage.getSpaceConsumed(); @@ -187,7 +191,10 @@ public class Quota { QuotaUsage.Builder builder = new QuotaUsage.Builder() .fileAndDirectoryCount(nsCount).spaceConsumed(ssCount); if (hasQuotaUnSet) { - builder.quota(HdfsConstants.QUOTA_DONT_SET); + builder.quota(HdfsConstants.QUOTA_RESET) + .spaceQuota(HdfsConstants.QUOTA_RESET); +} else { + builder.quota(nsQuota).spaceQuota(ssQuota); } return builder.build(); http://git-wip-us.apache.org/repos/asf/hadoop/blob/17a87977/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java index 8e23eca..114f008 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java @@ -28,6 +28,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService; import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolPB; import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolServerSideTranslatorPB; @@ -253,8 +254,11 @@ public class RouterAdminServer extends AbstractService if (nsQuota != HdfsConstants.QUOTA_DONT_SET || ssQuota != HdfsConstants.QUOTA_DONT_SET) { -
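The aggregation change above carries the subclusters' actual quota values into the combined result instead of always reporting an unset quota. A small sketch of assembling such an aggregate with the public QuotaUsage builder (the numbers are made up):

import org.apache.hadoop.fs.QuotaUsage;

public final class AggregatedQuotaSketch {
  public static void main(String[] args) {
    long fileAndDirCount = 1200;     // summed across nameservices
    long spaceConsumed = 4L << 30;   // summed across nameservices
    long nsQuota = 10000;            // quota reported by the subclusters
    long ssQuota = 10L << 30;

    QuotaUsage aggregated = new QuotaUsage.Builder()
        .fileAndDirectoryCount(fileAndDirCount)
        .spaceConsumed(spaceConsumed)
        .quota(nsQuota)
        .spaceQuota(ssQuota)
        .build();

    System.out.println("quota=" + aggregated.getQuota()
        + ", spaceQuota=" + aggregated.getSpaceQuota()
        + ", files+dirs=" + aggregated.getFileAndDirectoryCount());
  }
}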
[16/50] hadoop git commit: HDDS-259. Implement ContainerReportPublisher and NodeReportPublisher. Contributed by Nanda kumar.
HDDS-259. Implement ContainerReportPublisher and NodeReportPublisher. Contributed by Nanda kumar. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68b57ad3 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68b57ad3 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68b57ad3 Branch: refs/heads/HADOOP-15461 Commit: 68b57ad32cb0978ad5cd20b5fdc821f087a2c9dc Parents: e9c44ec Author: Xiaoyu Yao Authored: Fri Jul 20 09:07:58 2018 -0700 Committer: Xiaoyu Yao Committed: Fri Jul 20 09:12:48 2018 -0700 -- .../org/apache/hadoop/hdds/HddsConfigKeys.java | 26 ++-- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 5 --- .../apache/hadoop/ozone/OzoneConfigKeys.java| 5 --- .../common/src/main/resources/ozone-default.xml | 26 +--- .../apache/hadoop/hdds/scm/HddsServerUtil.java | 13 .../report/CommandStatusReportPublisher.java| 24 --- .../common/report/ContainerReportPublisher.java | 25 --- .../common/report/NodeReportPublisher.java | 32 ++-- .../common/report/ReportPublisher.java | 14 +++-- .../common/report/TestReportPublisher.java | 11 ++- .../scm/container/closer/ContainerCloser.java | 12 .../container/closer/TestContainerCloser.java | 8 ++--- .../hadoop/hdds/scm/node/TestNodeManager.java | 17 +-- .../hadoop/ozone/MiniOzoneClusterImpl.java | 5 +-- .../ozone/TestStorageContainerManager.java | 8 ++--- .../hadoop/ozone/scm/node/TestQueryNode.java| 5 ++- 16 files changed, 162 insertions(+), 74 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java index 8b449fb..0283615 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java @@ -17,15 +17,35 @@ */ package org.apache.hadoop.hdds; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; /** - * Config class for HDDS. + * This class contains constants for configuration keys and default values + * used in hdds. */ public final class HddsConfigKeys { + + /** + * Do not instantiate. 
+ */ private HddsConfigKeys() { } + + public static final String HDDS_HEARTBEAT_INTERVAL = + "hdds.heartbeat.interval"; + public static final String HDDS_HEARTBEAT_INTERVAL_DEFAULT = + "30s"; + + public static final String HDDS_NODE_REPORT_INTERVAL = + "hdds.node.report.interval"; + public static final String HDDS_NODE_REPORT_INTERVAL_DEFAULT = + "60s"; + + public static final String HDDS_CONTAINER_REPORT_INTERVAL = + "hdds.container.report.interval"; + public static final String HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT = + "60s"; + public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL = "hdds.command.status.report.interval"; public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT = - ScmConfigKeys.OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT; + "60s"; } http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 46eb8aa..71184cf 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -156,11 +156,6 @@ public final class ScmConfigKeys { "ozone.scm.handler.count.key"; public static final int OZONE_SCM_HANDLER_COUNT_DEFAULT = 10; - public static final String OZONE_SCM_HEARTBEAT_INTERVAL = - "ozone.scm.heartbeat.interval"; - public static final String OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT = - "30s"; - public static final String OZONE_SCM_DEADNODE_INTERVAL = "ozone.scm.dead.node.interval"; public static final String OZONE_SCM_DEADNODE_INTERVAL_DEFAULT = http://git-wip-us.apache.org/repos/asf/hadoop/blob/68b57ad3/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
[37/50] hadoop git commit: HDDS-258. Helper methods to generate NodeReport and ContainerReport for testing. Contributed by Nanda Kumar.
HDDS-258. Helper methods to generate NodeReport and ContainerReport for testing. Contributed by Nanda Kumar. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ced3efe Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ced3efe Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ced3efe Branch: refs/heads/HADOOP-15461 Commit: 2ced3efe94eecc3e6076be1f0341bf6a2f2affab Parents: 17a8797 Author: Xiaoyu Yao Authored: Mon Jul 23 21:29:44 2018 -0700 Committer: Xiaoyu Yao Committed: Mon Jul 23 22:04:20 2018 -0700 -- .../hadoop/hdds/scm/node/SCMNodeManager.java| 27 +- .../org/apache/hadoop/hdds/scm/TestUtils.java | 372 ++- .../command/TestCommandStatusReportHandler.java | 2 +- .../hdds/scm/container/MockNodeManager.java | 4 +- .../scm/container/TestContainerMapping.java | 4 +- .../container/closer/TestContainerCloser.java | 2 +- .../TestSCMContainerPlacementCapacity.java | 2 +- .../TestSCMContainerPlacementRandom.java| 2 +- .../replication/TestReplicationManager.java | 7 +- .../hdds/scm/node/TestContainerPlacement.java | 4 - .../hadoop/hdds/scm/node/TestNodeManager.java | 80 ++-- .../hdds/scm/node/TestNodeReportHandler.java| 19 +- .../scm/node/TestSCMNodeStorageStatMap.java | 13 +- .../TestSCMDatanodeHeartbeatDispatcher.java | 4 +- .../ozone/container/common/TestEndPoint.java| 74 ++-- .../hadoop/ozone/TestMiniOzoneCluster.java | 20 +- .../container/metrics/TestContainerMetrics.java | 2 +- .../container/ozoneimpl/TestOzoneContainer.java | 2 +- .../container/server/TestContainerServer.java | 4 +- 19 files changed, 406 insertions(+), 238 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java -- diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 7370b07..fca08bd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -342,7 +342,8 @@ public class SCMNodeManager public VersionResponse getVersion(SCMVersionRequestProto versionRequest) { return VersionResponse.newBuilder() .setVersion(this.version.getVersion()) -.addValue(OzoneConsts.SCM_ID, this.scmManager.getScmStorage().getScmId()) +.addValue(OzoneConsts.SCM_ID, +this.scmManager.getScmStorage().getScmId()) .addValue(OzoneConsts.CLUSTER_ID, this.scmManager.getScmStorage() .getClusterID()) .build(); @@ -364,15 +365,11 @@ public class SCMNodeManager public RegisteredCommand register( DatanodeDetails datanodeDetails, NodeReportProto nodeReport) { -String hostname = null; -String ip = null; InetAddress dnAddress = Server.getRemoteIp(); if (dnAddress != null) { // Mostly called inside an RPC, update ip and peer hostname - hostname = dnAddress.getHostName(); - ip = dnAddress.getHostAddress(); - datanodeDetails.setHostName(hostname); - datanodeDetails.setIpAddress(ip); + datanodeDetails.setHostName(dnAddress.getHostName()); + datanodeDetails.setIpAddress(dnAddress.getHostAddress()); } UUID dnId = datanodeDetails.getUuid(); try { @@ -390,14 +387,12 @@ public class SCMNodeManager LOG.trace("Datanode is already registered. 
Datanode: {}", datanodeDetails.toString()); } -RegisteredCommand.Builder builder = -RegisteredCommand.newBuilder().setErrorCode(ErrorCode.success) -.setDatanodeUUID(datanodeDetails.getUuidString()) -.setClusterID(this.clusterID); -if (hostname != null && ip != null) { - builder.setHostname(hostname).setIpAddress(ip); -} -return builder.build(); +return RegisteredCommand.newBuilder().setErrorCode(ErrorCode.success) +.setDatanodeUUID(datanodeDetails.getUuidString()) +.setClusterID(this.clusterID) +.setHostname(datanodeDetails.getHostName()) +.setIpAddress(datanodeDetails.getIpAddress()) +.build(); } /** @@ -430,7 +425,7 @@ public class SCMNodeManager */ @Override public void processNodeReport(UUID dnUuid, NodeReportProto nodeReport) { - this.updateNodeStat(dnUuid, nodeReport); +this.updateNodeStat(dnUuid, nodeReport); } /**
[35/50] hadoop git commit: YARN-8380. Support bind propagation options for mounts in docker runtime. Contributed by Billie Rinaldi
YARN-8380. Support bind propagation options for mounts in docker runtime. Contributed by Billie Rinaldi Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8688a0c7 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8688a0c7 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8688a0c7 Branch: refs/heads/HADOOP-15461 Commit: 8688a0c7f88f2adf1a7fce695e06f3dd1f745080 Parents: 17e2616 Author: Eric Yang Authored: Mon Jul 23 20:12:04 2018 -0400 Committer: Eric Yang Committed: Mon Jul 23 20:12:04 2018 -0400 -- .../runtime/DockerLinuxContainerRuntime.java| 37 ++- .../linux/runtime/docker/DockerRunCommand.java | 18 +- .../container-executor/impl/utils/docker-util.c | 196 -- .../test/utils/test_docker_util.cc | 133 +- .../runtime/TestDockerContainerRuntime.java | 259 +-- .../gpu/TestNvidiaDockerV1CommandPlugin.java| 2 +- .../src/site/markdown/DockerContainers.md | 13 +- 7 files changed, 349 insertions(+), 309 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/8688a0c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java index c89d5fb..88e6c91 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java @@ -154,9 +154,13 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r * {@code YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS} allows users to specify + additional volume mounts for the Docker container. The value of the * environment variable should be a comma-separated list of mounts. - * All such mounts must be given as {@code source:dest:mode}, and the mode + * All such mounts must be given as {@code source:dest[:mode]} and the mode * must be "ro" (read-only) or "rw" (read-write) to specify the type of - * access being requested. The requested mounts will be validated by + * access being requested. If neither is specified, read-write will be + * assumed. The mode may include a bind propagation option. In that case, + * the mode should either be of the form [option], rw+[option], or + * ro+[option]. Valid bind propagation options are shared, rshared, slave, + * rslave, private, and rprivate. The requested mounts will be validated by * container-executor based on the values set in container-executor.cfg for * {@code docker.allowed.ro-mounts} and {@code docker.allowed.rw-mounts}. 
* @@ -189,7 +193,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime { private static final Pattern hostnamePattern = Pattern.compile( HOSTNAME_PATTERN); private static final Pattern USER_MOUNT_PATTERN = Pattern.compile( - "(?<=^|,)([^:\\x00]+):([^:\\x00]+):([a-z]+)"); + "(?<=^|,)([^:\\x00]+):([^:\\x00]+)" + + "(:(r[ow]|(r[ow][+])?(r?shared|r?slave|r?private)))?(?:,|$)"); private static final int HOST_NAME_LENGTH = 64; private static final String DEFAULT_PROCFS = "/proc"; @@ -840,24 +845,30 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime { + environment.get(ENV_DOCKER_CONTAINER_MOUNTS)); } parsedMounts.reset(); + long mountCount = 0; while (parsedMounts.find()) { +mountCount++; String src = parsedMounts.group(1); java.nio.file.Path srcPath = java.nio.file.Paths.get(src); if (!srcPath.isAbsolute()) { src = mountReadOnlyPath(src, localizedResources); } String dst = parsedMounts.group(2); -String mode = parsedMounts.group(3); -if (!mode.equals("ro") && !mode.equals("rw")) { - throw new ContainerExecutionException( - "Invalid mount mode requested for mount: " - +
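As the updated javadoc above describes, a mount entry is now source:dest[:mode], where the mode may be ro, rw, or carry a bind propagation suffix such as rw+rshared. A small illustration of a value for the documented YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS environment variable; the paths are made up and would still have to pass the container-executor allow-lists:

  import java.util.HashMap;
  import java.util.Map;

  public class DockerMountsSketch {
    public static void main(String[] args) {
      Map<String, String> env = new HashMap<>();
      // one read-only mount, one mount with the mode omitted (read-write is
      // assumed), and one read-write mount with rshared bind propagation
      env.put("YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS",
          "/etc/passwd:/etc/passwd:ro,/var/lib/data:/data,/tmp/work:/work:rw+rshared");
      System.out.println(env.get("YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS"));
    }
  }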
[29/50] hadoop git commit: HDDS-249. Fail if multiple SCM IDs on the DataNode and add SCM ID check after version request. Contributed by Bharat Viswanadham.
HDDS-249. Fail if multiple SCM IDs on the DataNode and add SCM ID check after version request. Contributed by Bharat Viswanadham. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fa9e301 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fa9e301 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fa9e301 Branch: refs/heads/HADOOP-15461 Commit: 9fa9e301b0471f38530a3cb596b00064436d311d Parents: 993ec02 Author: Nanda kumar Authored: Sat Jul 21 18:46:31 2018 +0530 Committer: Nanda kumar Committed: Sat Jul 21 18:46:31 2018 +0530 -- .../states/endpoint/VersionEndpointTask.java| 27 -- .../container/common/utils/HddsVolumeUtil.java | 56 .../container/ozoneimpl/ContainerReader.java| 22 +--- .../container/ozoneimpl/OzoneContainer.java | 2 +- .../ozone/container/common/ScmTestMock.java | 27 +- .../common/TestDatanodeStateMachine.java| 14 ++--- .../ozone/container/common/TestEndPoint.java| 49 + 7 files changed, 177 insertions(+), 20 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fa9e301/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java index d782b59..64e078d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java @@ -23,10 +23,14 @@ import org.apache.hadoop.hdds.protocol.proto import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.statemachine .EndpointStateMachine; +import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.protocol.VersionResponse; +import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.Map; @@ -37,6 +41,8 @@ import java.util.concurrent.Callable; */ public class VersionEndpointTask implements Callable { + public static final Logger LOG = LoggerFactory.getLogger(VersionEndpointTask + .class); private final EndpointStateMachine rpcEndPoint; private final Configuration configuration; private final OzoneContainer ozoneContainer; @@ -71,21 +77,32 @@ public class VersionEndpointTask implements Preconditions.checkNotNull(scmId, "Reply from SCM: scmId cannot be " + "null"); - Preconditions.checkNotNull(scmId, "Reply from SCM: clusterId cannot be" + - " null"); + Preconditions.checkNotNull(clusterId, "Reply from SCM: clusterId " + + "cannot be null"); // If version file does not exist create version file and also set scmId for (Map.Entry entry : volumeMap.entrySet()) { HddsVolume hddsVolume = entry.getValue(); -hddsVolume.format(clusterId); -ozoneContainer.getDispatcher().setScmId(scmId); +boolean result = HddsVolumeUtil.checkVolume(hddsVolume, scmId, +clusterId, LOG); +if (!result) { + volumeSet.failVolume(hddsVolume.getHddsRootDir().getPath()); +} 
} + if (volumeSet.getVolumesList().size() == 0) { +// All volumes are inconsistent state +throw new DiskOutOfSpaceException("All configured Volumes are in " + +"Inconsistent State"); + } + ozoneContainer.getDispatcher().setScmId(scmId); EndpointStateMachine.EndPointStates nextState = rpcEndPoint.getState().getNextState(); rpcEndPoint.setState(nextState); rpcEndPoint.zeroMissedCount(); -} catch (IOException ex) { +} catch (DiskOutOfSpaceException ex) { + rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN); +} catch(IOException ex) { rpcEndPoint.logIfNeeded(ex); } finally { rpcEndPoint.unlock(); http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fa9e301/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
[49/50] hadoop git commit: HADOOP-15522. Deprecate Shell#ReadLink by using native java code. Contributed by Giovanni Matteo Fumarola.
HADOOP-15522. Deprecate Shell#ReadLink by using native java code. Contributed by Giovanni Matteo Fumarola. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/866646eb Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/866646eb Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/866646eb Branch: refs/heads/HADOOP-15461 Commit: 866646eb3bf15d101574d000c41915206e8db713 Parents: b8d2b09 Author: Inigo Goiri Authored: Mon Jun 11 13:14:34 2018 -0700 Committer: Inigo Goiri Committed: Tue Jul 24 18:30:47 2018 -0700 -- .../java/org/apache/hadoop/fs/FileUtil.java | 21 +++- .../main/java/org/apache/hadoop/util/Shell.java | 8 +++- 2 files changed, 19 insertions(+), 10 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/866646eb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java index 61cb8d2..f3b5d58 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java @@ -196,22 +196,25 @@ public class FileUtil { * a symlink. */ public static String readLink(File f) { -/* NB: Use readSymbolicLink in java.nio.file.Path once available. Could - * use getCanonicalPath in File to get the target of the symlink but that - * does not indicate if the given path refers to a symlink. - */ if (f == null) { LOG.warn("Can not read a null symLink"); return ""; } -try { - return Shell.execCommand( - Shell.getReadlinkCommand(f.toString())).trim(); -} catch (IOException x) { - return ""; +if (Files.isSymbolicLink(f.toPath())) { + java.nio.file.Path p = null; + try { +p = Files.readSymbolicLink(f.toPath()); + } catch (Exception e) { +LOG.warn("Exception while reading the symbolic link " ++ f.getAbsolutePath() + ". Exception= " + e.getMessage()); +return ""; + } + return p.toAbsolutePath().toString(); } +LOG.warn("The file " + f.getAbsolutePath() + " is not a symbolic link."); +return ""; } /* http://git-wip-us.apache.org/repos/asf/hadoop/blob/866646eb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java index e902af0..691df63 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java @@ -309,7 +309,13 @@ public abstract class Shell { : new String[] { "ln", "-s", target, link }; } - /** Return a command to read the target of the a symbolic link. */ + /** + * Return a command to read the target of the a symbolic link. + * + * Deprecated and likely to be deleted in the near future. Please use + * FileUtil.symlink(). + */ + @Deprecated public static String[] getReadlinkCommand(String link) { return WINDOWS ? new String[] { getWinUtilsPath(), "readlink", link } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
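The rewritten FileUtil#readLink above uses only the standard java.nio.file API instead of forking a readlink process. A self-contained sketch of the same pattern, with a hypothetical path:

  import java.io.IOException;
  import java.nio.file.Files;
  import java.nio.file.Path;
  import java.nio.file.Paths;

  public class ReadLinkSketch {
    public static void main(String[] args) throws IOException {
      Path link = Paths.get("/tmp/example-link");   // hypothetical symlink
      if (Files.isSymbolicLink(link)) {
        // readSymbolicLink returns the link target as recorded on disk
        Path target = Files.readSymbolicLink(link);
        System.out.println(target.toAbsolutePath());
      } else {
        System.out.println(link + " is not a symbolic link");
      }
    }
  }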
[46/50] hadoop git commit: HADOOP-15612. Improve exception when tfile fails to load LzoCodec. (gera)
HADOOP-15612. Improve exception when tfile fails to load LzoCodec. (gera) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bec03cf Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bec03cf Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bec03cf Branch: refs/heads/HADOOP-15461 Commit: 6bec03cfc8bdcf6aa3df9c22231ab959ba31f2f5 Parents: ea2c6c8 Author: Gera Shegalov Authored: Tue Jul 17 00:05:39 2018 -0700 Committer: Gera Shegalov Committed: Tue Jul 24 14:32:30 2018 -0700 -- .../hadoop/io/file/tfile/Compression.java | 31 +++--- .../hadoop/io/file/tfile/TestCompression.java | 34 +++- 2 files changed, 53 insertions(+), 12 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bec03cf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java index fa85ed7..c4347e0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java @@ -5,9 +5,9 @@ * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -24,6 +24,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.util.ArrayList; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.compress.CodecPool; import org.apache.hadoop.io.compress.CompressionCodec; @@ -78,25 +79,33 @@ public final class Compression { public enum Algorithm { LZO(TFile.COMPRESSION_LZO) { private transient boolean checked = false; + private transient ClassNotFoundException cnf; + private transient boolean reinitCodecInTests; private static final String defaultClazz = "org.apache.hadoop.io.compress.LzoCodec"; + private transient String clazz; private transient CompressionCodec codec = null; + private String getLzoCodecClass() { +String extClazzConf = conf.get(CONF_LZO_CLASS); +String extClazz = (extClazzConf != null) ? +extClazzConf : System.getProperty(CONF_LZO_CLASS); +return (extClazz != null) ? extClazz : defaultClazz; + } + @Override public synchronized boolean isSupported() { -if (!checked) { +if (!checked || reinitCodecInTests) { checked = true; - String extClazzConf = conf.get(CONF_LZO_CLASS); - String extClazz = (extClazzConf != null) ? - extClazzConf : System.getProperty(CONF_LZO_CLASS); - String clazz = (extClazz != null) ? 
extClazz : defaultClazz; + reinitCodecInTests = conf.getBoolean("test.reload.lzo.codec", false); + clazz = getLzoCodecClass(); try { LOG.info("Trying to load Lzo codec class: " + clazz); codec = (CompressionCodec) ReflectionUtils.newInstance(Class .forName(clazz), conf); } catch (ClassNotFoundException e) { -// that is okay +cnf = e; } } return codec != null; @@ -105,9 +114,9 @@ public final class Compression { @Override CompressionCodec getCodec() throws IOException { if (!isSupported()) { - throw new IOException( - "LZO codec class not specified. Did you forget to set property " - + CONF_LZO_CLASS + "?"); + throw new IOException(String.format( + "LZO codec %s=%s could not be loaded", CONF_LZO_CLASS, clazz), + cnf); } return codec; http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bec03cf/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestCompression.java -- diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestCompression.java
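The refactoring above resolves the LZO codec class from the configuration first, then from a system property under the same key, then from the built-in default, and the remembered ClassNotFoundException is now chained into the IOException thrown by getCodec(). A standalone sketch of that resolution order; the key string below is a stand-in for Compression's CONF_LZO_CLASS constant, whose literal value is not shown in this hunk:

  import org.apache.hadoop.conf.Configuration;

  public final class LzoClassResolutionSketch {
    // stand-ins for Compression.CONF_LZO_CLASS (assumed key string) and the
    // default codec class named in the patch
    private static final String LZO_CLASS_KEY = "io.compression.codec.lzo.class";
    private static final String DEFAULT_CLAZZ =
        "org.apache.hadoop.io.compress.LzoCodec";

    static String resolveLzoCodecClass(Configuration conf) {
      String fromConf = conf.get(LZO_CLASS_KEY);
      String ext = (fromConf != null)
          ? fromConf : System.getProperty(LZO_CLASS_KEY);
      return (ext != null) ? ext : DEFAULT_CLAZZ;
    }

    private LzoClassResolutionSketch() { }
  }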
[41/50] hadoop git commit: HDDS-272. TestBlockDeletingService is failing with DiskOutOfSpaceException. Contributed by Lokesh Jain.
HDDS-272. TestBlockDeletingService is failing with DiskOutOfSpaceException. Contributed by Lokesh Jain. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/773d312f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/773d312f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/773d312f Branch: refs/heads/HADOOP-15461 Commit: 773d312f7412d5050c106ed3a1cd0d1934bfa2e0 Parents: ff7c2ed Author: Mukul Kumar Singh Authored: Tue Jul 24 21:23:20 2018 +0530 Committer: Mukul Kumar Singh Committed: Tue Jul 24 21:23:20 2018 +0530 -- .../container/keyvalue/KeyValueHandler.java | 2 +- .../background/BlockDeletingService.java| 9 +- .../testutils/BlockDeletingServiceTestImpl.java | 3 +- .../common/TestBlockDeletingService.java| 90 4 files changed, 60 insertions(+), 44 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/773d312f/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 9aa3df7..d3a1ca4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -136,7 +136,7 @@ public class KeyValueHandler extends Handler { TimeUnit.MILLISECONDS); this.blockDeletingService = new BlockDeletingService(containerSet, svcInterval, serviceTimeout, -config); +TimeUnit.MILLISECONDS, config); blockDeletingService.start(); // TODO: Add supoort for different volumeChoosingPolicies. 
volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy(); http://git-wip-us.apache.org/repos/asf/hadoop/blob/773d312f/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java index a3e36f4..4a572ca 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java @@ -88,11 +88,10 @@ public class BlockDeletingService extends BackgroundService{ // Core pool size for container tasks private final static int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 10; - public BlockDeletingService(ContainerSet containerSet, - long serviceInterval, long serviceTimeout, Configuration conf) { -super("BlockDeletingService", serviceInterval, -TimeUnit.MILLISECONDS, BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, -serviceTimeout); + public BlockDeletingService(ContainerSet containerSet, long serviceInterval, + long serviceTimeout, TimeUnit timeUnit, Configuration conf) { +super("BlockDeletingService", serviceInterval, timeUnit, +BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout); this.containerSet = containerSet; containerDeletionPolicy = ReflectionUtils.newInstance(conf.getClass( ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, http://git-wip-us.apache.org/repos/asf/hadoop/blob/773d312f/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java -- diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java index a87f655..115b5e2 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java @@ -44,7 +44,8 @@ public class BlockDeletingServiceTestImpl public BlockDeletingServiceTestImpl(ContainerSet containerSet, int
[15/50] hadoop git commit: HDDS-269. Refactor IdentifiableEventPayload to use a long ID. Contributed by Ajay Kumar.
HDDS-269. Refactor IdentifiableEventPayload to use a long ID. Contributed by Ajay Kumar. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9c44ecf Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9c44ecf Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9c44ecf Branch: refs/heads/HADOOP-15461 Commit: e9c44ecfc6ca9d02b2073f16eeadfd7f4a490799 Parents: cbf2026 Author: Nanda kumar Authored: Fri Jul 20 21:39:45 2018 +0530 Committer: Nanda kumar Committed: Fri Jul 20 21:39:45 2018 +0530 -- .../common/statemachine/StateContext.java | 4 +- .../commandhandler/CommandHandler.java | 6 +- .../commands/CloseContainerCommand.java | 6 +- .../protocol/commands/DeleteBlocksCommand.java | 6 +- .../commands/ReplicateContainerCommand.java | 6 +- .../protocol/commands/ReregisterCommand.java| 2 +- .../ozone/protocol/commands/SCMCommand.java | 16 ++--- .../hadoop/hdds/server/events/EventWatcher.java | 47 --- .../server/events/IdentifiableEventPayload.java | 4 +- .../hdds/server/events/TestEventWatcher.java| 61 +--- 10 files changed, 75 insertions(+), 83 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c44ecf/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java index 7ed30f8..faaff69 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java @@ -311,9 +311,9 @@ public class StateContext { * @param cmd - {@link SCMCommand}. 
*/ public void addCmdStatus(SCMCommand cmd) { -this.addCmdStatus(cmd.getCmdId(), +this.addCmdStatus(cmd.getId(), CommandStatusBuilder.newBuilder() -.setCmdId(cmd.getCmdId()) +.setCmdId(cmd.getId()) .setStatus(Status.PENDING) .setType(cmd.getType()) .build()); http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c44ecf/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java index 2016419..71c25b5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java @@ -64,9 +64,9 @@ public interface CommandHandler { */ default void updateCommandStatus(StateContext context, SCMCommand command, boolean cmdExecuted, Logger log) { -if (!context.updateCommandStatus(command.getCmdId(), cmdExecuted)) { - log.debug("{} with cmdId:{} not found.", command.getType(), - command.getCmdId()); +if (!context.updateCommandStatus(command.getId(), cmdExecuted)) { + log.debug("{} with Id:{} not found.", command.getType(), + command.getId()); } } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c44ecf/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java index 6b7c22c..1829642 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java @@ -41,8 +41,8 @@ public class CloseContainerCommand // Should be called only for protobuf conversion private CloseContainerCommand(long containerID, - HddsProtos.ReplicationType replicationType, long cmdId) { -super(cmdId); +
[28/50] hadoop git commit: HADOOP-15596. Stack trace should not be printed out when running hadoop key commands. Contributed by Kitti Nanasi.
HADOOP-15596. Stack trace should not be printed out when running hadoop key commands. Contributed by Kitti Nanasi. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/993ec026 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/993ec026 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/993ec026 Branch: refs/heads/HADOOP-15461 Commit: 993ec026d10c7566fd358c022c061bca118c92f0 Parents: 1622a4b Author: Xiao Chen Authored: Thu Jul 19 14:25:38 2018 -0700 Committer: Xiao Chen Committed: Fri Jul 20 19:46:46 2018 -0700 -- .../org/apache/hadoop/crypto/key/KeyShell.java | 32 +--- .../key/kms/LoadBalancingKMSClientProvider.java | 2 +- .../org/apache/hadoop/tools/CommandShell.java | 6 +++- 3 files changed, 27 insertions(+), 13 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/993ec026/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java index fa84c47..3f8b337 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java @@ -265,8 +265,7 @@ public class KeyShell extends CommandShell { } } } catch (IOException e) { -getOut().println("Cannot list keys for KeyProvider: " + provider -+ ": " + e.toString()); +getOut().println("Cannot list keys for KeyProvider: " + provider); throw e; } } @@ -318,12 +317,12 @@ public class KeyShell extends CommandShell { printProviderWritten(); } catch (NoSuchAlgorithmException e) { getOut().println("Cannot roll key: " + keyName + - " within KeyProvider: " + provider + ". " + e.toString()); + " within KeyProvider: " + provider + "."); throw e; } } catch (IOException e1) { getOut().println("Cannot roll key: " + keyName + " within KeyProvider: " -+ provider + ". " + e1.toString()); ++ provider + "."); throw e1; } } @@ -374,8 +373,8 @@ public class KeyShell extends CommandShell { } return cont; } catch (IOException e) { - getOut().println(keyName + " will not be deleted."); - e.printStackTrace(getErr()); + getOut().println(keyName + " will not be deleted. " + + prettifyException(e)); } } return true; @@ -392,7 +391,7 @@ public class KeyShell extends CommandShell { getOut().println(keyName + " has been successfully deleted."); printProviderWritten(); } catch (IOException e) { - getOut().println(keyName + " has not been deleted. " + e.toString()); + getOut().println(keyName + " has not been deleted."); throw e; } } @@ -463,13 +462,13 @@ public class KeyShell extends CommandShell { "with options " + options.toString() + "."); printProviderWritten(); } catch (InvalidParameterException e) { -getOut().println(keyName + " has not been created. " + e.toString()); +getOut().println(keyName + " has not been created."); throw e; } catch (IOException e) { -getOut().println(keyName + " has not been created. " + e.toString()); +getOut().println(keyName + " has not been created."); throw e; } catch (NoSuchAlgorithmException e) { -getOut().println(keyName + " has not been created. 
" + e.toString()); +getOut().println(keyName + " has not been created."); throw e; } } @@ -520,7 +519,7 @@ public class KeyShell extends CommandShell { printProviderWritten(); } catch (IOException e) { getOut().println("Cannot invalidate cache for key: " + keyName + -" within KeyProvider: " + provider + ". " + e.toString()); +" within KeyProvider: " + provider + "."); throw e; } } @@ -531,6 +530,17 @@ public class KeyShell extends CommandShell { } } + @Override + protected void printException(Exception e){ +getErr().println("Executing command failed with " + +"the following exception: " + prettifyException(e)); + } + + private String prettifyException(Exception e) { +return e.getClass().getSimpleName() + ": " + +e.getLocalizedMessage().split("\n")[0]; + } + /** * main() entry point for the KeyShell.
[23/50] hadoop git commit: HDDS-275. Add message output for succeeded -deleteVolume CLI. Contributed by Nilotpal Nandi.
HDDS-275. Add message output for succeeded -deleteVolume CLI. Contributed by Nilotpal Nandi. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7ae5567 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7ae5567 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7ae5567 Branch: refs/heads/HADOOP-15461 Commit: c7ae55675ed56cb18266425c02674a5a87561e0c Parents: 6837121 Author: Xiaoyu Yao Authored: Fri Jul 20 13:12:07 2018 -0700 Committer: Xiaoyu Yao Committed: Fri Jul 20 13:12:53 2018 -0700 -- .../test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java | 2 ++ .../hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java | 1 + 2 files changed, 3 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ae5567/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java -- diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java index 8f53049..573f097 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java @@ -249,6 +249,8 @@ public class TestOzoneShell { String[] args = new String[] {"-deleteVolume", url + "/" + volumeName, "-root"}; assertEquals(0, ToolRunner.run(shell, args)); +String output = out.toString(); +assertTrue(output.contains("Volume " + volumeName + " is deleted")); // verify if volume has been deleted try { http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ae5567/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java -- diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java index d6facf6..2df788a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java @@ -67,5 +67,6 @@ public class DeleteVolumeHandler extends Handler { } client.getObjectStore().deleteVolume(volumeName); +System.out.printf("Volume %s is deleted%n", volumeName); } } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[22/50] hadoop git commit: HDDS-239. Add PipelineStateManager to track pipeline state transition. Contributed by Mukul Kumar Singh.
HDDS-239. Add PipelineStateManager to track pipeline state transition. Contributed by Mukul Kumar Singh. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6837121a Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6837121a Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6837121a Branch: refs/heads/HADOOP-15461 Commit: 6837121a43231f854b0b22ad20330012439313ce Parents: ba25d27 Author: Xiaoyu Yao Authored: Fri Jul 20 13:03:25 2018 -0700 Committer: Xiaoyu Yao Committed: Fri Jul 20 13:03:35 2018 -0700 -- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 5 + .../scm/container/common/helpers/Pipeline.java | 7 + .../common/src/main/resources/ozone-default.xml | 12 + .../common/statemachine/StateContext.java | 52 +++- .../states/endpoint/HeartbeatEndpointTask.java | 24 +- .../StorageContainerDatanodeProtocol.proto | 4 +- .../common/report/TestReportPublisher.java | 41 --- .../endpoint/TestHeartbeatEndpointTask.java | 302 +++ .../common/states/endpoint/package-info.java| 18 ++ .../hdds/scm/container/ContainerMapping.java| 4 + .../hdds/scm/exceptions/SCMException.java | 1 + .../hdds/scm/pipelines/PipelineManager.java | 64 ++-- .../hdds/scm/pipelines/PipelineSelector.java| 212 +++-- .../scm/pipelines/ratis/RatisManagerImpl.java | 33 +- .../standalone/StandaloneManagerImpl.java | 21 +- .../hdds/scm/pipeline/TestNode2PipelineMap.java | 14 + 16 files changed, 668 insertions(+), 146 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 71184cf..6e940ad 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -236,6 +236,11 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s"; + public static final String OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT = + "ozone.scm.pipeline.creation.lease.timeout"; + + public static final String + OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT_DEFAULT = "60s"; public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY = "ozone.scm.block.deletion.max.retry"; http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java index c5794f4..534c9fd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java @@ -214,6 +214,13 @@ public class Pipeline { } /** + * Update the State of the pipeline. + */ + public void setLifeCycleState(HddsProtos.LifeCycleState nextState) { + lifeCycleState = nextState; + } + + /** * Gets the pipeline Name. 
* * @return - Name of the pipeline http://git-wip-us.apache.org/repos/asf/hadoop/blob/6837121a/hadoop-hdds/common/src/main/resources/ozone-default.xml -- diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 5a1d26a..69a382a 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -1085,5 +1085,17 @@ executed since last report. Unit could be defined with postfix (ns,ms,s,m,h,d) + +ozone.scm.pipeline.creation.lease.timeout +60s +OZONE, SCM, PIPELINE + + Pipeline creation timeout in milliseconds to be used by SCM. When + BEGIN_CREATE event happens the pipeline is moved from ALLOCATED to + CREATING state, SCM will now wait for the configured amount of time + to get COMPLETE_CREATE event if it doesn't receive it will move the + pipeline to DELETING. + + \ No newline at end of file
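The new ozone.scm.pipeline.creation.lease.timeout property above bounds how long a pipeline may sit in CREATING before SCM gives up and moves it to DELETING; the shipped default is 60s. The change is reverted later in this batch (commit d2acf8d5), but while present the property can be tuned like any other duration setting. A minimal sketch, assuming the standard Configuration.setTimeDuration API; the 30-second value is illustrative:

  import java.util.concurrent.TimeUnit;
  import org.apache.hadoop.conf.Configuration;

  public class PipelineLeaseTimeoutSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // shorten the ALLOCATED -> CREATING lease described in ozone-default.xml
      conf.setTimeDuration("ozone.scm.pipeline.creation.lease.timeout",
          30, TimeUnit.SECONDS);
      System.out.println(
          conf.get("ozone.scm.pipeline.creation.lease.timeout"));   // prints 30s
    }
  }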
[31/50] hadoop git commit: YARN-8360. Improve YARN service restart policy and node manager auto restart policy. Contributed by Suma Shivaprasad
YARN-8360. Improve YARN service restart policy and node manager auto restart policy. Contributed by Suma Shivaprasad Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84d7bf1e Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84d7bf1e Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84d7bf1e Branch: refs/heads/HADOOP-15461 Commit: 84d7bf1eeff6b9418361afa4aa713e5e6f771365 Parents: bbe2f62 Author: Eric Yang Authored: Mon Jul 23 12:57:01 2018 -0400 Committer: Eric Yang Committed: Mon Jul 23 12:57:01 2018 -0400 -- .../service/component/AlwaysRestartPolicy.java | 5 ++ .../component/ComponentRestartPolicy.java | 2 + .../service/component/NeverRestartPolicy.java | 5 ++ .../component/OnFailureRestartPolicy.java | 5 ++ .../provider/AbstractProviderService.java | 29 + .../hadoop/yarn/service/ServiceTestUtils.java | 2 +- .../containerlaunch/TestAbstractLauncher.java | 66 7 files changed, 101 insertions(+), 13 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d7bf1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java index 704ab14..505120d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java @@ -79,4 +79,9 @@ public final class AlwaysRestartPolicy implements ComponentRestartPolicy { @Override public boolean shouldTerminate(Component component) { return false; } + + @Override public boolean allowContainerRetriesForInstance( + ComponentInstance componentInstance) { +return true; + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d7bf1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java index 23b0fb9..c5adffe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java @@ -42,4 +42,6 @@ public interface ComponentRestartPolicy { boolean shouldTerminate(Component component); + boolean 
allowContainerRetriesForInstance(ComponentInstance componentInstance); + } \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d7bf1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/NeverRestartPolicy.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/NeverRestartPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/NeverRestartPolicy.java index ace1f89..cd44a58 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/NeverRestartPolicy.java +++
[42/50] hadoop git commit: YARN-7748. TestContainerResizing.testIncreaseContainerUnreservedWhenApplicationCompleted fails due to multiple container fail events. Contributed by Weiwei Yang.
YARN-7748. TestContainerResizing.testIncreaseContainerUnreservedWhenApplicationCompleted fails due to multiple container fail events. Contributed by Weiwei Yang. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35ce6eb1 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35ce6eb1 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35ce6eb1 Branch: refs/heads/HADOOP-15461 Commit: 35ce6eb1f526ce3db7e015fb1761eee15604100c Parents: 773d312 Author: Sunil G Authored: Tue Jul 24 22:20:06 2018 +0530 Committer: Sunil G Committed: Tue Jul 24 22:20:17 2018 +0530 -- .../scheduler/capacity/TestContainerResizing.java | 18 +- 1 file changed, 13 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/35ce6eb1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java index eacbf6e..307d5ae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java @@ -45,6 +45,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent; @@ -58,7 +59,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica .FiCaSchedulerNode; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.CandidateNodeSet; import org.apache.hadoop.yarn.util.resource.Resources; @@ -740,11 +740,14 @@ public class TestContainerResizing { @Test public void testIncreaseContainerUnreservedWhenApplicationCompleted() throws Exception { +// Disable relaunch app attempt on failure, in order to check +// resource usages for current app only. 
+conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1); /** * Similar to testIncreaseContainerUnreservedWhenContainerCompleted, when * application finishes, reserved increase container should be cancelled */ -MockRM rm1 = new MockRM() { +MockRM rm1 = new MockRM(conf) { @Override public RMNodeLabelsManager createNodeLabelManager() { return mgr; @@ -807,9 +810,14 @@ public class TestContainerResizing { Assert.assertEquals(6 * GB, app.getAppAttemptResourceUsage().getReserved().getMemorySize()); -// Kill the application -cs.handle(new AppAttemptRemovedSchedulerEvent(am1.getApplicationAttemptId(), -RMAppAttemptState.KILLED, false)); +// Kill the application by killing the AM container +ContainerId amContainer = +ContainerId.newContainerId(am1.getApplicationAttemptId(), 1); +cs.killContainer(cs.getRMContainer(amContainer)); +rm1.waitForState(am1.getApplicationAttemptId(), +RMAppAttemptState.FAILED); +rm1.waitForState(am1.getApplicationAttemptId().getApplicationId(), +RMAppState.FAILED); /* Check statuses after reservation satisfied */ // Increase request should be unreserved - To unsubscribe, e-mail:
[38/50] hadoop git commit: HDDS-262. Send SCM healthy and failed volumes in the heartbeat. Contributed by Bharat Viswanadham.
HDDS-262. Send SCM healthy and failed volumes in the heartbeat. Contributed by Bharat Viswanadham. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16f9aee5 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16f9aee5 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16f9aee5 Branch: refs/heads/HADOOP-15461 Commit: 16f9aee5f55bc37c1bb243708ee9b3f97e5a5b83 Parents: 2ced3ef Author: Nanda kumar Authored: Tue Jul 24 12:09:15 2018 +0530 Committer: Nanda kumar Committed: Tue Jul 24 12:09:15 2018 +0530 -- .../container/common/volume/HddsVolume.java | 81 ++-- .../container/common/volume/VolumeSet.java | 28 +-- .../container/common/volume/TestVolumeSet.java | 35 - 3 files changed, 111 insertions(+), 33 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/16f9aee5/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index 0cbfd9f..6b90146 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -37,6 +37,7 @@ import org.slf4j.LoggerFactory; import java.io.File; import java.io.IOException; import java.util.Properties; +import java.util.UUID; /** * HddsVolume represents volume in a datanode. {@link VolumeSet} maitains a @@ -84,6 +85,7 @@ public final class HddsVolume { private String datanodeUuid; private String clusterID; +private boolean failedVolume = false; public Builder(String rootDirStr) { this.volumeRootStr = rootDirStr; @@ -114,29 +116,47 @@ public final class HddsVolume { return this; } +// This is added just to create failed volume objects, which will be used +// to create failed HddsVolume objects in the case of any exceptions caused +// during creating HddsVolume object. 
+public Builder failedVolume(boolean failed) { + this.failedVolume = failed; + return this; +} + public HddsVolume build() throws IOException { return new HddsVolume(this); } } private HddsVolume(Builder b) throws IOException { -StorageLocation location = StorageLocation.parse(b.volumeRootStr); -hddsRootDir = new File(location.getUri().getPath(), HDDS_VOLUME_DIR); -this.state = VolumeState.NOT_INITIALIZED; -this.clusterID = b.clusterID; -this.datanodeUuid = b.datanodeUuid; -this.volumeIOStats = new VolumeIOStats(); - -VolumeInfo.Builder volumeBuilder = -new VolumeInfo.Builder(b.volumeRootStr, b.conf) -.storageType(b.storageType) -.configuredCapacity(b.configuredCapacity); -this.volumeInfo = volumeBuilder.build(); - -LOG.info("Creating Volume: " + this.hddsRootDir + " of storage type : " + -b.storageType + " and capacity : " + volumeInfo.getCapacity()); - -initialize(); +if (!b.failedVolume) { + StorageLocation location = StorageLocation.parse(b.volumeRootStr); + hddsRootDir = new File(location.getUri().getPath(), HDDS_VOLUME_DIR); + this.state = VolumeState.NOT_INITIALIZED; + this.clusterID = b.clusterID; + this.datanodeUuid = b.datanodeUuid; + this.volumeIOStats = new VolumeIOStats(); + + VolumeInfo.Builder volumeBuilder = + new VolumeInfo.Builder(b.volumeRootStr, b.conf) + .storageType(b.storageType) + .configuredCapacity(b.configuredCapacity); + this.volumeInfo = volumeBuilder.build(); + + LOG.info("Creating Volume: " + this.hddsRootDir + " of storage type : " + + b.storageType + " and capacity : " + volumeInfo.getCapacity()); + + initialize(); +} else { + // Builder is called with failedVolume set, so create a failed volume + // HddsVolumeObject. + hddsRootDir = new File(b.volumeRootStr); + volumeIOStats = null; + volumeInfo = null; + storageID = UUID.randomUUID().toString(); + state = VolumeState.FAILED; +} } public VolumeInfo getVolumeInfo() { @@ -285,7 +305,10 @@ public final class HddsVolume { } public StorageType getStorageType() { -return volumeInfo.getStorageType(); +if(volumeInfo != null) { + return volumeInfo.getStorageType(); +} +return StorageType.DEFAULT; } public String getStorageID() { @@ -313,11 +336,17 @@ public final class HddsVolume { }
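The Builder.failedVolume(boolean) flag added above lets the datanode keep a placeholder HddsVolume object, in FAILED state and without touching the filesystem, for a volume whose initialization threw. A minimal sketch of building such a placeholder; the path is illustrative:

  import java.io.IOException;
  import org.apache.hadoop.ozone.container.common.volume.HddsVolume;

  public class FailedVolumeSketch {
    public static void main(String[] args) throws IOException {
      // failedVolume(true) skips StorageLocation parsing and initialize(),
      // leaving the volume in VolumeState.FAILED with a random storage ID
      HddsVolume failed = new HddsVolume.Builder("/data/disk1/hdds")
          .failedVolume(true)
          .build();
      System.out.println(failed.getHddsRootDir());
    }
  }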
[24/50] hadoop git commit: Revert "HDDS-239. Add PipelineStateManager to track pipeline state transition. Contributed by Mukul Kumar Singh."
Revert "HDDS-239. Add PipelineStateManager to track pipeline state transition. Contributed by Mukul Kumar Singh." This reverts commit 6837121a43231f854b0b22ad20330012439313ce.(Mixed with HDDS-260) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2acf8d5 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2acf8d5 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2acf8d5 Branch: refs/heads/HADOOP-15461 Commit: d2acf8d560950f06ffbf5c217fbfab76cd70d5da Parents: c7ae556 Author: Xiaoyu Yao Authored: Fri Jul 20 14:20:18 2018 -0700 Committer: Xiaoyu Yao Committed: Fri Jul 20 14:20:18 2018 -0700 -- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 5 - .../scm/container/common/helpers/Pipeline.java | 7 - .../common/src/main/resources/ozone-default.xml | 12 - .../common/statemachine/StateContext.java | 52 +--- .../states/endpoint/HeartbeatEndpointTask.java | 24 +- .../StorageContainerDatanodeProtocol.proto | 4 +- .../common/report/TestReportPublisher.java | 41 +++ .../endpoint/TestHeartbeatEndpointTask.java | 302 --- .../common/states/endpoint/package-info.java| 18 -- .../hdds/scm/container/ContainerMapping.java| 4 - .../hdds/scm/exceptions/SCMException.java | 1 - .../hdds/scm/pipelines/PipelineManager.java | 64 ++-- .../hdds/scm/pipelines/PipelineSelector.java| 212 ++--- .../scm/pipelines/ratis/RatisManagerImpl.java | 33 +- .../standalone/StandaloneManagerImpl.java | 21 +- .../hdds/scm/pipeline/TestNode2PipelineMap.java | 14 - 16 files changed, 146 insertions(+), 668 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 6e940ad..71184cf 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -236,11 +236,6 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s"; - public static final String OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT = - "ozone.scm.pipeline.creation.lease.timeout"; - - public static final String - OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT_DEFAULT = "60s"; public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY = "ozone.scm.block.deletion.max.retry"; http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java index 534c9fd..c5794f4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java @@ -214,13 +214,6 @@ public class Pipeline { } /** - * Update the State of the pipeline. - */ - public void setLifeCycleState(HddsProtos.LifeCycleState nextState) { - lifeCycleState = nextState; - } - - /** * Gets the pipeline Name. 
* * @return - Name of the pipeline http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2acf8d5/hadoop-hdds/common/src/main/resources/ozone-default.xml -- diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 69a382a..5a1d26a 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -1085,17 +1085,5 @@ executed since last report. Unit could be defined with postfix (ns,ms,s,m,h,d) - -ozone.scm.pipeline.creation.lease.timeout -60s -OZONE, SCM, PIPELINE - - Pipeline creation timeout in milliseconds to be used by SCM. When - BEGIN_CREATE event happens the pipeline is moved from ALLOCATED to - CREATING state, SCM will now wait for the configured amount of time - to get COMPLETE_CREATE event if it doesn't receive it will move the - pipeline to DELETING. - - \ No newline at end of file
[03/50] hadoop git commit: HDDS-207. ozone listVolume command accepts random values as argument. Contributed by Lokesh Jain.
HDDS-207. ozone listVolume command accepts random values as argument. Contributed by Lokesh Jain. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/129269f9 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/129269f9 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/129269f9 Branch: refs/heads/HADOOP-15461 Commit: 129269f98926775ccb5046d9dd41b58f1013211d Parents: d5d4447 Author: Xiaoyu Yao Authored: Wed Jul 18 11:05:42 2018 -0700 Committer: Xiaoyu Yao Committed: Wed Jul 18 11:05:42 2018 -0700 -- .../src/test/acceptance/basic/ozone-shell.robot| 8 +--- .../apache/hadoop/ozone/ozShell/TestOzoneShell.java| 12 ++-- .../org/apache/hadoop/ozone/web/ozShell/Shell.java | 1 + .../ozone/web/ozShell/volume/ListVolumeHandler.java| 13 - 4 files changed, 28 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/129269f9/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot -- diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot index f4be3e0..cc4b035 100644 --- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot @@ -52,7 +52,9 @@ Test ozone shell ${result} = Execute on datanodeozone oz -createVolume ${protocol}${server}/${volume} -user bilbo -quota 100TB -root Should not contain ${result} Failed Should contain ${result} Creating Volume: ${volume} -${result} = Execute on datanodeozone oz -listVolume o3://ozoneManager -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")' +${result} = Execute on datanodeozone oz -listVolume ${protocol}${server}/ -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")' +Should contain ${result} createdOn +${result} = Execute on datanodeozone oz -listVolume -user bilbo | grep -Ev 'Removed|DEBUG|ERROR|INFO|TRACE|WARN' | jq -r '.[] | select(.volumeName=="${volume}")' Should contain ${result} createdOn Execute on datanodeozone oz -updateVolume ${protocol}${server}/${volume} -user bill -quota 10TB ${result} = Execute on datanodeozone oz -infoVolume ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .owner | .name' @@ -66,7 +68,7 @@ Test ozone shell Should Be Equal ${result} GROUP ${result} = Execute on datanodeozone oz -updateBucket ${protocol}${server}/${volume}/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' Should Be Equal ${result} USER -${result} = Execute on datanodeozone oz -listBucket o3://ozoneManager/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' +${result} = Execute on datanodeozone oz -listBucket ${protocol}${server}/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' Should Be Equal ${result} ${volume} Run Keyword and Return If ${withkeytest} Test key handling ${protocol} ${server} ${volume} Execute on datanodeozone oz -deleteBucket ${protocol}${server}/${volume}/bb1 @@ -80,6 +82,6 @@ Test key handling Execute on datanodels -l NOTICE.txt.1 ${result} = Execute on datanodeozone oz -infoKey ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")' Should contain ${result} createdOn -${result} = Execute on datanodeozone oz -listKey o3://ozoneManager/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName' +${result} = Execute on datanodeozone oz -listKey ${protocol}${server}/${volume}/bb1 | grep -Ev
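Per the file list above, the substantive change is in ListVolumeHandler.java (13 lines), which now rejects arbitrary positional arguments to -listVolume. For illustration only, a minimal sketch of that kind of root-URI check using plain java.net.URI; this is not the handler's code, and the accepted schemes are assumptions:

```java
import java.net.URI;
import java.net.URISyntaxException;

public class ListVolumeArgCheck {
  // Illustration only: accept an empty argument or a well-formed root URI such as
  // "o3://ozoneManager/" or "http://host:9874/", and reject free-form text.
  static void validate(String arg) throws URISyntaxException {
    if (arg == null || arg.isEmpty()) {
      return; // no URI given; fall back to the configured Ozone Manager
    }
    URI uri = new URI(arg);
    String scheme = uri.getScheme();
    if (scheme != null && !"o3".equals(scheme) && !"http".equals(scheme)) {
      throw new IllegalArgumentException("Unsupported volume URI: " + arg);
    }
  }
}
```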
[33/50] hadoop git commit: YARN-6966. NodeManager metrics may return wrong negative values when NM restart. (Szilard Nemeth via Haibo Chen)
YARN-6966. NodeManager metrics may return wrong negative values when NM restart. (Szilard Nemeth via Haibo Chen) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d3c39e9 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d3c39e9 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d3c39e9 Branch: refs/heads/HADOOP-15461 Commit: 9d3c39e9dd88b8f32223c01328581bb68507d415 Parents: 3a9e25e Author: Haibo Chen Authored: Mon Jul 23 11:06:44 2018 -0700 Committer: Haibo Chen Committed: Mon Jul 23 11:07:24 2018 -0700 -- .../containermanager/ContainerManagerImpl.java | 2 +- .../scheduler/ContainerScheduler.java | 16 -- .../recovery/NMLeveldbStateStoreService.java| 32 ++- .../recovery/NMNullStateStoreService.java | 2 +- .../recovery/NMStateStoreService.java | 3 +- .../BaseContainerManagerTest.java | 2 +- .../TestContainerManagerRecovery.java | 57 .../TestContainerSchedulerRecovery.java | 46 +++- .../metrics/TestNodeManagerMetrics.java | 4 +- .../recovery/NMMemoryStateStoreService.java | 16 +- .../TestNMLeveldbStateStoreService.java | 21 +++- 11 files changed, 163 insertions(+), 38 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3c39e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index ad63720..89bef8f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -496,7 +496,7 @@ public class ContainerManagerImpl extends CompositeService implements Container container = new ContainerImpl(getConfig(), dispatcher, launchContext, credentials, metrics, token, context, rcs); context.getContainers().put(token.getContainerID(), container); -containerScheduler.recoverActiveContainer(container, rcs.getStatus()); +containerScheduler.recoverActiveContainer(container, rcs); app.handle(new ApplicationContainerInitEvent(container)); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3c39e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java index 5cdcf41..a61b9d1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java @@ -44,6 +44,9 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.Contai import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService +.RecoveredContainerState; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -259,11 +262,11 @@ public class ContainerScheduler extends AbstractService implements * @param rcs Recovered Container status */ public void recoverActiveContainer(Container container, -
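The visible API change is that ContainerManagerImpl now hands the whole RecoveredContainerState to ContainerScheduler#recoverActiveContainer instead of only the status. A rough sketch of why that matters for the metrics fix follows; the field and tracker names are simplified placeholders, not the patch itself:

```java
// Sketch only: on recovery the scheduler must restore the same bookkeeping that the
// normal start path maintains, otherwise the counters go negative after an NM restart
// when the recovered containers later finish and are decremented.
public void recoverActiveContainer(Container container,
    NMStateStoreService.RecoveredContainerState rcs) {
  RecoveredContainerStatus status = rcs.getStatus();
  if (status == RecoveredContainerStatus.QUEUED) {
    queuedContainers.add(container);                       // hypothetical field
  } else if (status == RecoveredContainerStatus.LAUNCHED) {
    runningContainers.add(container);                      // hypothetical field
    utilizationTracker.addContainerResources(container);   // hypothetical call
  }
}
```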
[50/50] hadoop git commit: HADOOP-15536. Adding support in FileUtil for the creation of directories. Contributed by Giovanni Matteo Fumarola.
HADOOP-15536. Adding support in FileUtil for the creation of directories. Contributed by Giovanni Matteo Fumarola. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bac459b3 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bac459b3 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bac459b3 Branch: refs/heads/HADOOP-15461 Commit: bac459b3fffb62e04ecf39ebf0c3ff54b2dc49c0 Parents: 866646e Author: Inigo Goiri Authored: Fri Jun 29 13:38:24 2018 -0700 Committer: Inigo Goiri Committed: Tue Jul 24 18:30:49 2018 -0700 -- .../java/org/apache/hadoop/fs/FileUtil.java | 50 + .../apache/hadoop/fs/TestFileUtilsMkDir.java| 205 +++ 2 files changed, 255 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/bac459b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java index f3b5d58..bf3feb5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java @@ -57,6 +57,7 @@ import java.util.zip.ZipInputStream; import org.apache.commons.collections.map.CaseInsensitiveMap; import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; +import org.apache.commons.io.FileExistsException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -1638,4 +1639,53 @@ public class FileUtil { // check for ports return srcUri.getPort()==dstUri.getPort(); } + + /** + * Creates the directory named by the destination pathname, including any + * necessary but nonexistent parent directories. Note that if this operation + * fails it may have succeeded in creating some of the necessary parent + * directories. + * + * @param dst the directory which creation should be performed. + * @return 0 on success or if the directory was already present, 1 otherwise. + * @throws FileExistsException if the dst is an existing file + */ + public static int mkDirs(String dst) throws FileAlreadyExistsException { +// Null pointer input check +if (dst == null) { + LOG.warn("Can not create a directory with null path"); + return 1; +} +File directory = new File(dst); + +// Create the directory(ies) +boolean result = false; +try { + result = directory.mkdirs(); +} catch (SecurityException e) { + LOG.warn("Unable to create the directory {}. 
Exception = {}", dst, + e.getMessage()); + return 1; +} + +// Check if mkdir created successfully the directory(ies) +if (result) { + LOG.debug("Directory created successfully: {}", dst); + return 0; +} else { + // File already present check + if (directory.exists()) { +if (directory.isFile()) { + throw new FileAlreadyExistsException( + "Can not create a directory since a file is already present" + + " at the destination " + dst); +} +LOG.debug("Directory already present {}", dst); +return 0; + } + LOG.warn("Unable to create the directory {}", dst); + return 1; +} + } + } http://git-wip-us.apache.org/repos/asf/hadoop/blob/bac459b3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtilsMkDir.java -- diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtilsMkDir.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtilsMkDir.java new file mode 100644 index 000..0e6bfdb --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtilsMkDir.java @@ -0,0 +1,205 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + *
[39/50] hadoop git commit: YARN-8544. [DS] AM registration fails when hadoop authorization is enabled. Contributed by Bibin A Chundatt.
YARN-8544. [DS] AM registration fails when hadoop authorization is enabled. Contributed by Bibin A Chundatt. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84612788 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84612788 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84612788 Branch: refs/heads/HADOOP-15461 Commit: 84612788339392fcda1aef0e27c43f5c6b2a19e5 Parents: 16f9aee Author: bibinchundatt Authored: Tue Jul 24 13:09:17 2018 +0530 Committer: bibinchundatt Committed: Tue Jul 24 13:09:17 2018 +0530 -- .../src/main/conf/hadoop-policy.xml | 20 .../dev-support/findbugs-exclude.xml| 4 .../hadoop/yarn/conf/YarnConfiguration.java | 7 ++ .../yarn/conf/TestYarnConfigurationFields.java | 4 .../nodemanager/amrmproxy/AMRMProxyService.java | 8 +++ .../collectormanager/NMCollectorService.java| 2 +- .../containermanager/ContainerManagerImpl.java | 2 +- .../localizer/ResourceLocalizationService.java | 2 +- .../security/authorize/NMPolicyProvider.java| 25 ++-- .../security/authorize/RMPolicyProvider.java| 3 +++ 10 files changed, 72 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/84612788/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml -- diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml index cf3dd1f..bd7c111 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml +++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml @@ -242,4 +242,24 @@ group list is separated by a blank. For e.g. "alice,bob users,wheel". A special value of "*" means all users are allowed. + + + security.applicationmaster-nodemanager.applicationmaster.protocol.acl +* +ACL for ApplicationMasterProtocol, used by the Nodemanager +and ApplicationMasters to communicate. +The ACL is a comma-separated list of user and group names. The user and +group list is separated by a blank. For e.g. "alice,bob users,wheel". +A special value of "*" means all users are allowed. + + + +security.distributedscheduling.protocol.acl +* +ACL for DistributedSchedulingAMProtocol, used by the Nodemanager +and Resourcemanager to communicate. +The ACL is a comma-separated list of user and group names. The user and +group list is separated by a blank. For e.g. "alice,bob users,wheel". +A special value of "*" means all users are allowed. 
+ http://git-wip-us.apache.org/repos/asf/hadoop/blob/84612788/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml index 5cc81e5..216c3bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml @@ -468,6 +468,10 @@ + + + + http://git-wip-us.apache.org/repos/asf/hadoop/blob/84612788/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 9156c2d..bbf877f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -2248,6 +2248,9 @@ public class YarnConfiguration extends Configuration { public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_PROTOCOL = "security.applicationmaster.protocol.acl"; + public static final String + YARN_SECURITY_SERVICE_AUTHORIZATION_DISTRIBUTEDSCHEDULING_PROTOCOL = + "security.distributedscheduling.protocol.acl"; public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGEMENT_PROTOCOL = @@ -2264,6 +2267,10 @@ public class YarnConfiguration extends Configuration { YARN_SECURITY_SERVICE_AUTHORIZATION_COLLECTOR_NODEMANAGER_PROTOCOL = "security.collector-nodemanager.protocol.acl"; + public static final
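The two new keys follow the usual service-level authorization convention and default to "*". A hedged sketch of tightening them programmatically; the key names come from the patch, while the user and group lists below are invented for illustration:

```java
import org.apache.hadoop.conf.Configuration;

public class PolicyAclExample {
  public static Configuration tightenedAcls() {
    Configuration conf = new Configuration();
    // Service-level authorization must be enabled for hadoop-policy.xml ACLs to apply.
    conf.setBoolean("hadoop.security.authorization", true);
    // Format: "user1,user2 group1,group2"; the shipped default "*" allows everyone.
    conf.set(
        "security.applicationmaster-nodemanager.applicationmaster.protocol.acl",
        "appuser yarn-admins");
    conf.set("security.distributedscheduling.protocol.acl", "appuser yarn-admins");
    return conf;
  }
}
```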
[19/50] hadoop git commit: HDDS-256. Adding CommandStatusReport Handler. Contributed by Ajay Kumar.
HDDS-256. Adding CommandStatusReport Handler. Contributed by Ajay Kumar. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89a0f807 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89a0f807 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89a0f807 Branch: refs/heads/HADOOP-15461 Commit: 89a0f80741beb5a998f143849e797d780332048b Parents: 8a6bb84 Author: Xiaoyu Yao Authored: Fri Jul 20 11:03:33 2018 -0700 Committer: Xiaoyu Yao Committed: Fri Jul 20 11:07:09 2018 -0700 -- .../scm/command/CommandStatusReportHandler.java | 129 + .../hadoop/hdds/scm/command/package-info.java | 26 .../hadoop/hdds/scm/events/SCMEvents.java | 24 .../scm/server/StorageContainerManager.java | 4 + .../org/apache/hadoop/hdds/scm/TestUtils.java | 18 +++ .../command/TestCommandStatusReportHandler.java | 137 +++ .../hadoop/hdds/scm/command/package-info.java | 22 +++ 7 files changed, 360 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a0f807/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java -- diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java new file mode 100644 index 000..9413a46 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java @@ -0,0 +1,129 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.command; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.protocol.proto +.StorageContainerDatanodeProtocolProtos.CommandStatus; +import org.apache.hadoop.hdds.scm.events.SCMEvents; +import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher +.CommandStatusReportFromDatanode; +import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.apache.hadoop.hdds.server.events.EventHandler; +import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; + +/** + * Handles CommandStatusReports from datanode. 
+ */ +public class CommandStatusReportHandler implements +EventHandler { + + private static final Logger LOGGER = LoggerFactory + .getLogger(CommandStatusReportHandler.class); + + @Override + public void onMessage(CommandStatusReportFromDatanode report, + EventPublisher publisher) { +Preconditions.checkNotNull(report); +List cmdStatusList = report.getReport().getCmdStatusList(); +Preconditions.checkNotNull(cmdStatusList); +LOGGER.trace("Processing command status report for dn: {}", report +.getDatanodeDetails()); + +// Route command status to its watchers. +cmdStatusList.forEach(cmdStatus -> { + LOGGER.trace("Emitting command status for id:{} type: {}", cmdStatus + .getCmdId(), cmdStatus.getType()); + switch (cmdStatus.getType()) { + case replicateContainerCommand: +publisher.fireEvent(SCMEvents.REPLICATION_STATUS, new +ReplicationStatus(cmdStatus)); +break; + case closeContainerCommand: +publisher.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS, new +CloseContainerStatus(cmdStatus)); +break; + case deleteBlocksCommand: +publisher.fireEvent(SCMEvents.DELETE_BLOCK_STATUS, new +DeleteBlockCommandStatus(cmdStatus)); +break; + default: +LOGGER.debug("CommandStatus of type:{} not handled in " + +"CommandStatusReportHandler.", cmdStatus.getType()); +break; + } +}); + } + + /** + * Wrapper event for CommandStatus. + */ + public static class CommandStatusEvent implements IdentifiableEventPayload { +private
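Per the file list, StorageContainerManager gains a few lines to register the new handler. A wiring sketch against the HDDS event framework; the event constant name is an assumption, and only the handler class and the EventQueue#addHandler call shape are taken from the patch:

```java
import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.server.events.EventQueue;

public class ScmWiringSketch {
  public static EventQueue wire() {
    EventQueue queue = new EventQueue();
    // CMD_STATUS_REPORT is assumed to be the event fired by the heartbeat dispatcher;
    // the handler then re-fires per-command status events (replication/close/delete).
    queue.addHandler(SCMEvents.CMD_STATUS_REPORT, new CommandStatusReportHandler());
    return queue;
  }
}
```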
[27/50] hadoop git commit: YARN-8301. Added YARN service upgrade instructions. Contributed by Chandni Singh
YARN-8301. Added YARN service upgrade instructions. Contributed by Chandni Singh Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1622a4b8 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1622a4b8 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1622a4b8 Branch: refs/heads/HADOOP-15461 Commit: 1622a4b810eaf9c4fe9f9ad6bef6b49db7bec16f Parents: 347c955 Author: Eric Yang Authored: Fri Jul 20 19:46:35 2018 -0400 Committer: Eric Yang Committed: Fri Jul 20 19:46:35 2018 -0400 -- .../src/site/markdown/yarn-service/Overview.md | 4 +- .../markdown/yarn-service/ServiceUpgrade.md | 197 +++ 2 files changed, 198 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/1622a4b8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md index 8e2bf9a..041b0ee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md @@ -56,6 +56,4 @@ The benefits of combining these workloads are two-fold: * [Registry DNS](RegistryDNS.html): Deep dives into the Registry DNS internals. * [Examples](Examples.html): List some example service definitions (`Yarnfile`). * [Configurations](Configurations.html): Describes how to configure the custom services on YARN. - - - +* [Service Upgrade](ServiceUpgrade.html): Describes how to upgrade a YARN service which is an experimental feature. http://git-wip-us.apache.org/repos/asf/hadoop/blob/1622a4b8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceUpgrade.md -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceUpgrade.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceUpgrade.md new file mode 100644 index 000..839be22 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceUpgrade.md @@ -0,0 +1,197 @@ + + +# Service Upgrade (Experimental Feature - Tech Preview) + +Yarn service provides a way of upgrading/downgrading long running applications without +shutting down the application to minimize the downtime during this process. This is +an experimental feature which is currently not enabled by default. + +## Overview + +Upgrading a Yarn Service is a 3 steps (or 2 steps when auto-finalization of +upgrade is chosen) process: + +1. Initiate service upgrade.\ +This step involves providing the service spec of the newer version of the service. +Once, the service upgrade is initiated, the state of the service is changed to +`UPGRADING`. + +2. Upgrade component instances.\ +This step involves triggering upgrade of individual component instance. +By providing an API to upgrade at instance level, users can orchestrate upgrade +of the entire service in any order which is relevant for the service.\ +In addition, there are APIs to upgrade multiple instances, all instances of a +component, and all instances of multiple components. + +3. Finalize upgrade.\ +This step involves finalization of upgrade. With an explicit step to finalize the +upgrade, users have a chance to cancel current upgrade in progress. 
When the +user chose to cancel, the service will make the best effort to revert to the +previous version.\ +\ +When the upgrade is finalized, the old service definition is +overwritten by the new service definition and the service state changes to `STABLE`.\ +A service can be auto-finalized when the upgrade is initialized with +`-autoFinalize` option. With auto-finalization, when all the component-instances of +the service have been upgraded, finalization will be performed automatically by the +service framework.\ +\ +**NOTE**: Cancel of upgrade is not implemented yet. + +## Upgrade Example +This example shows upgrade of sleeper service. Below is the sleeper service +definition + +``` +{ + "name": "sleeper-service", + "components" : +[ + { +"name": "sleeper", +"version": "1.0.0", +"number_of_containers": 1, +"launch_command": "sleep 90", +"resource": { + "cpus": 1, + "memory": "256" + } + } +] +} +``` +Assuming, user launched an instance of sleeper service named as `my-sleeper`: +``` +{ + "components": +[ + { +
[12/50] hadoop git commit: YARN-7300. DiskValidator is not used in LocalDirAllocator. (Szilard Nemeth via Haibo Chen)
YARN-7300. DiskValidator is not used in LocalDirAllocator. (Szilard Nemeth via Haibo Chen) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6873dfd Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6873dfd Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6873dfd Branch: refs/heads/HADOOP-15461 Commit: e6873dfde057e63ce5efa91f3061db3ee1b2e236 Parents: f354f47 Author: Haibo Chen Authored: Thu Jul 19 16:27:11 2018 -0700 Committer: Haibo Chen Committed: Thu Jul 19 16:27:11 2018 -0700 -- .../org/apache/hadoop/fs/LocalDirAllocator.java | 28 +++- .../nodemanager/LocalDirsHandlerService.java| 27 ++- 2 files changed, 42 insertions(+), 13 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6873dfd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java index 1c216f4..a4b158a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java @@ -24,8 +24,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.util.*; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -78,11 +76,25 @@ public class LocalDirAllocator { /** Used when size of file to be allocated is unknown. 
*/ public static final int SIZE_UNKNOWN = -1; + private final DiskValidator diskValidator; + /**Create an allocator object * @param contextCfgItemName */ public LocalDirAllocator(String contextCfgItemName) { this.contextCfgItemName = contextCfgItemName; +try { + this.diskValidator = DiskValidatorFactory.getInstance( + BasicDiskValidator.NAME); +} catch (DiskErrorException e) { + throw new RuntimeException(e); +} + } + + public LocalDirAllocator(String contextCfgItemName, + DiskValidator diskValidator) { +this.contextCfgItemName = contextCfgItemName; +this.diskValidator = diskValidator; } /** This method must be used to obtain the dir allocation context for a @@ -96,7 +108,8 @@ public class LocalDirAllocator { AllocatorPerContext l = contexts.get(contextCfgItemName); if (l == null) { contexts.put(contextCfgItemName, -(l = new AllocatorPerContext(contextCfgItemName))); +(l = new AllocatorPerContext(contextCfgItemName, +diskValidator))); } return l; } @@ -255,6 +268,7 @@ public class LocalDirAllocator { // NOTE: the context must be accessed via a local reference as it // may be updated at any time to reference a different context private AtomicReference currentContext; +private final DiskValidator diskValidator; private static class Context { private AtomicInteger dirNumLastAccessed = new AtomicInteger(0); @@ -280,9 +294,11 @@ public class LocalDirAllocator { } } -public AllocatorPerContext(String contextCfgItemName) { +public AllocatorPerContext(String contextCfgItemName, +DiskValidator diskValidator) { this.contextCfgItemName = contextCfgItemName; this.currentContext = new AtomicReference(new Context()); + this.diskValidator = diskValidator; } /** This method gets called everytime before any read/write to make sure @@ -312,7 +328,7 @@ public class LocalDirAllocator { ? new File(ctx.localFS.makeQualified(tmpDir).toUri()) : new File(dirStrings[i]); -DiskChecker.checkDir(tmpFile); +diskValidator.checkStatus(tmpFile); dirs.add(new Path(tmpFile.getPath())); dfList.add(new DF(tmpFile, 3)); } catch (DiskErrorException de) { @@ -348,7 +364,7 @@ public class LocalDirAllocator { //check whether we are able to create a directory here. If the disk //happens to be RDONLY we will fail try { - DiskChecker.checkDir(new File(file.getParent().toUri().getPath())); + diskValidator.checkStatus(new File(file.getParent().toUri().getPath())); return file; } catch
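A short usage sketch of the new two-argument constructor; the validator name and factory call come from the diff, and the context key is the NodeManager local-dirs property:

```java
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.util.BasicDiskValidator;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.DiskValidator;
import org.apache.hadoop.util.DiskValidatorFactory;

public class AllocatorSketch {
  public static LocalDirAllocator newAllocator() throws DiskErrorException {
    DiskValidator validator =
        DiskValidatorFactory.getInstance(BasicDiskValidator.NAME);
    // The one-argument constructor now falls back to this same "basic" validator.
    return new LocalDirAllocator("yarn.nodemanager.local-dirs", validator);
  }
}
```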
[21/50] hadoop git commit: HDDS-257. Hook up VolumeSet#shutdown from HddsDispatcher#shutdown. Contributed by Hanisha Koneru
HDDS-257. Hook up VolumeSet#shutdown from HddsDispatcher#shutdown. Contributed by Hanisha Koneru Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba25d27d Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba25d27d Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba25d27d Branch: refs/heads/HADOOP-15461 Commit: ba25d27ddb8d32abf5e1314a51eec7cad789b316 Parents: de894d3 Author: Bharat Viswanadham Authored: Fri Jul 20 12:41:52 2018 -0700 Committer: Bharat Viswanadham Committed: Fri Jul 20 12:41:52 2018 -0700 -- .../container/common/impl/HddsDispatcher.java | 2 ++ .../container/common/volume/HddsVolume.java | 2 -- .../container/common/volume/VolumeInfo.java | 8 ++ .../container/common/volume/VolumeSet.java | 18 +- .../container/common/volume/VolumeUsage.java| 17 - .../container/common/volume/TestVolumeSet.java | 26 +--- 6 files changed, 50 insertions(+), 23 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba25d27d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index f0c2aa9..bee8417 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -81,6 +81,8 @@ public class HddsDispatcher implements ContainerDispatcher { @Override public void shutdown() { +// Shutdown the volumes +volumeSet.shutdown(); } @Override http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba25d27d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index 6468720..0cbfd9f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -370,6 +370,4 @@ public final class HddsVolume { public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) { volumeInfo.setScmUsageForTesting(scmUsageForTest); } - - } http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba25d27d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java index 4b13d45..62fca63 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java @@ -129,4 +129,12 @@ public class VolumeInfo { public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) { usage.setScmUsageForTesting(scmUsageForTest); } + + /** + * Only for testing. Do not use otherwise. 
+ */ + @VisibleForTesting + public VolumeUsage getUsageForTesting() { +return usage; + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba25d27d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java index 2dd4763..4dfde37 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java @@ -23,9 +23,11 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import
[40/50] hadoop git commit: YARN-8548. AllocationRespose proto setNMToken initBuilder not done. Contributed by Bilwa S T.
YARN-8548. AllocationRespose proto setNMToken initBuilder not done. Contributed by Bilwa S T. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff7c2eda Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff7c2eda Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff7c2eda Branch: refs/heads/HADOOP-15461 Commit: ff7c2eda34c2c40ad71b50df6462a661bd213fbd Parents: 8461278 Author: bibinchundatt Authored: Tue Jul 24 16:17:20 2018 +0530 Committer: bibinchundatt Committed: Tue Jul 24 16:17:20 2018 +0530 -- .../impl/pb/AllocateResponsePBImpl.java | 1 + .../resourcemanager/recovery/TestProtos.java| 20 2 files changed, 21 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff7c2eda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java index 3ab5563..8df56b8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java @@ -347,6 +347,7 @@ public class AllocateResponsePBImpl extends AllocateResponse { @Override public synchronized void setNMTokens(List nmTokens) { +maybeInitBuilder(); if (nmTokens == null || nmTokens.isEmpty()) { if (this.nmTokens != null) { this.nmTokens.clear(); http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff7c2eda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java index cc96412..d42b411 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java @@ -18,7 +18,15 @@ package org.apache.hadoop.yarn.server.resourcemanager.recovery; +import static org.junit.Assert.*; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateResponsePBImpl; +import org.apache.hadoop.yarn.api.records.NMToken; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.EpochProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto; import org.junit.Assert; import org.junit.Test; @@ -33,4 +41,16 @@ public class TestProtos { String protoString = proto.toString(); Assert.assertNotNull(protoString); } + + @Test + public void testProtoAllocateResponse() { +AllocateResponseProto proto = 
AllocateResponseProto.getDefaultInstance(); +AllocateResponsePBImpl alloc = new AllocateResponsePBImpl(proto); +List nmTokens = new ArrayList(); +try { + alloc.setNMTokens(nmTokens); +} catch (Exception ex) { + fail(); +} + } }
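The root cause is the standard PBImpl mutation pattern: a record built from an existing proto stays in "viaProto" mode until a mutator switches it to a builder, which is exactly what the missing call does. A simplified sketch of that idiom; the field names follow the usual PBImpl boilerplate rather than being copied from this file:

```java
// Simplified PBImpl idiom: every setter must call this before touching local state,
// otherwise an instance wrapped around an existing proto cannot be mutated safely.
private void maybeInitBuilder() {
  if (viaProto || builder == null) {
    builder = AllocateResponseProto.newBuilder(proto);
  }
  viaProto = false;
}
```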
[17/50] hadoop git commit: HDDS-264. 'oz' subcommand reference is not present in 'ozone' command help. Contributed by Sandeep Nemuri.
HDDS-264. 'oz' subcommand reference is not present in 'ozone' command help. Contributed by Sandeep Nemuri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c19ee39 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c19ee39 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c19ee39 Branch: refs/heads/HADOOP-15461 Commit: 5c19ee3994af06bbc85f3575e3e4421babc0ba5c Parents: 68b57ad Author: Mukul Kumar Singh Authored: Fri Jul 20 22:12:40 2018 +0530 Committer: Mukul Kumar Singh Committed: Fri Jul 20 22:13:09 2018 +0530 -- hadoop-ozone/common/src/main/bin/ozone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c19ee39/hadoop-ozone/common/src/main/bin/ozone -- diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone index 9495eff..927bc84 100755 --- a/hadoop-ozone/common/src/main/bin/ozone +++ b/hadoop-ozone/common/src/main/bin/ozone @@ -41,7 +41,7 @@ function hadoop_usage hadoop_add_subcommand "getozoneconf" client "get ozone config values from configuration" hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode or DataNode." hadoop_add_subcommand "om" daemon "Ozone Manager" - hadoop_add_subcommand "o3" client "command line interface for ozone" + hadoop_add_subcommand "oz" client "command line interface for ozone" hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata into relational data" hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service" hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager"
[20/50] hadoop git commit: HDDS-250. Cleanup ContainerData.
HDDS-250. Cleanup ContainerData. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de894d34 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de894d34 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de894d34 Branch: refs/heads/HADOOP-15461 Commit: de894d34f6739685f32cd63a0e26b0e45bcf5c8c Parents: 89a0f80 Author: Hanisha Koneru Authored: Fri Jul 20 11:36:42 2018 -0700 Committer: Hanisha Koneru Committed: Fri Jul 20 11:36:42 2018 -0700 -- .../org/apache/hadoop/ozone/OzoneConsts.java| 5 +- .../org/apache/hadoop/ozone/common/Storage.java | 2 +- .../common/helpers/ContainerUtils.java | 21 --- .../container/common/impl/ContainerData.java| 64 ++-- .../container/keyvalue/KeyValueContainer.java | 6 +- .../keyvalue/KeyValueContainerData.java | 56 + .../helpers/KeyValueContainerLocationUtil.java | 18 +++--- .../background/BlockDeletingService.java| 8 +-- .../container/ozoneimpl/ContainerReader.java| 30 - .../common/impl/TestContainerDataYaml.java | 4 +- .../test/resources/additionalfields.container | 4 +- .../src/test/resources/incorrect.container | 4 +- .../common/impl/TestContainerPersistence.java | 15 +++-- 13 files changed, 71 insertions(+), 166 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/de894d34/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 0db5993..25b68e0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -70,12 +70,9 @@ public final class OzoneConsts { public static final String CONTAINER_EXTENSION = ".container"; public static final String CONTAINER_META = ".meta"; - // container storage is in the following format. - // Data Volume basePath/containers//metadata and - // Data Volume basePath/containers//data/... + // Refer to {@link ContainerReader} for container storage layout on disk. 
public static final String CONTAINER_PREFIX = "containers"; public static final String CONTAINER_META_PATH = "metadata"; - public static final String CONTAINER_DATA_PATH = "data"; public static final String CONTAINER_TEMPORARY_CHUNK_PREFIX = "tmp"; public static final String CONTAINER_CHUNK_NAME_DELIMITER = "."; public static final String CONTAINER_ROOT_PREFIX = "repository"; http://git-wip-us.apache.org/repos/asf/hadoop/blob/de894d34/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java index e8f41a6..1826a58 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java @@ -47,7 +47,7 @@ public abstract class Storage { public static final String STORAGE_DIR_CURRENT = "current"; protected static final String STORAGE_FILE_VERSION = "VERSION"; - public static final String CONTAINER_DIR = "containerdir"; + public static final String CONTAINER_DIR = "containerDir"; private final NodeType nodeType; private final File root; http://git-wip-us.apache.org/repos/asf/hadoop/blob/de894d34/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java index 18a5231..1d5dfc5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java @@ -104,27 +104,6 @@ public final class ContainerUtils { } /** - * Returns a ReadContainer Response. - * @param msg requestProto message. - * @param containerData container data to be returned. - * @return ReadContainer Response - */ - public static ContainerProtos.ContainerCommandResponseProto -getReadContainerResponse(ContainerProtos.ContainerCommandRequestProto msg, -
[48/50] hadoop git commit: HADOOP-15465. Deprecate WinUtils#Symlinks by using native java code. Contributed by Giovanni Matteo Fumarola.
HADOOP-15465. Deprecate WinUtils#Symlinks by using native java code. Contributed by Giovanni Matteo Fumarola. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8d2b091 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8d2b091 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8d2b091 Branch: refs/heads/HADOOP-15461 Commit: b8d2b09192ff21d4d6d8d861f295f6a0f4acc682 Parents: 849c45d Author: Inigo Goiri Authored: Thu Jun 7 17:02:01 2018 -0700 Committer: Inigo Goiri Committed: Tue Jul 24 18:30:46 2018 -0700 -- .../java/org/apache/hadoop/fs/FileUtil.java | 60 .../apache/hadoop/fs/RawLocalFileSystem.java| 2 - .../main/java/org/apache/hadoop/util/Shell.java | 9 ++- .../hadoop/yarn/server/MiniYARNCluster.java | 13 ++--- 4 files changed, 37 insertions(+), 47 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8d2b091/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java index df89598..61cb8d2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java @@ -34,8 +34,10 @@ import java.net.URI; import java.net.UnknownHostException; import java.nio.charset.Charset; import java.nio.file.AccessDeniedException; +import java.nio.file.FileAlreadyExistsException; import java.nio.file.FileSystems; import java.nio.file.Files; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.Enumeration; import java.util.List; @@ -1028,17 +1030,15 @@ public class FileUtil { } /** - * Create a soft link between a src and destination - * only on a local disk. HDFS does not support this. - * On Windows, when symlink creation fails due to security - * setting, we will log a warning. The return code in this - * case is 2. + * Create a soft link between a src and destination only on a local disk. On + * Windows, when symlink creation fails due to security setting, we will log a + * warning. The return code in this case is 2. * * @param target the target for symlink * @param linkname the symlink * @return 0 on success */ - public static int symLink(String target, String linkname) throws IOException{ + public static int symLink(String target, String linkname) throws IOException { if (target == null || linkname == null) { LOG.warn("Can not create a symLink with a target = " + target @@ -1053,44 +1053,32 @@ public class FileUtil { File linkFile = new File( Path.getPathWithoutSchemeAndAuthority(new Path(linkname)).toString()); -String[] cmd = Shell.getSymlinkCommand( -targetFile.toString(), -linkFile.toString()); - -ShellCommandExecutor shExec; try { - if (Shell.WINDOWS && - linkFile.getParentFile() != null && - !new Path(target).isAbsolute()) { -// Relative links on Windows must be resolvable at the time of -// creation. To ensure this we run the shell command in the directory -// of the link. -// -shExec = new ShellCommandExecutor(cmd, linkFile.getParentFile()); - } else { -shExec = new ShellCommandExecutor(cmd); - } - shExec.execute(); -} catch (Shell.ExitCodeException ec) { - int returnVal = ec.getExitCode(); - if (Shell.WINDOWS && returnVal == SYMLINK_NO_PRIVILEGE) { -LOG.warn("Fail to create symbolic links on Windows. 
" -+ "The default security settings in Windows disallow non-elevated " -+ "administrators and all non-administrators from creating symbolic links. " -+ "This behavior can be changed in the Local Security Policy management console"); - } else if (returnVal != 0) { -LOG.warn("Command '" + StringUtils.join(" ", cmd) + "' failed " -+ returnVal + " with: " + ec.getMessage()); - } - return returnVal; + Files.createSymbolicLink(Paths.get(linkFile.toString()), + Paths.get(targetFile.toString())); +} catch (SecurityException e3) { + LOG.warn("Fail to create symbolic links on Windows. " + + "The default security settings in Windows disallow non-elevated " + + "administrators and all non-administrators from creating symbolic" + + " links. This behavior can be changed in the Local Security Policy" + + " management console"); + return SYMLINK_NO_PRIVILEGE; + +} catch
[06/50] hadoop git commit: HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled reliably fails. Contributed by Weiwei Yang.
HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled reliably fails. Contributed by Weiwei Yang. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ccf2db7f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ccf2db7f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ccf2db7f Branch: refs/heads/HADOOP-15461 Commit: ccf2db7fc2688d262df3309007cb12a4dfedc179 Parents: ba1ab08 Author: Kihwal Lee Authored: Thu Jul 19 11:13:37 2018 -0500 Committer: Kihwal Lee Committed: Thu Jul 19 11:13:37 2018 -0500 -- .../apache/hadoop/security/TestGroupsCaching.java | 17 +++-- 1 file changed, 11 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccf2db7f/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java -- diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java index 46e36b3..bba8152 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java @@ -561,23 +561,28 @@ public class TestGroupsCaching { // Then expire that entry timer.advance(4 * 1000); +// Pause the getGroups operation and this will delay the cache refresh +FakeGroupMapping.pause(); // Now get the cache entry - it should return immediately // with the old value and the cache will not have completed // a request to getGroups yet. assertEquals(groups.getGroups("me").size(), 2); assertEquals(startingRequestCount, FakeGroupMapping.getRequestCount()); +// Resume the getGroups operation and the cache can get refreshed +FakeGroupMapping.resume(); -// Now sleep for a short time and re-check the request count. It should have -// increased, but the exception means the cache will not have updated -Thread.sleep(50); +// Now wait for the refresh done, because of the exception, we expect +// a onFailure callback gets called and the counter for failure is 1 +waitForGroupCounters(groups, 0, 0, 0, 1); FakeGroupMapping.setThrowException(false); assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount()); assertEquals(groups.getGroups("me").size(), 2); -// Now sleep another short time - the 3rd call to getGroups above -// will have kicked off another refresh that updates the cache -Thread.sleep(50); +// Now the 3rd call to getGroups above will have kicked off +// another refresh that updates the cache, since it no longer gives +// exception, we now expect the counter for success is 1. +waitForGroupCounters(groups, 0, 0, 1, 1); assertEquals(startingRequestCount + 2, FakeGroupMapping.getRequestCount()); assertEquals(groups.getGroups("me").size(), 3); }
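The fix replaces fixed sleeps with an explicit gate on the fake group mapping plus a wait on the cache's success and failure counters. A sketch of how such a pause/resume gate can be built in a test double; this is illustrative only and not the actual FakeGroupMapping code:

```java
import java.util.concurrent.CountDownLatch;

// Illustrative test-double gate: pause() makes the next lookup block until resume().
public class PausableLookup {
  private static volatile CountDownLatch gate;

  public static void pause() {
    gate = new CountDownLatch(1);
  }

  public static void resume() {
    CountDownLatch g = gate;
    if (g != null) {
      g.countDown();
    }
  }

  // Called from the lookup path, e.g. a getGroups(user) implementation.
  static void awaitIfPaused() throws InterruptedException {
    CountDownLatch g = gate;
    if (g != null) {
      g.await();
    }
  }
}
```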
[32/50] hadoop git commit: HDDS-199. Implement ReplicationManager to handle underreplication of closed containers. Contributed by Elek Marton.
HDDS-199. Implement ReplicationManager to handle underreplication of closed containers. Contributed by Elek Marton. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a9e25ed Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a9e25ed Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a9e25ed Branch: refs/heads/HADOOP-15461 Commit: 3a9e25edf53187f16ec9f9f6075e850b74b3b91f Parents: 84d7bf1 Author: Xiaoyu Yao Authored: Mon Jul 23 10:13:53 2018 -0700 Committer: Xiaoyu Yao Committed: Mon Jul 23 10:28:33 2018 -0700 -- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 7 + .../apache/hadoop/ozone/OzoneConfigKeys.java| 1 + .../common/src/main/resources/ozone-default.xml | 10 + .../container/replication/ReplicationQueue.java | 76 -- .../replication/ReplicationRequest.java | 106 .../container/replication/package-info.java | 23 -- .../replication/TestReplicationQueue.java | 134 -- .../container/replication/package-info.java | 23 -- .../hadoop/hdds/server/events/EventWatcher.java | 4 +- .../hadoop/hdds/server/events/TypedEvent.java | 5 + .../hdds/server/events/TestEventWatcher.java| 6 +- .../algorithms/ContainerPlacementPolicy.java| 5 +- .../placement/algorithms/SCMCommonPolicy.java | 8 +- .../SCMContainerPlacementCapacity.java | 16 +- .../algorithms/SCMContainerPlacementRandom.java | 7 +- .../replication/ReplicationCommandWatcher.java | 56 + .../replication/ReplicationManager.java | 242 +++ .../container/replication/ReplicationQueue.java | 73 ++ .../replication/ReplicationRequest.java | 107 .../scm/container/replication/package-info.java | 23 ++ .../hadoop/hdds/scm/events/SCMEvents.java | 31 +++ .../scm/server/StorageContainerManager.java | 42 +++- .../TestSCMContainerPlacementCapacity.java | 106 .../TestSCMContainerPlacementRandom.java| 86 +++ .../replication/TestReplicationManager.java | 215 .../replication/TestReplicationQueue.java | 134 ++ .../scm/container/replication/package-info.java | 23 ++ .../placement/TestContainerPlacement.java | 5 +- 28 files changed, 1192 insertions(+), 382 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 6e940ad..e337d2f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -251,6 +251,13 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_CONTAINER_CLOSE_THRESHOLD = "ozone.scm.container.close.threshold"; public static final float OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f; + + public static final String HDDS_SCM_WATCHER_TIMEOUT = + "hdds.scm.watcher.timeout"; + + public static final String HDDS_SCM_WATCHER_TIMEOUT_DEFAULT = + "10m"; + /** * Never constructed. 
*/ http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index 0273677..92f0c41 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.scm.ScmConfigKeys; + import org.apache.ratis.util.TimeDuration; /** http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9e25ed/hadoop-hdds/common/src/main/resources/ozone-default.xml -- diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 84a3e0c..6ddf3c6 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -1108,4 +1108,14 @@ + +hdds.scm.watcher.timeout +10m +
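The new hdds.scm.watcher.timeout key (default 10m) is a time-duration property. A hedged sketch of reading it; the key and default come from the patch, while whether the ReplicationManager consumes it exactly this way is an assumption:

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public class WatcherTimeoutSketch {
  public static long watcherTimeoutMillis() {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Default of 10 minutes, expressed here in milliseconds.
    return conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT,
        10 * 60 * 1000L, TimeUnit.MILLISECONDS);
  }
}
```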
[45/50] hadoop git commit: YARN-7133. Clean up lock-try order in fair scheduler. (Szilard Nemeth via Haibo Chen)
YARN-7133. Clean up lock-try order in fair scheduler. (Szilard Nemeth via Haibo Chen) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea2c6c8c Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea2c6c8c Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea2c6c8c Branch: refs/heads/HADOOP-15461 Commit: ea2c6c8c9a55813a19b3dbd0d29747d6a7739030 Parents: e673dd1 Author: Haibo Chen Authored: Tue Jul 24 12:46:15 2018 -0700 Committer: Haibo Chen Committed: Tue Jul 24 12:46:59 2018 -0700 -- .../scheduler/fair/FairScheduler.java | 36 ++-- 1 file changed, 18 insertions(+), 18 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea2c6c8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index eb9f6af..20d1afe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -467,8 +467,8 @@ public class FairScheduler extends return; } +writeLock.lock(); try { - writeLock.lock(); RMApp rmApp = rmContext.getRMApps().get(applicationId); FSLeafQueue queue = assignToQueue(rmApp, queueName, user); if (queue == null) { @@ -550,8 +550,8 @@ public class FairScheduler extends ApplicationAttemptId applicationAttemptId, boolean transferStateFromPreviousAttempt, boolean isAttemptRecovering) { +writeLock.lock(); try { - writeLock.lock(); SchedulerApplication application = applications.get( applicationAttemptId.getApplicationId()); String user = application.getUser(); @@ -653,8 +653,8 @@ public class FairScheduler extends private void removeApplicationAttempt( ApplicationAttemptId applicationAttemptId, RMAppAttemptState rmAppAttemptFinalState, boolean keepContainers) { +writeLock.lock(); try { - writeLock.lock(); LOG.info("Application " + applicationAttemptId + " is done. 
finalState=" + rmAppAttemptFinalState); FSAppAttempt attempt = getApplicationAttempt(applicationAttemptId); @@ -720,8 +720,8 @@ public class FairScheduler extends protected void completedContainerInternal( RMContainer rmContainer, ContainerStatus containerStatus, RMContainerEventType event) { +writeLock.lock(); try { - writeLock.lock(); Container container = rmContainer.getContainer(); // Get the application for the finished container @@ -768,8 +768,8 @@ public class FairScheduler extends private void addNode(List containerReports, RMNode node) { +writeLock.lock(); try { - writeLock.lock(); FSSchedulerNode schedulerNode = new FSSchedulerNode(node, usePortForNodeName); nodeTracker.addNode(schedulerNode); @@ -790,8 +790,8 @@ public class FairScheduler extends } private void removeNode(RMNode rmNode) { +writeLock.lock(); try { - writeLock.lock(); NodeId nodeId = rmNode.getNodeID(); FSSchedulerNode node = nodeTracker.getNode(nodeId); if (node == null) { @@ -988,8 +988,8 @@ public class FairScheduler extends @Override protected void nodeUpdate(RMNode nm) { +writeLock.lock(); try { - writeLock.lock(); long start = getClock().getTime(); super.nodeUpdate(nm); @@ -1089,8 +1089,8 @@ public class FairScheduler extends @VisibleForTesting void attemptScheduling(FSSchedulerNode node) { +writeLock.lock(); try { - writeLock.lock(); if (rmContext.isWorkPreservingRecoveryEnabled() && !rmContext .isSchedulerReadyForAllocatingContainers()) { return; @@ -1305,8 +1305,8 @@ public class FairScheduler extends private String resolveReservationQueueName(String queueName, ApplicationId applicationId, ReservationId reservationID, boolean isRecovering) { +readLock.lock(); try { - readLock.lock(); FSQueue queue =
[04/50] hadoop git commit: HDDS-255. Fix TestOzoneConfigurationFields for missing hdds.command.status.report.interval in config classes. Contributed by Sandeep Nemuri.
HDDS-255. Fix TestOzoneConfigurationFields for missing hdds.command.status.report.interval in config classes. Contributed by Sandeep Nemuri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c492eacc Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c492eacc Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c492eacc Branch: refs/heads/HADOOP-15461 Commit: c492eaccc21bb53d0d40214290b2fa9c493e2955 Parents: 129269f Author: Xiaoyu Yao Authored: Wed Jul 18 11:46:26 2018 -0700 Committer: Xiaoyu Yao Committed: Wed Jul 18 11:46:26 2018 -0700 -- .../org/apache/hadoop/ozone/TestOzoneConfigurationFields.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c492eacc/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java -- diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java index 717bb68..909cddf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone; import org.apache.hadoop.conf.TestConfigurationFieldsBase; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -31,7 +32,7 @@ public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase { xmlFilename = new String("ozone-default.xml"); configurationClasses = new Class[] {OzoneConfigKeys.class, ScmConfigKeys.class, -OMConfigKeys.class}; +OMConfigKeys.class, HddsConfigKeys.class}; errorIfMissingConfigProps = true; errorIfMissingXmlProps = true; xmlPropsToSkipCompare.add("hadoop.tags.custom");
[43/50] hadoop git commit: HDDS-282. Consolidate logging in scm/container-service. Contributed by Elek Marton.
HDDS-282. Consolidate logging in scm/container-service. Contributed by Elek Marton. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd0b9f13 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd0b9f13 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd0b9f13 Branch: refs/heads/HADOOP-15461 Commit: cd0b9f13805affcc91a2cba42b176bb9031378eb Parents: 35ce6eb Author: Xiaoyu Yao Authored: Tue Jul 24 10:16:53 2018 -0700 Committer: Xiaoyu Yao Committed: Tue Jul 24 10:17:03 2018 -0700 -- .../container/common/statemachine/EndpointStateMachine.java | 4 ++-- .../common/states/endpoint/RegisterEndpointTask.java | 3 +-- .../statemachine/background/BlockDeletingService.java| 8 +--- .../org/apache/hadoop/hdds/server/events/EventQueue.java | 3 +-- .../org/apache/hadoop/hdds/server/events/TypedEvent.java | 6 -- 5 files changed, 13 insertions(+), 11 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd0b9f13/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java index 7e85923..fb32a05 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java @@ -203,11 +203,11 @@ public class EndpointStateMachine this.incMissed(); if (this.getMissedCount() % getLogWarnInterval(conf) == 0) { - LOG.warn("Unable to communicate to SCM server at {}. We have not been " + + LOG.error("Unable to communicate to SCM server at {}. 
We have not been " + "able to communicate to this SCM server for past {} seconds.", this.getAddress().getHostString() + ":" + this.getAddress().getPort(), this.getMissedCount() * getScmHeartbeatInterval( - this.conf)); + this.conf), ex); } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd0b9f13/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java index b3d2b62..25af4a1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java @@ -125,8 +125,7 @@ public final class RegisterEndpointTask implements rpcEndPoint.setState(nextState); rpcEndPoint.zeroMissedCount(); } catch (IOException ex) { - rpcEndPoint.logIfNeeded(ex - ); + rpcEndPoint.logIfNeeded(ex); } finally { rpcEndPoint.unlock(); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd0b9f13/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java index 4a572ca..51eed7f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java @@ -119,9 +119,11 @@ public class BlockDeletingService extends BackgroundService{ // configured. containers = containerSet.chooseContainerForBlockDeletion( containerLimitPerInterval, containerDeletionPolicy); - LOG.info("Plan to choose {} containers for block deletion, " - + "actually returns {} valid containers.", - containerLimitPerInterval, containers.size()); +
[13/50] hadoop git commit: HDFS-13743. RBF: Router throws NullPointerException due to the invalid initialization of MountTableResolver. Contributed by Takanobu Asanuma.
HDFS-13743. RBF: Router throws NullPointerException due to the invalid initialization of MountTableResolver. Contributed by Takanobu Asanuma. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b25fb94 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b25fb94 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b25fb94 Branch: refs/heads/HADOOP-15461 Commit: 7b25fb949bf6f02df997beeca7df46c9e84c8d96 Parents: e6873df Author: Yiqun Lin Authored: Fri Jul 20 17:28:57 2018 +0800 Committer: Yiqun Lin Committed: Fri Jul 20 17:28:57 2018 +0800 -- .../federation/resolver/MountTableResolver.java | 28 +-- .../TestInitializeMountTableResolver.java | 82 2 files changed, 102 insertions(+), 8 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b25fb94/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java index 3f6efd6..c264de3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.federation.resolver; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMESERVICE_ID; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT; @@ -42,7 +44,6 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSUtil; @@ -149,14 +150,25 @@ public class MountTableResolver * @param conf Configuration for this resolver. */ private void initDefaultNameService(Configuration conf) { -try { - this.defaultNameService = conf.get( - DFS_ROUTER_DEFAULT_NAMESERVICE, - DFSUtil.getNamenodeNameServiceId(conf)); -} catch (HadoopIllegalArgumentException e) { - LOG.error("Cannot find default name service, setting it to the first"); +this.defaultNameService = conf.get( +DFS_ROUTER_DEFAULT_NAMESERVICE, +DFSUtil.getNamenodeNameServiceId(conf)); + +if (defaultNameService == null) { + LOG.warn( + "{} and {} is not set. 
Fallback to {} as the default name service.", + DFS_ROUTER_DEFAULT_NAMESERVICE, DFS_NAMESERVICE_ID, DFS_NAMESERVICES); Collection nsIds = DFSUtilClient.getNameServiceIds(conf); - this.defaultNameService = nsIds.iterator().next(); + if (nsIds.isEmpty()) { +this.defaultNameService = ""; + } else { +this.defaultNameService = nsIds.iterator().next(); + } +} + +if (this.defaultNameService.equals("")) { + LOG.warn("Default name service is not set."); +} else { LOG.info("Default name service: {}", this.defaultNameService); } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b25fb94/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java new file mode 100644 index 000..5db7531 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional
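The patch above replaces the exception-driven lookup with an explicit fallback chain: the router's configured default nameservice, then the locally derived nameservice id, then the first entry of the configured nameservice list, and finally an empty string (with a warning) instead of calling next() on an empty iterator and hitting a NullPointerException. A standalone sketch of that resolution order using generic Configuration lookups (the key names below are placeholders, not the actual RBF keys):

    import java.util.Collection;
    import org.apache.hadoop.conf.Configuration;

    public class DefaultNameServiceSketch {
      // Hypothetical keys, for illustration only.
      static final String ROUTER_DEFAULT_NS_KEY = "example.router.default.nameservice";
      static final String NS_IDS_KEY = "example.nameservices";

      static String resolveDefaultNameService(Configuration conf) {
        // 1. Explicit router default, if configured.
        String ns = conf.get(ROUTER_DEFAULT_NS_KEY);
        if (ns != null) {
          return ns;
        }
        // 2. Otherwise fall back to the first configured nameservice id.
        Collection<String> nsIds = conf.getTrimmedStringCollection(NS_IDS_KEY);
        if (!nsIds.isEmpty()) {
          return nsIds.iterator().next();
        }
        // 3. Nothing configured: return an empty default instead of failing
        // later with a NullPointerException.
        return "";
      }
    }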
[10/50] hadoop git commit: MAPREDUCE-7118. Distributed cache conflicts breaks backwards compatability. (Jason Lowe via wangda)
MAPREDUCE-7118. Distributed cache conflicts breaks backwards compatability. (Jason Lowe via wangda) Change-Id: I89ab4852b4ad305fec19812e8931c59d96581376 Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3b4d4cc Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3b4d4cc Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3b4d4cc Branch: refs/heads/HADOOP-15461 Commit: b3b4d4ccb53fdf8dacc66e912822b34f8b3bf215 Parents: 2564884 Author: Wangda Tan Authored: Thu Jul 19 12:03:24 2018 -0700 Committer: Wangda Tan Committed: Thu Jul 19 14:26:05 2018 -0700 -- .../mapreduce/v2/util/LocalResourceBuilder.java | 8 +++- .../hadoop/mapreduce/v2/util/TestMRApps.java| 20 ++-- 2 files changed, 21 insertions(+), 7 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3b4d4cc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/LocalResourceBuilder.java -- diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/LocalResourceBuilder.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/LocalResourceBuilder.java index 48b157e..48cc29e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/LocalResourceBuilder.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/LocalResourceBuilder.java @@ -27,7 +27,6 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapred.InvalidJobConfException; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.filecache.DistributedCache; import org.apache.hadoop.yarn.api.records.LocalResource; @@ -144,10 +143,9 @@ class LocalResourceBuilder { LocalResource orig = localResources.get(linkName); if(orig != null && !orig.getResource().equals(URL.fromURI(p.toUri( { - throw new InvalidJobConfException( - getResourceDescription(orig.getType()) + orig.getResource() - + - " conflicts with " + getResourceDescription(type) + u); + LOG.warn(getResourceDescription(orig.getType()) + orig.getResource() + + " conflicts with " + getResourceDescription(type) + u); + continue; } Boolean sharedCachePolicy = sharedCacheUploadPolicies.get(u.toString()); sharedCachePolicy = http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3b4d4cc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java -- diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java index 3aadd63..c6a2874 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java 
@@ -360,7 +360,7 @@ public class TestMRApps { } @SuppressWarnings("deprecation") - @Test(timeout = 12, expected = InvalidJobConfException.class) + @Test(timeout = 12) public void testSetupDistributedCacheConflicts() throws Exception { Configuration conf = new Configuration(); conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class); @@ -388,10 +388,18 @@ public class TestMRApps { Map localResources = new HashMap(); MRApps.setupDistributedCache(conf, localResources); + +assertEquals(1, localResources.size()); +LocalResource lr = localResources.get("something"); +//Archive wins +assertNotNull(lr); +assertEquals(10l, lr.getSize()); +assertEquals(10l, lr.getTimestamp()); +assertEquals(LocalResourceType.ARCHIVE, lr.getType()); } @SuppressWarnings("deprecation") - @Test(timeout = 12, expected = InvalidJobConfException.class) + @Test(timeout = 12) public
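The behavioral change is from failing the job on a conflicting distributed-cache link name to logging a warning and keeping the entry that was registered first (archives are processed before files, so the archive wins, as the updated test asserts). A small generic sketch of that keep-first, warn-on-conflict pattern (illustrative types, not the LocalResourceBuilder API):

    import java.util.HashMap;
    import java.util.Map;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class KeepFirstOnConflictSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(KeepFirstOnConflictSketch.class);

      // Adds entries in order; on a duplicate link name the earlier entry wins
      // and a warning is logged instead of an exception being thrown.
      static Map<String, String> collect(String[][] entries) {
        Map<String, String> resources = new HashMap<>();
        for (String[] e : entries) {
          String link = e[0];
          String value = e[1];
          String existing = resources.get(link);
          if (existing != null && !existing.equals(value)) {
            LOG.warn("{} conflicts with already registered resource {}; keeping the first one",
                value, existing);
            continue;
          }
          resources.put(link, value);
        }
        return resources;
      }
    }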
[34/50] hadoop git commit: HADOOP-15586. Fix wrong log statement in AbstractService. (Szilard Nemeth via Haibo Chen)
HADOOP-15586. Fix wrong log statement in AbstractService. (Szilard Nemeth via Haibo Chen) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17e26163 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17e26163 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17e26163 Branch: refs/heads/HADOOP-15461 Commit: 17e26163ec1b71cd13a6a82150aca94283f10ed1 Parents: 9d3c39e Author: Haibo Chen Authored: Mon Jul 23 11:18:25 2018 -0700 Committer: Haibo Chen Committed: Mon Jul 23 11:18:25 2018 -0700 -- .../src/main/java/org/apache/hadoop/service/AbstractService.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/17e26163/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java index 70de647..5b96fbf4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java @@ -254,7 +254,7 @@ public abstract class AbstractService implements Service { * @param exception the exception */ protected final void noteFailure(Exception exception) { -LOG.debug("noteFailure {}" + exception); +LOG.debug("noteFailure", exception); if (exception == null) { //make sure failure logic doesn't itself cause problems return;
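The bug fixed here is a common SLF4J misuse: "noteFailure {}" + exception concatenates the exception's toString() onto the format string, so the {} placeholder is never substituted and the stack trace is lost. Passing the exception as a separate trailing argument lets SLF4J log it as a Throwable. A standalone sketch of the two shapes:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Slf4jThrowableSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(Slf4jThrowableSketch.class);

      void noteFailure(Exception exception) {
        // Broken: string concatenation leaves a literal "{}" in the message
        // and reduces the exception to its toString(), with no stack trace.
        LOG.debug("noteFailure {}" + exception);

        // Fixed: a trailing Throwable argument is logged with its stack trace.
        LOG.debug("noteFailure", exception);
      }
    }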
[07/50] hadoop git commit: YARN-8501. Reduce complexity of RMWebServices getApps method. Contributed by Szilard Nemeth
YARN-8501. Reduce complexity of RMWebServices getApps method. Contributed by Szilard Nemeth Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5836e0a4 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5836e0a4 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5836e0a4 Branch: refs/heads/HADOOP-15461 Commit: 5836e0a46bf9793e0a61bb8ec46536f4a67d38d7 Parents: ccf2db7 Author: Eric Yang Authored: Thu Jul 19 12:30:38 2018 -0400 Committer: Eric Yang Committed: Thu Jul 19 12:30:38 2018 -0400 -- .../hadoop/yarn/server/webapp/WebServices.java | 2 +- .../webapp/ApplicationsRequestBuilder.java | 231 .../resourcemanager/webapp/RMWebServices.java | 145 + .../webapp/TestApplicationsRequestBuilder.java | 529 +++ 4 files changed, 777 insertions(+), 130 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/5836e0a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java index 03b1055..5bb5448 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java @@ -392,7 +392,7 @@ public class WebServices { response.setContentType(null); } - protected static Set + public static Set parseQueries(Set queries, boolean isState) { Set params = new HashSet(); if (!queries.isEmpty()) { http://git-wip-us.apache.org/repos/asf/hadoop/blob/5836e0a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java new file mode 100644 index 000..876d044 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java @@ -0,0 +1,231 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp; + +import com.google.common.collect.Sets; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity +.CapacityScheduler; +import org.apache.hadoop.yarn.webapp.BadRequestException; + +import java.io.IOException; +import java.util.Set; + +import static org.apache.hadoop.yarn.server.webapp.WebServices.parseQueries; + +public class ApplicationsRequestBuilder { + + private Set statesQuery = Sets.newHashSet(); + private Set users = Sets.newHashSetWithExpectedSize(1); + private Set queues = Sets.newHashSetWithExpectedSize(1); + private String limit = null; + private Long limitNumber; + + // set values suitable in case both of begin/end not specified + private long
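The new ApplicationsRequestBuilder moves the per-parameter parsing and validation out of the getApps web-service method into a fluent builder, so the service method shrinks from roughly 145 lines of inline checks to a chain of with* calls followed by build(). A generic sketch of that shape (hypothetical fields and return type, not the real builder's API):

    import java.util.HashSet;
    import java.util.Set;

    public final class RequestBuilderSketch {
      private final Set<String> states = new HashSet<>();
      private String user;
      private long limit = Long.MAX_VALUE;

      private RequestBuilderSketch() { }

      public static RequestBuilderSketch create() {
        return new RequestBuilderSketch();
      }

      public RequestBuilderSketch withState(String state) {
        states.add(state);
        return this;
      }

      public RequestBuilderSketch withUser(String userQuery) {
        this.user = userQuery;
        return this;
      }

      public RequestBuilderSketch withLimit(String limitQuery) {
        // Validation lives in the builder, so the web service method stays small.
        try {
          this.limit = Long.parseLong(limitQuery);
        } catch (NumberFormatException e) {
          throw new IllegalArgumentException("limit value must be a number: " + limitQuery, e);
        }
        return this;
      }

      public String build() {
        // The real builder produces a GetApplicationsRequest; a String stands in here.
        return "states=" + states + ", user=" + user + ", limit=" + limit;
      }
    }

Call sites then read as a single chained expression, for example RequestBuilderSketch.create().withUser("alice").withState("RUNNING").withLimit("10").build().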
[26/50] hadoop git commit: HDDS-260. Support in Datanode for sending ContainerActions to SCM. Contributed by Nanda kumar.
HDDS-260. Support in Datanode for sending ContainerActions to SCM. Contributed by Nanda kumar. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/347c9550 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/347c9550 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/347c9550 Branch: refs/heads/HADOOP-15461 Commit: 347c9550135ea10fd84d5007124452bf5f2d6619 Parents: 9be25e3 Author: Xiaoyu Yao Authored: Fri Jul 20 14:37:13 2018 -0700 Committer: Xiaoyu Yao Committed: Fri Jul 20 14:37:13 2018 -0700 -- .../org/apache/hadoop/hdds/HddsConfigKeys.java | 6 + .../common/src/main/resources/ozone-default.xml | 10 + .../common/statemachine/StateContext.java | 55 +++- .../states/endpoint/HeartbeatEndpointTask.java | 33 +- .../StorageContainerDatanodeProtocol.proto | 4 +- .../common/report/TestReportPublisher.java | 41 --- .../endpoint/TestHeartbeatEndpointTask.java | 300 +++ .../common/states/endpoint/package-info.java| 18 ++ 8 files changed, 414 insertions(+), 53 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/347c9550/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java index 0283615..fd4bf08 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java @@ -48,4 +48,10 @@ public final class HddsConfigKeys { "hdds.command.status.report.interval"; public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT = "60s"; + + public static final String HDDS_CONTAINER_ACTION_MAX_LIMIT = + "hdds.container.action.max.limit"; + public static final int HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT = + 20; + } http://git-wip-us.apache.org/repos/asf/hadoop/blob/347c9550/hadoop-hdds/common/src/main/resources/ozone-default.xml -- diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 69a382a..84a3e0c 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -1098,4 +1098,14 @@ + +hdds.container.action.max.limit +20 +DATANODE + + Maximum number of Container Actions sent by the datanode to SCM in a + single heartbeat. 
+ + + \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/347c9550/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java index faaff69..7862cc6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java @@ -20,14 +20,18 @@ import com.google.protobuf.GeneratedMessage; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status; +import org.apache.hadoop.hdds.protocol.proto +.StorageContainerDatanodeProtocolProtos.ContainerAction; +import org.apache.hadoop.hdds.protocol.proto +.StorageContainerDatanodeProtocolProtos.CommandStatus.Status; import org.apache.hadoop.ozone.container.common.states.DatanodeState; import org.apache.hadoop.ozone.container.common.states.datanode .InitDatanodeState; import org.apache.hadoop.ozone.container.common.states.datanode .RunningDatanodeState; import org.apache.hadoop.ozone.protocol.commands.CommandStatus; -import org.apache.hadoop.ozone.protocol.commands.CommandStatus.CommandStatusBuilder; +import org.apache.hadoop.ozone.protocol.commands.CommandStatus +.CommandStatusBuilder; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,6 +47,7 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Lock;
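The new hdds.container.action.max.limit key (default 20) bounds how many queued ContainerActions the datanode includes in a single heartbeat. A compact sketch of reading such a limit with the standard Configuration.getInt and draining at most that many queued actions per heartbeat (the queue and method names are illustrative, not the StateContext API):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;
    import org.apache.hadoop.conf.Configuration;

    public class BoundedActionsSketch {
      // Key and default as added by this patch; the queue itself is illustrative.
      static final String ACTION_MAX_LIMIT_KEY = "hdds.container.action.max.limit";
      static final int ACTION_MAX_LIMIT_DEFAULT = 20;

      private final Queue<String> pendingActions = new ConcurrentLinkedQueue<>();
      private final int maxLimit;

      BoundedActionsSketch(Configuration conf) {
        this.maxLimit = conf.getInt(ACTION_MAX_LIMIT_KEY, ACTION_MAX_LIMIT_DEFAULT);
      }

      void addAction(String action) {
        pendingActions.add(action);
      }

      // Drains at most maxLimit actions for inclusion in a single heartbeat.
      List<String> getPendingActionsForHeartbeat() {
        List<String> batch = new ArrayList<>();
        String action;
        while (batch.size() < maxLimit && (action = pendingActions.poll()) != null) {
          batch.add(action);
        }
        return batch;
      }
    }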
[11/50] hadoop git commit: YARN-6995. Improve use of ResourceNotFoundException in resource types code. (Daniel Templeton and Szilard Nemeth via Haibo Chen)
YARN-6995. Improve use of ResourceNotFoundException in resource types code. (Daniel Templeton and Szilard Nemeth via Haibo Chen) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f354f47f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f354f47f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f354f47f Branch: refs/heads/HADOOP-15461 Commit: f354f47f9959d8a79baee690858af3e160494c32 Parents: b3b4d4c Author: Haibo Chen Authored: Thu Jul 19 15:34:12 2018 -0700 Committer: Haibo Chen Committed: Thu Jul 19 15:35:05 2018 -0700 -- .../hadoop/yarn/api/records/Resource.java | 22 --- .../exceptions/ResourceNotFoundException.java | 29 +++- .../api/records/impl/pb/ResourcePBImpl.java | 10 +++ .../hadoop/yarn/util/resource/Resources.java| 6 ++-- 4 files changed, 34 insertions(+), 33 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f354f47f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java index 3cac1d1..1a7252d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java @@ -257,18 +257,15 @@ public abstract class Resource implements Comparable { * * @param resource name of the resource * @return the ResourceInformation object for the resource - * @throws ResourceNotFoundException if the resource can't be found */ @Public @InterfaceStability.Unstable - public ResourceInformation getResourceInformation(String resource) - throws ResourceNotFoundException { + public ResourceInformation getResourceInformation(String resource) { Integer index = ResourceUtils.getResourceTypeIndex().get(resource); if (index != null) { return resources[index]; } -throw new ResourceNotFoundException("Unknown resource '" + resource -+ "'. 
Known resources are " + Arrays.toString(resources)); +throw new ResourceNotFoundException(this, resource); } /** @@ -299,12 +296,10 @@ public abstract class Resource implements Comparable { * * @param resource name of the resource * @return the value for the resource - * @throws ResourceNotFoundException if the resource can't be found */ @Public @InterfaceStability.Unstable - public long getResourceValue(String resource) - throws ResourceNotFoundException { + public long getResourceValue(String resource) { return getResourceInformation(resource).getValue(); } @@ -313,13 +308,11 @@ public abstract class Resource implements Comparable { * * @param resource the resource for which the ResourceInformation is provided * @param resourceInformation ResourceInformation object - * @throws ResourceNotFoundException if the resource is not found */ @Public @InterfaceStability.Unstable public void setResourceInformation(String resource, - ResourceInformation resourceInformation) - throws ResourceNotFoundException { + ResourceInformation resourceInformation) { if (resource.equals(ResourceInformation.MEMORY_URI)) { this.setMemorySize(resourceInformation.getValue()); return; @@ -348,8 +341,7 @@ public abstract class Resource implements Comparable { ResourceInformation resourceInformation) throws ResourceNotFoundException { if (index < 0 || index >= resources.length) { - throw new ResourceNotFoundException("Unknown resource at index '" + index - + "'. Valid resources are " + Arrays.toString(resources)); + throwExceptionWhenArrayOutOfBound(index); } ResourceInformation.copy(resourceInformation, resources[index]); } @@ -360,12 +352,10 @@ public abstract class Resource implements Comparable { * * @param resource the resource for which the value is provided. * @param valuethe value to set - * @throws ResourceNotFoundException if the resource is not found */ @Public @InterfaceStability.Unstable - public void setResourceValue(String resource, long value) - throws ResourceNotFoundException { + public void setResourceValue(String resource, long value) { if (resource.equals(ResourceInformation.MEMORY_URI)) { this.setMemorySize(value); return;
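The throws clauses can only be dropped from Resource's getters and setters because ResourceNotFoundException is unchecked, and the new ResourceNotFoundException(this, resource) constructor builds the message from the resource object and the missing name instead of forcing every call site to assemble it. A minimal sketch of an unchecked exception in that style (a plain RuntimeException subclass here, purely for illustration):

    public class UnknownResourceTypeException extends RuntimeException {
      private static final long serialVersionUID = 1L;

      // Message assembled from the containing object and the missing resource
      // name, mirroring the two-argument constructor used in the diff above.
      public UnknownResourceTypeException(Object resourceHolder, String resourceName) {
        super("Unknown resource '" + resourceName + "'. Known resources are "
            + resourceHolder);
      }
    }

    // Callers can then invoke lookups without try/catch or throws clauses:
    //   long value = resource.getResourceValue("yarn.io/gpu");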
[05/50] hadoop git commit: HADOOP-15610. Fixed pylint version for Hadoop docker image. Contributed by Jack Bearden
HADOOP-15610. Fixed pylint version for Hadoop docker image. Contributed by Jack Bearden Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba1ab08f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba1ab08f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba1ab08f Branch: refs/heads/HADOOP-15461 Commit: ba1ab08fdae96ad7c9c4f4bf8672abd741b7f758 Parents: c492eac Author: Eric Yang Authored: Wed Jul 18 20:09:43 2018 -0400 Committer: Eric Yang Committed: Wed Jul 18 20:09:43 2018 -0400 -- dev-support/docker/Dockerfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba1ab08f/dev-support/docker/Dockerfile -- diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index 369c606..a8c5c12 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -154,9 +154,10 @@ RUN apt-get -q update && apt-get -q install -y shellcheck RUN apt-get -q update && apt-get -q install -y bats -# Install pylint (always want latest) +# Install pylint at fixed version (2.0.0 removed python2 support) +# https://github.com/PyCQA/pylint/issues/2294 -RUN pip2 install pylint +RUN pip2 install pylint==1.9.2 # Install dateutil.parser
[01/50] hadoop git commit: HDFS-13733. RBF: Add Web UI configurations and descriptions to RBF document. Contributed by Takanobu Asanuma. [Forced Update!]
Repository: hadoop Updated Branches: refs/heads/HADOOP-15461 ad353e3a1 -> bac459b3f (forced update) HDFS-13733. RBF: Add Web UI configurations and descriptions to RBF document. Contributed by Takanobu Asanuma. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1af87df2 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1af87df2 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1af87df2 Branch: refs/heads/HADOOP-15461 Commit: 1af87df242c4286474961078d306a5692f85debc Parents: 0a1e922 Author: Yiqun Lin Authored: Tue Jul 17 10:45:08 2018 +0800 Committer: Yiqun Lin Committed: Tue Jul 17 10:45:08 2018 +0800 -- .../src/site/markdown/HDFSRouterFederation.md | 12 1 file changed, 12 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/1af87df2/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index 73e0f4a..c5bf5e1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -330,6 +330,18 @@ The administration server to manage the Mount Table. | dfs.federation.router.admin-bind-host | 0.0.0.0 | The actual address the RPC admin server will bind to. | | dfs.federation.router.admin.handler.count | 1 | The number of server threads for the router to handle RPC requests from admin. | +### HTTP Server + +The HTTP Server to provide Web UI and the HDFS REST interface ([WebHDFS](../hadoop-hdfs/WebHDFS.html)) for the clients. The default URL is "`http://router_host:50071`". + +| Property | Default | Description| +|: |: |: | +| dfs.federation.router.http.enable | `true` | If `true`, the HTTP service to handle client requests in the router is enabled. | +| dfs.federation.router.http-address | 0.0.0.0:50071 | HTTP address that handles the web requests to the Router. | +| dfs.federation.router.http-bind-host | 0.0.0.0 | The actual address the HTTP server will bind to. | +| dfs.federation.router.https-address | 0.0.0.0:50072 | HTTPS address that handles the web requests to the Router. | +| dfs.federation.router.https-bind-host | 0.0.0.0 | The actual address the HTTPS server will bind to. | + ### State Store The connection to the State Store and the internal caching at the Router.
[09/50] hadoop git commit: YARN-8436. FSParentQueue: Comparison method violates its general contract. (Wilfred Spiegelenburg via Haibo Chen)
YARN-8436. FSParentQueue: Comparison method violates its general contract. (Wilfred Spiegelenburg via Haibo Chen) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25648847 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25648847 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25648847 Branch: refs/heads/HADOOP-15461 Commit: 2564884757fbf4df7718f814cc448f7f23dad875 Parents: 45d9568 Author: Haibo Chen Authored: Thu Jul 19 13:21:57 2018 -0700 Committer: Haibo Chen Committed: Thu Jul 19 13:22:31 2018 -0700 -- .../scheduler/fair/FSParentQueue.java | 30 +++- .../scheduler/fair/FakeSchedulable.java | 4 + .../TestDominantResourceFairnessPolicy.java | 77 3 files changed, 93 insertions(+), 18 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/25648847/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java index 26c5630..d5df549 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java @@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.List; +import java.util.TreeSet; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -188,25 +188,19 @@ public class FSParentQueue extends FSQueue { return assigned; } -// Hold the write lock when sorting childQueues -writeLock.lock(); -try { - Collections.sort(childQueues, policy.getComparator()); -} finally { - writeLock.unlock(); -} - -/* - * We are releasing the lock between the sort and iteration of the - * "sorted" list. There could be changes to the list here: - * 1. Add a child queue to the end of the list, this doesn't affect - * container assignment. - * 2. Remove a child queue, this is probably good to take care of so we - * don't assign to a queue that is going to be removed shortly. - */ +// Sort the queues while holding a read lock on this parent only. +// The individual entries are not locked and can change which means that +// the collection of childQueues can not be sorted by calling Sort(). +// Locking each childqueue to prevent changes would have a large +// performance impact. +// We do not have to handle the queue removal case as a queue must be +// empty before removal. Assigning an application to a queue and removal of +// that queue both need the scheduler lock. 
+TreeSet sortedChildQueues = new TreeSet<>(policy.getComparator()); readLock.lock(); try { - for (FSQueue child : childQueues) { + sortedChildQueues.addAll(childQueues); + for (FSQueue child : sortedChildQueues) { assigned = child.assignContainer(node); if (!Resources.equals(assigned, Resources.none())) { break; http://git-wip-us.apache.org/repos/asf/hadoop/blob/25648847/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java index 03332b2..01eec73 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java +++
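The "Comparison method violates its general contract" error comes from TimSort when the comparison keys of the child queues change while Collections.sort is running. The fix instead inserts the queues into a TreeSet built with the policy comparator while holding only the read lock, so the shared list is never sorted in place. A generic sketch of that snapshot-and-sort approach (illustrative Queue type; note that a TreeSet drops elements that compare equal, so the comparator needs a tie-breaker):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import java.util.TreeSet;
    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class SnapshotSortSketch {
      static final class Queue {
        final String name;
        volatile long demand;  // may change concurrently with scheduling
        Queue(String name, long demand) { this.name = name; this.demand = demand; }
      }

      private final List<Queue> childQueues = new ArrayList<>();
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private final Lock readLock = lock.readLock();

      // Comparator with a name tie-breaker: distinct queues must never
      // compare to 0, or the TreeSet would silently drop one of them.
      private final Comparator<Queue> policy = (a, b) -> {
        int byDemand = Long.compare(a.demand, b.demand);
        return byDemand != 0 ? byDemand : a.name.compareTo(b.name);
      };

      Queue pickFirst() {
        TreeSet<Queue> sorted = new TreeSet<>(policy);
        readLock.lock();
        try {
          // Sort on insertion into a private snapshot instead of sorting the
          // shared list in place.
          sorted.addAll(childQueues);
          return sorted.isEmpty() ? null : sorted.first();
        } finally {
          readLock.unlock();
        }
      }
    }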
[02/50] hadoop git commit: HDDS-241. Handle Volume in inconsistent state. Contributed by Hanisha Koneru.
HDDS-241. Handle Volume in inconsistent state. Contributed by Hanisha Koneru. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5d44473 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5d44473 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5d44473 Branch: refs/heads/HADOOP-15461 Commit: d5d444732bf5c3f3cfc681f8d87e0681a7471f2f Parents: 1af87df Author: Xiaoyu Yao Authored: Wed Jul 18 09:38:43 2018 -0700 Committer: Xiaoyu Yao Committed: Wed Jul 18 09:38:43 2018 -0700 -- .../container/common/volume/HddsVolume.java | 45 +-- .../container/common/volume/VolumeSet.java | 14 +++- .../container/common/volume/TestVolumeSet.java | 78 +--- .../container/ozoneimpl/TestOzoneContainer.java | 18 - 4 files changed, 129 insertions(+), 26 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5d44473/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index 1e71494..6468720 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -42,6 +42,18 @@ import java.util.Properties; * HddsVolume represents volume in a datanode. {@link VolumeSet} maitains a * list of HddsVolumes, one for each volume in the Datanode. * {@link VolumeInfo} in encompassed by this class. + * + * The disk layout per volume is as follows: + * ../hdds/VERSION + * ../hdds/<>/current/<>/<>/metadata + * ../hdds/<>/current/<>/<>/<> + * + * Each hdds volume has its own VERSION file. The hdds volume will have one + * scmUuid directory for each SCM it is a part of (currently only one SCM is + * supported). + * + * During DN startup, if the VERSION file exists, we verify that the + * clusterID in the version file matches the clusterID from SCM. */ public final class HddsVolume { @@ -108,11 +120,6 @@ public final class HddsVolume { } private HddsVolume(Builder b) throws IOException { -Preconditions.checkNotNull(b.volumeRootStr, -"Volume root dir cannot be null"); -Preconditions.checkNotNull(b.datanodeUuid, "DatanodeUUID cannot be null"); -Preconditions.checkNotNull(b.conf, "Configuration cannot be null"); - StorageLocation location = StorageLocation.parse(b.volumeRootStr); hddsRootDir = new File(location.getUri().getPath(), HDDS_VOLUME_DIR); this.state = VolumeState.NOT_INITIALIZED; @@ -162,6 +169,10 @@ public final class HddsVolume { readVersionFile(); setState(VolumeState.NORMAL); break; +case INCONSISTENT: + // Volume Root is in an inconsistent state. Skip loading this volume. + throw new IOException("Volume is in an " + VolumeState.INCONSISTENT + + " state. Skipped loading volume: " + hddsRootDir.getPath()); default: throw new IOException("Unrecognized initial state : " + intialVolumeState + "of volume : " + hddsRootDir); @@ -170,11 +181,23 @@ public final class HddsVolume { private VolumeState analyzeVolumeState() { if (!hddsRootDir.exists()) { + // Volume Root does not exist. return VolumeState.NON_EXISTENT; } -if (!getVersionFile().exists()) { +if (!hddsRootDir.isDirectory()) { + // Volume Root exists but is not a directory. 
+ return VolumeState.INCONSISTENT; +} +File[] files = hddsRootDir.listFiles(); +if (files == null || files.length == 0) { + // Volume Root exists and is empty. return VolumeState.NOT_FORMATTED; } +if (!getVersionFile().exists()) { + // Volume Root is non empty but VERSION file does not exist. + return VolumeState.INCONSISTENT; +} +// Volume Root and VERSION file exist. return VolumeState.NOT_INITIALIZED; } @@ -321,11 +344,21 @@ public final class HddsVolume { /** * VolumeState represents the different states a HddsVolume can be in. + * NORMAL => Volume can be used for storage + * FAILED => Volume has failed due and can no longer be used for + *storing containers. + * NON_EXISTENT=> Volume Root dir does not exist + * INCONSISTENT=> Volume Root dir is not empty but VERSION file is + *missing or Volume Root dir is not a directory + * NOT_FORMATTED => Volume Root exists but not formatted (no VERSION file) + *
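The new INCONSISTENT state covers a volume root that exists but is not a directory, or that contains data without a VERSION file; such volumes are skipped at startup instead of being treated as freshly formattable. A compact sketch of that decision tree over plain java.io.File checks (local enum, illustrative rather than the HddsVolume API):

    import java.io.File;

    public class VolumeStateSketch {
      enum VolumeState { NON_EXISTENT, INCONSISTENT, NOT_FORMATTED, NOT_INITIALIZED }

      static VolumeState analyze(File rootDir, File versionFile) {
        if (!rootDir.exists()) {
          return VolumeState.NON_EXISTENT;      // nothing on disk yet
        }
        if (!rootDir.isDirectory()) {
          return VolumeState.INCONSISTENT;      // root exists but is a plain file
        }
        File[] files = rootDir.listFiles();
        if (files == null || files.length == 0) {
          return VolumeState.NOT_FORMATTED;     // empty directory: safe to format
        }
        if (!versionFile.exists()) {
          return VolumeState.INCONSISTENT;      // data present but no VERSION file
        }
        return VolumeState.NOT_INITIALIZED;     // VERSION present: load and verify it
      }
    }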
hadoop git commit: HADOOP-15612. Improve exception when tfile fails to load LzoCodec. (gera)
Repository: hadoop Updated Branches: refs/heads/branch-3.0 3210b3d8a -> aab32c8cd HADOOP-15612. Improve exception when tfile fails to load LzoCodec. (gera) (cherry picked from commit 6bec03cfc8bdcf6aa3df9c22231ab959ba31f2f5) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aab32c8c Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aab32c8c Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aab32c8c Branch: refs/heads/branch-3.0 Commit: aab32c8cdae37862dea6f5f37e400cfa2478b080 Parents: 3210b3d Author: Gera Shegalov Authored: Tue Jul 17 00:05:39 2018 -0700 Committer: Gera Shegalov Committed: Tue Jul 24 17:25:28 2018 -0700 -- .../hadoop/io/file/tfile/Compression.java | 31 +++--- .../hadoop/io/file/tfile/TestCompression.java | 34 +++- 2 files changed, 53 insertions(+), 12 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/aab32c8c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java index fa85ed7..c4347e0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java @@ -5,9 +5,9 @@ * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -24,6 +24,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.util.ArrayList; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.compress.CodecPool; import org.apache.hadoop.io.compress.CompressionCodec; @@ -78,25 +79,33 @@ public final class Compression { public enum Algorithm { LZO(TFile.COMPRESSION_LZO) { private transient boolean checked = false; + private transient ClassNotFoundException cnf; + private transient boolean reinitCodecInTests; private static final String defaultClazz = "org.apache.hadoop.io.compress.LzoCodec"; + private transient String clazz; private transient CompressionCodec codec = null; + private String getLzoCodecClass() { +String extClazzConf = conf.get(CONF_LZO_CLASS); +String extClazz = (extClazzConf != null) ? +extClazzConf : System.getProperty(CONF_LZO_CLASS); +return (extClazz != null) ? extClazz : defaultClazz; + } + @Override public synchronized boolean isSupported() { -if (!checked) { +if (!checked || reinitCodecInTests) { checked = true; - String extClazzConf = conf.get(CONF_LZO_CLASS); - String extClazz = (extClazzConf != null) ? - extClazzConf : System.getProperty(CONF_LZO_CLASS); - String clazz = (extClazz != null) ? 
extClazz : defaultClazz; + reinitCodecInTests = conf.getBoolean("test.reload.lzo.codec", false); + clazz = getLzoCodecClass(); try { LOG.info("Trying to load Lzo codec class: " + clazz); codec = (CompressionCodec) ReflectionUtils.newInstance(Class .forName(clazz), conf); } catch (ClassNotFoundException e) { -// that is okay +cnf = e; } } return codec != null; @@ -105,9 +114,9 @@ public final class Compression { @Override CompressionCodec getCodec() throws IOException { if (!isSupported()) { - throw new IOException( - "LZO codec class not specified. Did you forget to set property " - + CONF_LZO_CLASS + "?"); + throw new IOException(String.format( + "LZO codec %s=%s could not be loaded", CONF_LZO_CLASS, clazz), + cnf); } return codec; http://git-wip-us.apache.org/repos/asf/hadoop/blob/aab32c8c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestCompression.java -- diff --git
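The improvement is to remember the ClassNotFoundException from the reflective codec load and attach it, together with the class name that was tried, to the IOException thrown later, rather than discarding it ("that is okay"). A generic sketch of the capture-and-rethrow-with-cause pattern around Class.forName (property and class names below are placeholders, not the tfile configuration):

    import java.io.IOException;

    public class LazyCodecLoaderSketch {
      private Object codec;                     // stands in for CompressionCodec
      private ClassNotFoundException loadFailure;
      private String clazz;

      // Hypothetical property name, for illustration only.
      private String resolveCodecClass() {
        String ext = System.getProperty("example.lzo.codec.class");
        return ext != null ? ext : "org.example.compress.LzoCodec";
      }

      synchronized boolean isSupported() {
        if (codec == null && loadFailure == null) {
          clazz = resolveCodecClass();
          try {
            codec = Class.forName(clazz).getDeclaredConstructor().newInstance();
          } catch (ClassNotFoundException e) {
            loadFailure = e;                    // remember why loading failed
          } catch (ReflectiveOperationException e) {
            // Wrap other reflective failures so a single cause is kept around.
            loadFailure = new ClassNotFoundException(clazz, e);
          }
        }
        return codec != null;
      }

      Object getCodec() throws IOException {
        if (!isSupported()) {
          // Surface both the class name that was tried and the original cause.
          throw new IOException(String.format("LZO codec %s could not be loaded", clazz),
              loadFailure);
        }
        return codec;
      }
    }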
hadoop git commit: HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica (Contributed by BELUGA BEHR via Daniel Templeton)
Repository: hadoop Updated Branches: refs/heads/branch-3.0 1d8fce0d2 -> 3210b3d8a HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica (Contributed by BELUGA BEHR via Daniel Templeton) Change-Id: I965d1cfa642ad24296038b83e3d5c9983545267d (cherry picked from commit 849c45db187224095b13fe297a4d7377fbb9d2cd) (cherry picked from commit 00c476abd8f1d34414b646219856859477558458) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3210b3d8 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3210b3d8 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3210b3d8 Branch: refs/heads/branch-3.0 Commit: 3210b3d8aa7f1927ac2ef5f2d9e5d83969ae2c48 Parents: 1d8fce0 Author: Daniel Templeton Authored: Tue Jul 24 15:34:19 2018 -0700 Committer: Daniel Templeton Committed: Tue Jul 24 16:13:30 2018 -0700 -- .../java/org/apache/hadoop/fs/CreateFlag.java | 9 ++- .../org/apache/hadoop/hdfs/AddBlockFlag.java| 11 ++- .../org/apache/hadoop/hdfs/DFSOutputStream.java | 3 + .../hadoop/hdfs/DistributedFileSystem.java | 11 +++ .../src/main/proto/ClientNamenodeProtocol.proto | 1 + .../BlockPlacementPolicyDefault.java| 4 +- .../hdfs/server/namenode/FSDirWriteFileOp.java | 30 +--- .../server/namenode/TestFSDirWriteFileOp.java | 79 8 files changed, 134 insertions(+), 14 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/3210b3d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java index 383d65a..c3e088b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java @@ -116,7 +116,14 @@ public enum CreateFlag { * Enforce the file to be a replicated file, no matter what its parent * directory's replication or erasure coding policy is. */ - SHOULD_REPLICATE((short) 0x80); + SHOULD_REPLICATE((short) 0x80), + + /** + * Advise that the first block replica NOT take into account DataNode + * locality. The first block replica should be placed randomly within the + * cluster. Subsequent block replicas should follow DataNode locality rules. + */ + IGNORE_CLIENT_LOCALITY((short) 0x100); private final short mode; http://git-wip-us.apache.org/repos/asf/hadoop/blob/3210b3d8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java index 6a0805b..b0686d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java @@ -36,7 +36,16 @@ public enum AddBlockFlag { * * @see CreateFlag#NO_LOCAL_WRITE */ - NO_LOCAL_WRITE((short) 0x01); + NO_LOCAL_WRITE((short) 0x01), + + /** + * Advise that the first block replica NOT take into account DataNode + * locality. The first block replica should be placed randomly within the + * cluster. Subsequent block replicas should follow DataNode locality rules. 
+ * + * @see CreateFlag#IGNORE_CLIENT_LOCALITY + */ + IGNORE_CLIENT_LOCALITY((short) 0x02); private final short mode; http://git-wip-us.apache.org/repos/asf/hadoop/blob/3210b3d8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index 9734752..e977054 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -201,6 +201,9 @@ public class DFSOutputStream extends FSOutputSummer if (flag.contains(CreateFlag.NO_LOCAL_WRITE)) { this.addBlockFlags.add(AddBlockFlag.NO_LOCAL_WRITE); } +if (flag.contains(CreateFlag.IGNORE_CLIENT_LOCALITY)) { + this.addBlockFlags.add(AddBlockFlag.IGNORE_CLIENT_LOCALITY); +} if
hadoop git commit: HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica (Contributed by BELUGA BEHR via Daniel Templeton)
Repository: hadoop Updated Branches: refs/heads/branch-3.1 7e7792dd7 -> 00c476abd HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica (Contributed by BELUGA BEHR via Daniel Templeton) Change-Id: I965d1cfa642ad24296038b83e3d5c9983545267d (cherry picked from commit 849c45db187224095b13fe297a4d7377fbb9d2cd) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00c476ab Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00c476ab Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00c476ab Branch: refs/heads/branch-3.1 Commit: 00c476abd8f1d34414b646219856859477558458 Parents: 7e7792d Author: Daniel Templeton Authored: Tue Jul 24 15:34:19 2018 -0700 Committer: Daniel Templeton Committed: Tue Jul 24 16:12:43 2018 -0700 -- .../java/org/apache/hadoop/fs/CreateFlag.java | 9 ++- .../org/apache/hadoop/hdfs/AddBlockFlag.java| 11 ++- .../org/apache/hadoop/hdfs/DFSOutputStream.java | 3 + .../hadoop/hdfs/DistributedFileSystem.java | 11 +++ .../src/main/proto/ClientNamenodeProtocol.proto | 1 + .../BlockPlacementPolicyDefault.java| 4 +- .../hdfs/server/namenode/FSDirWriteFileOp.java | 30 +--- .../server/namenode/TestFSDirWriteFileOp.java | 79 8 files changed, 134 insertions(+), 14 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/00c476ab/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java index 383d65a..c3e088b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java @@ -116,7 +116,14 @@ public enum CreateFlag { * Enforce the file to be a replicated file, no matter what its parent * directory's replication or erasure coding policy is. */ - SHOULD_REPLICATE((short) 0x80); + SHOULD_REPLICATE((short) 0x80), + + /** + * Advise that the first block replica NOT take into account DataNode + * locality. The first block replica should be placed randomly within the + * cluster. Subsequent block replicas should follow DataNode locality rules. + */ + IGNORE_CLIENT_LOCALITY((short) 0x100); private final short mode; http://git-wip-us.apache.org/repos/asf/hadoop/blob/00c476ab/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java index 6a0805b..b0686d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java @@ -36,7 +36,16 @@ public enum AddBlockFlag { * * @see CreateFlag#NO_LOCAL_WRITE */ - NO_LOCAL_WRITE((short) 0x01); + NO_LOCAL_WRITE((short) 0x01), + + /** + * Advise that the first block replica NOT take into account DataNode + * locality. The first block replica should be placed randomly within the + * cluster. Subsequent block replicas should follow DataNode locality rules. 
+ * + * @see CreateFlag#IGNORE_CLIENT_LOCALITY + */ + IGNORE_CLIENT_LOCALITY((short) 0x02); private final short mode; http://git-wip-us.apache.org/repos/asf/hadoop/blob/00c476ab/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index 9734752..e977054 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -201,6 +201,9 @@ public class DFSOutputStream extends FSOutputSummer if (flag.contains(CreateFlag.NO_LOCAL_WRITE)) { this.addBlockFlags.add(AddBlockFlag.NO_LOCAL_WRITE); } +if (flag.contains(CreateFlag.IGNORE_CLIENT_LOCALITY)) { + this.addBlockFlags.add(AddBlockFlag.IGNORE_CLIENT_LOCALITY); +} if (progress != null) { DFSClient.LOG.debug("Set non-null
hadoop git commit: HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica (Contributed by BELUGA BEHR via Daniel Templeton)
Repository: hadoop Updated Branches: refs/heads/trunk 6bec03cfc -> 849c45db1 HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica (Contributed by BELUGA BEHR via Daniel Templeton) Change-Id: I965d1cfa642ad24296038b83e3d5c9983545267d Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/849c45db Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/849c45db Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/849c45db Branch: refs/heads/trunk Commit: 849c45db187224095b13fe297a4d7377fbb9d2cd Parents: 6bec03c Author: Daniel Templeton Authored: Tue Jul 24 15:34:19 2018 -0700 Committer: Daniel Templeton Committed: Tue Jul 24 16:05:27 2018 -0700 -- .../java/org/apache/hadoop/fs/CreateFlag.java | 9 ++- .../org/apache/hadoop/hdfs/AddBlockFlag.java| 11 ++- .../org/apache/hadoop/hdfs/DFSOutputStream.java | 3 + .../hadoop/hdfs/DistributedFileSystem.java | 11 +++ .../src/main/proto/ClientNamenodeProtocol.proto | 1 + .../BlockPlacementPolicyDefault.java| 4 +- .../hdfs/server/namenode/FSDirWriteFileOp.java | 30 +--- .../server/namenode/TestFSDirWriteFileOp.java | 79 8 files changed, 134 insertions(+), 14 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/849c45db/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java index 383d65a..c3e088b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java @@ -116,7 +116,14 @@ public enum CreateFlag { * Enforce the file to be a replicated file, no matter what its parent * directory's replication or erasure coding policy is. */ - SHOULD_REPLICATE((short) 0x80); + SHOULD_REPLICATE((short) 0x80), + + /** + * Advise that the first block replica NOT take into account DataNode + * locality. The first block replica should be placed randomly within the + * cluster. Subsequent block replicas should follow DataNode locality rules. + */ + IGNORE_CLIENT_LOCALITY((short) 0x100); private final short mode; http://git-wip-us.apache.org/repos/asf/hadoop/blob/849c45db/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java index 6a0805b..b0686d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java @@ -36,7 +36,16 @@ public enum AddBlockFlag { * * @see CreateFlag#NO_LOCAL_WRITE */ - NO_LOCAL_WRITE((short) 0x01); + NO_LOCAL_WRITE((short) 0x01), + + /** + * Advise that the first block replica NOT take into account DataNode + * locality. The first block replica should be placed randomly within the + * cluster. Subsequent block replicas should follow DataNode locality rules. 
+ * + * @see CreateFlag#IGNORE_CLIENT_LOCALITY + */ + IGNORE_CLIENT_LOCALITY((short) 0x02); private final short mode; http://git-wip-us.apache.org/repos/asf/hadoop/blob/849c45db/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index 9734752..e977054 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -201,6 +201,9 @@ public class DFSOutputStream extends FSOutputSummer if (flag.contains(CreateFlag.NO_LOCAL_WRITE)) { this.addBlockFlags.add(AddBlockFlag.NO_LOCAL_WRITE); } +if (flag.contains(CreateFlag.IGNORE_CLIENT_LOCALITY)) { + this.addBlockFlags.add(AddBlockFlag.IGNORE_CLIENT_LOCALITY); +} if (progress != null) { DFSClient.LOG.debug("Set non-null progress callback on DFSOutputStream " +"{}", src);
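For reference, a minimal client-side sketch of requesting the new flag when creating a file. It assumes a cluster running a release that includes HDFS-13448 and uses the generic FileSystem.create overload that accepts an EnumSet of CreateFlag; the path, replication and block size below are illustrative only.

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class IgnoreClientLocalityWrite {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at an HDFS cluster built with HDFS-13448.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    EnumSet<CreateFlag> flags = EnumSet.of(
        CreateFlag.CREATE,
        CreateFlag.OVERWRITE,
        CreateFlag.IGNORE_CLIENT_LOCALITY);

    // With IGNORE_CLIENT_LOCALITY set, the first replica of each block is
    // placed without preferring the writer's local DataNode; subsequent
    // replicas still follow the normal placement policy.
    try (FSDataOutputStream out = fs.create(
        new Path("/tmp/ignore-locality-demo"),   // illustrative path
        FsPermission.getFileDefault(),
        flags,
        4096,                                    // buffer size
        (short) 3,                               // replication
        128L * 1024 * 1024,                      // block size
        null)) {                                 // no progress callback
      out.writeBytes("spread the first replica around the cluster\n");
    }
  }
}

The usual motivation is an edge or ingest node that would otherwise accumulate the first replica of every block it writes and fill its local disks faster than the rest of the cluster.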
hadoop git commit: HADOOP-15612. Improve exception when tfile fails to load LzoCodec. (gera)
Repository: hadoop Updated Branches: refs/heads/trunk ea2c6c8c9 -> 6bec03cfc HADOOP-15612. Improve exception when tfile fails to load LzoCodec. (gera) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bec03cf Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bec03cf Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bec03cf Branch: refs/heads/trunk Commit: 6bec03cfc8bdcf6aa3df9c22231ab959ba31f2f5 Parents: ea2c6c8 Author: Gera Shegalov Authored: Tue Jul 17 00:05:39 2018 -0700 Committer: Gera Shegalov Committed: Tue Jul 24 14:32:30 2018 -0700 -- .../hadoop/io/file/tfile/Compression.java | 31 +++--- .../hadoop/io/file/tfile/TestCompression.java | 34 +++- 2 files changed, 53 insertions(+), 12 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bec03cf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java index fa85ed7..c4347e0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java @@ -5,9 +5,9 @@ * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -24,6 +24,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.util.ArrayList; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.compress.CodecPool; import org.apache.hadoop.io.compress.CompressionCodec; @@ -78,25 +79,33 @@ public final class Compression { public enum Algorithm { LZO(TFile.COMPRESSION_LZO) { private transient boolean checked = false; + private transient ClassNotFoundException cnf; + private transient boolean reinitCodecInTests; private static final String defaultClazz = "org.apache.hadoop.io.compress.LzoCodec"; + private transient String clazz; private transient CompressionCodec codec = null; + private String getLzoCodecClass() { +String extClazzConf = conf.get(CONF_LZO_CLASS); +String extClazz = (extClazzConf != null) ? +extClazzConf : System.getProperty(CONF_LZO_CLASS); +return (extClazz != null) ? extClazz : defaultClazz; + } + @Override public synchronized boolean isSupported() { -if (!checked) { +if (!checked || reinitCodecInTests) { checked = true; - String extClazzConf = conf.get(CONF_LZO_CLASS); - String extClazz = (extClazzConf != null) ? - extClazzConf : System.getProperty(CONF_LZO_CLASS); - String clazz = (extClazz != null) ? 
extClazz : defaultClazz; + reinitCodecInTests = conf.getBoolean("test.reload.lzo.codec", false); + clazz = getLzoCodecClass(); try { LOG.info("Trying to load Lzo codec class: " + clazz); codec = (CompressionCodec) ReflectionUtils.newInstance(Class .forName(clazz), conf); } catch (ClassNotFoundException e) { -// that is okay +cnf = e; } } return codec != null; @@ -105,9 +114,9 @@ public final class Compression { @Override CompressionCodec getCodec() throws IOException { if (!isSupported()) { - throw new IOException( - "LZO codec class not specified. Did you forget to set property " - + CONF_LZO_CLASS + "?"); + throw new IOException(String.format( + "LZO codec %s=%s could not be loaded", CONF_LZO_CLASS, clazz), + cnf); } return codec; http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bec03cf/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestCompression.java -- diff --git
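A small self-contained sketch of the lookup order the refactored getLzoCodecClass() applies: configuration key first, then a JVM system property, then the bundled default. The literal key "io.compression.codec.lzo.class" is what CONF_LZO_CLASS is assumed to resolve to; treat that value and the class name as assumptions to verify against your release.

import org.apache.hadoop.conf.Configuration;

public class LzoCodecClassLookup {
  // Assumed value of Compression.CONF_LZO_CLASS; verify against your release.
  private static final String CONF_LZO_CLASS = "io.compression.codec.lzo.class";
  private static final String DEFAULT_CLAZZ =
      "org.apache.hadoop.io.compress.LzoCodec";

  /** Mirrors the lookup order of the patched getLzoCodecClass(). */
  static String resolveLzoCodecClass(Configuration conf) {
    String extClazzConf = conf.get(CONF_LZO_CLASS);           // 1. config key
    String extClazz = (extClazzConf != null)
        ? extClazzConf
        : System.getProperty(CONF_LZO_CLASS);                 // 2. system property
    return (extClazz != null) ? extClazz : DEFAULT_CLAZZ;     // 3. default
  }

  public static void main(String[] args) {
    System.out.println(resolveLzoCodecClass(new Configuration()));
  }
}

With the patch, a failed load no longer disappears silently: the IOException thrown from getCodec() names the key, the resolved class and chains the original ClassNotFoundException as its cause.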
hadoop git commit: YARN-6966. NodeManager metrics may return wrong negative values when NM restart. (Szilard Nemeth via Haibo Chen)
Repository: hadoop Updated Branches: refs/heads/branch-3.1 4488fd829 -> 7e7792dd7 YARN-6966. NodeManager metrics may return wrong negative values when NM restart. (Szilard Nemeth via Haibo Chen) (cherry picked from commit 9d3c39e9dd88b8f32223c01328581bb68507d415) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e7792dd Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e7792dd Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e7792dd Branch: refs/heads/branch-3.1 Commit: 7e7792dd7b4d97a10af1dd583fc65214b4b9c009 Parents: 4488fd8 Author: Haibo Chen Authored: Mon Jul 23 11:06:44 2018 -0700 Committer: Haibo Chen Committed: Tue Jul 24 12:50:43 2018 -0700 -- .../containermanager/ContainerManagerImpl.java | 2 +- .../scheduler/ContainerScheduler.java | 16 -- .../recovery/NMLeveldbStateStoreService.java| 32 ++- .../recovery/NMNullStateStoreService.java | 2 +- .../recovery/NMStateStoreService.java | 3 +- .../BaseContainerManagerTest.java | 2 +- .../TestContainerManagerRecovery.java | 57 .../TestContainerSchedulerRecovery.java | 46 +++- .../metrics/TestNodeManagerMetrics.java | 4 +- .../recovery/NMMemoryStateStoreService.java | 16 +- .../TestNMLeveldbStateStoreService.java | 21 +++- 11 files changed, 163 insertions(+), 38 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e7792dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index 26d06aa..ce240bc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -496,7 +496,7 @@ public class ContainerManagerImpl extends CompositeService implements Container container = new ContainerImpl(getConfig(), dispatcher, launchContext, credentials, metrics, token, context, rcs); context.getContainers().put(token.getContainerID(), container); -containerScheduler.recoverActiveContainer(container, rcs.getStatus()); +containerScheduler.recoverActiveContainer(container, rcs); app.handle(new ApplicationContainerInitEvent(container)); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e7792dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java index 57368ab..e6b0f46 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java @@ -41,6 +41,9 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.Contai import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService +.RecoveredContainerState; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -229,11 +232,11 @@ public class ContainerScheduler extends
hadoop git commit: YARN-7133. Clean up lock-try order in fair scheduler. (Szilard Nemeth via Haibo Chen)
Repository: hadoop Updated Branches: refs/heads/trunk e673dd1d4 -> ea2c6c8c9 YARN-7133. Clean up lock-try order in fair scheduler. (Szilard Nemeth via Haibo Chen) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea2c6c8c Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea2c6c8c Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea2c6c8c Branch: refs/heads/trunk Commit: ea2c6c8c9a55813a19b3dbd0d29747d6a7739030 Parents: e673dd1 Author: Haibo Chen Authored: Tue Jul 24 12:46:15 2018 -0700 Committer: Haibo Chen Committed: Tue Jul 24 12:46:59 2018 -0700 -- .../scheduler/fair/FairScheduler.java | 36 ++-- 1 file changed, 18 insertions(+), 18 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea2c6c8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index eb9f6af..20d1afe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -467,8 +467,8 @@ public class FairScheduler extends return; } +writeLock.lock(); try { - writeLock.lock(); RMApp rmApp = rmContext.getRMApps().get(applicationId); FSLeafQueue queue = assignToQueue(rmApp, queueName, user); if (queue == null) { @@ -550,8 +550,8 @@ public class FairScheduler extends ApplicationAttemptId applicationAttemptId, boolean transferStateFromPreviousAttempt, boolean isAttemptRecovering) { +writeLock.lock(); try { - writeLock.lock(); SchedulerApplication application = applications.get( applicationAttemptId.getApplicationId()); String user = application.getUser(); @@ -653,8 +653,8 @@ public class FairScheduler extends private void removeApplicationAttempt( ApplicationAttemptId applicationAttemptId, RMAppAttemptState rmAppAttemptFinalState, boolean keepContainers) { +writeLock.lock(); try { - writeLock.lock(); LOG.info("Application " + applicationAttemptId + " is done. 
finalState=" + rmAppAttemptFinalState); FSAppAttempt attempt = getApplicationAttempt(applicationAttemptId); @@ -720,8 +720,8 @@ public class FairScheduler extends protected void completedContainerInternal( RMContainer rmContainer, ContainerStatus containerStatus, RMContainerEventType event) { +writeLock.lock(); try { - writeLock.lock(); Container container = rmContainer.getContainer(); // Get the application for the finished container @@ -768,8 +768,8 @@ public class FairScheduler extends private void addNode(List containerReports, RMNode node) { +writeLock.lock(); try { - writeLock.lock(); FSSchedulerNode schedulerNode = new FSSchedulerNode(node, usePortForNodeName); nodeTracker.addNode(schedulerNode); @@ -790,8 +790,8 @@ public class FairScheduler extends } private void removeNode(RMNode rmNode) { +writeLock.lock(); try { - writeLock.lock(); NodeId nodeId = rmNode.getNodeID(); FSSchedulerNode node = nodeTracker.getNode(nodeId); if (node == null) { @@ -988,8 +988,8 @@ public class FairScheduler extends @Override protected void nodeUpdate(RMNode nm) { +writeLock.lock(); try { - writeLock.lock(); long start = getClock().getTime(); super.nodeUpdate(nm); @@ -1089,8 +1089,8 @@ public class FairScheduler extends @VisibleForTesting void attemptScheduling(FSSchedulerNode node) { +writeLock.lock(); try { - writeLock.lock(); if (rmContext.isWorkPreservingRecoveryEnabled() && !rmContext .isSchedulerReadyForAllocatingContainers()) { return; @@ -1305,8 +1305,8 @@ public class FairScheduler extends private String resolveReservationQueueName(String queueName, ApplicationId applicationId, ReservationId reservationID, boolean isRecovering) { +
hadoop git commit: YARN-8541. RM startup failure on recovery after user deletion. Contributed by Bibin A Chundatt.
Repository: hadoop Updated Branches: refs/heads/trunk cd0b9f138 -> e673dd1d4 YARN-8541. RM startup failure on recovery after user deletion. Contributed by Bibin A Chundatt. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e673dd1d Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e673dd1d Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e673dd1d Branch: refs/heads/trunk Commit: e673dd1d4d78b66e7b6705ec6dc3679d2347d704 Parents: cd0b9f1 Author: bibinchundatt Authored: Tue Jul 24 18:36:49 2018 +0530 Committer: bibinchundatt Committed: Tue Jul 24 23:26:59 2018 +0530 -- .../server/resourcemanager/RMAppManager.java| 48 ++-- .../placement/PlacementManager.java | 9 .../TestWorkPreservingRMRestart.java| 48 .../placement/TestPlacementManager.java | 20 4 files changed, 80 insertions(+), 45 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/e673dd1d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java index 3e64cfc..7011aaa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java @@ -364,17 +364,9 @@ public class RMAppManager implements EventHandler, ApplicationSubmissionContext submissionContext, long submitTime, String user, boolean isRecovery, long startTime) throws YarnException { -ApplicationPlacementContext placementContext = null; -try { - placementContext = placeApplication(rmContext, submissionContext, user); -} catch (YarnException e) { - String msg = - "Failed to place application " + submissionContext.getApplicationId() - + " to queue and specified " + "queue is invalid : " - + submissionContext.getQueue(); - LOG.error(msg, e); - throw e; -} +ApplicationPlacementContext placementContext = +placeApplication(rmContext.getQueuePlacementManager(), +submissionContext, user, isRecovery); // We only replace the queue when it's a new application if (!isRecovery) { @@ -789,23 +781,31 @@ public class RMAppManager implements EventHandler, } @VisibleForTesting - ApplicationPlacementContext placeApplication(RMContext rmContext, - ApplicationSubmissionContext context, String user) throws YarnException { + ApplicationPlacementContext placeApplication( + PlacementManager placementManager, ApplicationSubmissionContext context, + String user, boolean isRecovery) throws YarnException { ApplicationPlacementContext placementContext = null; -PlacementManager placementManager = rmContext.getQueuePlacementManager(); - if (placementManager != null) { - placementContext = placementManager.placeApplication(context, user); -} else{ - if ( context.getQueue() == null || context.getQueue().isEmpty()) { -final String msg = "Queue Placement Manager is not set. 
Cannot place " -+ "application : " + context.getApplicationId() + " to queue and " -+ "specified queue is invalid " + context.getQueue(); -LOG.error(msg); -throw new YarnException(msg); + try { +placementContext = placementManager.placeApplication(context, user); + } catch (YarnException e) { +// Placement could also fail if the user doesn't exist in system +// skip if the user is not found during recovery. +if (isRecovery) { + LOG.warn("PlaceApplication failed,skipping on recovery of rm"); + return placementContext; +} +throw e; } } - +if (placementContext == null && (context.getQueue() == null) || context +.getQueue().isEmpty()) { + String msg = "Failed to place application " + context.getApplicationId() + + " to queue and specified " + "queue is invalid : " + context + .getQueue(); + LOG.error(msg); + throw new YarnException(msg); +} return placementContext; }
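Stripped of the YARN types, the error-handling shape introduced above is: stay strict for a fresh submission, but tolerate a placement failure while replaying persisted applications so that one deleted user cannot block ResourceManager startup. A generic, self-contained sketch of that pattern; the names are illustrative and not the RMAppManager API.

import java.util.concurrent.Callable;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class RecoveryTolerantResolver {
  private static final Logger LOG =
      LoggerFactory.getLogger(RecoveryTolerantResolver.class);

  private RecoveryTolerantResolver() {
  }

  /**
   * Runs a resolution step that must succeed for new submissions but may be
   * skipped while recovering previously accepted state.
   */
  static <T> T resolveOrSkipOnRecovery(Callable<T> step, boolean isRecovery)
      throws Exception {
    try {
      return step.call();
    } catch (Exception e) {
      if (isRecovery) {
        // The application was already accepted once; fall back to whatever
        // was persisted rather than failing the whole restart.
        LOG.warn("Resolution failed during recovery, continuing without it", e);
        return null;
      }
      throw e;   // fresh submission: surface the error to the client
    }
  }
}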
hadoop git commit: HDDS-282. Consolidate logging in scm/container-service. Contributed by Elek Marton.
Repository: hadoop Updated Branches: refs/heads/trunk 35ce6eb1f -> cd0b9f138 HDDS-282. Consolidate logging in scm/container-service. Contributed by Elek Marton. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd0b9f13 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd0b9f13 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd0b9f13 Branch: refs/heads/trunk Commit: cd0b9f13805affcc91a2cba42b176bb9031378eb Parents: 35ce6eb Author: Xiaoyu Yao Authored: Tue Jul 24 10:16:53 2018 -0700 Committer: Xiaoyu Yao Committed: Tue Jul 24 10:17:03 2018 -0700 -- .../container/common/statemachine/EndpointStateMachine.java | 4 ++-- .../common/states/endpoint/RegisterEndpointTask.java | 3 +-- .../statemachine/background/BlockDeletingService.java| 8 +--- .../org/apache/hadoop/hdds/server/events/EventQueue.java | 3 +-- .../org/apache/hadoop/hdds/server/events/TypedEvent.java | 6 -- 5 files changed, 13 insertions(+), 11 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd0b9f13/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java index 7e85923..fb32a05 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java @@ -203,11 +203,11 @@ public class EndpointStateMachine this.incMissed(); if (this.getMissedCount() % getLogWarnInterval(conf) == 0) { - LOG.warn("Unable to communicate to SCM server at {}. We have not been " + + LOG.error("Unable to communicate to SCM server at {}. 
We have not been " + "able to communicate to this SCM server for past {} seconds.", this.getAddress().getHostString() + ":" + this.getAddress().getPort(), this.getMissedCount() * getScmHeartbeatInterval( - this.conf)); + this.conf), ex); } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd0b9f13/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java index b3d2b62..25af4a1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java @@ -125,8 +125,7 @@ public final class RegisterEndpointTask implements rpcEndPoint.setState(nextState); rpcEndPoint.zeroMissedCount(); } catch (IOException ex) { - rpcEndPoint.logIfNeeded(ex - ); + rpcEndPoint.logIfNeeded(ex); } finally { rpcEndPoint.unlock(); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd0b9f13/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java index 4a572ca..51eed7f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java @@ -119,9 +119,11 @@ public class BlockDeletingService extends BackgroundService{ // configured. containers = containerSet.chooseContainerForBlockDeletion( containerLimitPerInterval, containerDeletionPolicy); - LOG.info("Plan to choose {} containers for block deletion, " - + "actually returns {} valid
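Two things change in the EndpointStateMachine hunk: the level moves from warn to error, and the caught exception is appended as a final argument. With SLF4J, a Throwable passed after the message placeholders are satisfied is printed with its stack trace rather than dropped; a minimal illustration (class and values are made up):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ScmHeartbeatLogging {
  private static final Logger LOG =
      LoggerFactory.getLogger(ScmHeartbeatLogging.class);

  void logMissedHeartbeats(String scmAddress, long missedSeconds, Exception ex) {
    // The two {} placeholders consume the first two arguments; the trailing
    // Throwable is recognized by SLF4J and logged with its stack trace.
    LOG.error("Unable to communicate to SCM server at {}. We have not been "
        + "able to communicate to this SCM server for past {} seconds.",
        scmAddress, missedSeconds, ex);
  }
}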
hadoop git commit: YARN-7748. TestContainerResizing.testIncreaseContainerUnreservedWhenApplicationCompleted fails due to multiple container fail events. Contributed by Weiwei Yang.
Repository: hadoop Updated Branches: refs/heads/branch-3.0 1f713d6c6 -> 1d8fce0d2 YARN-7748. TestContainerResizing.testIncreaseContainerUnreservedWhenApplicationCompleted fails due to multiple container fail events. Contributed by Weiwei Yang. (cherry picked from commit 35ce6eb1f526ce3db7e015fb1761eee15604100c) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d8fce0d Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d8fce0d Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d8fce0d Branch: refs/heads/branch-3.0 Commit: 1d8fce0d2f9d3dbce65563bcb73133288222abe4 Parents: 1f713d6 Author: Sunil G Authored: Tue Jul 24 22:20:06 2018 +0530 Committer: Sunil G Committed: Tue Jul 24 22:21:44 2018 +0530 -- .../scheduler/capacity/TestContainerResizing.java | 18 +- 1 file changed, 13 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d8fce0d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java index eacbf6e..307d5ae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java @@ -45,6 +45,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent; @@ -58,7 +59,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica .FiCaSchedulerNode; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.CandidateNodeSet; import org.apache.hadoop.yarn.util.resource.Resources; @@ -740,11 +740,14 @@ public class TestContainerResizing { @Test public void testIncreaseContainerUnreservedWhenApplicationCompleted() throws Exception { +// Disable relaunch app attempt on failure, in order to check +// resource usages for current app only. 
+conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1); /** * Similar to testIncreaseContainerUnreservedWhenContainerCompleted, when * application finishes, reserved increase container should be cancelled */ -MockRM rm1 = new MockRM() { +MockRM rm1 = new MockRM(conf) { @Override public RMNodeLabelsManager createNodeLabelManager() { return mgr; @@ -807,9 +810,14 @@ public class TestContainerResizing { Assert.assertEquals(6 * GB, app.getAppAttemptResourceUsage().getReserved().getMemorySize()); -// Kill the application -cs.handle(new AppAttemptRemovedSchedulerEvent(am1.getApplicationAttemptId(), -RMAppAttemptState.KILLED, false)); +// Kill the application by killing the AM container +ContainerId amContainer = +ContainerId.newContainerId(am1.getApplicationAttemptId(), 1); +cs.killContainer(cs.getRMContainer(amContainer)); +rm1.waitForState(am1.getApplicationAttemptId(), +RMAppAttemptState.FAILED); +rm1.waitForState(am1.getApplicationAttemptId().getApplicationId(), +RMAppState.FAILED); /* Check statuses after reservation satisfied */ // Increase
hadoop git commit: YARN-7748. TestContainerResizing.testIncreaseContainerUnreservedWhenApplicationCompleted fails due to multiple container fail events. Contributed by Weiwei Yang.
Repository: hadoop Updated Branches: refs/heads/branch-3.1 a684a2efb -> 4488fd829 YARN-7748. TestContainerResizing.testIncreaseContainerUnreservedWhenApplicationCompleted fails due to multiple container fail events. Contributed by Weiwei Yang. (cherry picked from commit 35ce6eb1f526ce3db7e015fb1761eee15604100c) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4488fd82 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4488fd82 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4488fd82 Branch: refs/heads/branch-3.1 Commit: 4488fd8295011b37f683c964ae2012fe1b6a4044 Parents: a684a2e Author: Sunil G Authored: Tue Jul 24 22:20:06 2018 +0530 Committer: Sunil G Committed: Tue Jul 24 22:21:15 2018 +0530 -- .../scheduler/capacity/TestContainerResizing.java | 18 +- 1 file changed, 13 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/4488fd82/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java index eacbf6e..307d5ae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java @@ -45,6 +45,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent; @@ -58,7 +59,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica .FiCaSchedulerNode; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.CandidateNodeSet; import org.apache.hadoop.yarn.util.resource.Resources; @@ -740,11 +740,14 @@ public class TestContainerResizing { @Test public void testIncreaseContainerUnreservedWhenApplicationCompleted() throws Exception { +// Disable relaunch app attempt on failure, in order to check +// resource usages for current app only. 
+conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1); /** * Similar to testIncreaseContainerUnreservedWhenContainerCompleted, when * application finishes, reserved increase container should be cancelled */ -MockRM rm1 = new MockRM() { +MockRM rm1 = new MockRM(conf) { @Override public RMNodeLabelsManager createNodeLabelManager() { return mgr; @@ -807,9 +810,14 @@ public class TestContainerResizing { Assert.assertEquals(6 * GB, app.getAppAttemptResourceUsage().getReserved().getMemorySize()); -// Kill the application -cs.handle(new AppAttemptRemovedSchedulerEvent(am1.getApplicationAttemptId(), -RMAppAttemptState.KILLED, false)); +// Kill the application by killing the AM container +ContainerId amContainer = +ContainerId.newContainerId(am1.getApplicationAttemptId(), 1); +cs.killContainer(cs.getRMContainer(amContainer)); +rm1.waitForState(am1.getApplicationAttemptId(), +RMAppAttemptState.FAILED); +rm1.waitForState(am1.getApplicationAttemptId().getApplicationId(), +RMAppState.FAILED); /* Check statuses after reservation satisfied */ // Increase
hadoop git commit: YARN-7748. TestContainerResizing.testIncreaseContainerUnreservedWhenApplicationCompleted fails due to multiple container fail events. Contributed by Weiwei Yang.
Repository: hadoop Updated Branches: refs/heads/trunk 773d312f7 -> 35ce6eb1f YARN-7748. TestContainerResizing.testIncreaseContainerUnreservedWhenApplicationCompleted fails due to multiple container fail events. Contributed by Weiwei Yang. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35ce6eb1 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35ce6eb1 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35ce6eb1 Branch: refs/heads/trunk Commit: 35ce6eb1f526ce3db7e015fb1761eee15604100c Parents: 773d312 Author: Sunil G Authored: Tue Jul 24 22:20:06 2018 +0530 Committer: Sunil G Committed: Tue Jul 24 22:20:17 2018 +0530 -- .../scheduler/capacity/TestContainerResizing.java | 18 +- 1 file changed, 13 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/35ce6eb1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java index eacbf6e..307d5ae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java @@ -45,6 +45,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent; @@ -58,7 +59,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica .FiCaSchedulerNode; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.CandidateNodeSet; import org.apache.hadoop.yarn.util.resource.Resources; @@ -740,11 +740,14 @@ public class TestContainerResizing { @Test public void testIncreaseContainerUnreservedWhenApplicationCompleted() throws Exception { +// Disable relaunch app attempt on failure, in order to check +// resource usages for current app only. 
+conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1); /** * Similar to testIncreaseContainerUnreservedWhenContainerCompleted, when * application finishes, reserved increase container should be cancelled */ -MockRM rm1 = new MockRM() { +MockRM rm1 = new MockRM(conf) { @Override public RMNodeLabelsManager createNodeLabelManager() { return mgr; @@ -807,9 +810,14 @@ public class TestContainerResizing { Assert.assertEquals(6 * GB, app.getAppAttemptResourceUsage().getReserved().getMemorySize()); -// Kill the application -cs.handle(new AppAttemptRemovedSchedulerEvent(am1.getApplicationAttemptId(), -RMAppAttemptState.KILLED, false)); +// Kill the application by killing the AM container +ContainerId amContainer = +ContainerId.newContainerId(am1.getApplicationAttemptId(), 1); +cs.killContainer(cs.getRMContainer(amContainer)); +rm1.waitForState(am1.getApplicationAttemptId(), +RMAppAttemptState.FAILED); +rm1.waitForState(am1.getApplicationAttemptId().getApplicationId(), +RMAppState.FAILED); /* Check statuses after reservation satisfied */ // Increase request should be unreserved
hadoop git commit: HDDS-272. TestBlockDeletingService is failing with DiskOutOfSpaceException. Contributed by Lokesh Jain.
Repository: hadoop Updated Branches: refs/heads/trunk ff7c2eda3 -> 773d312f7 HDDS-272. TestBlockDeletingService is failing with DiskOutOfSpaceException. Contributed by Lokesh Jain. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/773d312f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/773d312f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/773d312f Branch: refs/heads/trunk Commit: 773d312f7412d5050c106ed3a1cd0d1934bfa2e0 Parents: ff7c2ed Author: Mukul Kumar Singh Authored: Tue Jul 24 21:23:20 2018 +0530 Committer: Mukul Kumar Singh Committed: Tue Jul 24 21:23:20 2018 +0530 -- .../container/keyvalue/KeyValueHandler.java | 2 +- .../background/BlockDeletingService.java| 9 +- .../testutils/BlockDeletingServiceTestImpl.java | 3 +- .../common/TestBlockDeletingService.java| 90 4 files changed, 60 insertions(+), 44 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/773d312f/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 9aa3df7..d3a1ca4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -136,7 +136,7 @@ public class KeyValueHandler extends Handler { TimeUnit.MILLISECONDS); this.blockDeletingService = new BlockDeletingService(containerSet, svcInterval, serviceTimeout, -config); +TimeUnit.MILLISECONDS, config); blockDeletingService.start(); // TODO: Add supoort for different volumeChoosingPolicies. 
volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy(); http://git-wip-us.apache.org/repos/asf/hadoop/blob/773d312f/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java index a3e36f4..4a572ca 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java @@ -88,11 +88,10 @@ public class BlockDeletingService extends BackgroundService{ // Core pool size for container tasks private final static int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 10; - public BlockDeletingService(ContainerSet containerSet, - long serviceInterval, long serviceTimeout, Configuration conf) { -super("BlockDeletingService", serviceInterval, -TimeUnit.MILLISECONDS, BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, -serviceTimeout); + public BlockDeletingService(ContainerSet containerSet, long serviceInterval, + long serviceTimeout, TimeUnit timeUnit, Configuration conf) { +super("BlockDeletingService", serviceInterval, timeUnit, +BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout); this.containerSet = containerSet; containerDeletionPolicy = ReflectionUtils.newInstance(conf.getClass( ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, http://git-wip-us.apache.org/repos/asf/hadoop/blob/773d312f/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java -- diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java index a87f655..115b5e2 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java @@ -44,7 +44,8 @@ public class BlockDeletingServiceTestImpl
hadoop git commit: YARN-8548. AllocateResponse proto setNMTokens initBuilder not done. Contributed by Bilwa S T.
Repository: hadoop Updated Branches: refs/heads/branch-3.0 1a270e3b0 -> 1f713d6c6 YARN-8548. AllocationRespose proto setNMToken initBuilder not done. Contributed by Bilwa S T. (cherry picked from commit ff7c2eda34c2c40ad71b50df6462a661bd213fbd) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f713d6c Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f713d6c Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f713d6c Branch: refs/heads/branch-3.0 Commit: 1f713d6c66877bd687d785d496bd50a5b44b0a72 Parents: 1a270e3 Author: bibinchundatt Authored: Tue Jul 24 16:17:20 2018 +0530 Committer: bibinchundatt Committed: Tue Jul 24 16:32:21 2018 +0530 -- .../impl/pb/AllocateResponsePBImpl.java | 1 + .../resourcemanager/recovery/TestProtos.java| 20 2 files changed, 21 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f713d6c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java index ff35da8..33d6c10 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java @@ -329,6 +329,7 @@ public class AllocateResponsePBImpl extends AllocateResponse { @Override public synchronized void setNMTokens(List nmTokens) { +maybeInitBuilder(); if (nmTokens == null || nmTokens.isEmpty()) { if (this.nmTokens != null) { this.nmTokens.clear(); http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f713d6c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java index cc96412..d42b411 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java @@ -18,7 +18,15 @@ package org.apache.hadoop.yarn.server.resourcemanager.recovery; +import static org.junit.Assert.*; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateResponsePBImpl; +import org.apache.hadoop.yarn.api.records.NMToken; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.EpochProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto; import org.junit.Assert; import org.junit.Test; @@ -33,4 +41,16 @@ public class TestProtos { String protoString = 
proto.toString(); Assert.assertNotNull(protoString); } + + @Test + public void testProtoAllocateResponse() { +AllocateResponseProto proto = AllocateResponseProto.getDefaultInstance(); +AllocateResponsePBImpl alloc = new AllocateResponsePBImpl(proto); +List nmTokens = new ArrayList(); +try { + alloc.setNMTokens(nmTokens); +} catch (Exception ex) { + fail(); +} + } } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: YARN-8548. AllocateResponse proto setNMTokens initBuilder not done. Contributed by Bilwa S T.
Repository: hadoop Updated Branches: refs/heads/branch-3.1 0710107f8 -> a684a2efb YARN-8548. AllocationRespose proto setNMToken initBuilder not done. Contributed by Bilwa S T. (cherry picked from commit ff7c2eda34c2c40ad71b50df6462a661bd213fbd) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a684a2ef Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a684a2ef Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a684a2ef Branch: refs/heads/branch-3.1 Commit: a684a2efb855e1933b0d808363c3c1fe69778867 Parents: 0710107 Author: bibinchundatt Authored: Tue Jul 24 16:17:20 2018 +0530 Committer: bibinchundatt Committed: Tue Jul 24 16:30:31 2018 +0530 -- .../impl/pb/AllocateResponsePBImpl.java | 1 + .../resourcemanager/recovery/TestProtos.java| 20 2 files changed, 21 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/a684a2ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java index 3ab5563..8df56b8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java @@ -347,6 +347,7 @@ public class AllocateResponsePBImpl extends AllocateResponse { @Override public synchronized void setNMTokens(List nmTokens) { +maybeInitBuilder(); if (nmTokens == null || nmTokens.isEmpty()) { if (this.nmTokens != null) { this.nmTokens.clear(); http://git-wip-us.apache.org/repos/asf/hadoop/blob/a684a2ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java index cc96412..d42b411 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java @@ -18,7 +18,15 @@ package org.apache.hadoop.yarn.server.resourcemanager.recovery; +import static org.junit.Assert.*; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateResponsePBImpl; +import org.apache.hadoop.yarn.api.records.NMToken; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.EpochProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto; import org.junit.Assert; import org.junit.Test; @@ -33,4 +41,16 @@ public class TestProtos { String protoString = 
proto.toString(); Assert.assertNotNull(protoString); } + + @Test + public void testProtoAllocateResponse() { +AllocateResponseProto proto = AllocateResponseProto.getDefaultInstance(); +AllocateResponsePBImpl alloc = new AllocateResponsePBImpl(proto); +List nmTokens = new ArrayList(); +try { + alloc.setNMTokens(nmTokens); +} catch (Exception ex) { + fail(); +} + } } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: YARN-8548. AllocateResponse proto setNMTokens initBuilder not done. Contributed by Bilwa S T.
Repository: hadoop Updated Branches: refs/heads/trunk 846127883 -> ff7c2eda3 YARN-8548. AllocationRespose proto setNMToken initBuilder not done. Contributed by Bilwa S T. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff7c2eda Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff7c2eda Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff7c2eda Branch: refs/heads/trunk Commit: ff7c2eda34c2c40ad71b50df6462a661bd213fbd Parents: 8461278 Author: bibinchundatt Authored: Tue Jul 24 16:17:20 2018 +0530 Committer: bibinchundatt Committed: Tue Jul 24 16:17:20 2018 +0530 -- .../impl/pb/AllocateResponsePBImpl.java | 1 + .../resourcemanager/recovery/TestProtos.java| 20 2 files changed, 21 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff7c2eda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java index 3ab5563..8df56b8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java @@ -347,6 +347,7 @@ public class AllocateResponsePBImpl extends AllocateResponse { @Override public synchronized void setNMTokens(List nmTokens) { +maybeInitBuilder(); if (nmTokens == null || nmTokens.isEmpty()) { if (this.nmTokens != null) { this.nmTokens.clear(); http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff7c2eda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java index cc96412..d42b411 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java @@ -18,7 +18,15 @@ package org.apache.hadoop.yarn.server.resourcemanager.recovery; +import static org.junit.Assert.*; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateResponsePBImpl; +import org.apache.hadoop.yarn.api.records.NMToken; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.EpochProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto; import org.junit.Assert; import org.junit.Test; @@ -33,4 +41,16 @@ public class TestProtos { String protoString = proto.toString(); Assert.assertNotNull(protoString); } + + @Test + public void 
testProtoAllocateResponse() { +AllocateResponseProto proto = AllocateResponseProto.getDefaultInstance(); +AllocateResponsePBImpl alloc = new AllocateResponsePBImpl(proto); +List<NMToken> nmTokens = new ArrayList<NMToken>(); +try { + alloc.setNMTokens(nmTokens); +} catch (Exception ex) { + fail(); +} + } }
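To make the one-line AllocateResponsePBImpl change above easier to follow: YARN PBImpl records are created over an immutable protobuf instance and only materialize a mutable builder on the first write, so a setter that touches the builder before that guard has run hits a null builder. The self-contained sketch below (illustrative names, not quoted from AllocateResponsePBImpl) shows the lazy-builder idiom that the added maybeInitBuilder() call relies on.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    // Self-contained sketch of the lazy-builder idiom behind the one-line fix above.
    // Names are illustrative; this is not quoted from AllocateResponsePBImpl.
    class LazyBuilderRecord {
      private List<String> proto = Collections.emptyList(); // immutable "proto" view
      private List<String> builder;                         // created lazily on first write
      private boolean viaProto = true;                      // 'proto' is the source of truth

      // The guard the patch adds at the top of setNMTokens(): materialize a mutable
      // builder from the current proto before any modification happens.
      private void maybeInitBuilder() {
        if (viaProto || builder == null) {
          builder = new ArrayList<>(proto);
        }
        viaProto = false;
      }

      void setTokens(List<String> tokens) {
        maybeInitBuilder();   // without this call, 'builder' is still null here
        builder.clear();      // analogous to clearing the repeated NMToken field
        builder.addAll(tokens);
      }

      public static void main(String[] args) {
        // Mirrors the new test: setting an empty list on a freshly constructed
        // record must not throw; without maybeInitBuilder() it would NPE on 'builder'.
        new LazyBuilderRecord().setTokens(Collections.<String>emptyList());
      }
    }

The new testProtoAllocateResponse() above asserts exactly this: calling setNMTokens() on an AllocateResponsePBImpl built from the default proto instance must not throw.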
hadoop git commit: YARN-8544. [DS] AM registration fails when hadoop authorization is enabled. Contributed by Bibin A Chundatt.
Repository: hadoop Updated Branches: refs/heads/branch-3.1 5aca0588e -> 0710107f8 YARN-8544. [DS] AM registration fails when hadoop authorization is enabled. Contributed by Bibin A Chundatt. (cherry picked from commit 84612788339392fcda1aef0e27c43f5c6b2a19e5) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0710107f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0710107f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0710107f Branch: refs/heads/branch-3.1 Commit: 0710107f8d931d63627d356d6100885696cc8736 Parents: 5aca058 Author: bibinchundatt Authored: Tue Jul 24 13:09:17 2018 +0530 Committer: bibinchundatt Committed: Tue Jul 24 13:11:31 2018 +0530 -- .../src/main/conf/hadoop-policy.xml | 20 .../dev-support/findbugs-exclude.xml| 4 .../hadoop/yarn/conf/YarnConfiguration.java | 7 ++ .../yarn/conf/TestYarnConfigurationFields.java | 4 .../nodemanager/amrmproxy/AMRMProxyService.java | 8 +++ .../collectormanager/NMCollectorService.java| 2 +- .../containermanager/ContainerManagerImpl.java | 2 +- .../localizer/ResourceLocalizationService.java | 2 +- .../security/authorize/NMPolicyProvider.java| 25 ++-- .../security/authorize/RMPolicyProvider.java| 3 +++ 10 files changed, 72 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/0710107f/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml -- diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml index cf3dd1f..bd7c111 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml +++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml @@ -242,4 +242,24 @@ group list is separated by a blank. For e.g. "alice,bob users,wheel". A special value of "*" means all users are allowed. + + + security.applicationmaster-nodemanager.applicationmaster.protocol.acl +* +ACL for ApplicationMasterProtocol, used by the Nodemanager +and ApplicationMasters to communicate. +The ACL is a comma-separated list of user and group names. The user and +group list is separated by a blank. For e.g. "alice,bob users,wheel". +A special value of "*" means all users are allowed. + + + +security.distributedscheduling.protocol.acl +* +ACL for DistributedSchedulingAMProtocol, used by the Nodemanager +and Resourcemanager to communicate. +The ACL is a comma-separated list of user and group names. The user and +group list is separated by a blank. For e.g. "alice,bob users,wheel". +A special value of "*" means all users are allowed. 
+ http://git-wip-us.apache.org/repos/asf/hadoop/blob/0710107f/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml index 5841361..7d40c70 100644 --- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml @@ -461,6 +461,10 @@ + + + + http://git-wip-us.apache.org/repos/asf/hadoop/blob/0710107f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 586fabf..f7fd4fa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -2202,6 +2202,9 @@ public class YarnConfiguration extends Configuration { public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_PROTOCOL = "security.applicationmaster.protocol.acl"; + public static final String + YARN_SECURITY_SERVICE_AUTHORIZATION_DISTRIBUTEDSCHEDULING_PROTOCOL = + "security.distributedscheduling.protocol.acl"; public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGEMENT_PROTOCOL = @@ -2218,6 +2221,10 @@ public class YarnConfiguration extends Configuration {
hadoop git commit: YARN-8544. [DS] AM registration fails when hadoop authorization is enabled. Contributed by Bibin A Chundatt.
Repository: hadoop Updated Branches: refs/heads/trunk 16f9aee5f -> 846127883 YARN-8544. [DS] AM registration fails when hadoop authorization is enabled. Contributed by Bibin A Chundatt. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84612788 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84612788 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84612788 Branch: refs/heads/trunk Commit: 84612788339392fcda1aef0e27c43f5c6b2a19e5 Parents: 16f9aee Author: bibinchundatt Authored: Tue Jul 24 13:09:17 2018 +0530 Committer: bibinchundatt Committed: Tue Jul 24 13:09:17 2018 +0530 -- .../src/main/conf/hadoop-policy.xml | 20 .../dev-support/findbugs-exclude.xml| 4 .../hadoop/yarn/conf/YarnConfiguration.java | 7 ++ .../yarn/conf/TestYarnConfigurationFields.java | 4 .../nodemanager/amrmproxy/AMRMProxyService.java | 8 +++ .../collectormanager/NMCollectorService.java| 2 +- .../containermanager/ContainerManagerImpl.java | 2 +- .../localizer/ResourceLocalizationService.java | 2 +- .../security/authorize/NMPolicyProvider.java| 25 ++-- .../security/authorize/RMPolicyProvider.java| 3 +++ 10 files changed, 72 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/84612788/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml -- diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml index cf3dd1f..bd7c111 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml +++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml @@ -242,4 +242,24 @@ group list is separated by a blank. For e.g. "alice,bob users,wheel". A special value of "*" means all users are allowed. + + + security.applicationmaster-nodemanager.applicationmaster.protocol.acl +* +ACL for ApplicationMasterProtocol, used by the Nodemanager +and ApplicationMasters to communicate. +The ACL is a comma-separated list of user and group names. The user and +group list is separated by a blank. For e.g. "alice,bob users,wheel". +A special value of "*" means all users are allowed. + + + +security.distributedscheduling.protocol.acl +* +ACL for DistributedSchedulingAMProtocol, used by the Nodemanager +and Resourcemanager to communicate. +The ACL is a comma-separated list of user and group names. The user and +group list is separated by a blank. For e.g. "alice,bob users,wheel". +A special value of "*" means all users are allowed. 
+ http://git-wip-us.apache.org/repos/asf/hadoop/blob/84612788/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml index 5cc81e5..216c3bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml @@ -468,6 +468,10 @@ + + + + http://git-wip-us.apache.org/repos/asf/hadoop/blob/84612788/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 9156c2d..bbf877f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -2248,6 +2248,9 @@ public class YarnConfiguration extends Configuration { public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_PROTOCOL = "security.applicationmaster.protocol.acl"; + public static final String + YARN_SECURITY_SERVICE_AUTHORIZATION_DISTRIBUTEDSCHEDULING_PROTOCOL = + "security.distributedscheduling.protocol.acl"; public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGEMENT_PROTOCOL = @@ -2264,6 +2267,10 @@ public class YarnConfiguration extends Configuration { YARN_SECURITY_SERVICE_AUTHORIZATION_COLLECTOR_NODEMANAGER_PROTOCOL =
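Some context on why the two YARN-8544 commits above touch both hadoop-policy.xml and the PolicyProvider classes: service-level authorization in Hadoop resolves each ACL key to a protocol interface through a PolicyProvider, and an RPC protocol with no registered key cannot be authorized, which is presumably why AM registration through the AMRMProxy failed once hadoop.security.authorization was enabled. The sketch below shows the general shape of such a provider wired to the new distributed-scheduling key. It illustrates the mechanism only; it is not a quotation of the NMPolicyProvider/RMPolicyProvider changes, and the second protocol class is a stand-in rather than the actual DistributedSchedulingAMProtocol PB interface.

    import org.apache.hadoop.security.authorize.PolicyProvider;
    import org.apache.hadoop.security.authorize.Service;
    import org.apache.hadoop.yarn.api.ApplicationMasterProtocolPB;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    // Illustrative provider: each Service pairs an ACL key from hadoop-policy.xml
    // with the protocol interface it protects. This mirrors, but does not quote,
    // what the YARN-8544 patch does in NMPolicyProvider/RMPolicyProvider.
    public class ExamplePolicyProvider extends PolicyProvider {

      private static final Service[] SERVICES = new Service[] {
          // Existing ApplicationMaster protocol ACL (key shown in the diff above).
          new Service(
              YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_PROTOCOL,
              ApplicationMasterProtocolPB.class),
          // New key added by this commit; the protocol class here is a stand-in,
          // the real patch registers the distributed-scheduling PB interface.
          new Service(
              YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_DISTRIBUTEDSCHEDULING_PROTOCOL,
              ApplicationMasterProtocolPB.class)
      };

      @Override
      public Service[] getServices() {
        return SERVICES;
      }
    }

Operators then grant or restrict access by editing the matching properties in hadoop-policy.xml, which is what the new entries in the diff above provide defaults for.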
hadoop git commit: YARN-8434. Update federation documentation of Nodemanager configurations. Contributed by Bibin A Chundatt.
Repository: hadoop Updated Branches: refs/heads/branch-3.0 097ae7fdc -> 1a270e3b0 YARN-8434. Update federation documentation of Nodemanager configurations. Contributed by Bibin A Chundatt. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a270e3b Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a270e3b Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a270e3b Branch: refs/heads/branch-3.0 Commit: 1a270e3b071218dc8284b3da7255f1eadbb3e098 Parents: 097ae7f Author: bibinchundatt Authored: Tue Jul 24 13:04:26 2018 +0530 Committer: bibinchundatt Committed: Tue Jul 24 13:04:26 2018 +0530 -- .../hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md| 1 - 1 file changed, 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a270e3b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md index f83d57e..995be45 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md @@ -266,7 +266,6 @@ These are extra configurations that should appear in the **conf/yarn-site.xml** |: |: | | `yarn.nodemanager.amrmproxy.enabled` | `true` | Whether or not the AMRMProxy is enabled. |`yarn.nodemanager.amrmproxy.interceptor-class.pipeline` | `org.apache.hadoop.yarn.server.nodemanager.amrmproxy.FederationInterceptor` | A comma-separated list of interceptors to be run at the amrmproxy. For federation the last step in the pipeline should be the FederationInterceptor. -| `yarn.client.failover-proxy-provider` | `org.apache.hadoop.yarn.server.federation.failover.FederationRMFailoverProxyProvider` | The class used to connect to the RMs by looking up the membership information in federation state-store. This must be set if federation is enabled, even if RM HA is not enabled.| Optional:
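For readers applying the documentation change above: after the removed row, the NodeManager-side federation settings visible in that diff context reduce to the AMRMProxy toggle and its interceptor pipeline. The snippet below simply restates those two rows as a programmatic Configuration for illustration; yarn-site.xml remains the usual place to set them, and the removed yarn.client.failover-proxy-provider row is intentionally not reinstated.

    import org.apache.hadoop.conf.Configuration;

    // Restates the two table rows shown in the diff context above as code.
    // Keys and values are taken from that table; setting them via yarn-site.xml
    // is the normal approach, this is only a convenience sketch.
    public class FederationNmConfExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean("yarn.nodemanager.amrmproxy.enabled", true);
        conf.set("yarn.nodemanager.amrmproxy.interceptor-class.pipeline",
            "org.apache.hadoop.yarn.server.nodemanager.amrmproxy.FederationInterceptor");
        // The yarn.client.failover-proxy-provider row is the one removed by YARN-8434,
        // so it is deliberately not set here.
        System.out.println(conf.get("yarn.nodemanager.amrmproxy.interceptor-class.pipeline"));
      }
    }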
hadoop git commit: HDDS-262. Send SCM healthy and failed volumes in the heartbeat. Contributed by Bharat Viswanadham.
Repository: hadoop Updated Branches: refs/heads/trunk 2ced3efe9 -> 16f9aee5f HDDS-262. Send SCM healthy and failed volumes in the heartbeat. Contributed by Bharat Viswanadham. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16f9aee5 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16f9aee5 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16f9aee5 Branch: refs/heads/trunk Commit: 16f9aee5f55bc37c1bb243708ee9b3f97e5a5b83 Parents: 2ced3ef Author: Nanda kumar Authored: Tue Jul 24 12:09:15 2018 +0530 Committer: Nanda kumar Committed: Tue Jul 24 12:09:15 2018 +0530 -- .../container/common/volume/HddsVolume.java | 81 ++-- .../container/common/volume/VolumeSet.java | 28 +-- .../container/common/volume/TestVolumeSet.java | 35 - 3 files changed, 111 insertions(+), 33 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/16f9aee5/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index 0cbfd9f..6b90146 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -37,6 +37,7 @@ import org.slf4j.LoggerFactory; import java.io.File; import java.io.IOException; import java.util.Properties; +import java.util.UUID; /** * HddsVolume represents volume in a datanode. {@link VolumeSet} maitains a @@ -84,6 +85,7 @@ public final class HddsVolume { private String datanodeUuid; private String clusterID; +private boolean failedVolume = false; public Builder(String rootDirStr) { this.volumeRootStr = rootDirStr; @@ -114,29 +116,47 @@ public final class HddsVolume { return this; } +// This is added just to create failed volume objects, which will be used +// to create failed HddsVolume objects in the case of any exceptions caused +// during creating HddsVolume object. 
+public Builder failedVolume(boolean failed) { + this.failedVolume = failed; + return this; +} + public HddsVolume build() throws IOException { return new HddsVolume(this); } } private HddsVolume(Builder b) throws IOException { -StorageLocation location = StorageLocation.parse(b.volumeRootStr); -hddsRootDir = new File(location.getUri().getPath(), HDDS_VOLUME_DIR); -this.state = VolumeState.NOT_INITIALIZED; -this.clusterID = b.clusterID; -this.datanodeUuid = b.datanodeUuid; -this.volumeIOStats = new VolumeIOStats(); - -VolumeInfo.Builder volumeBuilder = -new VolumeInfo.Builder(b.volumeRootStr, b.conf) -.storageType(b.storageType) -.configuredCapacity(b.configuredCapacity); -this.volumeInfo = volumeBuilder.build(); - -LOG.info("Creating Volume: " + this.hddsRootDir + " of storage type : " + -b.storageType + " and capacity : " + volumeInfo.getCapacity()); - -initialize(); +if (!b.failedVolume) { + StorageLocation location = StorageLocation.parse(b.volumeRootStr); + hddsRootDir = new File(location.getUri().getPath(), HDDS_VOLUME_DIR); + this.state = VolumeState.NOT_INITIALIZED; + this.clusterID = b.clusterID; + this.datanodeUuid = b.datanodeUuid; + this.volumeIOStats = new VolumeIOStats(); + + VolumeInfo.Builder volumeBuilder = + new VolumeInfo.Builder(b.volumeRootStr, b.conf) + .storageType(b.storageType) + .configuredCapacity(b.configuredCapacity); + this.volumeInfo = volumeBuilder.build(); + + LOG.info("Creating Volume: " + this.hddsRootDir + " of storage type : " + + b.storageType + " and capacity : " + volumeInfo.getCapacity()); + + initialize(); +} else { + // Builder is called with failedVolume set, so create a failed volume + // HddsVolumeObject. + hddsRootDir = new File(b.volumeRootStr); + volumeIOStats = null; + volumeInfo = null; + storageID = UUID.randomUUID().toString(); + state = VolumeState.FAILED; +} } public VolumeInfo getVolumeInfo() { @@ -285,7 +305,10 @@ public final class HddsVolume { } public StorageType getStorageType() { -return volumeInfo.getStorageType(); +if(volumeInfo != null) { + return volumeInfo.getStorageType(); +} +return StorageType.DEFAULT; } public String
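Closing note on the HDDS-262 builder change shown above: the new failedVolume(boolean) flag lets a caller construct an HddsVolume placeholder in state FAILED when normal initialization throws, so the datanode can still account for the volume and report it to SCM in its heartbeat. The VolumeSet side of the patch is not included in this excerpt, so the sketch below only illustrates the intended usage under that assumption; a real caller also supplies the Configuration, storage type, and datanode identifiers through builder methods omitted here.

    import java.io.IOException;
    import org.apache.hadoop.ozone.container.common.volume.HddsVolume;

    // Hedged sketch of the usage pattern the new failedVolume(...) flag enables.
    // This is not the actual VolumeSet code; it only shows the fallback intent.
    public class FailedVolumeSketch {

      static HddsVolume buildVolumeOrFailedPlaceholder(String rootDir,
          HddsVolume.Builder configuredBuilder) {
        try {
          // Normal path: a fully configured builder initializes a healthy volume.
          return configuredBuilder.build();
        } catch (IOException e) {
          try {
            // HDDS-262 path: keep a placeholder object in state FAILED so the
            // datanode can report the volume as failed in its heartbeat to SCM.
            return new HddsVolume.Builder(rootDir).failedVolume(true).build();
          } catch (IOException unexpected) {
            throw new IllegalStateException(
                "Could not create failed-volume placeholder", unexpected);
          }
        }
      }
    }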