hadoop git commit: YARN-5067 Support specifying resources for AM containers in SLS. (Yufei Gu via Haibo Chen)

2017-06-30 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 38996fdcf -> 147df300b


YARN-5067 Support specifying resources for AM containers in SLS. (Yufei Gu via 
Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/147df300
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/147df300
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/147df300

Branch: refs/heads/trunk
Commit: 147df300bf00b5f4ed250426b6ccdd69085466da
Parents: 38996fd
Author: Haibo Chen 
Authored: Fri Jun 30 16:50:06 2017 -0700
Committer: Haibo Chen 
Committed: Fri Jun 30 17:03:44 2017 -0700

--
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   | 38 +++
 .../hadoop/yarn/sls/appmaster/AMSimulator.java  | 39 +++-
 .../yarn/sls/appmaster/MRAMSimulator.java   | 11 +++---
 .../hadoop/yarn/sls/conf/SLSConfiguration.java  | 15 
 .../yarn/sls/appmaster/TestAMSimulator.java |  4 +-
 5 files changed, 68 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/147df300/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index 02da056..a534f03 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -406,7 +406,7 @@ public class SLSRunner extends Configured implements Tool {
 }
 
 runNewAM(amType, user, queue, oldAppId, jobStartTime, jobFinishTime,
-getTaskContainers(jsonJob), null);
+getTaskContainers(jsonJob), null, getAMContainerResource(jsonJob));
   }
 
   private List getTaskContainers(Map jsonJob)
@@ -558,7 +558,8 @@ public class SLSRunner extends Configured implements Tool {
 
 // Only supports the default job type currently
 runNewAM(SLSUtils.DEFAULT_JOB_TYPE, user, jobQueue, oldJobId,
-jobStartTimeMS, jobFinishTimeMS, containerList, null);
+jobStartTimeMS, jobFinishTimeMS, containerList, null,
+getAMContainerResource(null));
   }
 
   private Resource getDefaultContainerResource() {
@@ -676,7 +677,8 @@ public class SLSRunner extends Configured implements Tool {
 }
 
 runNewAM(SLSUtils.DEFAULT_JOB_TYPE, user, jobQueue, oldJobId,
-jobStartTimeMS, jobFinishTimeMS, containerList, rr);
+jobStartTimeMS, jobFinishTimeMS, containerList, rr,
+getAMContainerResource(null));
   }
 } finally {
   stjp.close();
@@ -684,6 +686,26 @@ public class SLSRunner extends Configured implements Tool {
 
   }
 
+  private Resource getAMContainerResource(Map jsonJob) {
+Resource amContainerResource =
+SLSConfiguration.getAMContainerResource(getConf());
+
+if (jsonJob == null) {
+  return amContainerResource;
+}
+
+if (jsonJob.containsKey("am.memory")) {
+  amContainerResource.setMemorySize(
+  Long.parseLong(jsonJob.get("am.memory").toString()));
+}
+
+if (jsonJob.containsKey("am.vcores")) {
+  amContainerResource.setVirtualCores(
+  Integer.parseInt(jsonJob.get("am.vcores").toString()));
+}
+return amContainerResource;
+  }
+
   private void increaseQueueAppNum(String queue) throws YarnException {
 SchedulerWrapper wrapper = (SchedulerWrapper)rm.getResourceScheduler();
 String queueName = wrapper.getRealQueueName(queue);
@@ -700,7 +722,7 @@ public class SLSRunner extends Configured implements Tool {
   private void runNewAM(String jobType, String user,
   String jobQueue, String oldJobId, long jobStartTimeMS,
   long jobFinishTimeMS, List containerList,
-  ReservationSubmissionRequest rr) {
+  ReservationSubmissionRequest rr, Resource amContainerResource) {
 
 AMSimulator amSim = (AMSimulator) ReflectionUtils.newInstance(
 amClassMap.get(jobType), new Configuration());
@@ -710,9 +732,11 @@ public class SLSRunner extends Configured implements Tool {
   SLSConfiguration.AM_HEARTBEAT_INTERVAL_MS,
   SLSConfiguration.AM_HEARTBEAT_INTERVAL_MS_DEFAULT);
   boolean isTracked = trackedApps.contains(oldJobId);
-  amSim.init(AM_ID++, heartbeatInterval, containerList,
-  rm, this, jobStartTimeMS, jobFinishTimeMS, user, jobQueue,
-  isTracked, oldJobId, rr, runner.getStartTimeMS());
+  AM_ID++;
+
+  amSim.init(heartbeatInterval, containerList, rm, this, jobStartTimeMS,
+  jobFinishTimeMS, user, jobQueue, isTracked, oldJobId, rr,
+  runner.getStartTimeMS(), 
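
A minimal standalone sketch of the per-job override logic introduced above
(not part of the commit). The "am.memory"/"am.vcores" keys are the ones read
by getAMContainerResource(); the default values and the plain Map/long/int
types are stand-ins for the SLSConfiguration defaults and the YARN Resource
object.

// Sketch only: mirrors the am.memory/am.vcores handling shown in the hunk above.
import java.util.HashMap;
import java.util.Map;

public class AmResourceLookupSketch {
  public static void main(String[] args) {
    Map<String, Object> jsonJob = new HashMap<>();
    jsonJob.put("am.memory", "2048");   // per-job AM container memory (MB)
    jsonJob.put("am.vcores", "2");      // per-job AM container vcores

    long amMemory = 1024;               // assumed cluster-wide default
    int amVcores = 1;                   // assumed cluster-wide default

    if (jsonJob.containsKey("am.memory")) {
      amMemory = Long.parseLong(jsonJob.get("am.memory").toString());
    }
    if (jsonJob.containsKey("am.vcores")) {
      amVcores = Integer.parseInt(jsonJob.get("am.vcores").toString());
    }
    System.out.println("AM container: " + amMemory + " MB, " + amVcores + " vcores");
  }
}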

[2/2] hadoop git commit: HADOOP-14443. Azure: Support retry and client side failover for authorization, SASKey and delegation token generation. Contributed by Santhosh G Nayak

2017-06-30 Thread liuml07
HADOOP-14443. Azure: Support retry and client side failover for authorization, 
SASKey and delegation token generation. Contributed by Santhosh G Nayak


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38996fdc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38996fdc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38996fdc

Branch: refs/heads/trunk
Commit: 38996fdcf0987d1da00ce46f8284d8fcdce57329
Parents: bcba844
Author: Mingliang Liu 
Authored: Thu Jun 29 16:13:04 2017 -0700
Committer: Mingliang Liu 
Committed: Fri Jun 30 16:53:48 2017 -0700

--
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  39 +--
 .../fs/azure/RemoteSASKeyGeneratorImpl.java | 268 +++
 .../fs/azure/RemoteWasbAuthorizerImpl.java  | 225 ++--
 .../fs/azure/SecureWasbRemoteCallHelper.java| 210 +++
 .../hadoop/fs/azure/WasbRemoteCallHelper.java   | 259 +-
 .../hadoop/fs/azure/security/Constants.java |  20 +-
 .../hadoop/fs/azure/security/JsonUtils.java |  52 
 .../RemoteWasbDelegationTokenManager.java   | 162 +++
 .../hadoop/fs/azure/security/SecurityUtils.java |  86 --
 .../hadoop/fs/azure/security/TokenUtils.java|  60 +
 .../security/WasbDelegationTokenManager.java|  54 
 .../fs/azure/security/WasbTokenRenewer.java |  77 +-
 .../hadoop-azure/src/site/markdown/index.md |  44 ++-
 .../TestNativeAzureFileSystemAuthorization.java |   2 +-
 .../fs/azure/TestWasbRemoteCallHelper.java  | 228 +---
 15 files changed, 1170 insertions(+), 616 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 22f79ff..f92 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -27,9 +27,7 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.net.URL;
 import java.nio.charset.Charset;
-import java.security.PrivilegedExceptionAction;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Date;
@@ -65,15 +63,14 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
 import org.apache.hadoop.fs.azure.metrics.AzureFileSystemMetricsSystem;
 import org.apache.hadoop.fs.azure.security.Constants;
-import org.apache.hadoop.fs.azure.security.SecurityUtils;
+import org.apache.hadoop.fs.azure.security.RemoteWasbDelegationTokenManager;
+import org.apache.hadoop.fs.azure.security.WasbDelegationTokenManager;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
-import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
-import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
@@ -1177,7 +1174,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
   private UserGroupInformation ugi;
 
-  private String delegationToken = null;
+  private WasbDelegationTokenManager wasbDelegationTokenManager;
 
   public NativeAzureFileSystem() {
 // set store in initialize()
@@ -1327,9 +1324,7 @@ public class NativeAzureFileSystem extends FileSystem {
 }
 
 if (UserGroupInformation.isSecurityEnabled() && kerberosSupportEnabled) {
-  DelegationTokenAuthenticator authenticator = new KerberosDelegationTokenAuthenticator();
-  authURL = new DelegationTokenAuthenticatedURL(authenticator);
-  credServiceUrl = SecurityUtils.getCredServiceUrls(conf);
+  this.wasbDelegationTokenManager = new RemoteWasbDelegationTokenManager(conf);
 }
   }
 
@@ -3002,31 +2997,7 @@ public class NativeAzureFileSystem extends FileSystem {
   @Override
   public synchronized Token getDelegationToken(final String renewer) throws 
IOException {
 if (kerberosSupportEnabled) {
-  try {
-final 

[1/2] hadoop git commit: HADOOP-14443. Azure: Support retry and client side failover for authorization, SASKey and delegation token generation. Contributed by Santhosh G Nayak

2017-06-30 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/trunk bcba844d1 -> 38996fdcf


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
index a0276cb5..fbd7f62 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
@@ -46,7 +46,7 @@ public class TestNativeAzureFileSystemAuthorization
   protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
 Configuration conf = new Configuration();
 conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
-conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URL, "http://localhost/");
+conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost/");
 return AzureBlobStorageTestAccount.create(conf);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38996fdc/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
index 77be1b8..f459b24 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
@@ -21,34 +21,48 @@ package org.apache.hadoop.fs.azure;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.http.*;
+import org.apache.hadoop.io.retry.RetryUtils;
+import org.apache.http.Header;
+import org.apache.http.HttpResponse;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpStatus;
+import org.apache.http.StatusLine;
+import org.apache.http.ProtocolVersion;
+import org.apache.http.ParseException;
+import org.apache.http.HeaderElement;
 import org.apache.http.client.HttpClient;
 import org.apache.http.client.methods.HttpGet;
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
 import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.mockito.ArgumentMatcher;
 import org.mockito.Mockito;
 
 import java.io.ByteArrayInputStream;
-import java.io.UnsupportedEncodingException;
-import java.net.URLEncoder;
 import java.nio.charset.StandardCharsets;
 
 import static 
org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECURE_MODE;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.times;
 
 /**
  * Test class to hold all WasbRemoteCallHelper tests
  */
 public class TestWasbRemoteCallHelper
 extends AbstractWasbTestBase {
+  public static final String EMPTY_STRING = "";
+  private static final int INVALID_HTTP_STATUS_CODE_999 = 999;
 
   @Override
   protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
 Configuration conf = new Configuration();
 conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
-conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URL, "http://localhost/");
+conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost1/,http://localhost2/");
 return AzureBlobStorageTestAccount.create(conf);
   }
 
@@ -80,7 +94,7 @@ public class TestWasbRemoteCallHelper
 HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
 HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
 
Mockito.when(mockHttpClient.execute(Mockito.any())).thenReturn(mockHttpResponse);
-Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(999));
+Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(INVALID_HTTP_STATUS_CODE_999));
 // finished setting up mocks
 
 performop(mockHttpClient);
@@ -99,7 +113,7 @@ public class TestWasbRemoteCallHelper
 HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
 HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
 
Mockito.when(mockHttpClient.execute(Mockito.any())).thenReturn(mockHttpResponse);
-Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(200));
+
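
A sketch (not part of the commit) of how a client enables the new retry and
fail-over behaviour, mirroring the test setup above. The constant names come
from the patch; the host names are placeholders and hadoop-azure is assumed to
be on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
import org.apache.hadoop.fs.azure.RemoteWasbAuthorizerImpl;

public class WasbFailoverConfigSketch {
  public static Configuration buildConf() {
    Configuration conf = new Configuration();
    conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
    // Comma-separated list of authorization/SASKey service URLs; the remote
    // call helper retries and fails over across them in order.
    conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS,
        "http://authz-host1:8080/,http://authz-host2:8080/");   // placeholder hosts
    return conf;
  }
}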

hadoop git commit: HDFS-12063. Ozone: Ozone shell: Multiple RPC calls for put/get key. Contributed by Yiqun Lin.

2017-06-30 Thread cliang
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 ba647764f -> 0e0893930


HDFS-12063. Ozone: Ozone shell: Multiple RPC calls for put/get key. Contributed 
by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e089393
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e089393
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e089393

Branch: refs/heads/HDFS-7240
Commit: 0e0893930c75c05e09d6176e5108bcf444ff3bf8
Parents: ba64776
Author: Chen Liang 
Authored: Fri Jun 30 14:36:26 2017 -0700
Committer: Chen Liang 
Committed: Fri Jun 30 14:36:26 2017 -0700

--
 .../hadoop/ozone/web/client/OzoneBucket.java|  13 +-
 .../ozone/web/client/OzoneRestClient.java   | 135 ++-
 .../hadoop/ozone/web/client/OzoneVolume.java|  10 +-
 .../hadoop/ozone/web/exceptions/ErrorTable.java |   4 +
 .../web/handlers/BucketProcessTemplate.java |   2 +-
 .../ozone/web/handlers/KeyProcessTemplate.java  |   2 +-
 .../web/handlers/VolumeProcessTemplate.java |   2 +-
 .../web/localstorage/OzoneMetadataManager.java  |   8 +-
 .../ozone/web/ozShell/keys/GetKeyHandler.java   |   6 +-
 .../ozone/web/ozShell/keys/ListKeyHandler.java  |   6 +-
 .../ozone/web/ozShell/keys/PutKeyHandler.java   |   4 +-
 .../hadoop/ozone/web/utils/OzoneUtils.java  |  20 +--
 .../org/apache/hadoop/ozone/web/TestUtils.java  |   6 +-
 .../hadoop/ozone/web/client/TestKeys.java   | 103 --
 14 files changed, 261 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e089393/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
index 7aedf96..1d8e31e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
@@ -249,8 +249,8 @@ public class OzoneBucket {
* @throws OzoneException
* @throws IOException
*/
-  private void executePutKey(HttpPut putRequest, CloseableHttpClient 
httpClient)
-  throws OzoneException, IOException {
+  public static void executePutKey(HttpPut putRequest,
+  CloseableHttpClient httpClient) throws OzoneException, IOException {
 HttpEntity entity = null;
 try {
   HttpResponse response = httpClient.execute(putRequest);
@@ -354,8 +354,8 @@ public class OzoneBucket {
* @throws IOException
* @throws OzoneException
*/
-  private void executeGetKey(HttpGet getRequest, CloseableHttpClient 
httpClient,
- OutputStream stream)
+  public static void executeGetKey(HttpGet getRequest,
+  CloseableHttpClient httpClient, OutputStream stream)
   throws IOException, OzoneException {
 
 HttpEntity entity = null;
@@ -477,9 +477,8 @@ public class OzoneBucket {
* @throws IOException
* @throws OzoneException
*/
-  private List<OzoneKey> executeListKeys(HttpGet getRequest,
-  CloseableHttpClient httpClient)
-  throws IOException, OzoneException {
+  public static List<OzoneKey> executeListKeys(HttpGet getRequest,
+  CloseableHttpClient httpClient) throws IOException, OzoneException {
 HttpEntity entity = null;
 List<OzoneKey> ozoneKeyList = new LinkedList<>();
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e089393/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
index 2bc389f..9b8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
@@ -18,6 +18,10 @@
 package org.apache.hadoop.ozone.web.client;
 
 import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.OzoneClientUtils;
 import org.apache.hadoop.ozone.web.exceptions.OzoneException;
 import org.apache.hadoop.ozone.web.headers.Header;
@@ -33,14 +37,20 @@ import org.apache.http.client.methods.HttpPost;
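
A sketch (not part of the patch) of why the helpers above became public static:
callers such as OzoneRestClient can drive put/get/list through one shared HTTP
client instead of building a new client, and extra round trips, per call. The
URL is a placeholder and a real request also needs the headers/entity that the
shell handlers normally set.

import org.apache.hadoop.ozone.web.client.OzoneBucket;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;

public class SharedClientPutKeySketch {
  public static void main(String[] args) throws Exception {
    try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
      HttpPut put = new HttpPut("http://localhost:9880/volume1/bucket1/key1");
      // One client instance is reused across the put/get/list helpers.
      OzoneBucket.executePutKey(put, httpClient);
    }
  }
}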
 

hadoop git commit: HDFS-12042. Lazy initialize AbstractINodeDiffList#diffs for snapshots to reduce memory consumption. Contributed by Misha Dmitriev.

2017-06-30 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 f6fcbf426 -> 802724630


HDFS-12042. Lazy initialize AbstractINodeDiffList#diffs for snapshots to reduce 
memory consumption. Contributed by Misha Dmitriev.

(cherry picked from commit bcba844d1144cc334e2babbc34c9d42eac1c203a)
(cherry picked from commit 94bc5cdbb3d0fd9133a8cab9d3daac6798e8d8dd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80272463
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80272463
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80272463

Branch: refs/heads/branch-2.8
Commit: 8027246306378a479de0c6fe74b1f56239567396
Parents: f6fcbf4
Author: Wei-Chiu Chuang 
Authored: Fri Jun 30 10:28:01 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Jun 30 11:43:41 2017 -0700

--
 .../hdfs/server/namenode/INodeDirectory.java|  7 ++-
 .../snapshot/AbstractINodeDiffList.java | 53 +++-
 .../namenode/TestTruncateQuotaUpdate.java   |  1 +
 3 files changed, 46 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/80272463/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 24c8815..c345440 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -65,8 +65,11 @@ public class INodeDirectory extends INodeWithAdditionalFields
 return inode.asDirectory(); 
   }
 
-  protected static final int DEFAULT_FILES_PER_DIRECTORY = 5;
-  final static byte[] ROOT_NAME = DFSUtil.string2Bytes("");
+  // Profiling shows that most of the file lists are between 1 and 4 elements.
+  // Thus allocate the corresponding ArrayLists with a small initial capacity.
+  public static final int DEFAULT_FILES_PER_DIRECTORY = 2;
+
+  static final byte[] ROOT_NAME = DFSUtil.string2Bytes("");
 
  private List<INode> children = null;
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/80272463/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
index 64825f1..df70958 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
@@ -24,6 +24,7 @@ import java.util.List;
 
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 
 /**
  * A list of snapshot diffs for storing snapshot data.
@@ -35,17 +36,19 @@ abstract class AbstractINodeDiffList> 
 implements Iterable {
-  /** Diff list sorted by snapshot IDs, i.e. in chronological order. */
-  private final List<D> diffs = new ArrayList<>();
+  /** Diff list sorted by snapshot IDs, i.e. in chronological order.
+* Created lazily to avoid wasting memory by empty lists. */
+  private List<D> diffs;
 
   /** @return this list as a unmodifiable {@link List}. */
   public final List<D> asList() {
-return Collections.unmodifiableList(diffs);
+return diffs != null ?
+Collections.unmodifiableList(diffs) : Collections.emptyList();
   }
   
-  /** Get the size of the list and then clear it. */
+  /** Clear the list. */
   public void clear() {
-diffs.clear();
+diffs = null;
   }
 
   /** @return an {@link AbstractINodeDiff}. */
@@ -66,6 +69,9 @@ abstract class AbstractINodeDiffList 0) {
@@ -103,6 +112,7 @@ abstract class AbstractINodeDiffList(INodeDirectory.DEFAULT_FILES_PER_DIRECTORY);
+}
   }
 
   /** @return the id of the last snapshot. */
@@ -139,10 +159,14 @@ abstract class AbstractINodeDiffList 0) {
@@ -275,11 +302,11 @@ abstract class AbstractINodeDiffList iterator() {
-return diffs.iterator();
+return diffs != null ? diffs.iterator() : Collections.emptyIterator();
   }
 
   @Override
   public String toString() {
-return 

hadoop git commit: HDFS-12042. Lazy initialize AbstractINodeDiffList#diffs for snapshots to reduce memory consumption. Contributed by Misha Dmitriev.

2017-06-30 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2d6995292 -> 94bc5cdbb


HDFS-12042. Lazy initialize AbstractINodeDiffList#diffs for snapshots to reduce 
memory consumption. Contributed by Misha Dmitriev.

(cherry picked from commit bcba844d1144cc334e2babbc34c9d42eac1c203a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94bc5cdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94bc5cdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94bc5cdb

Branch: refs/heads/branch-2
Commit: 94bc5cdbb3d0fd9133a8cab9d3daac6798e8d8dd
Parents: 2d69952
Author: Wei-Chiu Chuang 
Authored: Fri Jun 30 10:28:01 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Jun 30 11:42:51 2017 -0700

--
 .../hdfs/server/namenode/INodeDirectory.java|  7 ++-
 .../snapshot/AbstractINodeDiffList.java | 53 +++-
 .../namenode/TestTruncateQuotaUpdate.java   |  1 +
 3 files changed, 46 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94bc5cdb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 24c8815..c345440 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -65,8 +65,11 @@ public class INodeDirectory extends INodeWithAdditionalFields
 return inode.asDirectory(); 
   }
 
-  protected static final int DEFAULT_FILES_PER_DIRECTORY = 5;
-  final static byte[] ROOT_NAME = DFSUtil.string2Bytes("");
+  // Profiling shows that most of the file lists are between 1 and 4 elements.
+  // Thus allocate the corresponding ArrayLists with a small initial capacity.
+  public static final int DEFAULT_FILES_PER_DIRECTORY = 2;
+
+  static final byte[] ROOT_NAME = DFSUtil.string2Bytes("");
 
  private List<INode> children = null;
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94bc5cdb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
index 64825f1..df70958 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
@@ -24,6 +24,7 @@ import java.util.List;
 
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 
 /**
  * A list of snapshot diffs for storing snapshot data.
@@ -35,17 +36,19 @@ abstract class AbstractINodeDiffList> 
 implements Iterable {
-  /** Diff list sorted by snapshot IDs, i.e. in chronological order. */
-  private final List<D> diffs = new ArrayList<>();
+  /** Diff list sorted by snapshot IDs, i.e. in chronological order.
+* Created lazily to avoid wasting memory by empty lists. */
+  private List<D> diffs;
 
   /** @return this list as a unmodifiable {@link List}. */
   public final List<D> asList() {
-return Collections.unmodifiableList(diffs);
+return diffs != null ?
+Collections.unmodifiableList(diffs) : Collections.emptyList();
   }
   
-  /** Get the size of the list and then clear it. */
+  /** Clear the list. */
   public void clear() {
-diffs.clear();
+diffs = null;
   }
 
   /** @return an {@link AbstractINodeDiff}. */
@@ -66,6 +69,9 @@ abstract class AbstractINodeDiffList 0) {
@@ -103,6 +112,7 @@ abstract class AbstractINodeDiffList(INodeDirectory.DEFAULT_FILES_PER_DIRECTORY);
+}
   }
 
   /** @return the id of the last snapshot. */
@@ -139,10 +159,14 @@ abstract class AbstractINodeDiffList 0) {
@@ -275,11 +302,11 @@ abstract class AbstractINodeDiffList iterator() {
-return diffs.iterator();
+return diffs != null ? diffs.iterator() : Collections.emptyIterator();
   }
 
   @Override
   public String toString() {
-return getClass().getSimpleName() + ": " + diffs;
+return 

hadoop git commit: HDFS-12042. Lazy initialize AbstractINodeDiffList#diffs for snapshots to reduce memory consumption. Contributed by Misha Dmitriev.

2017-06-30 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6a9dc5f44 -> bcba844d1


HDFS-12042. Lazy initialize AbstractINodeDiffList#diffs for snapshots to reduce 
memory consumption. Contributed by Misha Dmitriev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcba844d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcba844d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcba844d

Branch: refs/heads/trunk
Commit: bcba844d1144cc334e2babbc34c9d42eac1c203a
Parents: 6a9dc5f
Author: Wei-Chiu Chuang 
Authored: Fri Jun 30 10:28:01 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Jun 30 10:28:01 2017 -0700

--
 .../hdfs/server/namenode/INodeDirectory.java|  7 ++-
 .../snapshot/AbstractINodeDiffList.java | 53 +++-
 .../namenode/TestTruncateQuotaUpdate.java   |  1 +
 3 files changed, 46 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcba844d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index a29a118..4012783 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -65,8 +65,11 @@ public class INodeDirectory extends INodeWithAdditionalFields
 return inode.asDirectory(); 
   }
 
-  protected static final int DEFAULT_FILES_PER_DIRECTORY = 5;
-  final static byte[] ROOT_NAME = DFSUtil.string2Bytes("");
+  // Profiling shows that most of the file lists are between 1 and 4 elements.
+  // Thus allocate the corresponding ArrayLists with a small initial capacity.
+  public static final int DEFAULT_FILES_PER_DIRECTORY = 2;
+
+  static final byte[] ROOT_NAME = DFSUtil.string2Bytes("");
 
  private List<INode> children = null;
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcba844d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
index 64825f1..98d8c53 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
@@ -24,6 +24,7 @@ import java.util.List;
 
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 
 /**
  * A list of snapshot diffs for storing snapshot data.
@@ -35,17 +36,19 @@ abstract class AbstractINodeDiffList> 
 implements Iterable {
-  /** Diff list sorted by snapshot IDs, i.e. in chronological order. */
-  private final List<D> diffs = new ArrayList<>();
+  /** Diff list sorted by snapshot IDs, i.e. in chronological order.
+* Created lazily to avoid wasting memory by empty lists. */
+  private List<D> diffs;
 
   /** @return this list as a unmodifiable {@link List}. */
   public final List<D> asList() {
-return Collections.unmodifiableList(diffs);
+return diffs != null ?
+Collections.unmodifiableList(diffs) : Collections.emptyList();
   }
   
-  /** Get the size of the list and then clear it. */
+  /** Clear the list. */
   public void clear() {
-diffs.clear();
+diffs = null;
   }
 
   /** @return an {@link AbstractINodeDiff}. */
@@ -66,6 +69,9 @@ abstract class AbstractINodeDiffList 0) {
@@ -103,6 +112,7 @@ abstract class AbstractINodeDiffList(INodeDirectory.DEFAULT_FILES_PER_DIRECTORY);
+}
   }
 
   /** @return the id of the last snapshot. */
@@ -139,10 +159,14 @@ abstract class AbstractINodeDiffList 0) {
@@ -275,11 +302,11 @@ abstract class AbstractINodeDiffList iterator() {
-return diffs.iterator();
+return diffs != null ? diffs.iterator() : Collections.emptyIterator();
   }
 
   @Override
   public String toString() {
-return getClass().getSimpleName() + ": " + diffs;
+return getClass().getSimpleName() + ": " + (diffs != null ? diffs : "[]");
   }
 }
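
The lazy-initialization pattern applied by this change, reduced to a minimal
standalone sketch (not the real class, which is generic over the diff type;
String is used here only for brevity).

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class LazyDiffListSketch {
  // DEFAULT_FILES_PER_DIRECTORY in the patch; most lists hold 1 to 4 elements.
  private static final int INITIAL_CAPACITY = 2;
  // Created on first write instead of eagerly, so empty lists cost nothing.
  private List<String> diffs;

  List<String> asList() {
    return diffs != null
        ? Collections.unmodifiableList(diffs) : Collections.emptyList();
  }

  void add(String diff) {
    if (diffs == null) {
      diffs = new ArrayList<>(INITIAL_CAPACITY);
    }
    diffs.add(diff);
  }

  void clear() {
    diffs = null;  // drop the backing list entirely rather than keep an empty one
  }
}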


[1/2] hadoop git commit: Revert "HDFS-12043. Add counters for block re-replication. Contributed by Chen Liang."

2017-06-30 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3be2659f8 -> 6a9dc5f44


Revert "HDFS-12043. Add counters for block re-replication. Contributed by Chen 
Liang."

Accidentally committed the wrong patch version, reverting to fix that.

This reverts commit 900221f95ea9fe1936b4d5f277e6047ee8734eca.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2f0cbd9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2f0cbd9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2f0cbd9

Branch: refs/heads/trunk
Commit: a2f0cbd92f7e90909cf817c261a5fae13a9695b4
Parents: 3be2659
Author: Arpit Agarwal 
Authored: Fri Jun 30 10:19:27 2017 -0700
Committer: Arpit Agarwal 
Committed: Fri Jun 30 10:19:27 2017 -0700

--
 .../server/blockmanagement/BlockManager.java| 13 +--
 .../PendingReconstructionBlocks.java|  8 +-
 .../namenode/metrics/NameNodeMetrics.java   | 18 
 .../TestPendingReconstruction.java  | 86 +---
 4 files changed, 7 insertions(+), 118 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2f0cbd9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a5ee30b..a0c4698 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1851,7 +1851,7 @@ public class BlockManager implements BlockStatsMXBean {
 (pendingReplicaNum > 0 || isPlacementPolicySatisfied(block));
   }
 
-  BlockReconstructionWork scheduleReconstruction(BlockInfo block,
+  private BlockReconstructionWork scheduleReconstruction(BlockInfo block,
   int priority) {
 // skip abandoned block or block reopened for append
 if (block.isDeleted() || !block.isCompleteOrCommitted()) {
@@ -1873,7 +1873,6 @@ public class BlockManager implements BlockStatsMXBean {
 if(srcNodes == null || srcNodes.length == 0) {
   // block can not be reconstructed from any node
   LOG.debug("Block {} cannot be reconstructed from any node", block);
-  NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
   return null;
 }
 
@@ -1886,7 +1885,6 @@ public class BlockManager implements BlockStatsMXBean {
   neededReconstruction.remove(block, priority);
   blockLog.debug("BLOCK* Removing {} from neededReconstruction as" +
   " it has enough replicas", block);
-  NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
   return null;
 }
 
@@ -1902,7 +1900,6 @@ public class BlockManager implements BlockStatsMXBean {
 if (block.isStriped()) {
   if (pendingNum > 0) {
 // Wait the previous reconstruction to finish.
-NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
 return null;
   }
 
@@ -3730,8 +3727,8 @@ public class BlockManager implements BlockStatsMXBean {
* The given node is reporting that it received a certain block.
*/
   @VisibleForTesting
-  public void addBlock(DatanodeStorageInfo storageInfo, Block block,
-  String delHint) throws IOException {
+  void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint)
+  throws IOException {
 DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
 // Decrement number of blocks scheduled to this datanode.
 // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with 
@@ -3754,9 +3751,7 @@ public class BlockManager implements BlockStatsMXBean {
 BlockInfo storedBlock = getStoredBlock(block);
 if (storedBlock != null &&
 block.getGenerationStamp() == storedBlock.getGenerationStamp()) {
-  if (pendingReconstruction.decrement(storedBlock, node)) {
-NameNode.getNameNodeMetrics().incSuccessfulReReplications();
-  }
+  pendingReconstruction.decrement(storedBlock, node);
 }
 processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
 delHintNode);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2f0cbd9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
--
diff --git 

[2/2] hadoop git commit: HDFS-12043. Add counters for block re-replication. Contributed by Chen Liang.

2017-06-30 Thread arp
HDFS-12043. Add counters for block re-replication. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a9dc5f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a9dc5f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a9dc5f4

Branch: refs/heads/trunk
Commit: 6a9dc5f44b0c7945e3e9a56248cd4ff80d5c8f0f
Parents: a2f0cbd
Author: Arpit Agarwal 
Authored: Fri Jun 30 10:20:12 2017 -0700
Committer: Arpit Agarwal 
Committed: Fri Jun 30 10:20:12 2017 -0700

--
 .../server/blockmanagement/BlockManager.java| 13 ++-
 .../PendingReconstructionBlocks.java|  8 +-
 .../namenode/metrics/NameNodeMetrics.java   | 18 
 .../TestPendingReconstruction.java  | 90 +++-
 4 files changed, 122 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a9dc5f4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a0c4698..a5ee30b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1851,7 +1851,7 @@ public class BlockManager implements BlockStatsMXBean {
 (pendingReplicaNum > 0 || isPlacementPolicySatisfied(block));
   }
 
-  private BlockReconstructionWork scheduleReconstruction(BlockInfo block,
+  BlockReconstructionWork scheduleReconstruction(BlockInfo block,
   int priority) {
 // skip abandoned block or block reopened for append
 if (block.isDeleted() || !block.isCompleteOrCommitted()) {
@@ -1873,6 +1873,7 @@ public class BlockManager implements BlockStatsMXBean {
 if(srcNodes == null || srcNodes.length == 0) {
   // block can not be reconstructed from any node
   LOG.debug("Block {} cannot be reconstructed from any node", block);
+  NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
   return null;
 }
 
@@ -1885,6 +1886,7 @@ public class BlockManager implements BlockStatsMXBean {
   neededReconstruction.remove(block, priority);
   blockLog.debug("BLOCK* Removing {} from neededReconstruction as" +
   " it has enough replicas", block);
+  NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
   return null;
 }
 
@@ -1900,6 +1902,7 @@ public class BlockManager implements BlockStatsMXBean {
 if (block.isStriped()) {
   if (pendingNum > 0) {
 // Wait the previous reconstruction to finish.
+NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
 return null;
   }
 
@@ -3727,8 +3730,8 @@ public class BlockManager implements BlockStatsMXBean {
* The given node is reporting that it received a certain block.
*/
   @VisibleForTesting
-  void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint)
-  throws IOException {
+  public void addBlock(DatanodeStorageInfo storageInfo, Block block,
+  String delHint) throws IOException {
 DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
 // Decrement number of blocks scheduled to this datanode.
 // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with 
@@ -3751,7 +3754,9 @@ public class BlockManager implements BlockStatsMXBean {
 BlockInfo storedBlock = getStoredBlock(block);
 if (storedBlock != null &&
 block.getGenerationStamp() == storedBlock.getGenerationStamp()) {
-  pendingReconstruction.decrement(storedBlock, node);
+  if (pendingReconstruction.decrement(storedBlock, node)) {
+NameNode.getNameNodeMetrics().incSuccessfulReReplications();
+  }
 }
 processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
 delHintNode);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a9dc5f4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
index 2221d1d..0f20daa 100644
--- 
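
A stand-in sketch (not HDFS code) of the counting pattern restored here:
decrement() reports whether the block was actually being tracked, and only a
true return bumps the successful re-replication counter, mirroring the
pendingReconstruction.decrement() / incSuccessfulReReplications() pairing in
the BlockManager hunk above.

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;

class ReReplicationCounterSketch {
  private final Set<String> pending = new HashSet<>();   // stands in for PendingReconstructionBlocks
  private final AtomicLong successfulReReplications = new AtomicLong();

  boolean decrement(String blockId) {
    // true only if this block was tracked as a pending re-replication
    return pending.remove(blockId);
  }

  void blockReceived(String blockId) {
    if (decrement(blockId)) {
      successfulReReplications.incrementAndGet();  // mirrors incSuccessfulReReplications()
    }
  }
}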

hadoop git commit: YARN-6749. TestAppSchedulingInfo.testPriorityAccounting fails consistently. Contributed by Naganarasimha G R

2017-06-30 Thread bibinchundatt
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 2956c584c -> f6fcbf426


YARN-6749. TestAppSchedulingInfo.testPriorityAccounting fails consistently. 
Contributed by Naganarasimha G R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6fcbf42
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6fcbf42
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6fcbf42

Branch: refs/heads/branch-2.8
Commit: f6fcbf426fd0489049c7ef498f6d04f688cb7fd1
Parents: 2956c58
Author: bibinchundatt 
Authored: Fri Jun 30 21:52:16 2017 +0530
Committer: bibinchundatt 
Committed: Fri Jun 30 21:52:16 2017 +0530

--
 .../resourcemanager/scheduler/TestAppSchedulingInfo.java   | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6fcbf42/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java
index 6981f2b..b74f4f9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
-import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -106,7 +106,7 @@ public class TestAppSchedulingInfo {
 
 // iterate to verify no ConcurrentModificationException
 for (Priority priority: info.getPriorities()) {
-  info.allocate(NodeType.OFF_SWITCH, null, priority, req1, null);
+  info.allocate(NodeType.OFF_SWITCH, mock(SchedulerNode.class), priority, 
req1, null);
 }
 Assert.assertEquals(1, info.getPriorities().size());
 Assert.assertEquals(req2.getPriority(),
@@ -117,7 +117,7 @@ public class TestAppSchedulingInfo {
 reqs.clear();
 reqs.add(req2);
 info.updateResourceRequests(reqs, false);
-info.allocate(NodeType.OFF_SWITCH, null, req2.getPriority(), req2, null);
+info.allocate(NodeType.OFF_SWITCH, mock(SchedulerNode.class), 
req2.getPriority(), req2, null);
 Assert.assertEquals(0, info.getPriorities().size());
 
 req1 = ResourceRequest.newInstance(pri1,





hadoop git commit: YARN-6655. Fix compilation failure in TestServiceApiUtil due to changes in YARN-6716.

2017-06-30 Thread billie
Repository: hadoop
Updated Branches:
  refs/heads/yarn-native-services 80a6e46f3 -> c0e1c0be3


YARN-6655. Fix compilation failure in TestServiceApiUtil due to changes in 
YARN-6716.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0e1c0be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0e1c0be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0e1c0be

Branch: refs/heads/yarn-native-services
Commit: c0e1c0be308456e3953533cfb3a2147926e81489
Parents: 80a6e46
Author: Billie Rinaldi 
Authored: Fri Jun 30 09:12:29 2017 -0700
Committer: Billie Rinaldi 
Committed: Fri Jun 30 09:12:29 2017 -0700

--
 .../src/test/java/org/apache/slider/utils/TestServiceApiUtil.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0e1c0be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
index 889cc04..28f36de 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
@@ -463,7 +463,8 @@ public class TestServiceApiUtil {
 Application application = createValidApplication(null);
 application.setComponents(Arrays.asList(c, d, e));
 try {
-  ServiceApiUtil.validateAndResolveApplication(application, sfs);
+  ServiceApiUtil.validateAndResolveApplication(application, sfs,
+  CONF_DEFAULT_DNS);
   Assert.fail(EXCEPTION_PREFIX + "components with bad dependencies");
 } catch (IllegalArgumentException ex) {
   assertEquals(String.format(





[19/35] hadoop git commit: HADOOP-14297. Update the documentation about the new ec codecs config keys. Contributed by Kai Sasaki.

2017-06-30 Thread stevel
HADOOP-14297. Update the documentation about the new ec codecs config keys. 
Contributed by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9d8bdfd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9d8bdfd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9d8bdfd

Branch: refs/heads/HADOOP-13345
Commit: e9d8bdfdf576340196843dae92551cc36a87e95f
Parents: d6df0fd
Author: Wei-Chiu Chuang 
Authored: Wed Jun 28 13:53:15 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Wed Jun 28 13:53:54 2017 -0700

--
 .../hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md   | 8 +++-
 1 file changed, 3 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9d8bdfd/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 69e8ef2..1c0a2de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -123,7 +123,7 @@ Deployment
   `io.erasurecode.codec.xor.rawcoders` for the XOR codec.
   User can also configure self-defined codec with configuration key like:
   `io.erasurecode.codec.self-defined-codec.rawcoders`.
-  The values for these key are lists of coder names with a fall-back mechanism.
+  The values for these key are lists of coder names with a fall-back 
mechanism. These codec factories are loaded in the order specified by the 
configuration values, until a codec is loaded successfully. The default RS and 
XOR codec configuration prefers native implementation over the pure Java one. 
There is no RS-LEGACY native codec implementation so the default is pure Java 
implementation only.
   All these codecs have implementations in pure Java. For default RS codec, 
there is also a native implementation which leverages Intel ISA-L library to 
improve the performance of codec. For XOR codec, a native implementation which 
leverages Intel ISA-L library to improve the performance of codec is also 
supported. Please refer to section "Enable Intel ISA-L" for more detail 
information.
   The default implementation for RS Legacy is pure Java, and the default 
implementations for default RS and XOR are native implementations using Intel 
ISA-L library.
 
@@ -138,13 +138,11 @@ Deployment
 
   HDFS native implementation of default RS codec leverages Intel ISA-L library 
to improve the encoding and decoding calculation. To enable and use Intel 
ISA-L, there are three steps.
  1. Build ISA-L library. Please refer to the official site "https://github.com/01org/isa-l/" for detail information.
-  2. Build Hadoop with ISA-L support. Please refer to "Intel ISA-L build 
options" section in "Build instructions for Hadoop" in (BUILDING.txt) in the 
source code. Use `-Dbundle.isal` to copy the contents of the `isal.lib` 
directory into the final tar file. Deploy Hadoop with the tar file. Make sure 
ISA-L is available on HDFS clients and DataNodes.
-  3. Configure the `io.erasurecode.codec.rs.rawcoder` key with value 
`org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory` on 
HDFS clients and DataNodes.
+  2. Build Hadoop with ISA-L support. Please refer to "Intel ISA-L build 
options" section in "Build instructions for Hadoop" in (BUILDING.txt) in the 
source code.
+  3. Use `-Dbundle.isal` to copy the contents of the `isal.lib` directory into 
the final tar file. Deploy Hadoop with the tar file. Make sure ISA-L is 
available on HDFS clients and DataNodes.
 
   To verify that ISA-L is correctly detected by Hadoop, run the `hadoop 
checknative` command.
 
-  To enable the native implementation of the XOR codec, perform the same first 
two steps as above to build and deploy Hadoop with ISA-L support. Afterwards, 
configure the `io.erasurecode.codec.xor.rawcoder` key with 
`org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawErasureCoderFactory` on 
both HDFS client and DataNodes.
-
 ### Administrative commands
 
   HDFS provides an `ec` subcommand to perform administrative commands related 
to erasure coding.
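
An illustrative sketch of the fall-back coder configuration described above
(not part of the commit). The key names come from the document; the alias
values are assumptions shown only to illustrate the "native first, pure-Java
fallback" ordering.

import org.apache.hadoop.conf.Configuration;

public class EcCoderConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Coder names are tried left to right until one loads successfully.
    conf.set("io.erasurecode.codec.rs.rawcoders", "rs_native,rs_java");     // assumed aliases
    conf.set("io.erasurecode.codec.xor.rawcoders", "xor_native,xor_java");  // assumed aliases
    System.out.println(conf.get("io.erasurecode.codec.rs.rawcoders"));
  }
}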





[33/35] hadoop git commit: Updating version for 3.0.0-beta1 development

2017-06-30 Thread stevel
Updating version for 3.0.0-beta1 development


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af2773f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af2773f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af2773f6

Branch: refs/heads/HADOOP-13345
Commit: af2773f609ba930825bab5d30767757c0e59aac7
Parents: 900221f
Author: Andrew Wang 
Authored: Thu Jun 29 17:57:40 2017 -0700
Committer: Andrew Wang 
Committed: Thu Jun 29 17:57:40 2017 -0700

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-tools/hadoop-sls/pom.xml  | 4 ++--
 hadoop-tools/hadoop-streaming/pom.xml| 4 ++--
 

[03/35] hadoop git commit: HDFS-11993. Add log info when connect to datanode socket address failed. Contributed by chencan

2017-06-30 Thread stevel
HDFS-11993. Add log info when connect to datanode socket address failed. 
Contributed by chencan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9d3412b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9d3412b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9d3412b

Branch: refs/heads/HADOOP-13345
Commit: a9d3412b4ce40f5ab5a18756ede7e0606b653171
Parents: 2c367b4
Author: Ravi Prakash 
Authored: Mon Jun 26 13:24:27 2017 -0700
Committer: Ravi Prakash 
Committed: Mon Jun 26 13:24:27 2017 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java| 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9d3412b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 77f5a92..dcc997c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -585,8 +585,9 @@ public class DFSInputStream extends FSInputStream
   fetchBlockAt(target);
 } else {
   connectFailedOnce = true;
-  DFSClient.LOG.warn("Failed to connect to " + targetAddr + " for block"
-  + ", add to deadNodes and continue. " + ex, ex);
+  DFSClient.LOG.warn("Failed to connect to {} for block {}, " +
+  "add to deadNodes and continue. ", targetAddr,
+  targetBlock.getBlock(), ex);
   // Put chosen node into dead list, continue
   addToDeadNodes(chosenNode);
 }
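
The interesting detail in the new call is an SLF4J convention: when the final argument after the "{}" placeholders is a Throwable, it is not formatted into the message but is still logged with its full stack trace. A minimal, self-contained sketch of that pattern (class and method names here are illustrative, not taken from the patch):

import java.net.InetSocketAddress;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ParameterizedWarnSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ParameterizedWarnSketch.class);

  // Two "{}" placeholders consume targetAddr and blockId; the trailing
  // exception is not substituted into the message, but its stack trace is logged.
  static void warnConnectFailure(InetSocketAddress targetAddr, String blockId,
      Exception ex) {
    LOG.warn("Failed to connect to {} for block {}, add to deadNodes and continue.",
        targetAddr, blockId, ex);
  }

  public static void main(String[] args) {
    warnConnectFailure(new InetSocketAddress("127.0.0.1", 9866), "blk_0001",
        new java.io.IOException("connection refused"));
  }
}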





[26/35] hadoop git commit: HADOOP-14602. allow custom release notes/changelog during create-release

2017-06-30 Thread stevel
HADOOP-14602. allow custom release notes/changelog during create-release

Signed-off-by: Chris Douglas 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c52da7d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c52da7d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c52da7d

Branch: refs/heads/HADOOP-13345
Commit: 0c52da7d3e381ca59cd0ff72d143066a5c28d826
Parents: 16c8dbd
Author: Allen Wittenauer 
Authored: Wed Jun 28 07:37:09 2017 -0700
Committer: Allen Wittenauer 
Committed: Thu Jun 29 08:03:16 2017 -0700

--
 dev-support/bin/create-release | 36 +++-
 1 file changed, 31 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c52da7d/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 94351d3..b22e90b 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -50,6 +50,7 @@ function hadoop_abs
   declare obj=$1
   declare dir
   declare fn
+  declare ret
 
   if [[ ! -e ${obj} ]]; then
 return 1
@@ -62,7 +63,8 @@ function hadoop_abs
   fi
 
   dir=$(cd -P -- "${dir}" >/dev/null 2>/dev/null && pwd -P)
-  if [[ $? = 0 ]]; then
+  ret=$?
+  if [[ ${ret} = 0 ]]; then
 echo "${dir}${fn}"
 return 0
   fi
@@ -287,6 +289,7 @@ function usage
   echo "--mvncache=[path]   Path to the maven cache to use"
   echo "--nativeAlso build the native components"
   echo "--rc-label=[label]  Add this label to the builds"
+  echo "--security  Emergency security release"
   echo "--sign  Use .gnupg dir to sign the artifacts and jars"
   echo "--version=[version] Use an alternative version string"
 }
@@ -330,6 +333,9 @@ function option_parse
   --rc-label=*)
 RC_LABEL=${i#*=}
   ;;
+  --security)
+SECURITYRELEASE=true
+  ;;
   --sign)
 SIGN=true
   ;;
@@ -397,6 +403,14 @@ function option_parse
   MVN_ARGS=("-Dmaven.repo.local=${MVNCACHE}")
 fi
   fi
+
+  if [[ "${SECURITYRELEASE}" = true ]]; then
+if [[ ! -d "${BASEDIR}/hadoop-common-project/hadoop-common/src/site/markdown/release/${HADOOP_VERSION}" ]]; then
+  hadoop_error "ERROR: ${BASEDIR}/hadoop-common-project/hadoop-common/src/site/markdown/release/${HADOOP_VERSION} does not exist."
+  hadoop_error "ERROR: This directory and its contents are required to be manually created for a security release."
+  exit 1
+fi
+  fi
 }
 
 function dockermode
@@ -523,7 +537,7 @@ function makearelease
   big_console_header "Maven Build and Install"
 
   if [[ "${SIGN}" = true ]]; then
-signflags=("-Psign" "-Dgpg.useagent=true" -Dgpg.executable="${GPG}")
+signflags=("-Psign" "-Dgpg.useagent=true" "-Dgpg.executable=${GPG}")
   fi
 
   # Create SRC and BIN tarballs for release,
@@ -534,6 +548,14 @@ function makearelease
   "${signflags[@]}" \
   -DskipTests -Dtar $(hadoop_native_flags)
 
+  if [[ "${SECURITYRELEASE}" = true ]]; then
+DOCFLAGS="-Pdocs"
+hadoop_error "WARNING: Skipping automatic changelog and release notes 
generation due to --security"
+  else
+DOCFLAGS="-Preleasedocs,docs"
+  fi
+
+
   # Create site for release
   # we need to do install again so that jdiff and
   # a few other things get registered in the maven
@@ -542,7 +564,8 @@ function makearelease
 "${MVN}" "${MVN_ARGS[@]}" install \
   site site:stage \
   -DskipTests \
-  -Pdist,src,releasedocs,docs
+  -Pdist,src \
+  "${DOCFLAGS}"
 
   big_console_header "Staging the release"
 
@@ -586,6 +609,7 @@ function makearelease
 function signartifacts
 {
   declare i
+  declare ret
 
   if [[ "${SIGN}" = false ]]; then
 for i in ${ARTIFACTS_DIR}/*; do
@@ -612,7 +636,8 @@ function signartifacts
 ${GPG} --verify --trustdb "${BASEDIR}/target/testkeysdb" \
   "${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}.tar.gz.asc" \
 "${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}.tar.gz"
-if [[ $? != 0 ]]; then
+ret=$?
+if [[ ${ret} != 0 ]]; then
   hadoop_error "ERROR: GPG key is not present in ${PUBKEYFILE}."
   hadoop_error "ERROR: This MUST be fixed. Exiting."
   exit 1
@@ -641,6 +666,7 @@ if [[ "${INDOCKER}" = true || "${DOCKERRAN}" = false ]]; 
then
   startgpgagent
 
   makearelease
+  releaseret=$?
 
   signartifacts
 
@@ -651,7 +677,7 @@ if [[ "${INDOCKER}" = true ]]; then
   exit $?
 fi
 
-if [[ $? == 0 ]]; then
+if [[ ${releaseret} == 0 ]]; then
   echo
   echo "Congratulations, you have successfully built the release"
   echo "artifacts for Apache Hadoop ${HADOOP_VERSION}${RC_LABEL}"


[05/35] hadoop git commit: MAPREDUCE-6904. HADOOP_JOB_HISTORY_OPTS should be HADOOP_JOB_HISTORYSERVER_OPTS in mapred-config.sh (rkanter)

2017-06-30 Thread stevel
MAPREDUCE-6904. HADOOP_JOB_HISTORY_OPTS should be HADOOP_JOB_HISTORYSERVER_OPTS 
in mapred-config.sh (rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b87faf1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b87faf1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b87faf1

Branch: refs/heads/HADOOP-13345
Commit: 2b87faf166321d26e0fd3eecfdcbc74ff5a9d54a
Parents: 144753e
Author: Robert Kanter 
Authored: Mon Jun 26 17:35:55 2017 -0700
Committer: Robert Kanter 
Committed: Mon Jun 26 17:35:55 2017 -0700

--
 hadoop-mapreduce-project/bin/mapred-config.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b87faf1/hadoop-mapreduce-project/bin/mapred-config.sh
--
diff --git a/hadoop-mapreduce-project/bin/mapred-config.sh 
b/hadoop-mapreduce-project/bin/mapred-config.sh
index f370084..8f21d9a 100755
--- a/hadoop-mapreduce-project/bin/mapred-config.sh
+++ b/hadoop-mapreduce-project/bin/mapred-config.sh
@@ -47,7 +47,7 @@ function hadoop_subproject_init
 
   hadoop_deprecate_envvar HADOOP_MAPRED_ROOT_LOGGER HADOOP_ROOT_LOGGER
 
-  hadoop_deprecate_envvar HADOOP_JOB_HISTORY_OPTS MAPRED_HISTORYSERVER_OPTS
+  hadoop_deprecate_envvar HADOOP_JOB_HISTORYSERVER_OPTS MAPRED_HISTORYSERVER_OPTS
 
   HADOOP_MAPRED_HOME="${HADOOP_MAPRED_HOME:-$HADOOP_HOME}"
 





[14/35] hadoop git commit: HADOOP-14515. Addendum. Specifically configure zookeeper-related log levels in KMS log4j.

2017-06-30 Thread stevel
HADOOP-14515. Addendum. Specifically configure zookeeper-related log levels in 
KMS log4j.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb5ee3fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb5ee3fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb5ee3fa

Branch: refs/heads/HADOOP-13345
Commit: fb5ee3fafbe5a7390aa706cdeef12732051b58ee
Parents: a5c0476
Author: Xiao Chen 
Authored: Tue Jun 27 23:48:16 2017 -0700
Committer: Xiao Chen 
Committed: Tue Jun 27 23:49:09 2017 -0700

--
 .../hadoop-kms/src/main/conf/kms-log4j.properties   | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb5ee3fa/hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties 
b/hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties
index 15ff436..04a3cf3 100644
--- a/hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties
+++ b/hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties
@@ -34,4 +34,7 @@ log4j.additivity.kms-audit=false
 
 log4j.rootLogger=INFO, kms
 log4j.logger.org.apache.hadoop=INFO
-log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
\ No newline at end of file
+log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
+# make zookeeper log level an explicit config, and not changing with rootLogger.
+log4j.logger.org.apache.zookeeper=INFO
+log4j.logger.org.apache.curator=INFO
\ No newline at end of file
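
The reason the explicit entries matter: in log4j 1.x a logger with no level of its own inherits the effective level of its nearest configured ancestor, so turning the rootLogger up to DEBUG would otherwise also turn up the very chatty ZooKeeper and Curator loggers. A small stand-alone sketch of that inheritance rule (illustrative only, not KMS code):

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class EffectiveLevelSketch {
  public static void main(String[] args) {
    Logger.getRootLogger().setLevel(Level.DEBUG);

    Logger zk = Logger.getLogger("org.apache.zookeeper");
    // No explicit level yet: the effective level is inherited from root.
    System.out.println(zk.getEffectiveLevel());   // DEBUG

    // Pinning the logger, as kms-log4j.properties now does, decouples it from root.
    zk.setLevel(Level.INFO);
    System.out.println(zk.getEffectiveLevel());   // INFO
  }
}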





[25/35] hadoop git commit: HDFS-11881. NameNode consumes a lot of memory for snapshot diff report generation. Contributed by Manoj Govindassamy.

2017-06-30 Thread stevel
HDFS-11881. NameNode consumes a lot of memory for snapshot diff report 
generation. Contributed by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16c8dbde
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16c8dbde
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16c8dbde

Branch: refs/heads/HADOOP-13345
Commit: 16c8dbde574f49827fde5ee9add1861ee65d4645
Parents: ea1da39
Author: Wei-Chiu Chuang 
Authored: Thu Jun 29 06:38:41 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Jun 29 06:38:41 2017 -0700

--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  5 +-
 .../namenode/snapshot/SnapshotDiffInfo.java |  5 +-
 .../hadoop/hdfs/TestSnapshotCommands.java   | 50 
 3 files changed, 56 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/16c8dbde/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 63d0025..feb3061 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -186,6 +186,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.LimitInputStream;
 
@@ -1455,7 +1456,7 @@ public class PBHelperClient {
 String toSnapshot = reportProto.getToSnapshot();
 List list = reportProto
 .getDiffReportEntriesList();
-List entries = new ArrayList<>();
+List entries = new ChunkedArrayList<>();
 for (SnapshotDiffReportEntryProto entryProto : list) {
   DiffReportEntry entry = convert(entryProto);
   if (entry != null)
@@ -2392,7 +2393,7 @@ public class PBHelperClient {
   return null;
 }
 List entries = report.getDiffList();
-List entryProtos = new ArrayList<>();
+List entryProtos = new ChunkedArrayList<>();
 for (DiffReportEntry entry : entries) {
   SnapshotDiffReportEntryProto entryProto = convert(entry);
   if (entryProto != null)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16c8dbde/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
index a576c57..fcd80ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.util.Diff.ListType;
 
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.SignedBytes;
+import org.apache.hadoop.util.ChunkedArrayList;
 
 /**
  * A class describing the difference between snapshots of a snapshottable
@@ -186,7 +187,7 @@ class SnapshotDiffInfo {
* @return A {@link SnapshotDiffReport} describing the difference
*/
   public SnapshotDiffReport generateReport() {
-List diffReportList = new ArrayList();
+List diffReportList = new ChunkedArrayList<>();
 for (Map.Entry drEntry : diffMap.entrySet()) {
   INode node = drEntry.getKey();
   byte[][] path = drEntry.getValue();
@@ -213,7 +214,7 @@ class SnapshotDiffInfo {
*/
   private List generateReport(ChildrenDiff dirDiff,
   byte[][] parentPath, boolean fromEarlier, Map 
renameMap) {
-List list = new ArrayList();
+List list = new ChunkedArrayList<>();
 List created = dirDiff.getList(ListType.CREATED);
 List deleted = dirDiff.getList(ListType.DELETED);
 byte[][] fullPath = new byte[parentPath.length + 1][];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16c8dbde/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
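
The memory relief comes from swapping ArrayList, whose single backing array doubles and is copied as the report grows, for Hadoop's ChunkedArrayList, which appends fixed-size chunks instead. A hedged sketch of the append-and-iterate usage pattern the report generation needs, with a String payload standing in for DiffReportEntry:

import java.util.List;
import org.apache.hadoop.util.ChunkedArrayList;

public class ChunkedListSketch {
  // Build a potentially huge list without ever copying one contiguous array.
  static List<String> buildEntries(int count) {
    List<String> entries = new ChunkedArrayList<>();
    for (int i = 0; i < count; i++) {
      entries.add("entry-" + i);
    }
    return entries;
  }

  public static void main(String[] args) {
    long total = 0;
    // Sequential iteration is the intended access pattern for this list type.
    for (String e : buildEntries(1_000_000)) {
      total += e.length();
    }
    System.out.println(total);
  }
}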

[31/35] hadoop git commit: HADOOP-14596. AWS SDK 1.11+ aborts() on close() if > 0 bytes in stream; logs error. Contributed by Steve Loughran

2017-06-30 Thread stevel
HADOOP-14596. AWS SDK 1.11+ aborts() on close() if > 0 bytes in stream; logs 
error. Contributed by Steve Loughran

Change-Id: I49173bf6163796903d64594a8ca8a4bd26ad2bfc


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72993b33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72993b33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72993b33

Branch: refs/heads/HADOOP-13345
Commit: 72993b33b704991f2a0bf743f31b164e58a2dabc
Parents: ec97519
Author: Mingliang Liu 
Authored: Thu Jun 29 17:00:25 2017 -0700
Committer: Mingliang Liu 
Committed: Thu Jun 29 17:07:52 2017 -0700

--
 .../apache/hadoop/fs/s3a/S3AInputStream.java| 26 +---
 1 file changed, 22 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72993b33/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
index 7d322a5..b88b7c1 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileSystem;
 
 import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.EOFException;
 import java.io.IOException;
@@ -78,7 +79,8 @@ public class S3AInputStream extends FSInputStream implements 
CanSetReadahead {
   private final String key;
   private final long contentLength;
   private final String uri;
-  public static final Logger LOG = S3AFileSystem.LOG;
+  private static final Logger LOG =
+  LoggerFactory.getLogger(S3AInputStream.class);
   private final S3AInstrumentation.InputStreamStatistics streamStatistics;
   private S3AEncryptionMethods serverSideEncryptionAlgorithm;
   private String serverSideEncryptionKey;
@@ -451,13 +453,27 @@ public class S3AInputStream extends FSInputStream 
implements CanSetReadahead {
   // if the amount of data remaining in the current request is greater
   // than the readahead value: abort.
   long remaining = remainingInCurrentRequest();
+  LOG.debug("Closing stream {}: {}", reason,
+  forceAbort ? "abort" : "soft");
   boolean shouldAbort = forceAbort || remaining > readahead;
   if (!shouldAbort) {
 try {
   // clean close. This will read to the end of the stream,
   // so, while cleaner, can be pathological on a multi-GB object
+
+  // explicitly drain the stream
+  long drained = 0;
+  while (wrappedStream.read() >= 0) {
+drained++;
+  }
+  LOG.debug("Drained stream of {} bytes", drained);
+
+  // now close it
   wrappedStream.close();
-  streamStatistics.streamClose(false, remaining);
+  // this MUST come after the close, so that if the IO operations fail
+  // and an abort is triggered, the initial attempt's statistics
+  // aren't collected.
+  streamStatistics.streamClose(false, drained);
 } catch (IOException e) {
   // exception escalates to an abort
   LOG.debug("When closing {} stream for {}", uri, reason, e);
@@ -467,13 +483,15 @@ public class S3AInputStream extends FSInputStream 
implements CanSetReadahead {
   if (shouldAbort) {
 // Abort, rather than just close, the underlying stream.  Otherwise, 
the
 // remaining object payload is read from S3 while closing the stream.
+LOG.debug("Aborting stream");
 wrappedStream.abort();
 streamStatistics.streamClose(true, remaining);
   }
-  LOG.debug("Stream {} {}: {}; streamPos={}, nextReadPos={}," +
+  LOG.debug("Stream {} {}: {}; remaining={} streamPos={},"
+  + " nextReadPos={}," +
   " request range {}-{} length={}",
   uri, (shouldAbort ? "aborted" : "closed"), reason,
-  pos, nextReadPos,
+  remaining, pos, nextReadPos,
   contentRangeStart, contentRangeFinish,
   length);
   wrappedStream = null;
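
The trade-off the patch codifies: draining and then closing the wrapped HTTP stream lets the connection be pooled and reused, but reading to the end of a large object is expensive, so once the bytes remaining exceed the readahead threshold the request is aborted instead. A plain-Java sketch of that decision, with an ordinary InputStream standing in for the S3 object stream and the abort path reduced to a comment:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class DrainOrAbortSketch {
  /**
   * @return bytes drained on the clean-close path, or -1 if the caller
   *         should abort the underlying request instead.
   */
  static long closeOrAbort(InputStream in, long remaining, long readahead,
      boolean forceAbort) throws IOException {
    boolean shouldAbort = forceAbort || remaining > readahead;
    if (shouldAbort) {
      // The real code calls wrappedStream.abort() here: drop the connection
      // rather than pull the rest of a multi-GB object over the wire.
      return -1;
    }
    long drained = 0;
    while (in.read() >= 0) {   // explicitly drain so the connection can be recycled
      drained++;
    }
    in.close();
    return drained;
  }

  public static void main(String[] args) throws IOException {
    InputStream small = new ByteArrayInputStream(new byte[128]);
    System.out.println(closeOrAbort(small, 128, 65536, false));        // 128: drained and closed
    System.out.println(closeOrAbort(small, 10_000_000, 65536, false)); // -1: abort instead
  }
}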





[12/35] hadoop git commit: HADOOP-14546. Azure: Concurrent I/O does not work when secure.mode is enabled. Contributed by Thomas

2017-06-30 Thread stevel
HADOOP-14546. Azure: Concurrent I/O does not work when secure.mode is enabled. 
Contributed by Thomas


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e031c2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e031c2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e031c2c

Branch: refs/heads/HADOOP-13345
Commit: 7e031c2c18b8812ec9f843ed3b4abe9e6d12bb28
Parents: 686a634
Author: Mingliang Liu 
Authored: Tue Jun 27 17:32:07 2017 -0700
Committer: Mingliang Liu 
Committed: Tue Jun 27 17:32:07 2017 -0700

--
 .../fs/azure/AzureNativeFileSystemStore.java|  4 +-
 .../hadoop/fs/azure/SendRequestIntercept.java   | 91 ++--
 .../fs/azure/AzureBlobStorageTestAccount.java   |  7 ++
 .../azure/TestAzureConcurrentOutOfBandIo.java   |  4 +-
 ...zureConcurrentOutOfBandIoWithSecureMode.java | 50 +++
 5 files changed, 69 insertions(+), 87 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e031c2c/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 3fa1a62..d026220 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -852,7 +852,6 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 rootDirectory = container.getDirectoryReference("");
 
 canCreateOrModifyContainer = true;
-tolerateOobAppends = false;
   }
 
   /**
@@ -1911,8 +1910,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 // If reads concurrent to OOB writes are allowed, the interception will 
reset
 // the conditional header on all Azure blob storage read requests.
 if (bindConcurrentOOBIo) {
-  SendRequestIntercept.bind(storageInteractionLayer.getCredentials(),
-  operationContext, true);
+  SendRequestIntercept.bind(operationContext);
 }
 
 if (testHookOperationContext != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e031c2c/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
index 25c9eb4..924ecd3 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
@@ -35,7 +35,7 @@ import com.microsoft.azure.storage.StorageException;
 
 /**
  * Manages the lifetime of binding on the operation contexts to intercept send
- * request events to Azure storage.
+ * request events to Azure storage and allow concurrent OOB I/Os.
  */
 @InterfaceAudience.Private
 public final class SendRequestIntercept extends 
StorageEvent {
@@ -43,70 +43,22 @@ public final class SendRequestIntercept extends StorageEvent

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e031c2c/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
index b6c252f..2cdc2e7 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
@@ -42,6 +42,7 @@ import java.util.concurrent.ConcurrentLinkedQueue;
 
 import static 
org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.DEFAULT_STORAGE_EMULATOR_ACCOUNT_NAME;
 import static 
org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_LOCAL_SAS_KEY_MODE;
+import static 
org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECURE_MODE;
 
 /**
  * Helper class to create WASB file systems backed by either a mock in-memory
@@ -335,6 +336,11 @@ public final class AzureBlobStorageTestAccount {
 
   public static AzureBlobStorageTestAccount createOutOfBandStore(
   int uploadBlockSize, int 

[08/35] hadoop git commit: HADOOP-14536. Update azure-storage sdk to version 5.3.0 Contributed by Georgi Chalakov

2017-06-30 Thread stevel
HADOOP-14536. Update azure-storage sdk to version 5.3.0
Contributed by Georgi Chalakov


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94e39c6c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94e39c6c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94e39c6c

Branch: refs/heads/HADOOP-13345
Commit: 94e39c6c4e39efadb72d7765ad3cc4ba43e0687a
Parents: 07defa4
Author: Steve Loughran 
Authored: Tue Jun 27 15:09:03 2017 +0100
Committer: Steve Loughran 
Committed: Tue Jun 27 15:09:03 2017 +0100

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94e39c6c/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index afd2006..7909442 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1114,7 +1114,7 @@
   
 com.microsoft.azure
 azure-storage
-4.2.0
+5.3.0
  
 
   





[11/35] hadoop git commit: HADOOP-14573. regression: Azure tests which capture logs failing with move to SLF4J. Contributed by Steve Loughran

2017-06-30 Thread stevel
HADOOP-14573. regression: Azure tests which capture logs failing with move to 
SLF4J. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/686a634f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/686a634f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/686a634f

Branch: refs/heads/HADOOP-13345
Commit: 686a634f01e454537d1a9d531330e60a60233ca4
Parents: 63ce159
Author: Mingliang Liu 
Authored: Tue Jun 27 16:48:47 2017 -0700
Committer: Mingliang Liu 
Committed: Tue Jun 27 16:48:47 2017 -0700

--
 .../TestFileSystemOperationsWithThreads.java| 164 +--
 .../TestNativeAzureFileSystemClientLogging.java |  26 ++-
 2 files changed, 132 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/686a634f/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
index 343391f..ce3cdee 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.azure;
 
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.net.URI;
@@ -27,6 +28,7 @@ import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -37,8 +39,6 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Tests the Native Azure file system (WASB) using parallel threads for rename 
and delete operations.
@@ -68,8 +68,8 @@ public class TestFileSystemOperationsWithThreads extends 
AbstractWasbTestBase {
 fs.initialize(uri, conf);
 
 // Capture logs
-logs = LogCapturer.captureLogs(
-LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME));
+logs = LogCapturer.captureLogs(new Log4JLogger(org.apache.log4j.Logger
+.getRootLogger()));
   }
 
   /*
@@ -131,17 +131,19 @@ public class TestFileSystemOperationsWithThreads extends 
AbstractWasbTestBase {
 
 // Validate from logs that threads are created.
 String content = logs.getOutput();
-assertTrue(content.contains("ms with threads: " + expectedThreadsCreated));
+assertInLog(content, "ms with threads: " + expectedThreadsCreated);
 
 // Validate thread executions
 for (int i = 0; i < expectedThreadsCreated; i++) {
-  assertTrue(content.contains("AzureBlobRenameThread-" + 
Thread.currentThread().getName() + "-" + i));
+  assertInLog(content,
+  "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + 
i);
 }
 
 // Also ensure that we haven't spawned extra threads.
 if (expectedThreadsCreated < renameThreads) {
   for (int i = expectedThreadsCreated; i < renameThreads; i++) {
-assertFalse(content.contains("AzureBlobRenameThread-" + 
Thread.currentThread().getName() + "-" + i));
+assertNotInLog(content,
+"AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" 
+ i);
   }
 }
   }
@@ -158,11 +160,12 @@ public class TestFileSystemOperationsWithThreads extends 
AbstractWasbTestBase {
 
 // Validate from logs that threads are created.
 String content = logs.getOutput();
-assertTrue(content.contains("ms with threads: " + renameThreads));
+assertInLog(content, "ms with threads: " + renameThreads);
 
 // Validate thread executions
 for (int i = 0; i < renameThreads; i++) {
-  assertTrue(content.contains("AzureBlobRenameThread-" + 
Thread.currentThread().getName() + "-" + i));
+  assertInLog(content,
+  "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + 
i);
 }
   }
 
@@ -184,11 +187,45 @@ public class TestFileSystemOperationsWithThreads extends 
AbstractWasbTestBase {
 
 // Validate from logs that threads are disabled.
 String content = logs.getOutput();
-assertTrue(content.contains("Disabling threads for Rename operation 
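
The refactoring replaces bare assertTrue(content.contains(...)) calls with assertInLog/assertNotInLog helpers. Their bodies are not shown in the excerpt above, so the following is only an assumed minimal shape; the practical gain over assertTrue is that a failure message can carry the captured log text:

import static org.junit.Assert.fail;

public final class LogAssertionsSketch {
  private LogAssertionsSketch() {
  }

  // Assumed helper shape: fail with the captured log when the marker is missing.
  static void assertInLog(String logs, String expected) {
    if (!logs.contains(expected)) {
      fail("Expected log to contain \"" + expected + "\", captured log was:\n" + logs);
    }
  }

  // Assumed helper shape: fail with the captured log when the marker is present.
  static void assertNotInLog(String logs, String unexpected) {
    if (logs.contains(unexpected)) {
      fail("Did not expect log to contain \"" + unexpected + "\", captured log was:\n" + logs);
    }
  }
}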

[17/35] hadoop git commit: YARN-6743. yarn.resourcemanager.zk-max-znode-size.bytes description needs spaces in yarn-default.xml (Contributed by Lori Loberg via Daniel Templeton)

2017-06-30 Thread stevel
YARN-6743. yarn.resourcemanager.zk-max-znode-size.bytes description needs 
spaces in yarn-default.xml
(Contributed by Lori Loberg via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25d891a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25d891a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25d891a7

Branch: refs/heads/HADOOP-13345
Commit: 25d891a784304fcf02f57bc7984c31af45003553
Parents: f99b6d1
Author: Daniel Templeton 
Authored: Wed Jun 28 13:17:58 2017 -0700
Committer: Daniel Templeton 
Committed: Wed Jun 28 13:17:58 2017 -0700

--
 .../src/main/resources/yarn-default.xml   | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25d891a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index d4b7bde..cbd5345 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -629,11 +629,11 @@
   
 
   
-   Specifies the maximum size of the data that can be stored
-   in a znode.Value should be same or less than jute.maxbuffer 
configured
-   in zookeeper.Default value configured is 1MB.
-   yarn.resourcemanager.zk-max-znode-size.bytes
-   1048576
+Specifies the maximum size of the data that can be stored
+  in a znode. Value should be same or less than jute.maxbuffer configured
+  in zookeeper. Default value configured is 1MB.
+yarn.resourcemanager.zk-max-znode-size.bytes
+1048576
   
 
   





[09/35] hadoop git commit: HADOOP-14594. ITestS3AFileOperationCost::testFakeDirectoryDeletion to uncomment metric assertions. Contributed by Mingliang Liu

2017-06-30 Thread stevel
HADOOP-14594. ITestS3AFileOperationCost::testFakeDirectoryDeletion to uncomment 
metric assertions. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc4dfe9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc4dfe9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc4dfe9c

Branch: refs/heads/HADOOP-13345
Commit: bc4dfe9c9cb31f39e8f1532d5c5837faf2e92bde
Parents: 94e39c6
Author: Mingliang Liu 
Authored: Mon Jun 26 16:36:39 2017 -0700
Committer: Mingliang Liu 
Committed: Tue Jun 27 13:19:14 2017 -0700

--
 .../hadoop/fs/s3a/ITestS3AFileOperationCost.java| 16 ++--
 1 file changed, 6 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc4dfe9c/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java
index 7fb54b1..00171f0 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java
@@ -199,21 +199,20 @@ public class ITestS3AFileOperationCost extends 
AbstractS3ATestBase {
 new MetricDiff(fs, Statistic.DIRECTORIES_CREATED);
 
 Path srcDir = new Path(srcBaseDir, "1/2/3/4/5/6");
-Path srcFilePath = new Path(srcDir, "source.txt");
 int srcDirDepth = directoriesInPath(srcDir);
 // one dir created, one removed
 mkdirs(srcDir);
 String state = "after mkdir(srcDir)";
 directoriesCreated.assertDiffEquals(state, 1);
-/*  TODO: uncomment once HADOOP-13222 is in
 deleteRequests.assertDiffEquals(state, 1);
 directoriesDeleted.assertDiffEquals(state, 0);
-fakeDirectoriesDeleted.assertDiffEquals(state, srcDirDepth);
-*/
+// HADOOP-14255 deletes unnecessary fake directory objects in mkdirs()
+fakeDirectoriesDeleted.assertDiffEquals(state, srcDirDepth - 1);
 reset(deleteRequests, directoriesCreated, directoriesDeleted,
 fakeDirectoriesDeleted);
 
 // creating a file should trigger demise of the src dir
+final Path srcFilePath = new Path(srcDir, "source.txt");
 touch(fs, srcFilePath);
 state = "after touch(fs, srcFilePath)";
 deleteRequests.assertDiffEquals(state, 1);
@@ -232,12 +231,9 @@ public class ITestS3AFileOperationCost extends 
AbstractS3ATestBase {
 
 int destDirDepth = directoriesInPath(destDir);
 directoriesCreated.assertDiffEquals(state, 1);
-/*  TODO: uncomment once HADOOP-13222 "s3a.mkdirs() to delete empty fake 
parent directories"
-is in
-deleteRequests.assertDiffEquals(state,1);
-directoriesDeleted.assertDiffEquals(state,0);
-fakeDirectoriesDeleted.assertDiffEquals(state,destDirDepth);
-*/
+deleteRequests.assertDiffEquals(state, 1);
+directoriesDeleted.assertDiffEquals(state, 0);
+fakeDirectoriesDeleted.assertDiffEquals(state, destDirDepth - 1);
 reset(deleteRequests, directoriesCreated, directoriesDeleted,
 fakeDirectoriesDeleted);
 





[10/35] hadoop git commit: YARN-6738. LevelDBCacheTimelineStore should reuse ObjectMapper instances. Contributed by Zoltan Haindrich

2017-06-30 Thread stevel
YARN-6738. LevelDBCacheTimelineStore should reuse ObjectMapper instances. 
Contributed by Zoltan Haindrich


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63ce1593
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63ce1593
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63ce1593

Branch: refs/heads/HADOOP-13345
Commit: 63ce1593c5b78eb172773e7498d9c321debe81e8
Parents: bc4dfe9
Author: Jason Lowe 
Authored: Tue Jun 27 17:12:42 2017 -0500
Committer: Jason Lowe 
Committed: Tue Jun 27 17:12:42 2017 -0500

--
 .../hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63ce1593/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
index ccf2d94..7379dd6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
@@ -286,6 +286,7 @@ public class LevelDBCacheTimelineStore extends 
KeyValueBasedTimelineStore {
 }
   };
 }
+static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
 
 @SuppressWarnings("unchecked")
 private V getEntityForKey(byte[] key) throws IOException {
@@ -293,8 +294,7 @@ public class LevelDBCacheTimelineStore extends 
KeyValueBasedTimelineStore {
   if (resultRaw == null) {
 return null;
   }
-  ObjectMapper entityMapper = new ObjectMapper();
-  return (V) entityMapper.readValue(resultRaw, TimelineEntity.class);
+  return (V) OBJECT_MAPPER.readValue(resultRaw, TimelineEntity.class);
 }
 
 private byte[] getStartTimeKey(K entityId) {
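
Jackson's ObjectMapper is thread-safe once configured, and constructing one is relatively expensive because its serializer and deserializer caches are rebuilt each time, which is why the patch hoists the mapper into a shared static field. An illustrative sketch of the same pattern; it uses the com.fasterxml.jackson artifact and a hypothetical Record type, which may differ from what the timeline store itself uses:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import com.fasterxml.jackson.databind.ObjectMapper;

public class MapperReuseSketch {
  // One shared, pre-configured mapper instead of "new ObjectMapper()" per read.
  private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

  public static class Record {
    public String id;
    public long startTime;
  }

  static Record readRecord(byte[] raw) throws IOException {
    return OBJECT_MAPPER.readValue(raw, Record.class);
  }

  public static void main(String[] args) throws IOException {
    byte[] json = "{\"id\":\"entity-1\",\"startTime\":1498867200}"
        .getBytes(StandardCharsets.UTF_8);
    System.out.println(readRecord(json).id);
  }
}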





[13/35] hadoop git commit: MAPREDUCE-6697. Concurrent task limits should only be applied when necessary. Contributed by Nathan Roberts.

2017-06-30 Thread stevel
MAPREDUCE-6697. Concurrent task limits should only be applied when necessary. 
Contributed by Nathan Roberts.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5c0476a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5c0476a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5c0476a

Branch: refs/heads/HADOOP-13345
Commit: a5c0476a990ec1e7eb34ce2462a45aa52cc1350d
Parents: 7e031c2
Author: Akira Ajisaka 
Authored: Wed Jun 28 10:50:09 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Jun 28 10:50:09 2017 +0900

--
 .../v2/app/rm/RMContainerAllocator.java |  6 +-
 .../v2/app/rm/TestRMContainerAllocator.java | 73 ++--
 2 files changed, 73 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c0476a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index 1f88a2c..0952797 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -919,7 +919,8 @@ public class RMContainerAllocator extends 
RMContainerRequestor
 
   private void applyConcurrentTaskLimits() {
 int numScheduledMaps = scheduledRequests.maps.size();
-if (maxRunningMaps > 0 && numScheduledMaps > 0) {
+if (maxRunningMaps > 0 && numScheduledMaps > 0 &&
+getJob().getTotalMaps() > maxRunningMaps) {
   int maxRequestedMaps = Math.max(0,
   maxRunningMaps - assignedRequests.maps.size());
   int numScheduledFailMaps = scheduledRequests.earlierFailedMaps.size();
@@ -936,7 +937,8 @@ public class RMContainerAllocator extends 
RMContainerRequestor
 }
 
 int numScheduledReduces = scheduledRequests.reduces.size();
-if (maxRunningReduces > 0 && numScheduledReduces > 0) {
+if (maxRunningReduces > 0 && numScheduledReduces > 0 &&
+getJob().getTotalReduces() > maxRunningReduces) {
   int maxRequestedReduces = Math.max(0,
   maxRunningReduces - assignedRequests.reduces.size());
   int reduceRequestLimit = Math.min(maxRequestedReduces,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c0476a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index 933bd01..8879362 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -2782,14 +2782,77 @@ public class TestRMContainerAllocator {
   }
 
   @Test
+  public void testConcurrentTaskLimitsDisabledIfSmaller() throws Exception {
+final int MAP_COUNT = 1;
+final int REDUCE_COUNT = 1;
+final int MAP_LIMIT = 1;
+final int REDUCE_LIMIT = 1;
+Configuration conf = new Configuration();
+conf.setInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT, MAP_LIMIT);
+conf.setInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT, REDUCE_LIMIT);
+conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 0.0f);
+ApplicationId appId = ApplicationId.newInstance(1, 1);
+ApplicationAttemptId appAttemptId =
+ApplicationAttemptId.newInstance(appId, 1);
+JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
+Job mockJob = mock(Job.class);
+when(mockJob.getReport()).thenReturn(
+MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
+0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
+
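
The bug being fixed: the running-task cap was applied even when the whole job had no more tasks than the cap, throttling container requests for no reason. A stand-alone sketch of the guarded calculation (names are illustrative, and the real code also tracks earlier-failed maps separately):

public final class TaskLimitSketch {
  private TaskLimitSketch() {
  }

  // How many of the currently scheduled maps may be requested from the RM.
  static int mapsToRequest(int totalMaps, int maxRunningMaps,
      int assignedMaps, int scheduledMaps) {
    if (maxRunningMaps <= 0 || scheduledMaps <= 0
        || totalMaps <= maxRunningMaps) {
      // Limit disabled, or the whole job fits under the limit anyway:
      // request everything that is scheduled.
      return scheduledMaps;
    }
    int maxRequested = Math.max(0, maxRunningMaps - assignedMaps);
    return Math.min(maxRequested, scheduledMaps);
  }

  public static void main(String[] args) {
    System.out.println(mapsToRequest(1, 1, 0, 1));      // 1: limit not engaged (total <= limit)
    System.out.println(mapsToRequest(100, 10, 7, 50));  // 3: limit engaged
  }
}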

[18/35] hadoop git commit: Add -E option in 'ls' to list erasure coding policy of each file and directory if applicable. Contributed by luhuichun via lei.

2017-06-30 Thread stevel
Add -E option in 'ls' to list erasure coding policy of each file and directory 
if applicable. Contributed by luhuichun via lei.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6df0fdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6df0fdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6df0fdb

Branch: refs/heads/HADOOP-13345
Commit: d6df0fdbbda42b4ddab3810b5ac57336c6241ba7
Parents: 25d891a
Author: Lei Xu 
Authored: Wed Jun 28 13:47:23 2017 -0700
Committer: Lei Xu 
Committed: Wed Jun 28 13:47:23 2017 -0700

--
 .../java/org/apache/hadoop/fs/shell/Ls.java | 63 +++-
 .../src/site/markdown/FileSystemShell.md|  4 +-
 .../src/test/resources/testConf.xml |  6 +-
 .../test/resources/testErasureCodingConf.xml| 34 +++
 4 files changed, 89 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6df0fdb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
index 47e87f5..221b3cb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.ContentSummary;
 
 /**
  * Get a listing of all files in that match the file patterns.
@@ -54,13 +55,14 @@ class Ls extends FsCommand {
   private static final String OPTION_MTIME = "t";
   private static final String OPTION_ATIME = "u";
   private static final String OPTION_SIZE = "S";
+  private static final String OPTION_ECPOLICY = "e";
 
   public static final String NAME = "ls";
   public static final String USAGE = "[-" + OPTION_PATHONLY + "] [-" +
   OPTION_DIRECTORY + "] [-" + OPTION_HUMAN + "] [-" +
   OPTION_HIDENONPRINTABLE + "] [-" + OPTION_RECURSIVE + "] [-" +
   OPTION_MTIME + "] [-" + OPTION_SIZE + "] [-" + OPTION_REVERSE + "] [-" +
-  OPTION_ATIME + "] [ ...]";
+  OPTION_ATIME + "] [-" + OPTION_ECPOLICY +"] [ ...]";
 
   public static final String DESCRIPTION =
   "List the contents that match the specified file pattern. If " +
@@ -91,7 +93,9 @@ class Ls extends FsCommand {
   "  Reverse the order of the sort.\n" +
   "  -" + OPTION_ATIME +
   "  Use time of last access instead of modification for\n" +
-  "  display and sorting.";
+  "  display and sorting.\n"+
+  "  -" + OPTION_ECPOLICY +
+  "  Display the erasure coding policy of files and directories.\n";
 
   protected final SimpleDateFormat dateFormat =
 new SimpleDateFormat("-MM-dd HH:mm");
@@ -104,6 +108,7 @@ class Ls extends FsCommand {
   private boolean orderTime;
   private boolean orderSize;
   private boolean useAtime;
+  private boolean displayECPolicy;
   private Comparator orderComparator;
 
   protected boolean humanReadable = false;
@@ -129,7 +134,7 @@ class Ls extends FsCommand {
 CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE,
 OPTION_PATHONLY, OPTION_DIRECTORY, OPTION_HUMAN,
 OPTION_HIDENONPRINTABLE, OPTION_RECURSIVE, OPTION_REVERSE,
-OPTION_MTIME, OPTION_SIZE, OPTION_ATIME);
+OPTION_MTIME, OPTION_SIZE, OPTION_ATIME, OPTION_ECPOLICY);
 cf.parse(args);
 pathOnly = cf.getOpt(OPTION_PATHONLY);
 dirRecurse = !cf.getOpt(OPTION_DIRECTORY);
@@ -140,6 +145,7 @@ class Ls extends FsCommand {
 orderTime = cf.getOpt(OPTION_MTIME);
 orderSize = !orderTime && cf.getOpt(OPTION_SIZE);
 useAtime = cf.getOpt(OPTION_ATIME);
+displayECPolicy = cf.getOpt(OPTION_ECPOLICY);
 if (args.isEmpty()) args.add(Path.CUR_DIR);
 
 initialiseOrderComparator();
@@ -245,25 +251,42 @@ class Ls extends FsCommand {
   return;
 }
 FileStatus stat = item.stat;
-String line = String.format(lineFormat,
-(stat.isDirectory() ? "d" : "-"),
-stat.getPermission() + (stat.getPermission().getAclBit() ? "+" : " "),
-(stat.isFile() ? stat.getReplication() : "-"),
-stat.getOwner(),
-stat.getGroup(),
-formatSize(stat.getLen()),
-dateFormat.format(new Date(isUseAtime()
-? stat.getAccessTime()
-: stat.getModificationTime())),
-isHideNonPrintable() ? new 

[02/35] hadoop git commit: HDFS-11956. Do not require a storage ID or target storage IDs when writing a block. Contributed by Ewan Higgs.

2017-06-30 Thread stevel
HDFS-11956. Do not require a storage ID or target storage IDs when writing a 
block. Contributed by Ewan Higgs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c367b46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c367b46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c367b46

Branch: refs/heads/HADOOP-13345
Commit: 2c367b464c86a7d67a2b8dd82ae804d169957573
Parents: 06c8ca3
Author: Andrew Wang 
Authored: Mon Jun 26 11:20:07 2017 -0700
Committer: Andrew Wang 
Committed: Mon Jun 26 11:20:07 2017 -0700

--
 .../token/block/BlockTokenSecretManager.java|  1 +
 .../hdfs/server/datanode/DataXceiver.java   | 17 +++---
 .../security/token/block/TestBlockToken.java| 35 
 3 files changed, 48 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c367b46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
index 8be22d9..da830a6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
@@ -112,6 +112,7 @@ public class BlockTokenSecretManager extends
* @param blockPoolId block pool ID
* @param encryptionAlgorithm encryption algorithm to use
* @param numNNs number of namenodes possible
+   * @param useProto should we use new protobuf style tokens
*/
   public BlockTokenSecretManager(long keyUpdateInterval,
   long tokenLifetime, int nnIndex, int numNNs,  String blockPoolId,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c367b46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index d42e330..8ffd3a4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -695,11 +695,18 @@ class DataXceiver extends Receiver implements Runnable {
 if (targetStorageTypes.length > 0) {
   System.arraycopy(targetStorageTypes, 0, storageTypes, 1, nst);
 }
-int nsi = targetStorageIds.length;
-String[] storageIds = new String[nsi + 1];
-storageIds[0] = storageId;
-if (targetStorageTypes.length > 0) {
-  System.arraycopy(targetStorageIds, 0, storageIds, 1, nsi);
+
+// To support older clients, we don't pass in empty storageIds
+final int nsi = targetStorageIds.length;
+final String[] storageIds;
+if (nsi > 0) {
+  storageIds = new String[nsi + 1];
+  storageIds[0] = storageId;
+  if (targetStorageTypes.length > 0) {
+System.arraycopy(targetStorageIds, 0, storageIds, 1, nsi);
+  }
+} else {
+  storageIds = new String[0];
 }
 checkAccess(replyOut, isClient, block, blockToken, Op.WRITE_BLOCK,
 BlockTokenIdentifier.AccessMode.WRITE,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c367b46/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index 747f295..aaddb36 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -774,4 +774,39 @@ public class TestBlockToken {
 testBlockTokenSerialization(false);
 testBlockTokenSerialization(true);
   }
+
+  private void testBadStorageIDCheckAccess(boolean enableProtobuf)
+  throws IOException {
+BlockTokenSecretManager sm = new BlockTokenSecretManager(
+blockKeyUpdateInterval, blockTokenLifetime, 0, 

[35/35] hadoop git commit: Merge branch 'trunk' into HADOOP-13345

2017-06-30 Thread stevel
Merge branch 'trunk' into HADOOP-13345


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf36cbd3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf36cbd3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf36cbd3

Branch: refs/heads/HADOOP-13345
Commit: cf36cbd35677cdd18b0f07ed52e1a78215113299
Parents: 886d680 3be2659
Author: Steve Loughran 
Authored: Fri Jun 30 15:35:20 2017 +0100
Committer: Steve Loughran 
Committed: Fri Jun 30 15:35:20 2017 +0100

--
 dev-support/bin/create-release  |  36 +++-
 hadoop-assemblies/pom.xml   |   4 +-
 hadoop-build-tools/pom.xml  |   2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml |   4 +-
 .../hadoop-client-check-invariants/pom.xml  |   4 +-
 .../hadoop-client-check-test-invariants/pom.xml |   4 +-
 .../hadoop-client-integration-tests/pom.xml |   4 +-
 .../hadoop-client-minicluster/pom.xml   |   4 +-
 .../hadoop-client-runtime/pom.xml   |   4 +-
 hadoop-client-modules/hadoop-client/pom.xml |   4 +-
 hadoop-client-modules/pom.xml   |   2 +-
 .../hadoop-cloud-storage/pom.xml|   4 +-
 hadoop-cloud-storage-project/pom.xml|   4 +-
 .../hadoop-annotations/pom.xml  |   4 +-
 .../hadoop-auth-examples/pom.xml|   4 +-
 hadoop-common-project/hadoop-auth/pom.xml   |   4 +-
 hadoop-common-project/hadoop-common/pom.xml |   4 +-
 .../java/org/apache/hadoop/fs/shell/Ls.java |  63 --
 .../erasurecode/coder/ErasureEncodingStep.java  |   2 +-
 .../org/apache/hadoop/net/NetworkTopology.java  |   1 -
 .../hadoop/io/erasurecode/jni_xor_decoder.c |   5 +-
 .../hadoop/io/erasurecode/jni_xor_encoder.c |   2 +-
 .../src/site/markdown/FileSystemShell.md|   4 +-
 .../erasurecode/TestCodecRawCoderMapping.java   |  15 +-
 .../src/test/resources/testConf.xml |   6 +-
 hadoop-common-project/hadoop-kms/pom.xml|   4 +-
 .../src/main/conf/kms-log4j.properties  |   5 +-
 hadoop-common-project/hadoop-minikdc/pom.xml|   4 +-
 hadoop-common-project/hadoop-nfs/pom.xml|   4 +-
 hadoop-common-project/pom.xml   |   4 +-
 hadoop-dist/pom.xml |   4 +-
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |   4 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  19 +-
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |   5 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  22 +++
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|  22 +++
 .../hadoop/hdfs/protocol/ClientProtocol.java|  17 ++
 .../ClientNamenodeProtocolTranslatorPB.java |  30 +++
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |   5 +-
 .../hdfs/server/protocol/SlowDiskReports.java   |   5 +-
 .../src/main/proto/ClientNamenodeProtocol.proto |   4 +
 .../src/main/proto/erasurecoding.proto  |  14 ++
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml  |   4 +-
 .../hadoop-hdfs-native-client/pom.xml   |   4 +-
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml |   4 +-
 .../dev-support/findbugsExcludeFile.xml |   5 -
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   4 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java |  28 +++
 .../hdfs/qjournal/server/JournalNode.java   |  16 +-
 .../token/block/BlockTokenSecretManager.java|   1 +
 .../server/blockmanagement/BlockManager.java|  13 +-
 .../blockmanagement/DatanodeDescriptor.java |   4 +-
 .../server/blockmanagement/DatanodeManager.java |   1 +
 .../PendingReconstructionBlocks.java|   8 +-
 .../hdfs/server/common/HdfsServerConstants.java |   7 +-
 .../hdfs/server/datanode/DataStorage.java   |  12 +-
 .../hdfs/server/datanode/DataXceiver.java   |  17 +-
 .../hdfs/server/datanode/DiskBalancer.java  |  12 +-
 .../namenode/ErasureCodingPolicyManager.java|  56 ++
 .../server/namenode/FSDirErasureCodingOp.java   |  12 ++
 .../hdfs/server/namenode/FSNamesystem.java  |  56 +-
 .../namenode/NNStorageRetentionManager.java |  27 ++-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  16 ++
 .../namenode/metrics/NameNodeMetrics.java   |  18 ++
 .../namenode/snapshot/SnapshotDiffInfo.java |   5 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |   6 +-
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  99 +-
 .../offlineImageViewer/ImageLoaderCurrent.java  |  10 +-
 .../src/site/markdown/HDFSCommands.md   |   4 +
 .../src/site/markdown/HDFSErasureCoding.md  |  20 +-
 .../hadoop/hdfs/TestDistributedFileSystem.java  |  48 +
 .../hadoop/hdfs/TestSnapshotCommands.java   |  50 +
 .../security/token/block/TestBlockToken.java|  35 
 .../blockmanagement/TestDatanodeManager.java|  51 +
 

[06/35] hadoop git commit: HDFS-12045. Add log when Diskbalancer volume is transient storage type. Contributed by steven-wugang.

2017-06-30 Thread stevel
HDFS-12045. Add log when Diskbalancer volume is transient storage type. 
Contributed by steven-wugang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8641a2c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8641a2c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8641a2c0

Branch: refs/heads/HADOOP-13345
Commit: 8641a2c08b0ce0f1f23cf2d508baccc4f627a385
Parents: 2b87faf
Author: Anu Engineer 
Authored: Tue Jun 27 00:39:47 2017 -0700
Committer: Anu Engineer 
Committed: Tue Jun 27 00:39:47 2017 -0700

--
 .../hadoop/hdfs/server/datanode/DiskBalancer.java   | 12 
 1 file changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8641a2c0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index 0c75001..6b2cd52 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -1011,15 +1011,19 @@ public class DiskBalancer {
 return;
   }
 
+  if (source.isTransientStorage() || dest.isTransientStorage()) {
+final String errMsg = "Disk Balancer - Unable to support " +
+"transient storage type.";
+LOG.error(errMsg);
+item.setErrMsg(errMsg);
+return;
+  }
+
   List poolIters = new LinkedList<>();
   startTime = Time.now();
   item.setStartTime(startTime);
   secondsElapsed = 0;
 
-  if (source.isTransientStorage() || dest.isTransientStorage()) {
-return;
-  }
-
   try {
 openPoolIters(source, poolIters);
 if (poolIters.size() == 0) {
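
The hunk above moves the transient-storage check ahead of the pool-iterator setup and gives it a logged, user-visible error message instead of a silent return. A minimal, self-contained sketch of that fail-fast shape, with a hypothetical Volume interface standing in for FsVolumeSpi:

```java
// Editorial sketch, not the Hadoop code itself: validate the volumes and build a
// useful error message before any balancing work starts.
interface Volume {
  boolean isTransientStorage();
}

final class TransientStorageGuard {
  /** @return an error message if the move must be rejected, or null to proceed. */
  static String check(Volume source, Volume dest) {
    if (source.isTransientStorage() || dest.isTransientStorage()) {
      return "Disk Balancer - Unable to support transient storage type.";
    }
    return null;
  }
}
```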





[21/35] hadoop git commit: YARN-5311. Document graceful decommission CLI and usage. Contributed by Elek, Marton.

2017-06-30 Thread stevel
YARN-5311. Document graceful decommission CLI and usage. Contributed by Elek, 
Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e3eebc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e3eebc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e3eebc9

Branch: refs/heads/HADOOP-13345
Commit: 4e3eebc943835077e3dd0df9e0b9239ae604cb89
Parents: 990aa34
Author: Junping Du 
Authored: Wed Jun 28 15:32:04 2017 -0700
Committer: Junping Du 
Committed: Wed Jun 28 15:32:04 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   4 +
 .../src/site/markdown/GracefulDecommission.md   | 168 +++
 .../src/site/markdown/ResourceManagerRest.md|   2 +-
 3 files changed, 173 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e3eebc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ca71d35..a6d3360 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -831,6 +831,10 @@ public class YarnConfiguration extends Configuration {
   RM_PREFIX + "nodemanager-graceful-decommission-timeout-secs";
   public static final int DEFAULT_RM_NODE_GRACEFUL_DECOMMISSION_TIMEOUT = 3600;
 
+  /**
+   * Period in seconds of the poll timer task inside 
DecommissioningNodesWatcher
+   * to identify and take care of DECOMMISSIONING nodes missing regular heart 
beat.
+   */
   public static final String RM_DECOMMISSIONING_NODES_WATCHER_POLL_INTERVAL =
   RM_PREFIX + "decommissioning-nodes-watcher.poll-interval-secs";
   public static final int

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e3eebc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/GracefulDecommission.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/GracefulDecommission.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/GracefulDecommission.md
new file mode 100644
index 000..2acb3d2
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/GracefulDecommission.md
@@ -0,0 +1,168 @@
+
+
+
+Graceful Decommission of Yarn Nodes
+===
+
+* [Overview](#overview)
+* [Features](#features)
+  * [NodesListManager detects and handles include and exclude list 
changes](#nodeslistmanager-detects-and-handles-include-and-exclude-list-changes)
+  * [RMNode handles decommission events](#rmnode-handles-decommission-events)
+  * [Automatic and asynchronous tracking of decommissioning nodes 
status](#automatic-and-asynchronous-tracking-of-decommissioning-nodes-status)
+  * [Per-Node decommission timeout 
support](#per-node-decommission-timeout-support)
+* [Configuration](#configuration)
+
+
+
+Overview
+
+
+Yarn scales out easily: any new NodeManager can join the configured
ResourceManager and start executing jobs. But to achieve full elasticity we also
need a decommissioning process that removes existing nodes and scales the
cluster down.
+
+Yarn Nodes can be decommissioned in either NORMAL or GRACEFUL mode.
+
+Normal Decommission of Yarn Nodes means an immediate shutdown.
+
+Graceful Decommission of Yarn Nodes is the mechanism to decommission NMs while
minimizing the impact on running applications. Once a node is in the
DECOMMISSIONING state, the RM won't schedule new containers on it and will wait
for running containers and applications to complete (or for the decommissioning
timeout to be exceeded) before transitioning the node into DECOMMISSIONED.
+
+## Quick start
+
+To do a normal decommissioning:
+
+1. Start a Yarn cluster (with NodeManagers and ResourceManager)
+2. Start a yarn job (for example with `yarn jar...` )
+3. Add `yarn.resourcemanager.nodes.exclude-path` property to your 
`yarn-site.xml` (Note: you don't need to restart the ResourceManager)
+4. Create a text file (the location is defined in the previous step) with one 
line which contains the name of a selected NodeManager 
+5. Call `./bin/yarn rmadmin  -refreshNodes`
+6. Result: The nodemanager is decommissioned *immediately*
+
+In the next sections we will cover some more detailed usage (for example: 
using graceful 
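
A minimal sketch of step 3 of the quick start above, done programmatically rather than by editing `yarn-site.xml`. The property name is the one quoted in the document; the exclude-file path is a hypothetical example:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ExcludePathExample {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // Same property the quick start tells you to add to yarn-site.xml.
    conf.set("yarn.resourcemanager.nodes.exclude-path",
        "/etc/hadoop/conf/yarn.exclude");
    System.out.println(conf.get("yarn.resourcemanager.nodes.exclude-path"));
  }
}
```

After writing the hostname of the selected NodeManager into that file, `yarn rmadmin -refreshNodes` (step 5) makes the ResourceManager pick the change up.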

[04/35] hadoop git commit: HDFS-12033. DatanodeManager picking EC recovery tasks should also consider the number of regular replication tasks. Contributed by Lei (Eddy) Xu.

2017-06-30 Thread stevel
HDFS-12033. DatanodeManager picking EC recovery tasks should also consider the 
number of regular replication tasks. Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/144753e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/144753e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/144753e8

Branch: refs/heads/HADOOP-13345
Commit: 144753e87f4a9daa51200be05ff2bb760bf38169
Parents: a9d3412
Author: Lei Xu 
Authored: Mon Jun 26 15:43:50 2017 -0700
Committer: Lei Xu 
Committed: Mon Jun 26 15:43:50 2017 -0700

--
 .../server/blockmanagement/DatanodeManager.java |  1 +
 .../blockmanagement/TestDatanodeManager.java| 51 
 2 files changed, 52 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/144753e8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index a786c6a..1d09751 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1661,6 +1661,7 @@ public class DatanodeManager {
 if (pendingList != null) {
   cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
   pendingList));
+  maxTransfers -= pendingList.size();
 }
 // check pending erasure coding tasks
 List pendingECList = nodeinfo

http://git-wip-us.apache.org/repos/asf/hadoop/blob/144753e8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
index 30e2aaf..de002f4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
@@ -44,13 +44,21 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand;
+import 
org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
+import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.util.Shell;
 import org.junit.Assert;
@@ -491,4 +499,47 @@ public class TestDatanodeManager {
 Assert.assertEquals("Unexpected host or host in unexpected position",
 "127.0.0.1:23456", bothAgain.get(1).getInfoAddr());
   }
+
+  @Test
+  public void testPendingRecoveryTasks() throws IOException {
+FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
+Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+Configuration conf = new Configuration();
+DatanodeManager dm = Mockito.spy(mockDatanodeManager(fsn, conf));
+
+int maxTransfers = 20;
+int numPendingTasks = 7;
+int numECTasks = maxTransfers - numPendingTasks;
+
+DatanodeDescriptor nodeInfo = Mockito.mock(DatanodeDescriptor.class);
+Mockito.when(nodeInfo.isRegistered()).thenReturn(true);
+

[34/35] hadoop git commit: YARN-6694. Add certain envs to the default yarn.nodemanager.env-whitelist. Contributed by Jian He

2017-06-30 Thread stevel
YARN-6694. Add certain envs to the default yarn.nodemanager.env-whitelist. 
Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3be2659f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3be2659f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3be2659f

Branch: refs/heads/HADOOP-13345
Commit: 3be2659f83965a312d1095f03b7a95c7781c10af
Parents: af2773f
Author: Xuan 
Authored: Thu Jun 29 20:10:35 2017 -0700
Committer: Xuan 
Committed: Thu Jun 29 20:10:35 2017 -0700

--
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3be2659f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index cbd5345..81c9cb2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1013,7 +1013,7 @@
   
 Environment variables that containers may override rather 
than use NodeManager's default.
 yarn.nodemanager.env-whitelist
-
JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME
+
JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_HOME,PATH,LANG,TZ
   
 
   





[29/35] hadoop git commit: Revert "HDFS-11696. Fix warnings from Spotbugs in hadoop-hdfs. Contributed by Yiqun Lin."

2017-06-30 Thread stevel
Revert "HDFS-11696. Fix warnings from Spotbugs in hadoop-hdfs. Contributed by 
Yiqun Lin."

This reverts commit 89a8edc0149e3f31a5ade9a0927c4b6332cf6b1a.

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/441378e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/441378e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/441378e7

Branch: refs/heads/HADOOP-13345
Commit: 441378e7e4609d89b7181dacc8ba92b253a962df
Parents: 5a75f73
Author: Andrew Wang 
Authored: Thu Jun 29 13:54:16 2017 -0700
Committer: Andrew Wang 
Committed: Thu Jun 29 13:54:16 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  7 ++---
 .../hdfs/server/protocol/SlowDiskReports.java   |  5 ++--
 .../dev-support/findbugsExcludeFile.xml |  5 
 .../hdfs/qjournal/server/JournalNode.java   | 16 +---
 .../hdfs/server/common/HdfsServerConstants.java |  7 +
 .../hdfs/server/datanode/DataStorage.java   | 12 +++--
 .../namenode/NNStorageRetentionManager.java | 27 +---
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  6 ++---
 .../offlineImageViewer/ImageLoaderCurrent.java  | 10 +++-
 9 files changed, 33 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/441378e7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 1f6022c..8acda61 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2883,12 +2883,9 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
 synchronized (DFSClient.class) {
   if (STRIPED_READ_THREAD_POOL == null) {
-// Only after thread pool is fully constructed then save it to
-// volatile field.
-ThreadPoolExecutor threadPool = DFSUtilClient.getThreadPoolExecutor(1,
+STRIPED_READ_THREAD_POOL = DFSUtilClient.getThreadPoolExecutor(1,
 numThreads, 60, "StripedRead-", true);
-threadPool.allowCoreThreadTimeOut(true);
-STRIPED_READ_THREAD_POOL = threadPool;
+STRIPED_READ_THREAD_POOL.allowCoreThreadTimeOut(true);
   }
 }
   }
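
For context, the hunk above reverts a safe-publication change: the removed lines built and fully configured the executor before assigning it to the volatile field. A stand-alone sketch of that idiom (the pool parameters here are illustrative, not the HDFS defaults):

```java
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

final class LazyPool {
  private static volatile ThreadPoolExecutor pool;

  static ThreadPoolExecutor get(int numThreads) {
    if (pool == null) {
      synchronized (LazyPool.class) {
        if (pool == null) {
          ThreadPoolExecutor p = new ThreadPoolExecutor(1, numThreads,
              60, TimeUnit.SECONDS, new SynchronousQueue<>());
          p.allowCoreThreadTimeOut(true); // finish configuration first
          pool = p;                       // publish to the volatile field last
        }
      }
    }
    return pool;
  }
}
```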

http://git-wip-us.apache.org/repos/asf/hadoop/blob/441378e7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
index 496389a..8095c2a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
@@ -101,9 +101,8 @@ public final class SlowDiskReports {
 }
 
 boolean areEqual;
-for (Map.Entry> entry : this.slowDisks
-.entrySet()) {
-  if (!entry.getValue().equals(that.slowDisks.get(entry.getKey( {
+for (String disk : this.slowDisks.keySet()) {
+  if (!this.slowDisks.get(disk).equals(that.slowDisks.get(disk))) {
 return false;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/441378e7/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 9270990..be54efb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -252,9 +252,4 @@
 
 
 
-
-
-
-
-
  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/441378e7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
--
diff --git 

[01/35] hadoop git commit: HDFS-12032. Inaccurate comment on DatanodeDescriptor#getNumberOfBlocksToBeErasureCoded.

2017-06-30 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 886d680e1 -> cf36cbd35


HDFS-12032. Inaccurate comment on 
DatanodeDescriptor#getNumberOfBlocksToBeErasureCoded.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06c8ca3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06c8ca3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06c8ca3b

Branch: refs/heads/HADOOP-13345
Commit: 06c8ca3bb330c1763d31ed37309e7552dcd3e7aa
Parents: 48f4a22
Author: Andrew Wang 
Authored: Mon Jun 26 10:54:01 2017 -0700
Committer: Andrew Wang 
Committed: Mon Jun 26 10:54:01 2017 -0700

--
 .../hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06c8ca3b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 4b87fd4..57348a3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -644,14 +644,14 @@ public class DatanodeDescriptor extends DatanodeInfo {
   }
 
   /**
-   * The number of work items that are pending to be replicated
+   * The number of work items that are pending to be replicated.
*/
   int getNumberOfBlocksToBeReplicated() {
 return pendingReplicationWithoutTargets + replicateBlocks.size();
   }
 
   /**
-   * The number of work items that are pending to be replicated
+   * The number of work items that are pending to be reconstructed.
*/
   @VisibleForTesting
   public int getNumberOfBlocksToBeErasureCoded() {





[28/35] hadoop git commit: HADOOP-14611. NetworkTopology.DEFAULT_HOST_LEVEL is unused (Contributed by Chen Liang via Daniel Templeton)

2017-06-30 Thread stevel
HADOOP-14611. NetworkTopology.DEFAULT_HOST_LEVEL is unused
(Contributed by Chen Liang via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a75f738
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a75f738
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a75f738

Branch: refs/heads/HADOOP-13345
Commit: 5a75f73893567151f525950cc1a15b3f1bfeac26
Parents: b08cc97
Author: Daniel Templeton 
Authored: Thu Jun 29 12:28:43 2017 -0700
Committer: Daniel Templeton 
Committed: Thu Jun 29 12:28:43 2017 -0700

--
 .../src/main/java/org/apache/hadoop/net/NetworkTopology.java| 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a75f738/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 1018d58..278bf72 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -45,7 +45,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 @InterfaceStability.Unstable
 public class NetworkTopology {
   public final static String DEFAULT_RACK = "/default-rack";
-  public final static int DEFAULT_HOST_LEVEL = 2;
   public static final Logger LOG =
   LoggerFactory.getLogger(NetworkTopology.class);
 





[07/35] hadoop git commit: HDFS-12040. TestFsDatasetImpl.testCleanShutdownOfVolume fails. Contributed by hu xiaodong.

2017-06-30 Thread stevel
HDFS-12040. TestFsDatasetImpl.testCleanShutdownOfVolume fails. Contributed by 
hu xiaodong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07defa4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07defa4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07defa4c

Branch: refs/heads/HADOOP-13345
Commit: 07defa4c09db6e8c552167019ca0d2444cfc8fe7
Parents: 8641a2c
Author: Akira Ajisaka 
Authored: Tue Jun 27 20:49:10 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Jun 27 20:49:26 2017 +0900

--
 .../hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07defa4c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 2a3bf79..a30329c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -687,7 +687,7 @@ public class TestFsDatasetImpl {
   @Override public Boolean get() {
   return volume.getReferenceCount() == 0;
 }
-  }, 100, 10);
+  }, 100, 1000);
   assertThat(dataNode.getFSDataset().getNumFailedVolumes(), is(1));
 
   try {





[32/35] hadoop git commit: HDFS-12043. Add counters for block re-replication. Contributed by Chen Liang.

2017-06-30 Thread stevel
HDFS-12043. Add counters for block re-replication. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/900221f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/900221f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/900221f9

Branch: refs/heads/HADOOP-13345
Commit: 900221f95ea9fe1936b4d5f277e6047ee8734eca
Parents: 72993b3
Author: Arpit Agarwal 
Authored: Thu Jun 29 17:15:13 2017 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 29 17:15:13 2017 -0700

--
 .../server/blockmanagement/BlockManager.java| 13 ++-
 .../PendingReconstructionBlocks.java|  8 +-
 .../namenode/metrics/NameNodeMetrics.java   | 18 
 .../TestPendingReconstruction.java  | 86 +++-
 4 files changed, 118 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/900221f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a0c4698..a5ee30b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1851,7 +1851,7 @@ public class BlockManager implements BlockStatsMXBean {
 (pendingReplicaNum > 0 || isPlacementPolicySatisfied(block));
   }
 
-  private BlockReconstructionWork scheduleReconstruction(BlockInfo block,
+  BlockReconstructionWork scheduleReconstruction(BlockInfo block,
   int priority) {
 // skip abandoned block or block reopened for append
 if (block.isDeleted() || !block.isCompleteOrCommitted()) {
@@ -1873,6 +1873,7 @@ public class BlockManager implements BlockStatsMXBean {
 if(srcNodes == null || srcNodes.length == 0) {
   // block can not be reconstructed from any node
   LOG.debug("Block {} cannot be reconstructed from any node", block);
+  NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
   return null;
 }
 
@@ -1885,6 +1886,7 @@ public class BlockManager implements BlockStatsMXBean {
   neededReconstruction.remove(block, priority);
   blockLog.debug("BLOCK* Removing {} from neededReconstruction as" +
   " it has enough replicas", block);
+  NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
   return null;
 }
 
@@ -1900,6 +1902,7 @@ public class BlockManager implements BlockStatsMXBean {
 if (block.isStriped()) {
   if (pendingNum > 0) {
 // Wait the previous reconstruction to finish.
+NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
 return null;
   }
 
@@ -3727,8 +3730,8 @@ public class BlockManager implements BlockStatsMXBean {
* The given node is reporting that it received a certain block.
*/
   @VisibleForTesting
-  void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint)
-  throws IOException {
+  public void addBlock(DatanodeStorageInfo storageInfo, Block block,
+  String delHint) throws IOException {
 DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
 // Decrement number of blocks scheduled to this datanode.
 // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with 
@@ -3751,7 +3754,9 @@ public class BlockManager implements BlockStatsMXBean {
 BlockInfo storedBlock = getStoredBlock(block);
 if (storedBlock != null &&
 block.getGenerationStamp() == storedBlock.getGenerationStamp()) {
-  pendingReconstruction.decrement(storedBlock, node);
+  if (pendingReconstruction.decrement(storedBlock, node)) {
+NameNode.getNameNodeMetrics().incSuccessfulReReplications();
+  }
 }
 processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
 delHintNode);
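
The API change visible above is that pendingReconstruction.decrement() now reports whether an entry was actually removed, so the successful-re-replication counter is bumped only for genuine completions. A simplified, self-contained sketch of that contract, with Block and Node as placeholders for the real HDFS types:

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class PendingTracker<Block, Node> {
  private final Map<Block, Set<Node>> pending = new HashMap<>();

  void add(Block b, Node n) {
    pending.computeIfAbsent(b, k -> new HashSet<>()).add(n);
  }

  /** @return true only if this (block, node) pair was actually pending. */
  boolean decrement(Block b, Node n) {
    Set<Node> nodes = pending.get(b);
    if (nodes == null || !nodes.remove(n)) {
      return false; // nothing was pending; don't count it as a success
    }
    if (nodes.isEmpty()) {
      pending.remove(b);
    }
    return true;
  }
}
```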

http://git-wip-us.apache.org/repos/asf/hadoop/blob/900221f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
index 2221d1d..0f20daa 100644
--- 

[30/35] hadoop git commit: YARN-6751. Display reserved resources in web UI per queue (Contributed by Abdullah Yousufi via Daniel Templeton)

2017-06-30 Thread stevel
YARN-6751. Display reserved resources in web UI per queue
(Contributed by Abdullah Yousufi via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec975197
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec975197
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec975197

Branch: refs/heads/HADOOP-13345
Commit: ec975197799417a1d5727dedc395fe6c15c30eb2
Parents: 441378e
Author: Daniel Templeton 
Authored: Thu Jun 29 16:52:46 2017 -0700
Committer: Daniel Templeton 
Committed: Thu Jun 29 16:53:50 2017 -0700

--
 .../yarn/server/resourcemanager/scheduler/fair/FSQueue.java   | 7 +++
 .../yarn/server/resourcemanager/webapp/FairSchedulerPage.java | 6 --
 .../resourcemanager/webapp/dao/FairSchedulerQueueInfo.java| 6 ++
 3 files changed, 17 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec975197/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index 12b1b83..1016823 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -57,6 +57,7 @@ public abstract class FSQueue implements Queue, Schedulable {
 
   private Resource fairShare = Resources.createResource(0, 0);
   private Resource steadyFairShare = Resources.createResource(0, 0);
+  private Resource reservedResource = Resources.createResource(0, 0);
   private final String name;
   protected final FairScheduler scheduler;
   private final YarnAuthorizationProvider authorizer;
@@ -161,6 +162,12 @@ public abstract class FSQueue implements Queue, 
Schedulable {
 this.maxShare = maxShare;
   }
 
+  public Resource getReservedResource() {
+reservedResource.setMemorySize(metrics.getReservedMB());
+reservedResource.setVirtualCores(metrics.getReservedVirtualCores());
+return reservedResource;
+  }
+
   @Override
   public Resource getMaxShare() {
 return maxShare;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec975197/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
index 544275e..5f46841 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
@@ -75,7 +75,8 @@ public class FairSchedulerPage extends RmView {
   _("Num Active Applications:", qinfo.getNumActiveApplications()).
   _("Num Pending Applications:", qinfo.getNumPendingApplications()).
   _("Min Resources:", qinfo.getMinResources().toString()).
-  _("Max Resources:", qinfo.getMaxResources().toString());
+  _("Max Resources:", qinfo.getMaxResources().toString()).
+  _("Reserved Resources:", qinfo.getReservedResources().toString());
   int maxApps = qinfo.getMaxApplications();
   if (maxApps < Integer.MAX_VALUE) {
   ri._("Max Running Applications:", qinfo.getMaxApplications());
@@ -103,7 +104,8 @@ public class FairSchedulerPage extends RmView {
   ResponseInfo ri = info("\'" + qinfo.getQueueName() + "\' Queue Status").
   _("Used Resources:", qinfo.getUsedResources().toString()).
   _("Min Resources:", qinfo.getMinResources().toString()).
- 

[15/35] hadoop git commit: HADOOP-14190. Add more on S3 regions to the s3a documentation. Contributed by Steve Loughran

2017-06-30 Thread stevel
HADOOP-14190. Add more on S3 regions to the s3a documentation.
Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee243e52
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee243e52
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee243e52

Branch: refs/heads/HADOOP-13345
Commit: ee243e5289212aa2912d191035802ea023367e19
Parents: fb5ee3f
Author: Steve Loughran 
Authored: Wed Jun 28 10:22:13 2017 +0100
Committer: Steve Loughran 
Committed: Wed Jun 28 10:22:13 2017 +0100

--
 .../src/site/markdown/tools/hadoop-aws/index.md | 109 +++
 .../hadoop-aws/src/test/resources/core-site.xml |  81 ++
 2 files changed, 168 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee243e52/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 8c8df1b..182f060 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -29,7 +29,9 @@ HADOOP_OPTIONAL_TOOLS in hadoop-env.sh has 'hadoop-aws' in 
the list.
 
 ### Features
 
-**NOTE: `s3:` has been phased out. Use `s3n:` or `s3a:` instead.**
+**NOTE: `s3:` has been phased out; `s3n:`, while still
+distributed, should now be considered deprecated.
+Please use `s3a:` as the connector to data hosted in S3.**
 
 1. The second-generation, `s3n:` filesystem, making it easy to share
 data between hadoop and other applications via the S3 object store.
@@ -892,7 +894,7 @@ from placing its declaration on the command line.
   any call to setReadahead() is made to an open stream.
 
 
-### Configurations different S3 buckets
+### Configuring different S3 buckets
 
 Different S3 buckets can be accessed with different S3A client configurations.
 This allows for different endpoints, data read and write strategies, as well
@@ -964,10 +966,11 @@ then declare the path to the appropriate credential file 
in
 a bucket-specific version of the property 
`fs.s3a.security.credential.provider.path`.
 
 
-### Working with buckets in different regions
+### Using Per-Bucket Configuration to access data round the world
 
-S3 Buckets are hosted in different regions, the default being US-East.
-The client talks to it by default, under the URL `s3.amazonaws.com`
+S3 Buckets are hosted in different "regions", the default being "US-East".
+The S3A client talks to this region by default, issuing HTTP requests
+to the server `s3.amazonaws.com`.
 
 S3A can work with buckets from any region. Each region has its own
 S3 endpoint, documented [by 
Amazon](http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region).
@@ -987,50 +990,112 @@ While it is generally simpler to use the default 
endpoint, working with
 V4-signing-only regions (Frankfurt, Seoul) requires the endpoint to be 
identified.
 Expect better performance from direct connections —traceroute will give you 
some insight.
 
-Examples:
+If the wrong endpoint is used, the request may fail. This may be reported as a 
301/redirect error,
+or as a 400 Bad Request: take these as cues to check the endpoint setting of
+a bucket.
 
-The default endpoint:
+Here is a list of properties defining all AWS S3 regions, current as of June 
2017:
 
 ```xml
+
 
-  fs.s3a.endpoint
+  central.endpoint
   s3.amazonaws.com
 
-```
 
-Frankfurt
+
+  canada.endpoint
+  s3.ca-central-1.amazonaws.com
+
 
-```xml
 
-  fs.s3a.endpoint
+  frankfurt.endpoint
   s3.eu-central-1.amazonaws.com
 
-```
 
-Seoul
+
+  ireland.endpoint
+  s3-eu-west-1.amazonaws.com
+
 
-```xml
 
-  fs.s3a.endpoint
+  london.endpoint
+  s3.eu-west-2.amazonaws.com
+
+
+
+  mumbai.endpoint
+  s3.ap-south-1.amazonaws.com
+
+
+
+  ohio.endpoint
+  s3.us-east-2.amazonaws.com
+
+
+
+  oregon.endpoint
+  s3-us-west-2.amazonaws.com
+
+
+
+  sao-paolo.endpoint
+  s3-sa-east-1.amazonaws.com
+
+
+
+  seoul.endpoint
   s3.ap-northeast-2.amazonaws.com
 
-```
 
-If the wrong endpoint is used, the request may fail. This may be reported as a 
301/redirect error,
-or as a 400 Bad Request.
+
+  singapore.endpoint
+  s3-ap-southeast-1.amazonaws.com
+
+
+
+  sydney.endpoint
+  s3-ap-southeast-2.amazonaws.com
+
+
+
+  tokyo.endpoint
+  s3-ap-northeast-1.amazonaws.com
+
+
+
+  virginia.endpoint
+  ${central.endpoint}
+
+```
 
 
-If you are trying to mix endpoints for different buckets, use a per-bucket 
endpoint
-declaration. For example:
+This list can be used to specify the endpoint of individual buckets, for 
example
+for buckets in the central 

[23/35] hadoop git commit: MAPREDUCE-6536. hadoop-pipes doesn't use maven properties for openssl

2017-06-30 Thread stevel
MAPREDUCE-6536. hadoop-pipes doesn't use maven properties for openssl

Signed-off-by: Ravi Prakash 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20ba86d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20ba86d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20ba86d6

Branch: refs/heads/HADOOP-13345
Commit: 20ba86d66a47492aa2488d01c6c7cc4fcbef1673
Parents: c1edca1
Author: Allen Wittenauer 
Authored: Wed Jun 28 11:53:09 2017 -0700
Committer: Allen Wittenauer 
Committed: Wed Jun 28 17:33:44 2017 -0700

--
 hadoop-tools/hadoop-pipes/pom.xml | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20ba86d6/hadoop-tools/hadoop-pipes/pom.xml
--
diff --git a/hadoop-tools/hadoop-pipes/pom.xml 
b/hadoop-tools/hadoop-pipes/pom.xml
index 1061d9c..457f3d3 100644
--- a/hadoop-tools/hadoop-pipes/pom.xml
+++ b/hadoop-tools/hadoop-pipes/pom.xml
@@ -39,6 +39,9 @@
   
 false
   
+  
+
+  
   
 
   
@@ -53,6 +56,7 @@
   ${basedir}/src
   
 
${sun.arch.data.model}
+${openssl.prefix} 
   
 
   





[16/35] hadoop git commit: HDFS-11870. Add CLI cmd to enable/disable an erasure code policy. Contributed by lufei.

2017-06-30 Thread stevel
HDFS-11870. Add CLI cmd to enable/disable an erasure code policy. Contributed 
by lufei.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f99b6d19
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f99b6d19
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f99b6d19

Branch: refs/heads/HADOOP-13345
Commit: f99b6d19de77c6e730fed8444f8848a7e63d6130
Parents: ee243e5
Author: Wei-Chiu Chuang 
Authored: Wed Jun 28 10:53:52 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Wed Jun 28 10:54:25 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  12 ++
 .../hadoop/hdfs/DistributedFileSystem.java  |  22 +++
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|  22 +++
 .../hadoop/hdfs/protocol/ClientProtocol.java|  17 ++
 .../ClientNamenodeProtocolTranslatorPB.java |  30 
 .../src/main/proto/ClientNamenodeProtocol.proto |   4 +
 .../src/main/proto/erasurecoding.proto  |  14 ++
 ...tNamenodeProtocolServerSideTranslatorPB.java |  28 
 .../namenode/ErasureCodingPolicyManager.java|  56 +++
 .../server/namenode/FSDirErasureCodingOp.java   |  12 ++
 .../hdfs/server/namenode/FSNamesystem.java  |  56 ++-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  16 ++
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  99 +++-
 .../src/site/markdown/HDFSCommands.md   |   4 +
 .../src/site/markdown/HDFSErasureCoding.md  |  12 +-
 .../hadoop/hdfs/TestDistributedFileSystem.java  |  48 ++
 .../test/resources/testErasureCodingConf.xml| 162 +++
 17 files changed, 611 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f99b6d19/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index d114f0f..1f6022c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2783,6 +2783,18 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 namenode.removeErasureCodingPolicy(ecPolicyName);
   }
 
+  public void enableErasureCodingPolicy(String ecPolicyName)
+  throws IOException {
+checkOpen();
+namenode.enableErasureCodingPolicy(ecPolicyName);
+  }
+
+  public void disableErasureCodingPolicy(String ecPolicyName)
+  throws IOException {
+checkOpen();
+namenode.disableErasureCodingPolicy(ecPolicyName);
+  }
+
   public DFSInotifyEventInputStream getInotifyEventStream() throws IOException 
{
 checkOpen();
 return new DFSInotifyEventInputStream(namenode, tracer);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f99b6d19/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index f8af4ab..34c631a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -2619,6 +2619,28 @@ public class DistributedFileSystem extends FileSystem {
   }
 
   /**
+   * Enable erasure coding policy.
+   *
+   * @param ecPolicyName The name of the policy to be enabled.
+   * @throws IOException
+   */
+  public void enableErasureCodingPolicy(String ecPolicyName)
+  throws IOException {
+dfs.enableErasureCodingPolicy(ecPolicyName);
+  }
+
+  /**
+   * Disable erasure coding policy.
+   *
+   * @param ecPolicyName The name of the policy to be disabled.
+   * @throws IOException
+   */
+  public void disableErasureCodingPolicy(String ecPolicyName)
+  throws IOException {
+dfs.disableErasureCodingPolicy(ecPolicyName);
+  }
+
+  /**
* Unset the erasure coding policy from the source path.
*
* @param path The directory to unset the policy

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f99b6d19/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
--
diff --git 
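
A short usage sketch for the two DistributedFileSystem methods this patch adds. The NameNode URI and the policy name are hypothetical examples; use values that exist on your cluster:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class TogglePolicy {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    dfs.enableErasureCodingPolicy("RS-6-3-1024k");   // method added by this patch
    dfs.disableErasureCodingPolicy("RS-6-3-1024k");  // method added by this patch
  }
}
```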

[20/35] hadoop git commit: HADOOP-14609. NPE in AzureNativeFileSystemStore.checkContainer() if StorageException lacks an error code. Contributed by Steve Loughran

2017-06-30 Thread stevel
HADOOP-14609. NPE in AzureNativeFileSystemStore.checkContainer() if 
StorageException lacks an error code. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/990aa34d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/990aa34d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/990aa34d

Branch: refs/heads/HADOOP-13345
Commit: 990aa34de23c625163745ebc338483065d955bbe
Parents: e9d8bdf
Author: Mingliang Liu 
Authored: Wed Jun 28 14:18:59 2017 -0700
Committer: Mingliang Liu 
Committed: Wed Jun 28 14:18:59 2017 -0700

--
 .../hadoop/fs/azure/AzureNativeFileSystemStore.java   | 10 +-
 .../org/apache/hadoop/fs/azure/SelfRenewingLease.java |  4 ++--
 .../apache/hadoop/fs/azure/TestBlobDataValidation.java|  6 +++---
 3 files changed, 10 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/990aa34d/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index d026220..5fa964a 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -1194,8 +1194,8 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 container.downloadAttributes(getInstrumentedContext());
 currentKnownContainerState = ContainerState.Unknown;
   } catch (StorageException ex) {
-if (ex.getErrorCode().equals(
-StorageErrorCode.RESOURCE_NOT_FOUND.toString())) {
+if (StorageErrorCode.RESOURCE_NOT_FOUND.toString()
+.equals(ex.getErrorCode())) {
   currentKnownContainerState = ContainerState.DoesntExist;
 } else {
   throw ex;
@@ -1596,7 +1596,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
   if (t != null && t instanceof StorageException) {
 StorageException se = (StorageException) t;
 // If we got this exception, the blob should have already been created
-if (!se.getErrorCode().equals("LeaseIdMissing")) {
+if (!"LeaseIdMissing".equals(se.getErrorCode())) {
   throw new AzureException(e);
 }
   } else {
@@ -2427,7 +2427,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
   // 2. It got there after one-or-more retries THEN
   // we swallow the exception.
   if (e.getErrorCode() != null &&
-  e.getErrorCode().equals("BlobNotFound") &&
+  "BlobNotFound".equals(e.getErrorCode()) &&
   operationContext.getRequestResults().size() > 1 &&
   operationContext.getRequestResults().get(0).getException() != null) {
 LOG.debug("Swallowing delete exception on retry: {}", e.getMessage());
@@ -2478,7 +2478,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
   Throwable t = e.getCause();
   if(t != null && t instanceof StorageException) {
 StorageException se = (StorageException) t;
-if(se.getErrorCode().equals(("LeaseIdMissing"))){
+if ("LeaseIdMissing".equals(se.getErrorCode())){
   SelfRenewingLease lease = null;
   try {
 lease = acquireLease(key);
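
The recurring fix in this patch is a null-safe comparison: StorageException.getErrorCode() may return null, so the known string constant goes on the left of equals(). A tiny stand-alone illustration, using a plain string instead of the Azure SDK types:

```java
public class NullSafeCompare {
  public static void main(String[] args) {
    String errorCode = null; // e.g. a StorageException with no error code set

    // Old style: errorCode.equals("LeaseIdMissing") would throw an NPE here.
    // Patched style: evaluates to false and the caller can handle it normally.
    if ("LeaseIdMissing".equals(errorCode)) {
      System.out.println("lease id missing");
    } else {
      System.out.println("some other (or null) error code");
    }
  }
}
```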

http://git-wip-us.apache.org/repos/asf/hadoop/blob/990aa34d/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
index 76098f3..00d5e99 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
@@ -82,7 +82,7 @@ public class SelfRenewingLease {
 // Throw again if we don't want to keep waiting.
 // We expect it to be that the lease is already present,
 // or in some cases that the blob does not exist.
-if (!e.getErrorCode().equals("LeaseAlreadyPresent")) {
+if (!"LeaseAlreadyPresent".equals(e.getErrorCode())) {
   LOG.info(
 "Caught exception when trying to get lease on blob "
 + blobWrapper.getUri().toString() + ". " + 

[22/35] hadoop git commit: YARN-6280. Introduce deselect query param to skip ResourceRequest from getApp/getApps REST API. Contributed by Lantao Jin.

2017-06-30 Thread stevel
YARN-6280. Introduce deselect query param to skip ResourceRequest from 
getApp/getApps REST API. Contributed by Lantao Jin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1edca10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1edca10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1edca10

Branch: refs/heads/HADOOP-13345
Commit: c1edca101c32a5999100bc6031784274d416b599
Parents: 4e3eebc
Author: Sunil G 
Authored: Wed Jun 28 15:40:58 2017 -0700
Committer: Sunil G 
Committed: Wed Jun 28 15:40:58 2017 -0700

--
 .../resourcemanager/webapp/DeSelectFields.java  | 127 +
 .../webapp/RMWebServiceProtocol.java|   7 +-
 .../resourcemanager/webapp/RMWebServices.java   |  18 +-
 .../resourcemanager/webapp/dao/AppInfo.java |  29 ++-
 .../webapp/TestRMWebServices.java   |   4 +-
 .../webapp/TestRMWebServicesApps.java   |  64 +++
 .../src/site/markdown/ResourceManagerRest.md| 178 ++-
 7 files changed, 409 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1edca10/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
new file mode 100644
index 000..258bbfa
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DeSelectFields.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+/**
+ * DeSelectFields makes the /apps API more flexible.
+ * It can be used to strip off more fields if there's such a use case in future.
+ * You can simply extend it via two steps:
+ *  1. add a DeSelectType enum with a string literal
+ *  2. write your logic based on
+ * the return of method contains(DeSelectType)
+ */
+public class DeSelectFields {
+  private static final Log LOG =
+  LogFactory.getLog(DeSelectFields.class.getName());
+
+  private final Set types;
+
+  public DeSelectFields() {
+this.types = new HashSet();
+  }
+
+  /**
+   * Initial DeSelectFields with unselected fields.
+   * @param unselectedFields a set of unselected field.
+   */
+  public void initFields(Set unselectedFields) {
+if (unselectedFields == null) {
+  return;
+}
+for (String field : unselectedFields) {
+  if (!field.trim().isEmpty()) {
+String[] literalsArray = field.split(",");
+for (String literals : literalsArray) {
+  if (literals != null && !literals.trim().isEmpty()) {
+DeSelectType type = DeSelectType.obtainType(literals);
+if (type == null) {
+  LOG.warn("Invalid deSelects string " + literals.trim());
+  DeSelectType[] typeArray = DeSelectType.values();
+  String allSuppportLiterals = Arrays.toString(typeArray);
+  throw new BadRequestException("Invalid deSelects string "
+  + literals.trim() + " specified. It should be one of "
+  + allSuppportLiterals);
+} else {
+  this.types.add(type);
+}
+  }
+}
+  }
+}
+  }
+
+  /**
+   * Determine the 
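
A sketch of how a client might exercise the new query parameter. The ResourceManager address and the literal `deSelects=resourceRequests` reflect a reading of this patch (including its ResourceManagerRest.md changes) and should be verified against your cluster:

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class DeSelectsExample {
  public static void main(String[] args) throws Exception {
    URL url = new URL(
        "http://rm-host:8088/ws/v1/cluster/apps?deSelects=resourceRequests");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line); // apps JSON without the resourceRequests field
      }
    }
  }
}
```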

[27/35] hadoop git commit: HADOOP-14601. Azure: Reuse ObjectMapper. Contributed by Mingliang Liu

2017-06-30 Thread stevel
HADOOP-14601. Azure: Reuse ObjectMapper. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b08cc973
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b08cc973
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b08cc973

Branch: refs/heads/HADOOP-13345
Commit: b08cc973964b4eb7e7a7445a440b19d3a0f3d4d5
Parents: 0c52da7
Author: Mingliang Liu 
Authored: Tue Jun 27 16:27:09 2017 -0700
Committer: Mingliang Liu 
Committed: Thu Jun 29 09:22:00 2017 -0700

--
 .../org/apache/hadoop/fs/azure/NativeAzureFileSystem.java |  8 +---
 .../apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java | 10 +-
 .../apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java  |  7 ---
 3 files changed, 14 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b08cc973/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index d605e81..22f79ff 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -46,6 +46,7 @@ import com.fasterxml.jackson.core.JsonParseException;
 import com.fasterxml.jackson.core.JsonParser;
 import com.fasterxml.jackson.databind.JsonMappingException;
 import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectReader;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -109,6 +110,9 @@ public class NativeAzureFileSystem extends FileSystem {
 private static final int FORMATTING_BUFFER = 1;
 private boolean committed;
 public static final String SUFFIX = "-RenamePending.json";
+private static final ObjectReader READER = new ObjectMapper()
+.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true)
+.readerFor(JsonNode.class);
 
 // Prepare in-memory information needed to do or redo a folder rename.
 public FolderRenamePending(String srcKey, String dstKey, SelfRenewingLease 
lease,
@@ -168,11 +172,9 @@ public class NativeAzureFileSystem extends FileSystem {
   String contents = new String(bytes, 0, l, Charset.forName("UTF-8"));
 
   // parse the JSON
-  ObjectMapper objMapper = new ObjectMapper();
-  objMapper.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true);
   JsonNode json = null;
   try {
-json = objMapper.readValue(contents, JsonNode.class);
+json = READER.readValue(contents);
 this.committed = true;
   } catch (JsonMappingException e) {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b08cc973/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
index 387d911..0e9c700 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
@@ -24,6 +24,7 @@ import java.net.URISyntaxException;
 import java.net.UnknownHostException;
 import java.security.PrivilegedExceptionAction;
 
+import com.fasterxml.jackson.databind.ObjectReader;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.Validate;
 import org.apache.hadoop.conf.Configuration;
@@ -56,6 +57,9 @@ public class RemoteSASKeyGeneratorImpl extends 
SASKeyGeneratorImpl {
   public static final Logger LOG =
   LoggerFactory.getLogger(AzureNativeFileSystemStore.class);
 
+  private static final ObjectReader RESPONSE_READER = new ObjectMapper()
+  .readerFor(RemoteSASKeyGenerationResponse.class);
+
   /**
* Container SAS Key generation OP name. {@value}
*/
@@ -276,11 +280,7 @@ public class RemoteSASKeyGeneratorImpl extends 
SASKeyGeneratorImpl {
 httpGet.setHeader("Cookie", AuthenticatedURL.AUTH_COOKIE + "=" + 
token);
   }
   String responseBody = remoteCallHelper.makeRemoteGetRequest(httpGet);
-
-  ObjectMapper objectMapper = new ObjectMapper();
-  return
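
The pattern behind this commit is to reuse Jackson readers: ObjectMapper and ObjectReader are thread-safe once configured, so they can be built once in a static field and shared rather than constructed per request. A minimal stand-alone sketch of that idiom:

```java
import java.io.IOException;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;

public final class ResponseParser {
  // Built once; safe to share across threads after configuration.
  private static final ObjectReader MAP_READER =
      new ObjectMapper().readerFor(Map.class);

  static Map<?, ?> parse(String json) throws IOException {
    return MAP_READER.readValue(json);
  }

  public static void main(String[] args) throws IOException {
    System.out.println(parse("{\"responseCode\": 0}"));
  }
}
```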