hadoop git commit: YARN-4756. Unnecessary wait in Node Status Updater during reboot. (Eric Badger via kasha)

2016-04-07 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 413e5b492 -> ddb140798


YARN-4756. Unnecessary wait in Node Status Updater during reboot. (Eric Badger via kasha)

(cherry picked from commit e82f961a3925aadf9e53a009820a48ba9e4f78b6)
(cherry picked from commit 2b97a50eec8e9f7167a44b8ca0391fce0aae571c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ddb14079
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ddb14079
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ddb14079

Branch: refs/heads/branch-2.8
Commit: ddb1407980f26c1b8435d566edb43c159abf1060
Parents: 413e5b4
Author: Karthik Kambatla 
Authored: Thu Apr 7 17:05:29 2016 -0700
Committer: Karthik Kambatla 
Committed: Thu Apr 7 17:35:06 2016 -0700

--
 .../nodemanager/NodeStatusUpdaterImpl.java  |  1 +
 .../nodemanager/TestNodeManagerResync.java  | 33 +---
 2 files changed, 23 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddb14079/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index ad983fe..72769bf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -284,6 +284,7 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
 return;
   }
   this.isStopped = true;
+  sendOutofBandHeartBeat();
   try {
 statusUpdater.join();
 registerWithRM();
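
Why the one-liner above is enough: the status-updater thread spends most of its life sleeping between heartbeats, and rebootNodeStatusUpdaterAndRegisterWithRM() joins that thread, so without a wake-up the reboot can stall for up to a full heartbeat interval. sendOutofBandHeartBeat() notifies the sleeping thread so the join returns promptly. A minimal, self-contained sketch of that wake-up pattern (the monitor, field names, and interval are illustrative, not the NodeManager's actual internals):

    import java.util.concurrent.atomic.AtomicBoolean;

    // Sketch: a heartbeat loop that can be woken out-of-band, so a caller
    // joining the thread does not wait out a full heartbeat interval.
    public class HeartbeatSketch {
      private final Object heartbeatMonitor = new Object();
      private final AtomicBoolean stopped = new AtomicBoolean(false);
      private final long intervalMs = 1000;  // illustrative interval

      final Thread statusUpdater = new Thread(() -> {
        while (!stopped.get()) {
          // ... send a heartbeat to the RM here ...
          synchronized (heartbeatMonitor) {
            try {
              heartbeatMonitor.wait(intervalMs);  // sleeps unless notified early
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
              return;
            }
          }
        }
      });

      void sendOutOfBandHeartbeat() {
        synchronized (heartbeatMonitor) {
          heartbeatMonitor.notifyAll();  // wake the updater immediately
        }
      }

      void reboot() throws InterruptedException {
        stopped.set(true);
        sendOutOfBandHeartbeat();  // without this, join() can wait ~intervalMs
        statusUpdater.join();
      }

      public static void main(String[] args) throws InterruptedException {
        HeartbeatSketch s = new HeartbeatSketch();
        s.statusUpdater.start();
        Thread.sleep(100);  // let the updater park in wait()
        long t0 = System.nanoTime();
        s.reboot();
        System.out.printf("join returned in %d ms%n",
            (System.nanoTime() - t0) / 1_000_000);
      }
    }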

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddb14079/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
index e8c4634..b3d44f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
@@ -108,6 +108,7 @@ public class TestNodeManagerResync {
   static final String user = "nobody";
   private FileContext localFS;
   private CyclicBarrier syncBarrier;
+  private CyclicBarrier updateBarrier;
   private AtomicBoolean assertionFailedInThread = new AtomicBoolean(false);
   private AtomicBoolean isNMShutdownCalled = new AtomicBoolean(false);
   private final NodeManagerEvent resyncEvent =
@@ -125,6 +126,7 @@ public class TestNodeManagerResync {
 remoteLogsDir.mkdirs();
 nmLocalDir.mkdirs();
 syncBarrier = new CyclicBarrier(2);
+updateBarrier = new CyclicBarrier(2);
   }
 
   @After
@@ -803,9 +805,11 @@ public class TestNodeManagerResync {
 
.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
 assertEquals(Resource.newInstance(1024, 1),
 containerStatus.getCapability());
+updateBarrier.await();
 // Call the actual rebootNodeStatusUpdaterAndRegisterWithRM().
 // This function should be synchronized with
 // increaseContainersResource().
+updateBarrier.await();
 super.rebootNodeStatusUpdaterAndRegisterWithRM();
 // Check status after registerWithRM
 containerStatus = getContainerManager()
@@ -831,17 +835,24 @@ public class TestNodeManagerResync {
 List<Token> increaseTokens = new ArrayList<Token>();
 // Add increase request.
 Resource targetResource = Resource.newInstance(4096, 2);
-try {
-  
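
The updateBarrier added above is awaited twice in a row. With a CyclicBarrier(2), each await() is a rendezvous of both threads, so the gap between the two awaits is a window in which the test can interleave the container-resource update with the reboot at an exact point. A hedged, self-contained sketch of the double-await handshake (thread roles and messages are illustrative, not the test's actual bodies):

    import java.util.concurrent.BrokenBarrierException;
    import java.util.concurrent.CyclicBarrier;

    // Sketch: a two-phase CyclicBarrier handshake. Two consecutive await()
    // calls park one thread while the other runs a step at a known point.
    public class BarrierHandshake {
      public static void main(String[] args) throws Exception {
        CyclicBarrier updateBarrier = new CyclicBarrier(2);

        Thread updater = new Thread(() -> {
          try {
            updateBarrier.await();  // phase 1: both threads have arrived
            updateBarrier.await();  // phase 2: parked until main releases us
            System.out.println("updater: finishing the resource update");
          } catch (InterruptedException | BrokenBarrierException e) {
            Thread.currentThread().interrupt();
          }
        });
        updater.start();

        updateBarrier.await();  // phase 1: wait for the updater to arrive
        System.out.println("main: updater parked; run the reboot step here");
        updateBarrier.await();  // phase 2: release the updater
        updater.join();
      }
    }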

hadoop git commit: YARN-4756. Unnecessary wait in Node Status Updater during reboot. (Eric Badger via kasha)

2016-04-07 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 46352de1a -> 2b97a50ee


YARN-4756. Unnecessary wait in Node Status Updater during reboot. (Eric Badger via kasha)

(cherry picked from commit e82f961a3925aadf9e53a009820a48ba9e4f78b6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b97a50e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b97a50e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b97a50e

Branch: refs/heads/branch-2
Commit: 2b97a50eec8e9f7167a44b8ca0391fce0aae571c
Parents: 46352de
Author: Karthik Kambatla 
Authored: Thu Apr 7 17:05:29 2016 -0700
Committer: Karthik Kambatla 
Committed: Thu Apr 7 17:30:54 2016 -0700

--
 .../nodemanager/NodeStatusUpdaterImpl.java  |  1 +
 .../nodemanager/TestNodeManagerResync.java  | 33 +---
 2 files changed, 23 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b97a50e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index ad983fe..72769bf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -284,6 +284,7 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
 return;
   }
   this.isStopped = true;
+  sendOutofBandHeartBeat();
   try {
 statusUpdater.join();
 registerWithRM();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b97a50e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
index e8c4634..b3d44f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
@@ -108,6 +108,7 @@ public class TestNodeManagerResync {
   static final String user = "nobody";
   private FileContext localFS;
   private CyclicBarrier syncBarrier;
+  private CyclicBarrier updateBarrier;
   private AtomicBoolean assertionFailedInThread = new AtomicBoolean(false);
   private AtomicBoolean isNMShutdownCalled = new AtomicBoolean(false);
   private final NodeManagerEvent resyncEvent =
@@ -125,6 +126,7 @@ public class TestNodeManagerResync {
 remoteLogsDir.mkdirs();
 nmLocalDir.mkdirs();
 syncBarrier = new CyclicBarrier(2);
+updateBarrier = new CyclicBarrier(2);
   }
 
   @After
@@ -803,9 +805,11 @@ public class TestNodeManagerResync {
 
.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
 assertEquals(Resource.newInstance(1024, 1),
 containerStatus.getCapability());
+updateBarrier.await();
 // Call the actual rebootNodeStatusUpdaterAndRegisterWithRM().
 // This function should be synchronized with
 // increaseContainersResource().
+updateBarrier.await();
 super.rebootNodeStatusUpdaterAndRegisterWithRM();
 // Check status after registerWithRM
 containerStatus = getContainerManager()
@@ -831,17 +835,24 @@ public class TestNodeManagerResync {
 List<Token> increaseTokens = new ArrayList<Token>();
 // Add increase request.
 Resource targetResource = Resource.newInstance(4096, 2);
-try {
-  increaseTokens.add(getContainerToken(targetResource));
-  

hadoop git commit: YARN-4756. Unnecessary wait in Node Status Updater during reboot. (Eric Badger via kasha)

2016-04-07 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk a62637a41 -> e82f961a3


YARN-4756. Unnecessary wait in Node Status Updater during reboot. (Eric Badger via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e82f961a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e82f961a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e82f961a

Branch: refs/heads/trunk
Commit: e82f961a3925aadf9e53a009820a48ba9e4f78b6
Parents: a62637a
Author: Karthik Kambatla 
Authored: Thu Apr 7 17:05:29 2016 -0700
Committer: Karthik Kambatla 
Committed: Thu Apr 7 17:05:29 2016 -0700

--
 .../nodemanager/NodeStatusUpdaterImpl.java  |  1 +
 .../nodemanager/TestNodeManagerResync.java  | 33 +---
 2 files changed, 23 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e82f961a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index ad983fe..72769bf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -284,6 +284,7 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
 return;
   }
   this.isStopped = true;
+  sendOutofBandHeartBeat();
   try {
 statusUpdater.join();
 registerWithRM();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e82f961a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
index e8c4634..b3d44f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
@@ -108,6 +108,7 @@ public class TestNodeManagerResync {
   static final String user = "nobody";
   private FileContext localFS;
   private CyclicBarrier syncBarrier;
+  private CyclicBarrier updateBarrier;
   private AtomicBoolean assertionFailedInThread = new AtomicBoolean(false);
   private AtomicBoolean isNMShutdownCalled = new AtomicBoolean(false);
   private final NodeManagerEvent resyncEvent =
@@ -125,6 +126,7 @@ public class TestNodeManagerResync {
 remoteLogsDir.mkdirs();
 nmLocalDir.mkdirs();
 syncBarrier = new CyclicBarrier(2);
+updateBarrier = new CyclicBarrier(2);
   }
 
   @After
@@ -803,9 +805,11 @@ public class TestNodeManagerResync {
 
.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
 assertEquals(Resource.newInstance(1024, 1),
 containerStatus.getCapability());
+updateBarrier.await();
 // Call the actual rebootNodeStatusUpdaterAndRegisterWithRM().
 // This function should be synchronized with
 // increaseContainersResource().
+updateBarrier.await();
 super.rebootNodeStatusUpdaterAndRegisterWithRM();
 // Check status after registerWithRM
 containerStatus = getContainerManager()
@@ -831,17 +835,24 @@ public class TestNodeManagerResync {
 List<Token> increaseTokens = new ArrayList<Token>();
 // Add increase request.
 Resource targetResource = Resource.newInstance(4096, 2);
-try {
-  increaseTokens.add(getContainerToken(targetResource));
-  IncreaseContainersResourceRequest increaseRequest =
-  

[28/50] [abbrv] hadoop git commit: YARN-4769. Add support for CSRF header in the dump capacity scheduler logs and kill app buttons in RM web UI. Contributed by Varun Vasudev

2016-04-07 Thread wangda
YARN-4769. Add support for CSRF header in the dump capacity scheduler logs and kill app buttons in RM web UI. Contributed by Varun Vasudev


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93bacda0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93bacda0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93bacda0

Branch: refs/heads/YARN-3368
Commit: 93bacda08bc546612f9278b31f5c38107867630a
Parents: aede8c1
Author: Jian He 
Authored: Wed Apr 6 16:13:47 2016 -0700
Committer: Jian He 
Committed: Wed Apr 6 16:13:47 2016 -0700

--
 .../security/http/RestCsrfPreventionFilter.java |  2 +-
 .../hadoop/yarn/server/webapp/AppBlock.java | 20 
 .../webapp/CapacitySchedulerPage.java   |  2 ++
 3 files changed, 23 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93bacda0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java
index c0f7e39..33579b4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java
@@ -62,7 +62,7 @@ public class RestCsrfPreventionFilter implements Filter {
   public static final String CUSTOM_METHODS_TO_IGNORE_PARAM =
   "methods-to-ignore";
   static final String  BROWSER_USER_AGENTS_DEFAULT = "^Mozilla.*,^Opera.*";
-  static final String HEADER_DEFAULT = "X-XSRF-HEADER";
+  public static final String HEADER_DEFAULT = "X-XSRF-HEADER";
   static final String  METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE";
   private String  headerName = HEADER_DEFAULT;
  private Set<String> methodsToIgnore = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93bacda0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index 44ed223..69beef2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -24,12 +24,14 @@ import static org.apache.hadoop.yarn.webapp.YarnWebParams.WEB_UI_TYPE;
 
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
+import java.util.Map;
 
 import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.http.RestCsrfPreventionFilter;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
@@ -143,6 +145,7 @@ public class AppBlock extends HtmlBlock {
   .append(" type: 'PUT',")
   .append(" url: '/ws/v1/cluster/apps/").append(aid).append("/state',")
   .append(" contentType: 'application/json',")
+  .append(getCSRFHeaderString(conf))
   .append(" data: '{\"state\":\"KILLED\"}',")
   .append(" dataType: 'json'")
   .append(" }).done(function(data){")
@@ -369,4 +372,21 @@ public class AppBlock extends HtmlBlock {
   protected LogAggregationStatus getLogAggregationStatus() {
 return null;
   }
+
+  public static String getCSRFHeaderString(Configuration conf) {
+String ret = "";
+if (conf.getBoolean(YarnConfiguration.RM_CSRF_ENABLED, false)) {
+  ret = " headers : { '";
+  Map<String, String> filterParams = RestCsrfPreventionFilter
+  .getFilterParams(conf, YarnConfiguration.RM_CSRF_PREFIX);
+  if (filterParams
+  .containsKey(RestCsrfPreventionFilter.CUSTOM_HEADER_PARAM)) {
+ret += filterParams.get(RestCsrfPreventionFilter.CUSTOM_HEADER_PARAM);
+
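
For context: the JavaScript that AppBlock emits above issues a PUT to /ws/v1/cluster/apps/<app-id>/state with body {"state":"KILLED"}, and with CSRF protection enabled it must also carry the configured header (X-XSRF-HEADER by default, per the RestCsrfPreventionFilter change above). A hedged Java sketch of an equivalent client call; the RM host, port, and application id are placeholders:

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    // Sketch: kill a YARN app via the RM REST API, supplying the CSRF header.
    // Host, port, and application id below are illustrative placeholders.
    public class KillAppSketch {
      public static void main(String[] args) throws Exception {
        URL url = new URL(
            "http://rm-host:8088/ws/v1/cluster/apps/application_1234567890123_0001/state");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");
        conn.setRequestProperty("Content-Type", "application/json");
        // Default name from RestCsrfPreventionFilter.HEADER_DEFAULT; the stock
        // filter checks only that the header is present, not its value.
        conn.setRequestProperty("X-XSRF-HEADER", "true");
        conn.setDoOutput(true);
        try (OutputStream out = conn.getOutputStream()) {
          out.write("{\"state\":\"KILLED\"}".getBytes(StandardCharsets.UTF_8));
        }
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }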

[43/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-04-07 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7ef482a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
new file mode 100644
index 0000000..4e68da0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+import Constants from 'yarn-ui/constants';
+
+moduleFor('route:yarn-container-log', 'Unit | Route | ContainerLog', {
+});
+
+test('Basic creation test', function(assert) {
+  let route = this.subject();
+  assert.ok(route);
+  assert.ok(route.model);
+});
+
+test('Test getting container log', function(assert) {
+  var response = {
+  logs: "This is syslog",
+  containerID: "container_e32_1456000363780_0002_01_01",
+  logFileName: "syslog"};
+  var store = {
+findRecord: function(type) {
+  return new Ember.RSVP.Promise(function(resolve) {
+resolve(response);
+  }
+)}
+  };
+  assert.expect(6);
+  var route = this.subject();
+  route.set('store', store);
+  var model = route.model({node_id: "localhost:64318",
+  node_addr: "localhost:8042",
+  container_id: "container_e32_1456000363780_0002_01_01",
+  filename: "syslog"});
+   model.then(function(value) {
+ assert.ok(value);
+ assert.ok(value.containerLog);
+ assert.deepEqual(value.containerLog, response);
+ assert.ok(value.nodeInfo);
+ assert.equal(value.nodeInfo.addr, 'localhost:8042');
+ assert.equal(value.nodeInfo.id, 'localhost:64318');
+   });
+});
+
+/**
+ * This can happen when an empty response is sent from server
+ */
+test('Test non HTTP error while getting container log', function(assert) {
+  var error = {};
+  var response = {
+  logs: "",
+  containerID: "container_e32_1456000363780_0002_01_01",
+  logFileName: "syslog"};
+  var store = {
+findRecord: function(type) {
+  return new Ember.RSVP.Promise(function(resolve, reject) {
+reject(error);
+  }
+)}
+  };
+  assert.expect(6);
+  var route = this.subject();
+  route.set('store', store);
+  var model = route.model({node_id: "localhost:64318",
+  node_addr: "localhost:8042",
+  container_id: "container_e32_1456000363780_0002_01_01",
+  filename: "syslog"});
+   model.then(function(value) {
+ assert.ok(value);
+ assert.ok(value.containerLog);
+ assert.deepEqual(value.containerLog, response);
+ assert.ok(value.nodeInfo);
+ assert.equal(value.nodeInfo.addr, 'localhost:8042');
+ assert.equal(value.nodeInfo.id, 'localhost:64318');
+   });
+});
+
+test('Test HTTP error while getting container log', function(assert) {
+  var error = {errors: [{status: 404, responseText: 'Not Found'}]};
+  var response = {
+  logs: "",
+  containerID: "container_e32_1456000363780_0002_01_01",
+  logFileName: "syslog"};
+  var store = {
+findRecord: function(type) {
+  return new Ember.RSVP.Promise(function(resolve, reject) {
+reject(error);
+  }
+)}
+  };
+  assert.expect(5);
+  var route = this.subject();
+  route.set('store', store);
+  var model = route.model({node_id: "localhost:64318",
+  node_addr: "localhost:8042",
+  container_id: "container_e32_1456000363780_0002_01_01",
+  filename: "syslog"});
+   model.then(function(value) {
+ assert.ok(value);
+ assert.ok(value.errors);
+ assert.equal(value.errors.length, 1);
+ assert.equal(value.errors[0].status, 404);
+ assert.equal(value.errors[0].responseText, 'Not Found');
+   });
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7ef482a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-app-test.js
--
diff 

[35/50] [abbrv] hadoop git commit: HDFS-9719. Refactoring ErasureCodingWorker into smaller reusable constructs. Contributed by Kai Zheng.

2016-04-07 Thread wangda
HDFS-9719. Refactoring ErasureCodingWorker into smaller reusable constructs. Contributed by Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c18a53c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c18a53c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c18a53c

Branch: refs/heads/YARN-3368
Commit: 3c18a53cbd2efabb2ad108d63a0b0b558424115f
Parents: 8d29e24
Author: Uma Maheswara Rao G 
Authored: Wed Apr 6 22:50:24 2016 -0700
Committer: Uma Maheswara Rao G 
Committed: Wed Apr 6 22:50:24 2016 -0700

--
 .../hadoop/hdfs/util/StripedBlockUtil.java  |   22 +-
 .../erasurecode/ErasureCodingWorker.java| 1016 +-
 .../erasurecode/StripedBlockReader.java |  202 
 .../erasurecode/StripedBlockWriter.java |  196 
 .../datanode/erasurecode/StripedReader.java |  466 
 .../erasurecode/StripedReconstructor.java   |  273 +
 .../datanode/erasurecode/StripedWriter.java |  313 ++
 .../datanode/erasurecode/package-info.java  |   26 +
 .../hadoop/hdfs/TestReconstructStripedFile.java |   11 +-
 9 files changed, 1555 insertions(+), 970 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c18a53c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
index 0819376..c8827d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
@@ -141,6 +141,12 @@ public class StripedBlockUtil {
 return locatedBlock;
   }
 
+  public static ExtendedBlock constructInternalBlock(
+  ExtendedBlock blockGroup, ErasureCodingPolicy ecPolicy,
+  int idxInBlockGroup) {
+return constructInternalBlock(blockGroup, ecPolicy.getCellSize(),
+ecPolicy.getNumDataUnits(), idxInBlockGroup);
+  }
   /**
* This method creates an internal {@link ExtendedBlock} at the given index
* of a block group.
@@ -154,21 +160,28 @@ public class StripedBlockUtil {
 return block;
   }
 
+  public static long getInternalBlockLength(long dataSize,
+ErasureCodingPolicy ecPolicy,
+int idxInBlockGroup) {
+return getInternalBlockLength(dataSize, ecPolicy.getCellSize(),
+ecPolicy.getNumDataUnits(), idxInBlockGroup);
+  }
+
   /**
* Get the size of an internal block at the given index of a block group
*
* @param dataSize Size of the block group only counting data blocks
* @param cellSize The size of a striping cell
* @param numDataBlocks The number of data blocks
-   * @param i The logical index in the striped block group
+   * @param idxInBlockGroup The logical index in the striped block group
* @return The size of the internal block at the specified index
*/
   public static long getInternalBlockLength(long dataSize,
-  int cellSize, int numDataBlocks, int i) {
+  int cellSize, int numDataBlocks, int idxInBlockGroup) {
 Preconditions.checkArgument(dataSize >= 0);
 Preconditions.checkArgument(cellSize > 0);
 Preconditions.checkArgument(numDataBlocks > 0);
-Preconditions.checkArgument(i >= 0);
+Preconditions.checkArgument(idxInBlockGroup >= 0);
 // Size of each stripe (only counting data blocks)
 final int stripeSize = cellSize * numDataBlocks;
 // If block group ends at stripe boundary, each internal block has an equal
@@ -180,7 +193,8 @@ public class StripedBlockUtil {
 
 final int numStripes = (int) ((dataSize - 1) / stripeSize + 1);
 return (numStripes - 1L)*cellSize
-+ lastCellSize(lastStripeDataLen, cellSize, numDataBlocks, i);
++ lastCellSize(lastStripeDataLen, cellSize,
+numDataBlocks, idxInBlockGroup);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c18a53c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index 4bcb291..e7c5abc 100644
--- 
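
The arithmetic in getInternalBlockLength above can be summarized as: full stripes give every internal block one full cell; only the last, partial stripe is uneven. A simplified, self-contained model of that computation, covering data blocks only (parity handling and the lastCellSize helper are elided from this excerpt, so this is an assumption-laden sketch, not the HDFS implementation):

    // Simplified model of the internal-block-length math from the hunk above.
    // Data blocks only; parity blocks and edge cases are deliberately omitted.
    public class StripeMath {
      static long internalBlockLength(long dataSize, int cellSize,
          int numDataBlocks, int idxInBlockGroup) {
        final long stripeSize = (long) cellSize * numDataBlocks;
        final long lastStripeDataLen = dataSize % stripeSize;
        if (lastStripeDataLen == 0) {
          // Block group ends on a stripe boundary: equal share for every block.
          return dataSize / numDataBlocks;
        }
        final long numStripes = (dataSize - 1) / stripeSize + 1;
        // Each full stripe contributes one full cell to this block; the last,
        // partial stripe contributes between 0 and cellSize bytes.
        final long lastCell = Math.min(
            Math.max(lastStripeDataLen - (long) idxInBlockGroup * cellSize, 0),
            cellSize);
        return (numStripes - 1) * cellSize + lastCell;
      }

      public static void main(String[] args) {
        // RS(6,3)-style layout: 6 data blocks, 64 KiB cells, 1 MiB + 1000 bytes.
        long dataSize = (1 << 20) + 1000;
        for (int i = 0; i < 6; i++) {
          System.out.printf("block %d -> %d bytes%n",
              i, internalBlockLength(dataSize, 64 << 10, 6, i));
        }
      }
    }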

[40/50] [abbrv] hadoop git commit: YARN-4517. Add nodes page and fix bunch of license issues. (Varun Saxena via wangda)

2016-04-07 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6139a7d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/error.hbs
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/error.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/error.hbs
new file mode 100644
index 0000000..c546bf7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/error.hbs
@@ -0,0 +1,19 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}
+
+Sorry, Error Occured.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6139a7d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/notfound.hbs
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/notfound.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/notfound.hbs
new file mode 100644
index 0000000..588ea44
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/notfound.hbs
@@ -0,0 +1,20 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}
+
+404, Not Found
+Please Check your URL

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6139a7d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs
index e58d6bd..3a79080 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs
@@ -1,3 +1,3 @@
 {{app-table table-id="apps-table" arr=model}}
-{{simple-table table-id="apps-table" bFilter=true colTypes="elapsed-time" colTargets="7"}}
-{{outlet}}
\ No newline at end of file
+{{simple-table table-id="apps-table" bFilter=true colsOrder="0,desc" colTypes="natural elapsed-time" colTargets="0 7"}}
+{{outlet}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6139a7d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-container-log.hbs
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-container-log.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-container-log.hbs
new file mode 100644
index 0000000..9cc3b0f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-container-log.hbs
@@ -0,0 +1,36 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}

[20/50] [abbrv] hadoop git commit: HDFS-10235. Last contact for Live Nodes should be relative time. Contributed by Brahma Reddy Battula.

2016-04-07 Thread wangda
HDFS-10235. Last contact for Live Nodes should be relative time. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0cd320a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0cd320a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0cd320a8

Branch: refs/heads/YARN-3368
Commit: 0cd320a8463efe19a6228f9fe14693aa37ac8a10
Parents: 500e5a5
Author: Ravi Prakash 
Authored: Tue Apr 5 13:41:19 2016 -0700
Committer: Ravi Prakash 
Committed: Tue Apr 5 13:41:19 2016 -0700

--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd320a8/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 3d9ca42..a9c3304 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -310,7 +310,7 @@
   {#LiveNodes}
   
 {name} ({xferaddr})
-{#helper_relative_time value="{lastContact}"/}
+{lastContact}s
 
   
 {capacity|fmt_bytes}



[05/50] [abbrv] hadoop git commit: HDFS-9599. TestDecommissioningStatus.testDecommissionStatus occasionally fails (Lin Yiqun via iwasakims)

2016-04-07 Thread wangda
HDFS-9599. TestDecommissioningStatus.testDecommissionStatus occasionally fails (Lin Yiqun via iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/154d2532
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/154d2532
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/154d2532

Branch: refs/heads/YARN-3368
Commit: 154d2532cf015e9ab9141864bd3ab0d6100ef597
Parents: 7280550
Author: Masatake Iwasaki 
Authored: Tue Apr 5 03:19:48 2016 +0900
Committer: Masatake Iwasaki 
Committed: Tue Apr 5 03:19:48 2016 +0900

--
 .../hdfs/server/namenode/TestDecommissioningStatus.java | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/154d2532/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index 789ee6f..1e7312a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -57,8 +57,8 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 
 /**
@@ -78,8 +78,8 @@ public class TestDecommissioningStatus {
 
  final ArrayList<String> decommissionedNodes = new ArrayList<String>(numDatanodes);
   
-  @BeforeClass
-  public static void setUp() throws Exception {
+  @Before
+  public void setUp() throws Exception {
 conf = new HdfsConfiguration();
 conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
 false);
@@ -113,8 +113,8 @@ public class TestDecommissioningStatus {
 Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
   }
 
-  @AfterClass
-  public static void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
 if (localFileSys != null ) cleanupFile(localFileSys, dir);
 if(fileSys != null) fileSys.close();
 if(cluster != null) cluster.shutdown();
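
The switch from @BeforeClass/@AfterClass to @Before/@After is the standard cure for order-dependent flakiness: class-level fixtures share one mutable mini-cluster across all test methods, while method-level fixtures rebuild it per test. A minimal JUnit 4 sketch of the difference; the list below is a stand-in for cluster state, not the actual HDFS test fixture:

    import static org.junit.Assert.assertEquals;

    import java.util.ArrayList;
    import java.util.List;

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    // Per-test setup: each method sees a fresh resource, so tests cannot leak
    // state into each other the way a @BeforeClass-shared fixture can.
    public class IsolationSketchTest {
      private List<String> decommissioned;  // stand-in for cluster state

      @Before
      public void setUp() {
        decommissioned = new ArrayList<>(); // rebuilt for every test method
      }

      @After
      public void tearDown() {
        decommissioned.clear();             // mirrors per-test cluster shutdown
      }

      @Test
      public void testAddsOneNode() {
        decommissioned.add("dn-1");
        assertEquals(1, decommissioned.size()); // would flake if state leaked
      }

      @Test
      public void testStartsEmpty() {
        assertEquals(0, decommissioned.size());
      }
    }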



[50/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-04-07 Thread wangda
YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e7ef482a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e7ef482a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e7ef482a

Branch: refs/heads/YARN-3368
Commit: e7ef482a1e956f87983f236b02e7c09f1a12cada
Parents: b6139a7
Author: Wangda Tan 
Authored: Mon Mar 21 14:03:13 2016 -0700
Committer: Wangda Tan 
Committed: Thu Apr 7 14:52:41 2016 -0700

--
 .gitignore  |  13 +
 BUILDING.txt|   4 +-
 LICENSE.txt |  80 +
 dev-support/docker/Dockerfile   |   5 +
 .../src/site/markdown/YarnUI2.md|  40 +++
 .../hadoop-yarn/hadoop-yarn-ui/.bowerrc |   4 -
 .../hadoop-yarn/hadoop-yarn-ui/.editorconfig|  34 ---
 .../hadoop-yarn/hadoop-yarn-ui/.ember-cli   |  11 -
 .../hadoop-yarn/hadoop-yarn-ui/.gitignore   |  17 --
 .../hadoop-yarn/hadoop-yarn-ui/.jshintrc|  32 --
 .../hadoop-yarn/hadoop-yarn-ui/.travis.yml  |  23 --
 .../hadoop-yarn/hadoop-yarn-ui/.watchmanconfig  |   3 -
 .../hadoop-yarn/hadoop-yarn-ui/README.md|  24 --
 .../hadoop-yarn-ui/app/adapters/cluster-info.js |  20 --
 .../app/adapters/cluster-metric.js  |  20 --
 .../app/adapters/yarn-app-attempt.js|  32 --
 .../hadoop-yarn-ui/app/adapters/yarn-app.js |  26 --
 .../app/adapters/yarn-container-log.js  |  74 -
 .../app/adapters/yarn-container.js  |  43 ---
 .../app/adapters/yarn-node-app.js   |  63 
 .../app/adapters/yarn-node-container.js |  64 
 .../hadoop-yarn-ui/app/adapters/yarn-node.js|  40 ---
 .../hadoop-yarn-ui/app/adapters/yarn-queue.js   |  20 --
 .../hadoop-yarn-ui/app/adapters/yarn-rm-node.js |  45 ---
 .../hadoop-yarn/hadoop-yarn-ui/app/app.js   |  20 --
 .../hadoop-yarn-ui/app/components/.gitkeep  |   0
 .../app/components/app-attempt-table.js |   4 -
 .../hadoop-yarn-ui/app/components/app-table.js  |   4 -
 .../hadoop-yarn-ui/app/components/bar-chart.js  | 104 ---
 .../app/components/base-chart-component.js  | 109 ---
 .../app/components/container-table.js   |   4 -
 .../app/components/donut-chart.js   | 148 --
 .../app/components/item-selector.js |  21 --
 .../app/components/queue-configuration-table.js |   4 -
 .../app/components/queue-navigator.js   |   4 -
 .../hadoop-yarn-ui/app/components/queue-view.js | 272 -
 .../app/components/simple-table.js  |  58 
 .../app/components/timeline-view.js | 250 
 .../app/components/tree-selector.js | 257 
 .../hadoop-yarn/hadoop-yarn-ui/app/config.js|  27 --
 .../hadoop-yarn/hadoop-yarn-ui/app/constants.js |  24 --
 .../hadoop-yarn-ui/app/controllers/.gitkeep |   0
 .../app/controllers/application.js  |  55 
 .../app/controllers/cluster-overview.js |   5 -
 .../hadoop-yarn-ui/app/controllers/yarn-apps.js |   4 -
 .../app/controllers/yarn-queue.js   |   6 -
 .../hadoop-yarn-ui/app/helpers/.gitkeep |   0
 .../hadoop-yarn-ui/app/helpers/divide.js|  31 --
 .../app/helpers/log-files-comma.js  |  48 ---
 .../hadoop-yarn-ui/app/helpers/node-link.js |  37 ---
 .../hadoop-yarn-ui/app/helpers/node-menu.js |  66 -
 .../hadoop-yarn/hadoop-yarn-ui/app/index.html   |  25 --
 .../hadoop-yarn-ui/app/models/.gitkeep  |   0
 .../hadoop-yarn-ui/app/models/cluster-info.js   |  13 -
 .../hadoop-yarn-ui/app/models/cluster-metric.js | 115 
 .../app/models/yarn-app-attempt.js  |  44 ---
 .../hadoop-yarn-ui/app/models/yarn-app.js   |  65 -
 .../app/models/yarn-container-log.js|  25 --
 .../hadoop-yarn-ui/app/models/yarn-container.js |  39 ---
 .../hadoop-yarn-ui/app/models/yarn-node-app.js  |  44 ---
 .../app/models/yarn-node-container.js   |  57 
 .../hadoop-yarn-ui/app/models/yarn-node.js  |  33 ---
 .../hadoop-yarn-ui/app/models/yarn-queue.js |  76 -
 .../hadoop-yarn-ui/app/models/yarn-rm-node.js   |  92 --
 .../hadoop-yarn-ui/app/models/yarn-user.js  |   8 -
 .../hadoop-yarn/hadoop-yarn-ui/app/router.js|  29 --
 .../hadoop-yarn-ui/app/routes/.gitkeep  |   0
 .../hadoop-yarn-ui/app/routes/application.js|  38 ---
 .../app/routes/cluster-overview.js  |  11 -
 .../hadoop-yarn-ui/app/routes/index.js  |  29 --
 .../app/routes/yarn-app-attempt.js  |  21 --
 .../hadoop-yarn-ui/app/routes/yarn-app.js   |  10 -
 

[07/50] [abbrv] hadoop git commit: YARN-4706. UI Hosting Configuration in TimelineServer doc is broken. (Akira AJISAKA via gtcarrera9)

2016-04-07 Thread wangda
YARN-4706. UI Hosting Configuration in TimelineServer doc is broken. (Akira AJISAKA via gtcarrera9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f61de417
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f61de417
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f61de417

Branch: refs/heads/YARN-3368
Commit: f61de4173684aa1767cef20b3cb4d54df20273cd
Parents: a7d1fb0
Author: Li Lu 
Authored: Mon Apr 4 14:39:47 2016 -0700
Committer: Li Lu 
Committed: Mon Apr 4 14:40:27 2016 -0700

--
 .../hadoop-yarn-site/src/site/markdown/TimelineServer.md   | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f61de417/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
index 9283e58..f20bd2c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
@@ -56,6 +56,7 @@ With the introduction of the timeline server, the Application History Server bec
 the Timeline Server.
 
 Generic information includes application level data such as 
+
 * queue-name, 
 * user information and the like set in the `ApplicationSubmissionContext`,
 * a list of application-attempts that ran for an application
@@ -192,6 +193,7 @@ selected if this policy is `HTTPS_ONLY`.
  UI Hosting Configuration
 
The timeline service can host multiple UIs if enabled. The service can support both static web sites hosted in a directory or war files bundled. The web UI is then hosted on the timeline service HTTP port under the path configured.
+
 | Configuration Property | Description |
 |: |: |
| `yarn.timeline-service.ui-names` | Comma separated list of UIs that will be hosted. Defaults to `none`. |



[30/50] [abbrv] hadoop git commit: HDFS-10267. Extra "synchronized" on FsDatasetImpl#recoverAppend and FsDatasetImpl#recoverClose

2016-04-07 Thread wangda
HDFS-10267. Extra "synchronized" on FsDatasetImpl#recoverAppend and FsDatasetImpl#recoverClose


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4bd7cbc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4bd7cbc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4bd7cbc2

Branch: refs/heads/YARN-3368
Commit: 4bd7cbc29d142fc56324156333b9a8a7d7b68042
Parents: 3be1ab4
Author: Colin Patrick Mccabe 
Authored: Wed Apr 6 12:36:54 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Wed Apr 6 21:07:31 2016 -0700

--
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   4 +-
 .../hdfs/server/datanode/TestBlockRecovery.java | 234 ++-
 2 files changed, 180 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bd7cbc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 240345c..7e4e8eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1268,7 +1268,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   }
 
   @Override  // FsDatasetSpi
-  public synchronized ReplicaHandler recoverAppend(
+  public ReplicaHandler recoverAppend(
   ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
 LOG.info("Recover failed append to " + b);
 
@@ -1301,7 +1301,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   }
 
   @Override // FsDatasetSpi
-  public synchronized Replica recoverClose(ExtendedBlock b, long newGS,
+  public Replica recoverClose(ExtendedBlock b, long newGS,
   long expectedBlockLen) throws IOException {
 LOG.info("Recover failed close " + b);
 while (true) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bd7cbc2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index 751089f..42e80fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -44,8 +44,10 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
 
 import com.google.common.collect.Iterators;
 import org.apache.commons.logging.Log;
@@ -90,6 +92,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
@@ -161,7 +164,7 @@ public class TestBlockRecovery {
   }
 
   private final long
-  TEST_LOCK_HOG_DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS = 10L;
+  TEST_STOP_WORKER_XCEIVER_STOP_TIMEOUT_MILLIS = 10L;
 
   /**
* Starts an instance of DataNode
@@ -175,11 +178,10 @@ public class TestBlockRecovery {
 conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
 conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
 conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
-if (currentTestName.getMethodName().equals(
-"testInitReplicaRecoveryDoesNotHogLock")) {
+if (currentTestName.getMethodName().contains("DoesNotHoldLock")) {
   // This test requires a very long value for the xceiver stop timeout.
   conf.setLong(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY,
-  TEST_LOCK_HOG_DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS);
+  TEST_STOP_WORKER_XCEIVER_STOP_TIMEOUT_MILLIS);
 }
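
Dropping synchronized from recoverAppend/recoverClose narrows the lock scope: instead of holding the dataset-wide monitor across an entire (potentially slow) recovery, synchronization can cover only the short sections that touch shared state. A hedged sketch of that general pattern, with stand-in names rather than FsDatasetImpl's real internals:

    // Sketch: shrinking a method-wide lock to just the critical sections.
    public class LockScopeSketch {
      private final Object lock = new Object();
      private long generationStamp = 1;

      // Before: the whole method holds the monitor, including slow I/O.
      public synchronized void recoverCoarse(long newGS) throws Exception {
        Thread.sleep(50);               // slow disk work done under the lock
        generationStamp = newGS;
      }

      // After: only state updates are locked; slow work runs outside, with a
      // re-check loop in case the shared state moved while we were working.
      public void recoverFine(long newGS) throws Exception {
        while (true) {
          long seen;
          synchronized (lock) {
            seen = generationStamp;     // snapshot shared state
          }
          Thread.sleep(50);             // slow disk work, lock NOT held
          synchronized (lock) {
            if (generationStamp == seen) {
              generationStamp = newGS;  // still current: commit and return
              return;
            }
            // state changed underneath us; loop and retry
          }
        }
      }

      public static void main(String[] args) throws Exception {
        LockScopeSketch s = new LockScopeSketch();
        s.recoverFine(2);
        System.out.println("generation stamp = " + s.generationStamp);
      }
    }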
 

[25/50] [abbrv] hadoop git commit: HDFS-10192. Namenode safemode not coming out during failover. Contributed by Brahma Reddy Battula.

2016-04-07 Thread wangda
HDFS-10192. Namenode safemode not coming out during failover. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/221b3a87
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/221b3a87
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/221b3a87

Branch: refs/heads/YARN-3368
Commit: 221b3a8722f84f8e9ad0a98eea38a12cc4ad2f24
Parents: de96d7c
Author: Jing Zhao 
Authored: Wed Apr 6 10:42:59 2016 -0700
Committer: Jing Zhao 
Committed: Wed Apr 6 10:42:59 2016 -0700

--
 .../server/blockmanagement/BlockManager.java|  2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  1 +
 .../TestBlockManagerSafeMode.java   | 14 +++-
 .../hdfs/server/namenode/ha/TestHASafeMode.java | 35 
 4 files changed, 50 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/221b3a87/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 66ab789..104d723 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1974,7 +1974,7 @@ public class BlockManager implements BlockStatsMXBean {
 return bmSafeMode.leaveSafeMode(force);
   }
 
-  void checkSafeMode() {
+  public void checkSafeMode() {
 bmSafeMode.checkSafeMode();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/221b3a87/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9ff4be6..681fc96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1154,6 +1154,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 } finally {
   startingActiveService = false;
+  blockManager.checkSafeMode();
   writeUnlock();
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/221b3a87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
index cb749c7..a347669 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
@@ -66,6 +66,7 @@ public class TestBlockManagerSafeMode {
   private static final long BLOCK_THRESHOLD = (long)(BLOCK_TOTAL * THRESHOLD);
   private static final int EXTENSION = 1000; // 1 second
 
+  private FSNamesystem fsn;
   private BlockManager bm;
   private DatanodeManager dn;
   private BlockManagerSafeMode bmSafeMode;
@@ -90,7 +91,7 @@ public class TestBlockManagerSafeMode {
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY,
 DATANODE_NUM);
 
-FSNamesystem fsn = mock(FSNamesystem.class);
+fsn = mock(FSNamesystem.class);
 doReturn(true).when(fsn).hasWriteLock();
 doReturn(true).when(fsn).hasReadLock();
 doReturn(true).when(fsn).isRunning();
@@ -163,6 +164,17 @@ public class TestBlockManagerSafeMode {
 setBlockSafe(BLOCK_THRESHOLD);
 bmSafeMode.checkSafeMode();
 assertEquals(BMSafeModeStatus.EXTENSION, getSafeModeStatus());
+
+// should stay in PENDING_THRESHOLD during transitionToActive
+doReturn(true).when(fsn).inTransitionToActive();
+Whitebox.setInternalState(bmSafeMode, "extension", 0);
+setSafeModeStatus(BMSafeModeStatus.PENDING_THRESHOLD);
+setBlockSafe(BLOCK_THRESHOLD);
+bmSafeMode.checkSafeMode();
+
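
The fix is a re-check-after-transition pattern: the safemode exit condition consults inTransitionToActive(), so block reports that arrive during a failover are evaluated while the guard is up and then never re-examined. Calling blockManager.checkSafeMode() after startingActiveService is cleared re-runs the now-unguarded check. A hedged sketch of that pattern, with stand-in names rather than the real BlockManagerSafeMode state machine:

    import java.util.concurrent.atomic.AtomicBoolean;

    // Sketch: a guarded state check that must be re-run after the guard flips.
    public class RecheckSketch {
      private final AtomicBoolean inTransitionToActive = new AtomicBoolean(true);
      private boolean thresholdMet = false;
      private boolean inSafeMode = true;

      void checkSafeMode() {
        // Refuse to leave safemode mid-transition, even if blocks are reported.
        if (thresholdMet && !inTransitionToActive.get()) {
          inSafeMode = false;
        }
      }

      void startActiveServices() {
        thresholdMet = true;         // block reports arrived during transition
        checkSafeMode();             // no-op: still transitioning
        inTransitionToActive.set(false);
        checkSafeMode();             // the added re-check; without it we stay
                                     // in safemode until some later event
      }

      public static void main(String[] args) {
        RecheckSketch s = new RecheckSketch();
        s.startActiveServices();
        System.out.println("inSafeMode = " + s.inSafeMode); // false
      }
    }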

[39/50] [abbrv] hadoop git commit: YARN-4517. Add nodes page and fix bunch of license issues. (Varun Saxena via wangda)

2016-04-07 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6139a7d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-app-test.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-app-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-app-test.js
new file mode 100644
index 0000000..21a715c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-app-test.js
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('serializer:yarn-node-app', 'Unit | Serializer | NodeApp', {
+});
+
+test('Basic creation test', function(assert) {
+  let serializer = this.subject();
+
+  assert.ok(serializer);
+  assert.ok(serializer.normalizeSingleResponse);
+  assert.ok(serializer.normalizeArrayResponse);
+  assert.ok(serializer.internalNormalizeSingleResponse);
+});
+
+test('normalizeArrayResponse test', function(assert) {
+  let serializer = this.subject(),
+  modelClass = {
+    modelName: "yarn-node-app"
+  },
+  payload = {
+    apps: {
+      app: [{
+        id:"application_1456251210105_0001", state:"FINISHED", user:"root"
+      },{
+        id:"application_1456251210105_0002", state:"RUNNING",user:"root",
+        containerids:["container_e38_1456251210105_0002_01_01",
+        "container_e38_1456251210105_0002_01_02"]
+      }]
+    }
+  };
+  assert.expect(15);
+  var response =
+      serializer.normalizeArrayResponse({}, modelClass, payload, null, null);
+  assert.ok(response.data);
+  assert.equal(response.data.length, 2);
+  assert.equal(response.data[0].attributes.containers, undefined);
+  assert.equal(response.data[1].attributes.containers.length, 2);
+  assert.deepEqual(response.data[1].attributes.containers,
+      payload.apps.app[1].containerids);
+  for (var i = 0; i < 2; i++) {
+    assert.equal(response.data[i].type, modelClass.modelName);
+    assert.equal(response.data[i].id, payload.apps.app[i].id);
+    assert.equal(response.data[i].attributes.appId, payload.apps.app[i].id);
+    assert.equal(response.data[i].attributes.state, payload.apps.app[i].state);
+    assert.equal(response.data[i].attributes.user, payload.apps.app[i].user);
+  }
+});
+
+test('normalizeArrayResponse no apps test', function(assert) {
+  let serializer = this.subject(),
+  modelClass = {
+    modelName: "yarn-node-app"
+  },
+  payload = { apps: null };
+  assert.expect(5);
+  var response =
+      serializer.normalizeArrayResponse({}, modelClass, payload, null, null);
+  assert.ok(response.data);
+  assert.equal(response.data.length, 1);
+  assert.equal(response.data[0].type, modelClass.modelName);
+  assert.equal(response.data[0].id, "dummy");
+  assert.equal(response.data[0].attributes.appId, undefined);
+});
+
+test('normalizeSingleResponse test', function(assert) {
+  let serializer = this.subject(),
+  modelClass = {
+    modelName: "yarn-node-app"
+  },
+  payload = {
+    app: {id:"application_1456251210105_0001", state:"FINISHED", user:"root"}
+  };
+  assert.expect(7);
+  var response =
+      serializer.normalizeSingleResponse({}, modelClass, payload, null, null);
+  assert.ok(response.data);
+  assert.equal(payload.app.id, response.data.id);
+  assert.equal(modelClass.modelName, response.data.type);
+  assert.equal(payload.app.id, response.data.attributes.appId);
+  assert.equal(payload.app.state, response.data.attributes.state);
+  assert.equal(payload.app.user, response.data.attributes.user);
+  assert.equal(response.data.attributes.containers, undefined);
+});
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6139a7d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-container-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-container-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-container-test.js
new file mode 100644
index 000..1f08467
--- 

[16/50] [abbrv] hadoop git commit: YARN-4916. TestNMProxy.testNMProxyRPCRetry fails. Contributed by Tibor Kiss.

2016-04-07 Thread wangda
YARN-4916. TestNMProxy.testNMProxyRPCRetry fails. Contributed by Tibor Kiss.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00058167
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00058167
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00058167

Branch: refs/heads/YARN-3368
Commit: 00058167431475c6e63c80207424f1d365569e3a
Parents: 9174645
Author: Junping Du 
Authored: Tue Apr 5 09:01:08 2016 -0700
Committer: Junping Du 
Committed: Tue Apr 5 09:01:08 2016 -0700

--
 .../yarn/server/nodemanager/containermanager/TestNMProxy.java | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00058167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
index 7ce15c5..46b32de 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
@@ -188,8 +188,7 @@ public class TestNMProxy extends BaseContainerManagerTest {
       Assert.fail("should get socket exception");
     } catch (IOException e) {
       // socket exception should be thrown immediately, without RPC retries.
-      Assert.assertTrue(e.toString().
-          contains("Failed on local exception: java.net.SocketException"));
+      Assert.assertTrue(e instanceof java.net.SocketException);
     }
   }
 

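Worth noting: asserting on the exception type rather than on `e.toString()` keeps the test stable across RPC error-message wording changes. A minimal, self-contained sketch of the same pattern (hypothetical class and method names, not the committed test):

import java.io.IOException;
import java.net.SocketException;

import org.junit.Assert;
import org.junit.Test;

public class SocketExceptionAssertionSketch {
  // Hypothetical stand-in for an RPC call that should fail fast.
  private void failingRpcCall() throws IOException {
    throw new SocketException("Connection refused");
  }

  @Test
  public void testFailsWithSocketException() {
    try {
      failingRpcCall();
      Assert.fail("should get socket exception");
    } catch (IOException e) {
      // A type check survives message-format changes in the RPC layer,
      // unlike matching on "Failed on local exception: ...".
      Assert.assertTrue(e instanceof SocketException);
    }
  }
}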


[24/50] [abbrv] hadoop git commit: MAPREDUCE-6670. TestJobListCache#testEviction sometimes fails on Windows with timeout. Contributed by Gergely Novák.

2016-04-07 Thread wangda
MAPREDUCE-6670. TestJobListCache#testEviction sometimes fails on Windows with 
timeout. Contributed by Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de96d7c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de96d7c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de96d7c8

Branch: refs/heads/YARN-3368
Commit: de96d7c88a42cd54bd88ce2de63122998e967efa
Parents: b41e65e
Author: Junping Du 
Authored: Wed Apr 6 08:32:35 2016 -0700
Committer: Junping Du 
Committed: Wed Apr 6 08:32:35 2016 -0700

--
 .../java/org/apache/hadoop/mapreduce/v2/hs/TestJobListCache.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de96d7c8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobListCache.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobListCache.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobListCache.java
index 6ebbb7c..3ccc222 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobListCache.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobListCache.java
@@ -45,7 +45,7 @@ public class TestJobListCache {
         cache.values().size());
   }
 
-  @Test (timeout = 1000)
+  @Test (timeout = 5000)
   public void testEviction() throws InterruptedException {
     int maxSize = 2;
     JobListCache cache = new JobListCache(maxSize, 1000);

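For reference, JUnit 4's `timeout` attribute fails the test once the method runs longer than the given number of milliseconds; widening it from 1000 to 5000 leaves headroom for slow Windows file systems without hiding real hangs. A tiny sketch of the semantics (hypothetical test, not the committed one):

import org.junit.Assert;
import org.junit.Test;

public class TimeoutBudgetSketch {
  // JUnit 4 aborts and fails the test once the 5000 ms budget is exceeded.
  @Test(timeout = 5000)
  public void testCompletesWithinBudget() throws InterruptedException {
    Thread.sleep(100); // stand-in for the cache-eviction work under test
    Assert.assertTrue(true);
  }
}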


[19/50] [abbrv] hadoop git commit: YARN-4917. Fix typos in documentation of Capacity Scheduler. (Takashi Ohnishi via iwasakims)

2016-04-07 Thread wangda
YARN-4917. Fix typos in documentation of Capacity Scheduler. (Takashi Ohnishi 
via iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/500e5a59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/500e5a59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/500e5a59

Branch: refs/heads/YARN-3368
Commit: 500e5a5952f8f34bf0e1e2653fa01b357d68cc8f
Parents: 3020634
Author: Masatake Iwasaki 
Authored: Wed Apr 6 04:00:31 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 04:00:31 2016 +0900

--
 .../src/site/markdown/CapacityScheduler.md| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/500e5a59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index e86c4f9..8c0b8c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -55,11 +55,11 @@ The `CapacityScheduler` supports the following features:
 
 * **Hierarchical Queues** - Hierarchy of queues is supported to ensure 
resources are shared among the sub-queues of an organization before other 
queues are allowed to use free resources, there-by providing more control and 
predictability.
 
-* **Capacity Guarantees** - Queues are allocated a fraction of the capacity of 
the grid in the sense that a certain capacity of resources will be at their 
disposal. All applications submitted to a queue will have access to the 
capacity allocated to the queue. Adminstrators can configure soft limits and 
optional hard limits on the capacity allocated to each queue.
+* **Capacity Guarantees** - Queues are allocated a fraction of the capacity of 
the grid in the sense that a certain capacity of resources will be at their 
disposal. All applications submitted to a queue will have access to the 
capacity allocated to the queue. Administrators can configure soft limits and 
optional hard limits on the capacity allocated to each queue.
 
 * **Security** - Each queue has strict ACLs which controls which users can 
submit applications to individual queues. Also, there are safe-guards to ensure 
that users cannot view and/or modify applications from other users. Also, 
per-queue and system administrator roles are supported.
 
-* **Elasticity** - Free resources can be allocated to any queue beyond its 
capacity. When there is demand for these resources from queues running below 
capacity at a future point in time, as tasks scheduled on these resources 
complete, they will be assigned to applications on queues running below the 
capacity (pre-emption is also supported). This ensures that resources are 
available in a predictable and elastic manner to queues, thus preventing 
artifical silos of resources in the cluster which helps utilization.
+* **Elasticity** - Free resources can be allocated to any queue beyond its 
capacity. When there is demand for these resources from queues running below 
capacity at a future point in time, as tasks scheduled on these resources 
complete, they will be assigned to applications on queues running below the 
capacity (pre-emption is also supported). This ensures that resources are 
available in a predictable and elastic manner to queues, thus preventing 
artificial silos of resources in the cluster which helps utilization.
 
 * **Multi-tenancy** - Comprehensive set of limits are provided to prevent a 
single application, user and queue from monopolizing resources of the queue or 
the cluster as a whole to ensure that the cluster isn't overwhelmed.
 
@@ -67,9 +67,9 @@ The `CapacityScheduler` supports the following features:
 
 * Runtime Configuration - The queue definitions and properties such as 
capacity, ACLs can be changed, at runtime, by administrators in a secure manner 
to minimize disruption to users. Also, a console is provided for users and 
administrators to view current allocation of resources to various queues in the 
system. Administrators can *add additional queues* at runtime, but queues 
cannot be *deleted* at runtime.
 
-* Drain applications - Administrators can *stop* queues at runtime to 
ensure that while existing applications run to completion, no new applications 
can be submitted. If a queue is in `STOPPED` state, new applications cannot be 
submitted to *itself* or *any of its child queueus*. Existing applications 

[14/50] [abbrv] hadoop git commit: YARN-4893. Fix some intermittent test failures in TestRMAdminService. Contributed by Brahma Reddy Battula.

2016-04-07 Thread wangda
YARN-4893. Fix some intermittent test failures in TestRMAdminService. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6be28bcc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6be28bcc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6be28bcc

Branch: refs/heads/YARN-3368
Commit: 6be28bcc461292b24589dae17a235b3eaadc07ed
Parents: 1cbcd4a
Author: Junping Du 
Authored: Tue Apr 5 06:57:26 2016 -0700
Committer: Junping Du 
Committed: Tue Apr 5 06:57:54 2016 -0700

--
 .../org/apache/hadoop/yarn/server/resourcemanager/MockRM.java | 7 +--
 .../yarn/server/resourcemanager/TestRMAdminService.java   | 3 ---
 .../hadoop/yarn/server/resourcemanager/TestRMRestart.java | 2 --
 .../server/resourcemanager/TestResourceTrackerService.java| 6 --
 .../server/resourcemanager/rmapp/TestNodesListManager.java| 5 ++---
 5 files changed, 7 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6be28bcc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index d5b64c1..25c558f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -56,12 +56,12 @@ import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationContext;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
@@ -603,6 +603,7 @@ public class MockRM extends ResourceManager {
   public MockNM registerNode(String nodeIdStr, int memory) throws Exception {
     MockNM nm = new MockNM(nodeIdStr, memory, getResourceTrackerService());
     nm.registerNode();
+    drainEvents();
     return nm;
   }
 
@@ -611,6 +612,7 @@
     MockNM nm =
         new MockNM(nodeIdStr, memory, vCores, getResourceTrackerService());
     nm.registerNode();
+    drainEvents();
     return nm;
   }
 
@@ -620,6 +622,7 @@
         new MockNM(nodeIdStr, memory, vCores, getResourceTrackerService(),
             YarnVersionInfo.getVersion());
     nm.registerNode(runningApplications);
+    drainEvents();
     return nm;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6be28bcc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
index 4513cbb..5c69411 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
@@ -27,9 +27,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import 

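The intermittent failures came from asserting on ResourceManager state before its asynchronous dispatcher had delivered the node-registration events; calling drainEvents() after registerNode() closes that window. A hedged sketch of the underlying idea using DrainDispatcher (the helper and its wiring are illustrative; it assumes a dispatcher that has been initialized and started, as in the MiniYARN tests):

import org.apache.hadoop.yarn.event.DrainDispatcher;

public class DrainBeforeAssertSketch {
  private final DrainDispatcher dispatcher = new DrainDispatcher();

  // Illustrative helper mirroring what MockRM#registerNode now does:
  // trigger the async registration, then block until the dispatcher's
  // event queue is empty so later assertions see fully applied state.
  public void registerAndSettle(Runnable registerNode) {
    registerNode.run();
    dispatcher.await(); // returns once all queued events are handled
  }
}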
[21/50] [abbrv] hadoop git commit: HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. Contributed by Eric Badger.

2016-04-07 Thread wangda
HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. 
Contributed by Eric Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ba1e5af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ba1e5af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ba1e5af

Branch: refs/heads/YARN-3368
Commit: 9ba1e5af06070ba01dcf46e1a4c66713a1d43352
Parents: 0cd320a
Author: Kihwal Lee 
Authored: Tue Apr 5 16:26:18 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 16:26:18 2016 -0500

--
 .../bkjournal/TestBookKeeperHACheckpoints.java  | 46 ++--
 1 file changed, 33 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ba1e5af/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
index ed53512..b8fc30d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.contrib.bkjournal;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -27,6 +29,9 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
+import java.net.BindException;
+import java.util.Random;
+
 /**
  * Runs the same tests as TestStandbyCheckpoints, but
  * using a bookkeeper journal manager as the shared directory
@@ -39,6 +44,9 @@ public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
   private static BKJMUtil bkutil = null;
   static int numBookies = 3;
   static int journalCount = 0;
+  private final Random random = new Random();
+
+  private static final Log LOG =
+      LogFactory.getLog(TestStandbyCheckpoints.class);
 
   @SuppressWarnings("rawtypes")
   @Override
@@ -49,22 +57,34 @@ public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
              BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
              .toString());
     BKJMUtil.addJournalManagerDefinition(conf);
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
 
-    cluster = new MiniDFSCluster.Builder(conf)
-      .nnTopology(topology)
-      .numDataNodes(1)
-      .manageNameDfsSharedDirs(false)
-      .build();
-    cluster.waitActive();
+    int retryCount = 0;
+    while (true) {
+      try {
+        int basePort = 10060 + random.nextInt(100) * 2;
+        MiniDFSNNTopology topology = new MiniDFSNNTopology()
+          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+            .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+            .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
 
-    setNNs();
-    fs = HATestUtil.configureFailoverFs(cluster, conf);
+        cluster = new MiniDFSCluster.Builder(conf)
+          .nnTopology(topology)
+          .numDataNodes(1)
+          .manageNameDfsSharedDirs(false)
+          .build();
+        cluster.waitActive();
 
-    cluster.transitionToActive(0);
+        setNNs();
+        fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+        cluster.transitionToActive(0);
+        ++retryCount;
+        break;
+      } catch (BindException e) {
+        LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
+            + retryCount + " times");
+      }
+    }
   }
 
   @BeforeClass



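The fix replaces the hard-coded HTTP ports 10001/10002 with a randomized base port plus retry, so two concurrently running suites no longer collide. The same pattern in isolation (a self-contained sketch, not BookKeeper-specific):

import java.io.IOException;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.util.Random;

public class BindRetrySketch {
  // Pick a randomized port and retry on BindException instead of failing
  // the whole test run when another process grabbed the port first.
  public static ServerSocket bindWithRetry() throws IOException {
    Random random = new Random();
    while (true) {
      int basePort = 10060 + random.nextInt(100) * 2;
      try {
        ServerSocket socket = new ServerSocket();
        socket.bind(new InetSocketAddress(basePort));
        return socket;
      } catch (BindException e) {
        // Port collision with a concurrent test; loop and try another.
      }
    }
  }
}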
[22/50] [abbrv] hadoop git commit: YARN-4699. Scheduler UI and REST o/p is not in sync when -replaceLabelsOnNode is used to change label of a node. (Sunil G via wangda)

2016-04-07 Thread wangda
YARN-4699. Scheduler UI and REST o/p is not in sync when -replaceLabelsOnNode 
is used to change label of a node. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21eb4284
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21eb4284
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21eb4284

Branch: refs/heads/YARN-3368
Commit: 21eb4284487d6f8e4beedb8a0c3168e952f224fc
Parents: 9ba1e5a
Author: Wangda Tan 
Authored: Tue Apr 5 16:24:11 2016 -0700
Committer: Wangda Tan 
Committed: Tue Apr 5 16:24:11 2016 -0700

--
 .../scheduler/capacity/AbstractCSQueue.java |  6 +++
 .../scheduler/capacity/CSQueueUtils.java|  2 +-
 .../TestCapacitySchedulerNodeLabelUpdate.java   | 40 +++-
 3 files changed, 46 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21eb4284/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 6e715fb..c7d6d02 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -591,6 +591,9 @@ public abstract class AbstractCSQueue implements CSQueue {
     }
     // ResourceUsage has its own lock, no addition lock needs here.
     queueUsage.incUsed(nodeLabel, resourceToInc);
+    CSQueueUtils.updateUsedCapacity(resourceCalculator,
+        labelManager.getResourceByLabel(nodeLabel, Resources.none()),
+        minimumAllocation, queueUsage, queueCapacities, nodeLabel);
     if (null != parent) {
       parent.incUsedResource(nodeLabel, resourceToInc, null);
     }
@@ -604,6 +607,9 @@ public abstract class AbstractCSQueue implements CSQueue {
     }
     // ResourceUsage has its own lock, no addition lock needs here.
     queueUsage.decUsed(nodeLabel, resourceToDec);
+    CSQueueUtils.updateUsedCapacity(resourceCalculator,
+        labelManager.getResourceByLabel(nodeLabel, Resources.none()),
+        minimumAllocation, queueUsage, queueCapacities, nodeLabel);
     if (null != parent) {
       parent.decUsedResource(nodeLabel, resourceToDec, null);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21eb4284/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
index 9cdcb72..0166d83 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
@@ -180,7 +180,7 @@ class CSQueueUtils {
    * Update partitioned resource usage, if nodePartition == null, will update
    * used resource for all partitions of this queue.
    */
-  private static void updateUsedCapacity(final ResourceCalculator rc,
+  public static void updateUsedCapacity(final ResourceCalculator rc,
       final Resource totalPartitionResource, final Resource minimumAllocation,
       ResourceUsage queueResourceUsage, QueueCapacities queueCapacities,
       String nodePartition) {

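The root cause of the UI/REST mismatch: incUsedResource/decUsedResource updated the raw ResourceUsage but never recomputed the derived QueueCapacities, so the two views diverged after a label change. The invariant the patch restores, in a minimal form (illustrative helper, memory-only):

public class UsedCapacitySketch {
  // Whenever a label's used resource changes, the derived used-capacity
  // must be recomputed as used / label-total so the scheduler's internal
  // number and the UI/REST number stay identical.
  public static float usedCapacity(long usedMemMB, long labelTotalMemMB) {
    return labelTotalMemMB <= 0 ? 0f : (float) usedMemMB / labelTotalMemMB;
  }
}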

[02/50] [abbrv] hadoop git commit: YARN-4746. yarn web services should convert parse failures of appId, appAttemptId and containerId to 400. Contributed by Bibin A Chundatt

2016-04-07 Thread wangda
YARN-4746. yarn web services should convert parse failures of appId, 
appAttemptId and containerId to 400. Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5092c941
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5092c941
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5092c941

Branch: refs/heads/YARN-3368
Commit: 5092c94195a63bd2c3e36d5a74b4c061cea1b847
Parents: da614ca
Author: naganarasimha 
Authored: Mon Apr 4 16:25:03 2016 +0530
Committer: naganarasimha 
Committed: Mon Apr 4 16:25:03 2016 +0530

--
 .../apache/hadoop/yarn/util/ConverterUtils.java | 16 --
 .../hadoop/yarn/webapp/util/WebAppUtils.java| 22 ++
 .../hadoop/yarn/server/webapp/WebServices.java  | 22 +++---
 .../nodemanager/webapp/NMWebServices.java   |  6 ++--
 .../webapp/TestNMWebServicesApps.java   |  9 --
 .../resourcemanager/webapp/RMWebServices.java   | 32 ++--
 .../webapp/TestRMWebServicesApps.java   | 24 +--
 .../TestRMWebServicesAppsModification.java  | 10 --
 8 files changed, 87 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5092c941/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
index e9674cf..acd29fb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
@@ -122,8 +122,20 @@ public class ConverterUtils {
   public static ApplicationId toApplicationId(RecordFactory recordFactory,
       String appIdStr) {
     Iterator<String> it = _split(appIdStr).iterator();
-    it.next(); // prefix. TODO: Validate application prefix
-    return toApplicationId(recordFactory, it);
+    if (!it.next().equals(APPLICATION_PREFIX)) {
+      throw new IllegalArgumentException("Invalid ApplicationId prefix: "
+          + appIdStr + ". The valid ApplicationId should start with prefix "
+          + APPLICATION_PREFIX);
+    }
+    try {
+      return toApplicationId(recordFactory, it);
+    } catch (NumberFormatException n) {
+      throw new IllegalArgumentException("Invalid ApplicationId: " + appIdStr,
+          n);
+    } catch (NoSuchElementException e) {
+      throw new IllegalArgumentException("Invalid ApplicationId: " + appIdStr,
+          e);
+    }
   }
 
   private static ApplicationId toApplicationId(RecordFactory recordFactory,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5092c941/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index f8e67ee..faf4a77 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -33,9 +33,14 @@ import org.apache.hadoop.http.HttpConfig.Policy;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.RMHAUtils;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+import org.apache.hadoop.yarn.webapp.NotFoundException;
 
 @Private
 @Evolving
@@ -378,4 +383,21 @@ public class WebAppUtils {
     }
     return password;
   }
+
+  public static ApplicationId parseApplicationId(RecordFactory recordFactory,
+      String appId) {
+    if (appId == null || appId.isEmpty()) {
+      throw new NotFoundException("appId, " + appId + ", is empty or null");
+    }
+    ApplicationId aid = null;
+    try {
+      aid = 

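The net effect on the web services: a malformed id now surfaces as HTTP 400 instead of an unhandled IllegalArgumentException turning into a 500. A hedged sketch of the translation (hypothetical helper name; BadRequestException is the YARN webapp class imported in the diff above):

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.BadRequestException;

public class ParseTo400Sketch {
  // Hypothetical helper showing the conversion the patch applies: parse
  // failures become a 400 response rather than a server-side 500.
  public static ApplicationId parseOr400(String appIdStr) {
    try {
      return ConverterUtils.toApplicationId(appIdStr);
    } catch (IllegalArgumentException e) {
      throw new BadRequestException("Invalid ApplicationId: " + appIdStr);
    }
  }
}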
[33/50] [abbrv] hadoop git commit: HADOOP-12984. Add GenericTestUtils.getTestDir method and use it for temporary directory in tests (Contributed by Steve Loughran and Vinayakumar B)

2016-04-07 Thread wangda
HADOOP-12984. Add GenericTestUtils.getTestDir method and use it for temporary 
directory in tests (Contributed by Steve Loughran and Vinayakumar B)

This closes #89


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d29e245
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d29e245
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d29e245

Branch: refs/heads/YARN-3368
Commit: 8d29e2451f5ca60f864c7ece16722c0abdd1c657
Parents: 654cd1d
Author: Vinayakumar B 
Authored: Thu Apr 7 10:12:00 2016 +0530
Committer: Vinayakumar B 
Committed: Thu Apr 7 10:15:24 2016 +0530

--
 .../apache/hadoop/conf/TestConfiguration.java   |  8 +-
 .../crypto/TestCryptoStreamsForLocalFS.java |  5 +-
 .../apache/hadoop/crypto/key/TestKeyShell.java  |  5 +-
 .../org/apache/hadoop/fs/FSTestWrapper.java |  3 +-
 .../fs/FileContextMainOperationsBaseTest.java   |  4 +-
 .../apache/hadoop/fs/FileContextTestHelper.java |  6 +-
 .../apache/hadoop/fs/FileContextURIBase.java|  6 +-
 .../apache/hadoop/fs/FileSystemTestHelper.java  |  4 +-
 .../org/apache/hadoop/fs/TestAvroFSInput.java   |  9 +--
 .../hadoop/fs/TestChecksumFileSystem.java   |  5 +-
 .../org/apache/hadoop/fs/TestDFVariations.java  |  2 +-
 .../test/java/org/apache/hadoop/fs/TestDU.java  |  4 +-
 .../hadoop/fs/TestFileContextResolveAfs.java|  8 +-
 .../java/org/apache/hadoop/fs/TestFileUtil.java |  9 +--
 .../org/apache/hadoop/fs/TestFsShellCopy.java   |  6 +-
 .../apache/hadoop/fs/TestFsShellReturnCode.java |  8 +-
 .../org/apache/hadoop/fs/TestFsShellTouch.java  |  6 +-
 .../hadoop/fs/TestGetFileBlockLocations.java|  5 +-
 .../hadoop/fs/TestHarFileSystemBasics.java  |  5 +-
 .../java/org/apache/hadoop/fs/TestHardLink.java |  5 +-
 .../org/apache/hadoop/fs/TestListFiles.java | 14 ++--
 .../apache/hadoop/fs/TestLocalFileSystem.java   |  7 +-
 .../fs/TestLocalFileSystemPermission.java   | 18 ++---
 .../java/org/apache/hadoop/fs/TestPath.java |  6 +-
 .../java/org/apache/hadoop/fs/TestTrash.java|  6 +-
 .../apache/hadoop/fs/TestTruncatedInputBug.java |  4 +-
 .../hadoop/fs/sftp/TestSFTPFileSystem.java  |  3 +-
 .../apache/hadoop/fs/shell/TestPathData.java|  5 +-
 .../apache/hadoop/fs/shell/TestTextCommand.java |  4 +-
 .../hadoop/fs/viewfs/TestViewfsFileStatus.java  |  7 +-
 .../apache/hadoop/ha/ClientBaseWithFixes.java   |  4 +-
 .../http/TestAuthenticationSessionCookie.java   |  5 +-
 .../apache/hadoop/http/TestHttpCookieFlag.java  |  5 +-
 .../hadoop/http/TestHttpServerLifecycle.java|  5 +-
 .../apache/hadoop/http/TestSSLHttpServer.java   |  5 +-
 .../org/apache/hadoop/io/TestArrayFile.java |  6 +-
 .../org/apache/hadoop/io/TestBloomMapFile.java  |  6 +-
 .../java/org/apache/hadoop/io/TestMapFile.java  |  6 +-
 .../org/apache/hadoop/io/TestSequenceFile.java  | 48 +--
 .../hadoop/io/TestSequenceFileAppend.java   |  4 +-
 .../io/TestSequenceFileSerialization.java   |  4 +-
 .../apache/hadoop/io/TestSequenceFileSync.java  |  5 +-
 .../java/org/apache/hadoop/io/TestSetFile.java  |  5 +-
 .../apache/hadoop/io/compress/TestCodec.java| 23 +++---
 .../apache/hadoop/io/file/tfile/TestTFile.java  |  4 +-
 .../io/file/tfile/TestTFileByteArrays.java  |  4 +-
 .../io/file/tfile/TestTFileComparator2.java |  4 +-
 .../io/file/tfile/TestTFileComparators.java |  5 +-
 .../hadoop/io/file/tfile/TestTFileSeek.java |  4 +-
 .../file/tfile/TestTFileSeqFileComparison.java  |  5 +-
 .../hadoop/io/file/tfile/TestTFileSplit.java|  4 +-
 .../hadoop/io/file/tfile/TestTFileStreams.java  |  4 +-
 .../file/tfile/TestTFileUnsortedByteArrays.java |  5 +-
 .../apache/hadoop/io/file/tfile/TestVLong.java  |  4 +-
 .../apache/hadoop/io/nativeio/TestNativeIO.java | 10 +--
 .../TestSharedFileDescriptorFactory.java|  4 +-
 .../sink/RollingFileSystemSinkTestBase.java |  6 +-
 .../apache/hadoop/security/TestCredentials.java |  4 +-
 .../hadoop/security/TestLdapGroupsMapping.java  |  7 +-
 .../hadoop/security/alias/TestCredShell.java|  4 +-
 .../alias/TestCredentialProviderFactory.java|  5 +-
 .../hadoop/security/ssl/KeyStoreTestUtil.java   |  4 +-
 .../ssl/TestReloadingX509TrustManager.java  |  6 +-
 .../hadoop/security/ssl/TestSSLFactory.java |  6 +-
 .../apache/hadoop/test/GenericTestUtils.java| 84 +++-
 .../java/org/apache/hadoop/util/JarFinder.java  |  4 +-
 .../hadoop/util/TestApplicationClassLoader.java |  4 +-
 .../org/apache/hadoop/util/TestClasspath.java   |  5 +-
 .../hadoop/util/TestGenericOptionsParser.java   |  2 +-
 .../apache/hadoop/util/TestHostsFileReader.java |  4 +-
 .../org/apache/hadoop/util/TestJarFinder.java   |  9 ++-
 .../java/org/apache/hadoop/util/TestRunJar.java |  8 +-
 .../java/org/apache/hadoop/util/TestShell.java  |  4 +-
 

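Typical use of the new helper, assuming the two overloads this commit introduces (a parameterless getTestDir() rooted at the test.build.data system property, and getTestDir(String) for a per-test subdirectory):

import java.io.File;

import org.apache.hadoop.test.GenericTestUtils;

public class TestDirUsageSketch {
  public static void main(String[] args) {
    // Root scratch directory for test data, derived from test.build.data.
    File root = GenericTestUtils.getTestDir();

    // Per-test subdirectory, replacing hard-coded "build/test/data" paths.
    File scratch = GenericTestUtils.getTestDir("my-test-case");
    System.out.println(root + " -> " + scratch);
  }
}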
[12/50] [abbrv] hadoop git commit: YARN-4609. RM Nodes list page takes too much time to load. Contributed by Bibin A Chundatt

2016-04-07 Thread wangda
YARN-4609. RM Nodes list page takes too much time to load. Contributed by Bibin 
A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/776b549e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/776b549e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/776b549e

Branch: refs/heads/YARN-3368
Commit: 776b549e2ac20a68a5513cbcaac0edc33233dc03
Parents: 552237d
Author: Rohith Sharma K S 
Authored: Tue Apr 5 14:47:25 2016 +0530
Committer: Rohith Sharma K S 
Committed: Tue Apr 5 14:47:25 2016 +0530

--
 .../resourcemanager/webapp/NodesPage.java   | 53 +---
 .../resourcemanager/webapp/TestNodesPage.java   | 37 --
 2 files changed, 45 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/776b549e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
index 9603468..7063421 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
-import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_STATE;
 import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_LABEL;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_STATE;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
@@ -40,7 +40,6 @@ import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -101,6 +100,7 @@ class NodesPage extends RmView {
           LOG.debug("Unexpected state filter for inactive RM node");
         }
       }
+      StringBuilder nodeTableData = new StringBuilder("[\n");
       for (RMNode ni : rmNodes) {
         if (stateFilter != null) {
           NodeState state = ni.getState();
@@ -129,27 +129,40 @@ class NodesPage extends RmView {
         NodeInfo info = new NodeInfo(ni, sched);
         int usedMemory = (int) info.getUsedMemory();
         int availableMemory = (int) info.getAvailableMemory();
-        TR<TD<TBODY<TABLE<Hamlet>>>, Hamlet> row =
-            tbody.tr().td(StringUtils.join(",", info.getNodeLabels()))
-                .td(info.getRack()).td(info.getState()).td(info.getNodeId());
+        nodeTableData.append("[\"")
+            .append(StringUtils.join(",", info.getNodeLabels()))
+            .append("\",\"")
+            .append(info.getRack()).append("\",\"").append(info.getState())
+            .append("\",\"").append(info.getNodeId());
         if (isInactive) {
-          row.td()._("N/A")._();
+          nodeTableData.append("\",\"").append("N/A").append("\",\"");
         } else {
           String httpAddress = info.getNodeHTTPAddress();
-          row.td().a("//" + httpAddress, httpAddress)._();
+          nodeTableData.append("\",\"").append(httpAddress)
+              .append("\",").append("\"");
         }
-        row.td().br().$title(String.valueOf(info.getLastHealthUpdate()))._()
-            ._(Times.format(info.getLastHealthUpdate()))._()
-            .td(info.getHealthReport())
-            .td(String.valueOf(info.getNumContainers())).td().br()
-            .$title(String.valueOf(usedMemory))._()
-            ._(StringUtils.byteDesc(usedMemory * BYTES_IN_MB))._().td().br()
-            .$title(String.valueOf(availableMemory))._()
-            ._(StringUtils.byteDesc(availableMemory * BYTES_IN_MB))._()
-            .td(String.valueOf(info.getUsedVirtualCores()))
-            .td(String.valueOf(info.getAvailableVirtualCores()))
-            .td(ni.getNodeManagerVersion())._();
+        nodeTableData.append("")
+

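The speed-up comes from shipping all node rows to the browser as one JSON array for DataTables to render, instead of emitting one server-rendered Hamlet table row per node. The string-building idea in isolation (simplified sketch; real code must also escape quotes in cell values):

import java.util.ArrayList;
import java.util.List;

public class NodeRowsJsonSketch {
  // Serialize all rows into one JSON array-of-arrays string that a
  // client-side table widget can render in a single pass.
  public static String toTableData(List<String[]> rows) {
    List<String> jsonRows = new ArrayList<>();
    for (String[] row : rows) {
      jsonRows.add("[\"" + String.join("\",\"", row) + "\"]");
    }
    return "[\n" + String.join(",\n", jsonRows) + "\n]";
  }
}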
[41/50] [abbrv] hadoop git commit: YARN-4517. Add nodes page and fix bunch of license issues. (Varun Saxena via wangda)

2016-04-07 Thread wangda
YARN-4517. Add nodes page and fix bunch of license issues. (Varun Saxena via 
wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6139a7d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6139a7d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6139a7d

Branch: refs/heads/YARN-3368
Commit: b6139a7d1605f58f43b5d19a3c70b3a878e9eb33
Parents: 6292390
Author: Wangda Tan 
Authored: Mon Mar 21 13:13:02 2016 -0700
Committer: Wangda Tan 
Committed: Thu Apr 7 14:52:41 2016 -0700

--
 .../hadoop-yarn-ui/app/adapters/cluster-info.js |   5 +-
 .../app/adapters/cluster-metric.js  |   5 +-
 .../app/adapters/yarn-app-attempt.js|   3 +-
 .../hadoop-yarn-ui/app/adapters/yarn-app.js |   3 +-
 .../app/adapters/yarn-container-log.js  |  74 +
 .../app/adapters/yarn-container.js  |   5 +-
 .../app/adapters/yarn-node-app.js   |  63 
 .../app/adapters/yarn-node-container.js |  64 
 .../hadoop-yarn-ui/app/adapters/yarn-node.js|  40 +
 .../hadoop-yarn-ui/app/adapters/yarn-queue.js   |   3 +-
 .../hadoop-yarn-ui/app/adapters/yarn-rm-node.js |  45 ++
 .../app/components/simple-table.js  |  38 -
 .../hadoop-yarn/hadoop-yarn-ui/app/config.js|  27 
 .../hadoop-yarn/hadoop-yarn-ui/app/constants.js |  24 +++
 .../app/controllers/application.js  |  55 +++
 .../hadoop-yarn-ui/app/helpers/divide.js|  31 
 .../app/helpers/log-files-comma.js  |  48 ++
 .../hadoop-yarn-ui/app/helpers/node-link.js |  37 +
 .../hadoop-yarn-ui/app/helpers/node-menu.js |  66 
 .../hadoop-yarn-ui/app/models/yarn-app.js   |  14 +-
 .../app/models/yarn-container-log.js|  25 +++
 .../hadoop-yarn-ui/app/models/yarn-node-app.js  |  44 ++
 .../app/models/yarn-node-container.js   |  57 +++
 .../hadoop-yarn-ui/app/models/yarn-node.js  |  33 
 .../hadoop-yarn-ui/app/models/yarn-rm-node.js   |  92 +++
 .../hadoop-yarn/hadoop-yarn-ui/app/router.js|  13 ++
 .../hadoop-yarn-ui/app/routes/application.js|  38 +
 .../hadoop-yarn-ui/app/routes/index.js  |  29 
 .../hadoop-yarn-ui/app/routes/yarn-apps.js  |   4 +-
 .../app/routes/yarn-container-log.js|  55 +++
 .../hadoop-yarn-ui/app/routes/yarn-node-app.js  |  29 
 .../hadoop-yarn-ui/app/routes/yarn-node-apps.js |  29 
 .../app/routes/yarn-node-container.js   |  30 
 .../app/routes/yarn-node-containers.js  |  28 
 .../hadoop-yarn-ui/app/routes/yarn-node.js  |  29 
 .../hadoop-yarn-ui/app/routes/yarn-nodes.js |  25 +++
 .../app/serializers/yarn-container-log.js   |  39 +
 .../app/serializers/yarn-node-app.js|  86 +++
 .../app/serializers/yarn-node-container.js  |  74 +
 .../hadoop-yarn-ui/app/serializers/yarn-node.js |  56 +++
 .../app/serializers/yarn-rm-node.js |  77 ++
 .../app/templates/application.hbs   |   4 +-
 .../hadoop-yarn-ui/app/templates/error.hbs  |  19 +++
 .../hadoop-yarn-ui/app/templates/notfound.hbs   |  20 +++
 .../hadoop-yarn-ui/app/templates/yarn-apps.hbs  |   4 +-
 .../app/templates/yarn-container-log.hbs|  36 +
 .../app/templates/yarn-node-app.hbs |  60 
 .../app/templates/yarn-node-apps.hbs|  51 +++
 .../app/templates/yarn-node-container.hbs   |  70 +
 .../app/templates/yarn-node-containers.hbs  |  58 +++
 .../hadoop-yarn-ui/app/templates/yarn-node.hbs  |  94 
 .../hadoop-yarn-ui/app/templates/yarn-nodes.hbs |  65 
 .../hadoop-yarn-ui/app/utils/converter.js   |  21 ++-
 .../hadoop-yarn-ui/app/utils/sorter.js  |  42 -
 .../hadoop-yarn/hadoop-yarn-ui/bower.json   |   2 +-
 .../hadoop-yarn-ui/config/environment.js|   1 -
 .../unit/adapters/yarn-container-log-test.js|  73 +
 .../tests/unit/adapters/yarn-node-app-test.js   |  93 +++
 .../unit/adapters/yarn-node-container-test.js   |  93 +++
 .../tests/unit/adapters/yarn-node-test.js   |  42 +
 .../tests/unit/adapters/yarn-rm-node-test.js|  44 ++
 .../unit/models/yarn-container-log-test.js  |  48 ++
 .../tests/unit/models/yarn-node-app-test.js |  65 
 .../unit/models/yarn-node-container-test.js |  78 ++
 .../tests/unit/models/yarn-node-test.js |  58 +++
 .../tests/unit/models/yarn-rm-node-test.js  |  95 
 .../unit/routes/yarn-container-log-test.js  | 120 +++
 .../tests/unit/routes/yarn-node-app-test.js |  56 +++
 .../tests/unit/routes/yarn-node-apps-test.js|  60 
 .../unit/routes/yarn-node-container-test.js |  

[18/50] [abbrv] hadoop git commit: YARN-4915. Fix typo in YARN Secure Containers documentation (Takashi Ohnishi via iwasakims)

2016-04-07 Thread wangda
YARN-4915. Fix typo in YARN Secure Containers documentation (Takashi Ohnishi 
via iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30206346
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30206346
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30206346

Branch: refs/heads/YARN-3368
Commit: 30206346cf13fe1b7267f86e7c210b77c86b88c9
Parents: 85ec557
Author: Masatake Iwasaki 
Authored: Wed Apr 6 03:47:22 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 03:47:22 2016 +0900

--
 .../hadoop-yarn-site/src/site/markdown/SecureContainer.md  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30206346/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
index cd4f913..f7706c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
@@ -114,7 +114,7 @@ min.user.id=1000#Prevent other super-users
 
   `yarn.nodemanager.windows-secure-container-executor.impersonate.allowed` 
should contain the users that are allowed to create containers in the cluster. 
These users will be allowed to be impersonated by hadoopwinutilsvc.
 
-  `yarn.nodemanager.windows-secure-container-executor.impersonate.denied` 
should contain users that are explictly forbiden from creating containers. 
hadoopwinutilsvc will refuse to impersonate these users.
+  `yarn.nodemanager.windows-secure-container-executor.impersonate.denied` 
should contain users that are explicitly forbidden from creating containers. 
hadoopwinutilsvc will refuse to impersonate these users.
 
   `yarn.nodemanager.windows-secure-container-executor.local-dirs` should 
contain the nodemanager local dirs. hadoopwinutilsvc will allow only file 
operations under these directories. This should contain the same values as 
`$yarn.nodemanager.local-dirs, $yarn.nodemanager.log-dirs` but note that 
hadoopwinutilsvc XML configuration processing does not do substitutions so the 
value must be the final value. All paths must be absolute and no environment 
variable substitution will be performed. The paths are compared 
LOCAL\_INVARIANT case insensitive string comparison, the file path validated 
must start with one of the paths listed in local-dirs configuration. Use comma 
as path separator:`,`
 



[46/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-04-07 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7ef482a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
new file mode 100644
index 000..f7ec020
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
@@ -0,0 +1,275 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Component.extend({
+  // Map: <queueName, queue>
+  map : undefined,
+
+  // Normalized data for d3
+  treeData: undefined,
+
+  // folded queues, folded[<queueName>] == true means <queueName> is folded
+  foldedQueues: { },
+
+  // maxDepth
+  maxDepth: 0,
+
+  // num of leaf queue, folded queue is treated as leaf queue
+  numOfLeafQueue: 0,
+
+  // mainSvg
+  mainSvg: undefined,
+
+  // Init data
+  initData: function() {
+    this.map = { };
+    this.treeData = { };
+    this.maxDepth = 0;
+    this.numOfLeafQueue = 0;
+
+    this.get("model")
+      .forEach(function(o) {
+        this.map[o.id] = o;
+      }.bind(this));
+
+    var selected = this.get("selected");
+
+    this.initQueue("root", 1, this.treeData);
+  },
+
+  // get Children array of given queue
+  getChildrenNamesArray: function(q) {
+    var namesArr = [];
+
+    // Folded queue's children is empty
+    if (this.foldedQueues[q.get("name")]) {
+      return namesArr;
+    }
+
+    var names = q.get("children");
+    if (names) {
+      names.forEach(function(name) {
+        namesArr.push(name);
+      });
+    }
+
+    return namesArr;
+  },
+
+  // Init queues
+  initQueue: function(queueName, depth, node) {
+    if ((!queueName) || (!this.map[queueName])) {
+      // Queue is not existed
+      return;
+    }
+
+    if (depth > this.maxDepth) {
+      this.maxDepth = this.maxDepth + 1;
+    }
+
+    var queue = this.map[queueName];
+
+    var names = this.getChildrenNamesArray(queue);
+
+    node.name = queueName;
+    node.parent = queue.get("parent");
+    node.queueData = queue;
+
+    if (names.length > 0) {
+      node.children = [];
+
+      names.forEach(function(name) {
+        var childQueueData = {};
+        node.children.push(childQueueData);
+        this.initQueue(name, depth + 1, childQueueData);
+      }.bind(this));
+    } else {
+      this.numOfLeafQueue = this.numOfLeafQueue + 1;
+    }
+  },
+
+  update: function(source, root, tree, diagonal) {
+    var duration = 300;
+    var i = 0;
+
+    // Compute the new tree layout.
+    var nodes = tree.nodes(root).reverse();
+    var links = tree.links(nodes);
+
+    // Normalize for fixed-depth.
+    nodes.forEach(function(d) { d.y = d.depth * 200; });
+
+    // Update the nodes…
+    var node = this.mainSvg.selectAll("g.node")
+      .data(nodes, function(d) { return d.id || (d.id = ++i); });
+
+    // Enter any new nodes at the parent's previous position.
+    var nodeEnter = node.enter().append("g")
+      .attr("class", "node")
+      .attr("transform", function(d) {
+        return "translate(" + source.y0 + "," + source.x0 + ")";
+      })
+      .on("click", function(d,i){
+        if (d.queueData.get("name") != this.get("selected")) {
+          document.location.href = "yarnQueue/" + d.queueData.get("name");
+        }
+      }.bind(this));
+      // .on("click", click);
+
+    nodeEnter.append("circle")
+      .attr("r", 1e-6)
+      .style("fill", function(d) {
+        var usedCap = d.queueData.get("usedCapacity");
+        if (usedCap <= 60.0) {
+          return "LimeGreen";
+        } else if (usedCap <= 100.0) {
+          return "DarkOrange";
+        } else {
+          return "LightCoral";
+        }
+      });
+
+    // append percentage
+    nodeEnter.append("text")
+      .attr("x", function(d) { return 0; })
+      .attr("dy", ".35em")
+      .attr("text-anchor", function(d) { return "middle"; })
+      .text(function(d) {
+        var usedCap = d.queueData.get("usedCapacity");
+        if (usedCap >= 100.0) {
+

[06/50] [abbrv] hadoop git commit: HDFS-10178. Permanent write failures can happen if pipeline recoveries occur for the first packet. Contributed by Kihwal Lee.

2016-04-07 Thread wangda
HDFS-10178. Permanent write failures can happen if pipeline recoveries occur 
for the first packet. Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7d1fb0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7d1fb0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7d1fb0c

Branch: refs/heads/YARN-3368
Commit: a7d1fb0cd2fdbf830602eb4dbbd9bbe62f4d5584
Parents: 154d253
Author: Kihwal Lee 
Authored: Mon Apr 4 16:39:23 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Apr 4 16:40:00 2016 -0500

--
 .../hdfs/server/datanode/BlockReceiver.java |  2 +
 .../hdfs/server/datanode/BlockSender.java   |  6 ++-
 .../server/datanode/DataNodeFaultInjector.java  |  2 +
 .../TestClientProtocolForPipelineRecovery.java  | 53 
 4 files changed, 62 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7d1fb0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 2e4ee02..fb0c1c5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -574,6 +574,8 @@ class BlockReceiver implements Closeable {
     if (mirrorOut != null && !mirrorError) {
       try {
         long begin = Time.monotonicNow();
+        // For testing. Normally no-op.
+        DataNodeFaultInjector.get().stopSendingPacketDownstream();
         packetReceiver.mirrorPacketTo(mirrorOut);
         mirrorOut.flush();
         long now = Time.monotonicNow();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7d1fb0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index 773a64c..398935d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -300,11 +300,15 @@ class BlockSender implements java.io.Closeable {
 
         // The meta file will contain only the header if the NULL checksum
         // type was used, or if the replica was written to transient storage.
+        // Also, when only header portion of a data packet was transferred
+        // and then pipeline breaks, the meta file can contain only the
+        // header and 0 byte in the block data file.
         // Checksum verification is not performed for replicas on transient
         // storage.  The header is important for determining the checksum
        // type later when lazy persistence copies the block to non-transient
         // storage and computes the checksum.
-        if (metaIn.getLength() > BlockMetadataHeader.getHeaderSize()) {
+        if (!replica.isOnTransientStorage() &&
+            metaIn.getLength() >= BlockMetadataHeader.getHeaderSize()) {
           checksumIn = new DataInputStream(new BufferedInputStream(
               metaIn, IO_FILE_BUFFER_SIZE));
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7d1fb0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index 0e38694..7327420 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -50,5 +50,7 @@ public class DataNodeFaultInjector {
     return false;
   }
 
+  public void stopSendingPacketDownstream() throws IOException {}
+
   public void noRegistration() throws IOException { }
 }


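The new stopSendingPacketDownstream() hook lets a test freeze a DataNode mid-packet to reproduce the broken-pipeline state. A hedged sketch of building such an injector (how the instance is installed depends on DataNodeFaultInjector's test hooks, as used by the accompanying test):

import java.io.IOException;
import java.util.concurrent.CountDownLatch;

import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;

public class PipelinePauseInjectorSketch {
  // Override the no-op hook so the DataNode blocks before mirroring a
  // packet downstream, simulating a pipeline that stalls after only the
  // packet header has been transferred.
  public static DataNodeFaultInjector pausingInjector(final CountDownLatch resume) {
    return new DataNodeFaultInjector() {
      @Override
      public void stopSendingPacketDownstream() throws IOException {
        try {
          resume.await(); // hold the pipeline until the test releases it
        } catch (InterruptedException e) {
          throw new IOException("Interrupted while paused", e);
        }
      }
    };
  }
}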
[37/50] [abbrv] hadoop git commit: YARN-4733. [YARN-3368] Initial commit of new YARN web UI. (wangda)

2016-04-07 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62923909/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
new file mode 100644
index 000..d39885e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
@@ -0,0 +1,29 @@
+import DS from 'ember-data';
+
+export default DS.JSONAPISerializer.extend({
+  normalizeSingleResponse(store, primaryModelClass, payload, id,
+      requestType) {
+    var fixedPayload = {
+      id: id,
+      type: primaryModelClass.modelName,
+      attributes: payload
+    };
+
+    return this._super(store, primaryModelClass, fixedPayload, id,
+        requestType);
+  },
+
+  normalizeArrayResponse(store, primaryModelClass, payload, id,
+      requestType) {
+    // return expected is { data: [ {}, {} ] }
+    var normalizedArrayResponse = {};
+
+    // payload has apps : { app: [ {},{},{} ]  }
+    // need some error handling for ex apps or app may not be defined.
+    normalizedArrayResponse.data = [
+      this.normalizeSingleResponse(store, primaryModelClass,
+        payload.clusterMetrics, 1, requestType)
+    ];
+    return normalizedArrayResponse;
+  }
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62923909/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
new file mode 100644
index 000..c5394d0
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
@@ -0,0 +1,49 @@
+import DS from 'ember-data';
+import Converter from 'yarn-ui/utils/converter';
+
+export default DS.JSONAPISerializer.extend({
+  internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
+      requestType) {
+
+    if (payload.appAttempt) {
+      payload = payload.appAttempt;
+    }
+
+    var fixedPayload = {
+      id: payload.appAttemptId,
+      type: primaryModelClass.modelName, // yarn-app
+      attributes: {
+        startTime: Converter.timeStampToDate(payload.startTime),
+        finishedTime: Converter.timeStampToDate(payload.finishedTime),
+        containerId: payload.containerId,
+        nodeHttpAddress: payload.nodeHttpAddress,
+        nodeId: payload.nodeId,
+        state: payload.nodeId,
+        logsLink: payload.logsLink
+      }
+    };
+
+    return fixedPayload;
+  },
+
+  normalizeSingleResponse(store, primaryModelClass, payload, id,
+      requestType) {
+    var p = this.internalNormalizeSingleResponse(store,
+        primaryModelClass, payload, id, requestType);
+    return { data: p };
+  },
+
+  normalizeArrayResponse(store, primaryModelClass, payload, id,
+      requestType) {
+    // return expected is { data: [ {}, {} ] }
+    var normalizedArrayResponse = {};
+
+    // payload has apps : { app: [ {},{},{} ]  }
+    // need some error handling for ex apps or app may not be defined.
+    normalizedArrayResponse.data = payload.appAttempts.appAttempt.map(singleApp => {
+      return this.internalNormalizeSingleResponse(store, primaryModelClass,
+          singleApp, singleApp.id, requestType);
+    }, this);
+    return normalizedArrayResponse;
+  }
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62923909/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
new file mode 100644
index 000..a038fff
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
@@ -0,0 +1,66 @@
+import DS from 'ember-data';
+import Converter from 'yarn-ui/utils/converter';
+
+export default DS.JSONAPISerializer.extend({
+internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  if (payload.app) {
+payload = payload.app;  
+  }
+  
+  var fixedPayload = {
+id: id,
+type: primaryModelClass.modelName, // yarn-app
+attributes: {
+  appName: payload.name,
+  user: payload.user,
+  queue: payload.queue,
+  state: payload.state,
+  startTime: Converter.timeStampToDate(payload.startedTime),
+  elapsedTime: Converter.msToElapsedTime(payload.elapsedTime),

[36/50] [abbrv] hadoop git commit: HADOOP-12909. Change ipc.Client to support asynchronous calls. Contributed by Xiaobing Zhou

2016-04-07 Thread wangda
HADOOP-12909. Change ipc.Client to support asynchronous calls.  Contributed by  
Xiaobing Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a62637a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a62637a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a62637a4

Branch: refs/heads/YARN-3368
Commit: a62637a413ad88c4273d3251892b8fc1c05afa34
Parents: 3c18a53
Author: Tsz-Wo Nicholas Sze 
Authored: Thu Apr 7 14:01:33 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Thu Apr 7 14:02:51 2016 +0800

--
 .../main/java/org/apache/hadoop/ipc/Client.java |  73 +++-
 .../org/apache/hadoop/ipc/TestAsyncIPC.java | 346 +++
 .../java/org/apache/hadoop/ipc/TestIPC.java |  29 +-
 3 files changed, 436 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a62637a4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index fb11cb7..489c354 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -62,6 +62,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -96,6 +97,7 @@ import org.apache.htrace.core.Tracer;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.AbstractFuture;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.google.protobuf.CodedOutputStream;
 
@@ -107,7 +109,7 @@ import com.google.protobuf.CodedOutputStream;
  */
 @InterfaceAudience.LimitedPrivate(value = { "Common", "HDFS", "MapReduce", 
"Yarn" })
 @InterfaceStability.Evolving
-public class Client {
+public class Client implements AutoCloseable {
   
   public static final Log LOG = LogFactory.getLog(Client.class);
 
@@ -116,6 +118,20 @@ public class Client {
 
  private static final ThreadLocal<Integer> callId = new ThreadLocal<Integer>();
  private static final ThreadLocal<Integer> retryCount = new ThreadLocal<Integer>();
+  private static final ThreadLocal<Future<?>> returnValue = new ThreadLocal<>();
+  private static final ThreadLocal<Boolean> asynchronousMode =
+      new ThreadLocal<Boolean>() {
+        @Override
+        protected Boolean initialValue() {
+          return false;
+        }
+      };
+
+  @SuppressWarnings("unchecked")
+  @Unstable
+  public static <T> Future<T> getReturnValue() {
+    return (Future<T>) returnValue.get();
+  }
 
   /** Set call id and retry count for the next call. */
   public static void setCallIdAndRetryCount(int cid, int rc) {
@@ -1354,8 +1370,8 @@ public class Client {
   ConnectionId remoteId, int serviceClass,
   AtomicBoolean fallbackToSimpleAuth) throws IOException {
 final Call call = createCall(rpcKind, rpcRequest);
-Connection connection = getConnection(remoteId, call, serviceClass,
-  fallbackToSimpleAuth);
+final Connection connection = getConnection(remoteId, call, serviceClass,
+fallbackToSimpleAuth);
 try {
   connection.sendRpcRequest(call); // send the rpc request
 } catch (RejectedExecutionException e) {
@@ -1366,6 +1382,51 @@ public class Client {
   throw new IOException(e);
 }
 
+    if (isAsynchronousMode()) {
+      Future<Writable> returnFuture = new AbstractFuture<Writable>() {
+        @Override
+        public Writable get() throws InterruptedException, ExecutionException {
+          try {
+            set(getRpcResponse(call, connection));
+          } catch (IOException ie) {
+            setException(ie);
+          }
+          return super.get();
+        }
+      };
+
+      returnValue.set(returnFuture);
+      return null;
+    } else {
+      return getRpcResponse(call, connection);
+    }
+  }
+
+  /**
+   * Check if RPC is in asynchronous mode or not.
+   *
+   * @return true, if RPC is in asynchronous mode, otherwise false for
+   *  synchronous mode.
+   */
+  @Unstable
+  static boolean isAsynchronousMode() {
+return asynchronousMode.get();
+  }
+
+  /**
+   * Set RPC to asynchronous or synchronous mode.
+   *
+   
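
For orientation, a minimal usage sketch of the new asynchronous mode follows.
It assumes Client.setAsynchronousMode(boolean) as the setter whose javadoc is
cut off above, uses the call(...) overload shown in this diff, and relies on
Client now implementing AutoCloseable; conf, param and remoteId setup (and
exception handling for Future.get()) are omitted:

  // Hedged sketch, not part of the commit.
  try (Client client = new Client(LongWritable.class, conf)) {
    Client.setAsynchronousMode(true);   // async RPC for this thread
    client.call(RPC.RpcKind.RPC_BUILTIN, param, remoteId,
        RPC.RPC_SERVICE_CLASS_DEFAULT, null);  // returns null immediately
    Future<LongWritable> future = Client.getReturnValue();
    Client.setAsynchronousMode(false);  // restore synchronous RPC
    LongWritable value = future.get();  // blocks until the response arrives
  }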

[08/50] [abbrv] hadoop git commit: HADOOP-12959. Add additional github web site for ISA-L library (Li Bo via cmccabe)

2016-04-07 Thread wangda
HADOOP-12959. Add additional github web site for ISA-L library (Li Bo via 
cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f65f5b18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f65f5b18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f65f5b18

Branch: refs/heads/YARN-3368
Commit: f65f5b18fd4647e868b8d2a2c035a3b64dc16aa8
Parents: f61de41
Author: Colin Patrick Mccabe 
Authored: Mon Apr 4 16:30:32 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Mon Apr 4 16:30:32 2016 -0700

--
 BUILDING.txt | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f65f5b18/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 408cae1..c7a91da 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -75,6 +75,7 @@ Optional packages:
   $ sudo apt-get install snappy libsnappy-dev
 * Intel ISA-L library for erasure coding
   Please refer to 
https://01.org/intel%C2%AE-storage-acceleration-library-open-source-version
+  (OR https://github.com/01org/isa-l)
 * Bzip2
   $ sudo apt-get install bzip2 libbz2-dev
 * Jansson (C Library for JSON)
@@ -188,11 +189,12 @@ Maven build goals:
 
  Intel ISA-L build options:
 
-   Intel ISA-L is a erasure coding library that can be utilized by the native 
code.
+   Intel ISA-L is an erasure coding library that can be utilized by the native 
code.
It is currently an optional component, meaning that Hadoop can be built with
or without this dependency. Note the library is used via dynamic module. 
Please
reference the official site for the library details.
https://01.org/intel%C2%AE-storage-acceleration-library-open-source-version
+   (OR https://github.com/01org/isa-l)
 
   * Use -Drequire.isal to fail the build if libisal.so is not found.
 If this option is not specified and the isal library is missing,
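
For reference, a native build that requires ISA-L can be invoked along these
lines (illustrative only; -Pdist,native and -Dtar are the standard options
documented elsewhere in BUILDING.txt):

  $ mvn package -Pdist,native -DskipTests -Dtar -Drequire.isal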



[45/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-04-07 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7ef482a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
new file mode 100644
index 000..89858bf
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Route.extend({
+  model(param) {
+return Ember.RSVP.hash({
+  selected : param.queue_name,
+  queues: this.store.findAll('yarnQueue'),
+  selectedQueue : undefined,
+  apps: undefined, // apps of selected queue
+});
+  },
+
+  afterModel(model) {
+model.selectedQueue = this.store.peekRecord('yarnQueue', model.selected);
+model.apps = this.store.findAll('yarnApp');
+model.apps.forEach(function(o) {
+  console.log(o);
+})
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7ef482a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
new file mode 100644
index 000..7da6f6d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Route.extend({
+  beforeModel() {
+this.transitionTo('yarnQueues.root');
+  }
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7ef482a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
new file mode 100644
index 000..3686c83
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+

[48/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-04-07 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7ef482a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
deleted file mode 100644
index a038fff..000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
+++ /dev/null
@@ -1,66 +0,0 @@
-import DS from 'ember-data';
-import Converter from 'yarn-ui/utils/converter';
-
-export default DS.JSONAPISerializer.extend({
-internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  if (payload.app) {
-payload = payload.app;  
-  }
-  
-  var fixedPayload = {
-id: id,
-type: primaryModelClass.modelName, // yarn-app
-attributes: {
-  appName: payload.name,
-  user: payload.user,
-  queue: payload.queue,
-  state: payload.state,
-  startTime: Converter.timeStampToDate(payload.startedTime),
-  elapsedTime: Converter.msToElapsedTime(payload.elapsedTime),
-  finishedTime: Converter.timeStampToDate(payload.finishedTime),
-  finalStatus: payload.finalStatus,
-  progress: payload.progress,
-  diagnostics: payload.diagnostics,
-  amContainerLogs: payload.amContainerLogs,
-  amHostHttpAddress: payload.amHostHttpAddress,
-  logAggregationStatus: payload.logAggregationStatus,
-  unmanagedApplication: payload.unmanagedApplication,
-  amNodeLabelExpression: payload.amNodeLabelExpression,
-  priority: payload.priority,
-  allocatedMB: payload.allocatedMB,
-  allocatedVCores: payload.allocatedVCores,
-  runningContainers: payload.runningContainers,
-  memorySeconds: payload.memorySeconds,
-  vcoreSeconds: payload.vcoreSeconds,
-  preemptedResourceMB: payload.preemptedResourceMB,
-  preemptedResourceVCores: payload.preemptedResourceVCores,
-  numNonAMContainerPreempted: payload.numNonAMContainerPreempted,
-  numAMContainerPreempted: payload.numAMContainerPreempted
-}
-  };
-
-  return fixedPayload;
-},
-
-normalizeSingleResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  var p = this.internalNormalizeSingleResponse(store, 
-primaryModelClass, payload, id, requestType);
-  return { data: p };
-},
-
-normalizeArrayResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  // return expected is { data: [ {}, {} ] }
-  var normalizedArrayResponse = {};
-
-  // payload has apps : { app: [ {},{},{} ]  }
-  // need some error handling for ex apps or app may not be defined.
-  normalizedArrayResponse.data = payload.apps.app.map(singleApp => {
-return this.internalNormalizeSingleResponse(store, primaryModelClass,
-  singleApp, singleApp.id, requestType);
-  }, this);
-  return normalizedArrayResponse;
-}
-});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7ef482a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-container-log.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-container-log.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-container-log.js
deleted file mode 100644
index 9e10615..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-container-log.js
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import DS from 'ember-data';
-import Converter from 'yarn-ui/utils/converter';
-
-export default DS.JSONAPISerializer.extend({
-  normalizeSingleResponse(store, primaryModelClass, payload, id,
-  requestType) {
-// Convert plain text response into JSON.
-// ID is of the form nodeAddress!containerId!fileName
-var splits = Converter.splitForContainerLogs(id);
-var 

[09/50] [abbrv] hadoop git commit: HDFS-8496. Calling stopWriter() with FSDatasetImpl lock held may block other threads (cmccabe)

2016-04-07 Thread wangda
HDFS-8496. Calling stopWriter() with FSDatasetImpl lock held may block other 
threads (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6b1a818
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6b1a818
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6b1a818

Branch: refs/heads/YARN-3368
Commit: f6b1a818124cc42688c4c5acaf537d96cf00e43b
Parents: f65f5b1
Author: Colin Patrick Mccabe 
Authored: Mon Apr 4 18:00:26 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Mon Apr 4 18:02:15 2016 -0700

--
 .../hdfs/server/datanode/ReplicaInPipeline.java |  54 ---
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 145 +--
 .../datanode/fsdataset/impl/ReplicaMap.java |   2 +-
 .../hdfs/server/datanode/TestBlockRecovery.java | 137 --
 4 files changed, 257 insertions(+), 81 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6b1a818/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
index 5caca15..7326846 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
@@ -22,6 +22,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.RandomAccessFile;
+import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
@@ -44,7 +45,7 @@ public class ReplicaInPipeline extends ReplicaInfo
   private long bytesAcked;
   private long bytesOnDisk;
   private byte[] lastChecksum;  
-  private Thread writer;
+  private AtomicReference<Thread> writer = new AtomicReference<Thread>();
 
   /**
* Bytes reserved for this replica on the containing volume.
@@ -97,7 +98,7 @@ public class ReplicaInPipeline extends ReplicaInfo
 super( blockId, len, genStamp, vol, dir);
 this.bytesAcked = len;
 this.bytesOnDisk = len;
-this.writer = writer;
+this.writer.set(writer);
 this.bytesReserved = bytesToReserve;
 this.originalBytesReserved = bytesToReserve;
   }
@@ -110,7 +111,7 @@ public class ReplicaInPipeline extends ReplicaInfo
 super(from);
 this.bytesAcked = from.getBytesAcked();
 this.bytesOnDisk = from.getBytesOnDisk();
-this.writer = from.writer;
+this.writer.set(from.writer.get());
 this.bytesReserved = from.bytesReserved;
 this.originalBytesReserved = from.originalBytesReserved;
   }
@@ -175,18 +176,11 @@ public class ReplicaInPipeline extends ReplicaInfo
 return new ChunkChecksum(getBytesOnDisk(), lastChecksum);
   }
 
-  /**
-   * Set the thread that is writing to this replica
-   * @param writer a thread writing to this replica
-   */
-  public void setWriter(Thread writer) {
-this.writer = writer;
-  }
-  
   public void interruptThread() {
-if (writer != null && writer != Thread.currentThread() 
-&& writer.isAlive()) {
-  this.writer.interrupt();
+Thread thread = writer.get();
+if (thread != null && thread != Thread.currentThread() 
+&& thread.isAlive()) {
+  thread.interrupt();
 }
   }
 
@@ -196,17 +190,35 @@ public class ReplicaInPipeline extends ReplicaInfo
   }
   
   /**
+   * Attempt to set the writer to a new value.
+   */
+  public boolean attemptToSetWriter(Thread prevWriter, Thread newWriter) {
+return writer.compareAndSet(prevWriter, newWriter);
+  }
+
+  /**
* Interrupt the writing thread and wait until it dies
* @throws IOException the waiting is interrupted
*/
   public void stopWriter(long xceiverStopTimeout) throws IOException {
-if (writer != null && writer != Thread.currentThread() && 
writer.isAlive()) {
-  writer.interrupt();
+while (true) {
+  Thread thread = writer.get();
+  if ((thread == null) || (thread == Thread.currentThread()) ||
+  (!thread.isAlive())) {
+if (writer.compareAndSet(thread, null) == true) {
+  return; // Done
+}
+// The writer changed.  Go back to the start of the loop and attempt to
+// stop the new writer.
+continue;
+  }
+  thread.interrupt();
   try {
-writer.join(xceiverStopTimeout);
-if (writer.isAlive()) {
-  final String msg = "Join on 
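
The point of moving writer into an AtomicReference is that callers can stop a
writer and claim the replica without holding the FSDatasetImpl lock. A hedged
sketch of the caller-side pattern built on stopWriter() and
attemptToSetWriter() (names and shape illustrative, not the exact
FsDatasetImpl code):

  // Illustrative retry loop; runs outside the dataset lock.
  while (true) {
    replica.stopWriter(xceiverStopTimeout);  // interrupt and join the old writer
    if (replica.attemptToSetWriter(null, Thread.currentThread())) {
      break;  // CAS succeeded: this thread now owns the replica
    }
    // a new writer registered between stopWriter() and the CAS; retry
  }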

[42/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-04-07 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7ef482a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-node-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-node-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-node-test.js
deleted file mode 100644
index 5877589..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-node-test.js
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import { moduleForModel, test } from 'ember-qunit';
-
-moduleForModel('yarn-node', 'Unit | Model | Node', {
-  // Specify the other units that are required for this test.
-  needs: []
-});
-
-test('Basic creation test', function(assert) {
-  let model = this.subject();
-
-  assert.ok(model);
-  assert.ok(model._notifyProperties);
-  assert.ok(model.didLoad);
-  assert.ok(model.totalVmemAllocatedContainersMB);
-  assert.ok(model.vmemCheckEnabled);
-  assert.ok(model.pmemCheckEnabled);
-  assert.ok(model.nodeHealthy);
-  assert.ok(model.lastNodeUpdateTime);
-  assert.ok(model.healthReport);
-  assert.ok(model.nmStartupTime);
-  assert.ok(model.nodeManagerBuildVersion);
-  assert.ok(model.hadoopBuildVersion);
-});
-
-test('test fields', function(assert) {
-  let model = this.subject();
-
-  assert.expect(4);
-  Ember.run(function () {
-model.set("totalVmemAllocatedContainersMB", 4096);
-model.set("totalPmemAllocatedContainersMB", 2048);
-model.set("totalVCoresAllocatedContainers", 4);
-model.set("hadoopBuildVersion", "3.0.0-SNAPSHOT");
-assert.equal(model.get("totalVmemAllocatedContainersMB"), 4096);
-assert.equal(model.get("totalPmemAllocatedContainersMB"), 2048);
-assert.equal(model.get("totalVCoresAllocatedContainers"), 4);
-assert.equal(model.get("hadoopBuildVersion"), "3.0.0-SNAPSHOT");
-  });
-});
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7ef482a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-rm-node-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-rm-node-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-rm-node-test.js
deleted file mode 100644
index 4fd2517..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-rm-node-test.js
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import { moduleForModel, test } from 'ember-qunit';
-
-moduleForModel('yarn-rm-node', 'Unit | Model | RMNode', {
-  // Specify the other units that are required for this test.
-  needs: []
-});
-
-test('Basic creation test', function(assert) {
-  let model = this.subject();
-
-  assert.ok(model);
-  assert.ok(model._notifyProperties);
-  assert.ok(model.didLoad);
-  assert.ok(model.rack);
-  assert.ok(model.state);
-  assert.ok(model.nodeHostName);
-  assert.ok(model.nodeHTTPAddress);
-  assert.ok(model.lastHealthUpdate);
-  assert.ok(model.healthReport);
-  assert.ok(model.numContainers);
-  assert.ok(model.usedMemoryMB);
-  assert.ok(model.availMemoryMB);
-  assert.ok(model.usedVirtualCores);
-  assert.ok(model.availableVirtualCores);
-  assert.ok(model.version);
-  assert.ok(model.nodeLabels);
-  

[34/50] [abbrv] hadoop git commit: HDFS-9719. Refactoring ErasureCodingWorker into smaller reusable constructs. Contributed by Kai Zheng.

2016-04-07 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c18a53c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
new file mode 100644
index 000..a0a5f83
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
@@ -0,0 +1,273 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.erasurecode;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import 
org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.io.erasurecode.CodecUtil;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.DataChecksum;
+import org.slf4j.Logger;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.util.BitSet;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutorCompletionService;
+
+/**
+ * StripedReconstructor reconstructs one or more missing striped blocks in a
+ * striped block group; the number of live striped blocks must be no less
+ * than the number of data blocks.
+ *
+ * | <- Striped Block Group -> |
+ *  blk_0  blk_1   blk_2(*)   blk_3   ...   <- A striped block group
+ *|  |   |  |
+ *v  v   v  v
+ * +--+   +--+   +--+   +--+
+ * |cell_0|   |cell_1|   |cell_2|   |cell_3|  ...
+ * +--+   +--+   +--+   +--+
+ * |cell_4|   |cell_5|   |cell_6|   |cell_7|  ...
+ * +--+   +--+   +--+   +--+
+ * |cell_8|   |cell_9|   |cell10|   |cell11|  ...
+ * +--+   +--+   +--+   +--+
+ *  ... ...   ... ...
+ *
+ *
+ * We use the following steps to reconstruct a striped block group. In each
+ * round we reconstruct bufferSize data until finished; bufferSize is
+ * configurable and may be smaller or larger than the cell size:
+ * step1: read bufferSize data from the minimum number of sources
+ *required by reconstruction.
+ * step2: decode data for targets.
+ * step3: transfer data to targets.
+ *
+ * In step1, we try to read bufferSize data from the minimum number
+ * of sources; if there are corrupt or stale sources, reads from new sources
+ * will be scheduled. The best sources are remembered for the next round and
+ * may be updated in each round.
+ *
+ * In step2, typically if the source blocks we read are all data blocks, we
+ * need to call encode, and if there is at least one parity block, we need to
+ * call decode. Note that we read only once and reconstruct all missing
+ * striped blocks even if there is more than one.
+ *
+ * In step3, we send the reconstructed data to the targets by constructing
+ * packets and sending them directly. As with continuous block replication, we
+ * don't check the packet ack. Since the datanode doing the reconstruction
+ * work is one of the source datanodes, the reconstructed data is sent
+ * remotely.
+ *
+ * There are some points where we can make further improvements in the next
+ * phase:
+ * 1. We can read the block file directly on the local datanode;
+ *currently we use a remote block reader. (Note that short-circuit is not
+ *a good choice; see inline comments.)
+ * 2. Do we need to check the packet ack for EC reconstruction? Since EC
+ *reconstruction is more expensive than 
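
Condensed into code, the round-based loop described above has roughly this
shape (an illustrative outline; the helper names are placeholders, not this
class's API):

  // Outline of the reconstruction loop (placeholder method names).
  long positionInBlock = 0;
  while (positionInBlock < maxTargetLength) {
    int toReconstruct =
        (int) Math.min(bufferSize, maxTargetLength - positionInBlock);
    readFromMinimumSources(toReconstruct);  // step1: read, swapping bad sources
    decodeAndReconstructTargets();          // step2: decode (or encode) targets
    transferDataToTargets();                // step3: packetize and send, no ack
    positionInBlock += toReconstruct;
  }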

[01/50] [abbrv] hadoop git commit: HADOOP-12967. Remove FileUtil#copyMerge. Contributed by Brahma Reddy Battula. [Forced Update!]

2016-04-07 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3368 f97298166 -> e7ef482a1 (forced update)


HADOOP-12967. Remove FileUtil#copyMerge. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da614ca5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da614ca5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da614ca5

Branch: refs/heads/YARN-3368
Commit: da614ca5dc26562d7ecd5d7c5743fa52c3c17342
Parents: 1e6f929
Author: Akira Ajisaka 
Authored: Mon Apr 4 17:46:56 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Apr 4 17:48:08 2016 +0900

--
 .../java/org/apache/hadoop/fs/FileUtil.java | 42 --
 .../java/org/apache/hadoop/fs/TestFileUtil.java | 58 
 2 files changed, 100 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da614ca5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index b855c48..e2d6ecd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -23,7 +23,6 @@ import java.net.InetAddress;
 import java.net.URI;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Enumeration;
 import java.util.List;
 import java.util.Map;
@@ -381,47 +380,6 @@ public class FileUtil {
 
   }
 
-  @Deprecated
-  /** Copy all files in a directory to one output file (merge). */
-  public static boolean copyMerge(FileSystem srcFS, Path srcDir,
-  FileSystem dstFS, Path dstFile,
-  boolean deleteSource,
-  Configuration conf, String addString) throws 
IOException {
-dstFile = checkDest(srcDir.getName(), dstFS, dstFile, false);
-
-if (!srcFS.getFileStatus(srcDir).isDirectory())
-  return false;
-
-OutputStream out = dstFS.create(dstFile);
-
-try {
-  FileStatus contents[] = srcFS.listStatus(srcDir);
-  Arrays.sort(contents);
-  for (int i = 0; i < contents.length; i++) {
-if (contents[i].isFile()) {
-  InputStream in = srcFS.open(contents[i].getPath());
-  try {
-IOUtils.copyBytes(in, out, conf, false);
-if (addString!=null)
-  out.write(addString.getBytes("UTF-8"));
-
-  } finally {
-in.close();
-  }
-}
-  }
-} finally {
-  out.close();
-}
-
-
-if (deleteSource) {
-  return srcFS.delete(srcDir, true);
-} else {
-  return true;
-}
-  }
-
   /** Copy local files to a FileSystem. */
   public static boolean copy(File src,
  FileSystem dstFS, Path dst,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da614ca5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
index f7464b7..a9ef5c0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
@@ -19,11 +19,9 @@ package org.apache.hadoop.fs;
 
 import org.junit.*;
 
-import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
-import java.io.FileReader;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.net.InetAddress;
@@ -49,7 +47,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.tools.tar.TarEntry;
 import org.apache.tools.tar.TarOutputStream;
 
-import javax.print.attribute.URISyntax;
 
 import static org.junit.Assert.*;
 import static org.mockito.Mockito.mock;
@@ -526,61 +523,6 @@ public class TestFileUtil {
 validateAndSetWritablePermissions(false, ret);
   }
   
-  @Test (timeout = 30000)
-  public void testCopyMergeSingleDirectory() throws IOException {
-setupDirs();
-boolean copyMergeResult = copyMerge("partitioned", "tmp/merged");
-Assert.assertTrue("Expected successful copyMerge result.", 
copyMergeResult);
-File merged = new File(TEST_DIR, "tmp/merged");
-Assert.assertTrue("File tmp/merged must 

[04/50] [abbrv] hadoop git commit: HADOOP-11212. NetUtils.wrapException to handle SocketException explicitly. (Contributed by Steve Loughran)

2016-04-07 Thread wangda
HADOOP-11212. NetUtils.wrapException to handle SocketException explicitly. 
(Contributed by Steve Loughran)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7280550a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7280550a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7280550a

Branch: refs/heads/YARN-3368
Commit: 7280550a8f668df8aa32e4630db4ead49e9b8b6d
Parents: 89c9347
Author: Arpit Agarwal 
Authored: Mon Apr 4 10:50:11 2016 -0700
Committer: Arpit Agarwal 
Committed: Mon Apr 4 10:50:11 2016 -0700

--
 .../java/org/apache/hadoop/net/NetUtils.java| 15 +--
 .../org/apache/hadoop/net/TestNetUtils.java | 47 +++-
 2 files changed, 38 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7280550a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
index 2c3661a..4050107 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
@@ -782,12 +782,21 @@ public class NetUtils {
   + ": " + exception
   + ";"
   + see("EOFException"));
+} else if (exception instanceof SocketException) {
+  // Many of the predecessor exceptions are subclasses of SocketException,
+  // so must be handled before this
+  return wrapWithMessage(exception,
+  "Call From "
+  + localHost + " to " + destHost + ":" + destPort
+  + " failed on socket exception: " + exception
+  + ";"
+  + see("SocketException"));
 }
 else {
   return (IOException) new IOException("Failed on local exception: "
-   + exception
-   + "; Host Details : "
-   + 
getHostDetailsAsString(destHost, destPort, localHost))
+ + exception
+ + "; Host Details : "
+ + getHostDetailsAsString(destHost, destPort, localHost))
   .initCause(exception);
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7280550a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
index c93ede8..e59ac77 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
@@ -72,7 +72,7 @@ public class TestNetUtils {
* This is a regression test for HADOOP-6722.
*/
   @Test
-  public void testAvoidLoopbackTcpSockets() throws Exception {
+  public void testAvoidLoopbackTcpSockets() throws Throwable {
 Configuration conf = new Configuration();
 
 Socket socket = NetUtils.getDefaultSocketFactory(conf)
@@ -88,11 +88,11 @@ public class TestNetUtils {
   fail("Should not have connected");
 } catch (ConnectException ce) {
   System.err.println("Got exception: " + ce);
-  assertTrue(ce.getMessage().contains("resulted in a loopback"));
+  assertInException(ce, "resulted in a loopback");
 } catch (SocketException se) {
   // Some TCP stacks will actually throw their own Invalid argument 
exception
   // here. This is also OK.
-  assertTrue(se.getMessage().contains("Invalid argument"));
+  assertInException(se, "Invalid argument");
 }
   }
   
@@ -188,15 +188,11 @@ public class TestNetUtils {
   }  
 
   @Test
-  public void testVerifyHostnamesNoException() {
+  public void testVerifyHostnamesNoException() throws UnknownHostException {
 String[] names = {"valid.host.com", "1.com"};
-try {
-  NetUtils.verifyHostnames(names);
-} catch (UnknownHostException e) {
-  fail("NetUtils.verifyHostnames threw unexpected UnknownHostException");
-}
+NetUtils.verifyHostnames(names);
   }
-  
+
   /** 
* Test for {@link NetUtils#isLocalAddress(java.net.InetAddress)}
*/
@@ -267,7 +263,18 @@ public class TestNetUtils {
 assertRemoteDetailsIncluded(wrapped);
 assertInException(wrapped, "/EOFException");
   }
-  
+
+  @Test
+  public void 
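
The new SocketException branch can be exercised directly; a hedged sketch in
the style of the surrounding tests, using the five-argument
NetUtils.wrapException signature this class already exercises:

  // Wrapping a SocketException should keep host details in the message.
  SocketException se = new SocketException("Connection reset");
  IOException wrapped = NetUtils.wrapException(
      "dest.example.com", 8020, "localhost", 0, se);
  // Expected: message contains "failed on socket exception" and both hosts.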

[38/50] [abbrv] hadoop git commit: YARN-4733. [YARN-3368] Initial commit of new YARN web UI. (wangda)

2016-04-07 Thread wangda
YARN-4733. [YARN-3368] Initial commit of new YARN web UI. (wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62923909
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62923909
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62923909

Branch: refs/heads/YARN-3368
Commit: 62923909d63ec61c4df853b33361875ddaae2df7
Parents: a62637a
Author: Wangda Tan 
Authored: Tue Dec 8 16:37:50 2015 -0800
Committer: Wangda Tan 
Committed: Thu Apr 7 14:52:09 2016 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/.bowerrc |   4 +
 .../hadoop-yarn/hadoop-yarn-ui/.editorconfig|  34 +++
 .../hadoop-yarn/hadoop-yarn-ui/.ember-cli   |  11 +
 .../hadoop-yarn/hadoop-yarn-ui/.gitignore   |  17 ++
 .../hadoop-yarn/hadoop-yarn-ui/.jshintrc|  32 +++
 .../hadoop-yarn/hadoop-yarn-ui/.travis.yml  |  23 ++
 .../hadoop-yarn/hadoop-yarn-ui/.watchmanconfig  |   3 +
 .../hadoop-yarn/hadoop-yarn-ui/README.md|  24 ++
 .../hadoop-yarn-ui/app/adapters/cluster-info.js |  19 ++
 .../app/adapters/cluster-metric.js  |  19 ++
 .../app/adapters/yarn-app-attempt.js|  31 +++
 .../hadoop-yarn-ui/app/adapters/yarn-app.js |  25 ++
 .../app/adapters/yarn-container.js  |  42 +++
 .../hadoop-yarn-ui/app/adapters/yarn-queue.js   |  19 ++
 .../hadoop-yarn/hadoop-yarn-ui/app/app.js   |  20 ++
 .../hadoop-yarn-ui/app/components/.gitkeep  |   0
 .../app/components/app-attempt-table.js |   4 +
 .../hadoop-yarn-ui/app/components/app-table.js  |   4 +
 .../hadoop-yarn-ui/app/components/bar-chart.js  | 104 +++
 .../app/components/base-chart-component.js  | 109 
 .../app/components/container-table.js   |   4 +
 .../app/components/donut-chart.js   | 148 ++
 .../app/components/item-selector.js |  21 ++
 .../app/components/queue-configuration-table.js |   4 +
 .../app/components/queue-navigator.js   |   4 +
 .../hadoop-yarn-ui/app/components/queue-view.js | 272 +++
 .../app/components/simple-table.js  |  30 ++
 .../app/components/timeline-view.js | 250 +
 .../app/components/tree-selector.js | 257 ++
 .../hadoop-yarn-ui/app/controllers/.gitkeep |   0
 .../app/controllers/cluster-overview.js |   5 +
 .../hadoop-yarn-ui/app/controllers/yarn-apps.js |   4 +
 .../app/controllers/yarn-queue.js   |   6 +
 .../hadoop-yarn-ui/app/helpers/.gitkeep |   0
 .../hadoop-yarn/hadoop-yarn-ui/app/index.html   |  25 ++
 .../hadoop-yarn-ui/app/models/.gitkeep  |   0
 .../hadoop-yarn-ui/app/models/cluster-info.js   |  13 +
 .../hadoop-yarn-ui/app/models/cluster-metric.js | 115 
 .../app/models/yarn-app-attempt.js  |  44 +++
 .../hadoop-yarn-ui/app/models/yarn-app.js   |  65 +
 .../hadoop-yarn-ui/app/models/yarn-container.js |  39 +++
 .../hadoop-yarn-ui/app/models/yarn-queue.js |  76 ++
 .../hadoop-yarn-ui/app/models/yarn-user.js  |   8 +
 .../hadoop-yarn/hadoop-yarn-ui/app/router.js|  16 ++
 .../hadoop-yarn-ui/app/routes/.gitkeep  |   0
 .../app/routes/cluster-overview.js  |  11 +
 .../app/routes/yarn-app-attempt.js  |  21 ++
 .../hadoop-yarn-ui/app/routes/yarn-app.js   |  10 +
 .../hadoop-yarn-ui/app/routes/yarn-apps.js  |   8 +
 .../hadoop-yarn-ui/app/routes/yarn-queue.js |  20 ++
 .../app/routes/yarn-queues/index.js |   5 +
 .../app/routes/yarn-queues/queues-selector.js   |   7 +
 .../app/serializers/cluster-info.js |  29 ++
 .../app/serializers/cluster-metric.js   |  29 ++
 .../app/serializers/yarn-app-attempt.js |  49 
 .../hadoop-yarn-ui/app/serializers/yarn-app.js  |  66 +
 .../app/serializers/yarn-container.js   |  54 
 .../app/serializers/yarn-queue.js   | 127 +
 .../hadoop-yarn-ui/app/styles/app.css   | 141 ++
 .../app/templates/application.hbs   |  25 ++
 .../app/templates/cluster-overview.hbs  |  56 
 .../app/templates/components/.gitkeep   |   0
 .../templates/components/app-attempt-table.hbs  |  28 ++
 .../app/templates/components/app-table.hbs  |  62 +
 .../templates/components/container-table.hbs|  36 +++
 .../components/queue-configuration-table.hbs|  40 +++
 .../templates/components/queue-navigator.hbs|  18 ++
 .../app/templates/components/timeline-view.hbs  |  35 +++
 .../app/templates/yarn-app-attempt.hbs  |  12 +
 .../hadoop-yarn-ui/app/templates/yarn-app.hbs   | 145 ++
 .../hadoop-yarn-ui/app/templates/yarn-apps.hbs  |   3 +
 .../hadoop-yarn-ui/app/templates/yarn-queue.hbs |  48 
 .../hadoop-yarn-ui/app/utils/converter.js   |  74 +
 

[27/50] [abbrv] hadoop git commit: HDFS-9945. Datanode command for evicting writers. Contributed by Kihwal Lee

2016-04-07 Thread wangda
HDFS-9945. Datanode command for evicting writers. Contributed by Kihwal Lee


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aede8c10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aede8c10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aede8c10

Branch: refs/heads/YARN-3368
Commit: aede8c10ecad4f2a8802a834e4bd0b8286cebade
Parents: 188f652
Author: Eric Payne 
Authored: Wed Apr 6 20:20:14 2016 +
Committer: Eric Payne 
Committed: Wed Apr 6 20:20:14 2016 +

--
 .../hdfs/protocol/ClientDatanodeProtocol.java   |  7 +++
 .../ClientDatanodeProtocolTranslatorPB.java | 12 +
 .../src/main/proto/ClientDatanodeProtocol.proto | 10 
 ...tDatanodeProtocolServerSideTranslatorPB.java | 15 ++
 .../hdfs/server/datanode/BlockReceiver.java |  3 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  7 +++
 .../hdfs/server/datanode/DataXceiver.java   | 48 +++
 .../hdfs/server/datanode/DataXceiverServer.java |  6 +++
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 21 +
 .../TestClientProtocolForPipelineRecovery.java  | 49 
 10 files changed, 170 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aede8c10/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index 08547c1..e541388 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -122,6 +122,13 @@ public interface ClientDatanodeProtocol {
   void shutdownDatanode(boolean forUpgrade) throws IOException;
 
   /**
+   * Evict clients that are writing to a datanode.
+   *
+   * @throws IOException
+   */
+  void evictWriters() throws IOException;
+
+  /**
* Obtains datanode info
*
* @return software/config version and uptime of the datanode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aede8c10/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index 2fd..6aaa025 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto;
@@ -97,6 +98,8 @@ public class ClientDatanodeProtocolTranslatorPB implements
   private static final GetBalancerBandwidthRequestProto
   VOID_GET_BALANCER_BANDWIDTH =
   GetBalancerBandwidthRequestProto.newBuilder().build();
+  private final static EvictWritersRequestProto VOID_EVICT_WRITERS =
+  EvictWritersRequestProto.newBuilder().build();
 
   public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
   Configuration conf, int socketTimeout, boolean connectToDnViaHostname,
@@ -244,6 +247,15 @@ public class ClientDatanodeProtocolTranslatorPB implements
   }
 
   @Override
+  public void evictWriters() throws IOException {
+try {
+  rpcProxy.evictWriters(NULL_CONTROLLER, VOID_EVICT_WRITERS);
+} catch (ServiceException e) {
+  throw ProtobufHelper.getRemoteException(e);
+}
+  }
+
+  @Override
   public DatanodeLocalInfo 
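
The DFSAdmin change listed in the summary above (cut off in this message)
surfaces this RPC as a shell command. Based on this commit, the invocation is
along these lines (check the output of hdfs dfsadmin -help on your build for
the exact form):

  $ hdfs dfsadmin -evictWriters <datanode_host:ipc_port>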

[13/50] [abbrv] hadoop git commit: YARN-4311. Removing nodes from include and exclude lists will not remove them from decommissioned nodes list. Contributed by Kuhu Shukla

2016-04-07 Thread wangda
YARN-4311. Removing nodes from include and exclude lists will not remove them 
from decommissioned nodes list. Contributed by Kuhu Shukla


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1cbcd4a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1cbcd4a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1cbcd4a4

Branch: refs/heads/YARN-3368
Commit: 1cbcd4a491e6a57d466c2897335614dc6770b475
Parents: 776b549
Author: Jason Lowe 
Authored: Tue Apr 5 13:40:19 2016 +
Committer: Jason Lowe 
Committed: Tue Apr 5 13:40:19 2016 +

--
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |   9 +
 .../yarn/sls/scheduler/RMNodeWrapper.java   |   9 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   9 +
 .../src/main/resources/yarn-default.xml |  13 ++
 .../resourcemanager/NodesListManager.java   | 104 -
 .../server/resourcemanager/RMServerUtils.java   |   2 +-
 .../resourcemanager/ResourceTrackerService.java |   8 +-
 .../server/resourcemanager/rmnode/RMNode.java   |   4 +
 .../resourcemanager/rmnode/RMNodeImpl.java  |  22 +-
 .../yarn/server/resourcemanager/MockNodes.java  |   9 +
 .../TestResourceTrackerService.java | 216 +--
 .../webapp/TestRMWebServicesNodes.java  |  12 +-
 12 files changed, 387 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cbcd4a4/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index 92d586b..951f5a8 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -199,6 +199,15 @@ public class NodeInfo {
 public ResourceUtilization getNodeUtilization() {
   return null;
 }
+
+@Override
+public long getUntrackedTimeStamp() {
+  return 0;
+}
+
+@Override
+public void setUntrackedTimeStamp(long timeStamp) {
+}
   }
 
   public static RMNode newNodeInfo(String rackName, String hostName,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cbcd4a4/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index 2e9cccb..e5013c4 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -188,4 +188,13 @@ public class RMNodeWrapper implements RMNode {
   public ResourceUtilization getNodeUtilization() {
 return node.getNodeUtilization();
   }
+
+  @Override
+  public long getUntrackedTimeStamp() {
+return 0;
+  }
+
+  @Override
+  public void setUntrackedTimeStamp(long timeStamp) {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cbcd4a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8acee57..66b293f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -648,6 +648,15 @@ public class YarnConfiguration extends Configuration {
   "NONE";
 
   /**
+   * Timeout(msec) for an untracked node to remain in shutdown or 
decommissioned
+   * state.
+   */
+  public static final String RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC =
+  RM_PREFIX + "node-removal-untracked.timeout-ms";
+  public static final int
+  DEFAULT_RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC = 60000;
+
+  /**
* RM proxy users' prefix
*/
   public static final String RM_PROXY_USER_PREFIX = RM_PREFIX + "proxyuser.";
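
Operators can override the new property in yarn-site.xml. An illustrative
snippet (the 60000 ms value matches the default constant above and the
yarn-default.xml entry in this commit):

  <property>
    <name>yarn.resourcemanager.node-removal-untracked.timeout-ms</name>
    <value>60000</value>
  </property>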


[47/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-04-07 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7ef482a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
new file mode 100644
index 000..12ad127
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -0,0 +1,230 @@
+
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <parent>
+    <artifactId>hadoop-yarn</artifactId>
+    <groupId>org.apache.hadoop</groupId>
+    <version>3.0.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-yarn-ui</artifactId>
+  <version>3.0.0-SNAPSHOT</version>
+  <name>Apache Hadoop YARN UI</name>
+  <packaging>${packaging.type}</packaging>
+
+  <properties>
+    <packaging.type>jar</packaging.type>
+    <webappDir>src/main/webapp</webappDir>
+    <nodeExecutable>node</nodeExecutable>
+    <nodeVersion>v0.12.2</nodeVersion>
+    <npmVersion>2.10.0</npmVersion>
+    <skipTests>false</skipTests>
+  </properties>
+
+  
+
+  
+  
+org.apache.rat
+apache-rat-plugin
+
+  
+src/main/webapp/node/**/*
+src/main/webapp/node_modules/**/*
+src/main/webapp/bower_components/**/*
+src/main/webapp/.tmp/**/*
+src/main/webapp/dist/**/*
+src/main/webapp/tmp/**/*
+src/main/webapp/.bowerrc
+src/main/webapp/.editorconfig
+src/main/webapp/.ember-cli
+src/main/webapp/.gitignore
+src/main/webapp/.jshintrc
+src/main/webapp/jsconfig.json
+src/main/webapp/tests/.jshintrc
+src/main/webapp/blueprints/.jshintrc
+src/main/webapp/.travis.yml
+src/main/webapp/.watchmanconfig
+src/main/webapp/bower.json
+src/main/webapp/ember-cli-build.js
+src/main/webapp/package.json
+src/main/webapp/testem.json
+src/main/webapp/public/assets/images/*
+src/main/webapp/public/robots.txt
+.jshintrc
+.travis.yml
+.ember-cli
+.editorconfig
+.watchmanconfig
+public/crossdomain.xml
+  
+
+  
+
+  
+ maven-clean-plugin
+ 3.0.0
+ 
+false
+
+   
+  ${basedir}/dist
+   
+   
+  ${basedir}/tmp
+   
+   
+  ${basedir}/src/main/webapp/dist
+   
+   
+  
${basedir}/src/main/webapp/bower_components
+   
+   
+  
${basedir}/src/main/webapp/node_modules
+   
+   
+  ${basedir}/src/main/webapp/tmp
+   
+   
+  ${basedir}/src/main/webapp/node
+   
+
+ 
+  
+
+  
+
+  
+
+  yarn-ui
+
+  
+false
+  
+
+  
+war
+  
+
+  
+
+  
+  
+exec-maven-plugin
+org.codehaus.mojo
+
+  
+generate-sources
+npm install
+
+  exec
+
+
+  ${webappDir}
+  npm
+  
+install
+  
+
+  
+  
+generate-sources
+bower install
+
+  exec
+
+
+  ${webappDir}
+  bower
+  
+--allow-root
+install
+  
+
+  
+  
+generate-sources
+bower --allow-root install
+
+  exec
+
+
+  ${webappDir}
+  bower
+  
+--allow-root
+install
+  
+
+  
+  
+ember build
+generate-sources
+
+  exec
+
+
+  ${webappDir}
+  ember
+  
+build
+-prod
+  
+
+  
+  
+ember test
+generate-resources
+
+  exec
+
+
+  ${skipTests}
+  ${webappDir}
+  ember
+  
+test
+  
+
+  
+
+  
+
+  
+  
+org.apache.maven.plugins
+maven-war-plugin
+
+  

[11/50] [abbrv] hadoop git commit: YARN-4880. Running TestZKRMStateStorePerf with real zookeeper cluster throws NPE. Contributed by Sunil G

2016-04-07 Thread wangda
YARN-4880. Running TestZKRMStateStorePerf with real zookeeper cluster throws 
NPE. Contributed by Sunil G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/552237d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/552237d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/552237d4

Branch: refs/heads/YARN-3368
Commit: 552237d4a34ab10fa5f9ec7aad7942f2a110993e
Parents: 818d6b7
Author: Rohith Sharma K S 
Authored: Tue Apr 5 14:25:32 2016 +0530
Committer: Rohith Sharma K S 
Committed: Tue Apr 5 14:26:19 2016 +0530

--
 .../resourcemanager/recovery/TestZKRMStateStorePerf.java| 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/552237d4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
index 4b0b06a..bd25def 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
@@ -91,7 +91,9 @@ public class TestZKRMStateStorePerf extends 
RMStateStoreTestBase
 if (appTokenMgr != null) {
   appTokenMgr.stop();
 }
-curatorTestingServer.stop();
+if (curatorTestingServer != null) {
+  curatorTestingServer.stop();
+}
   }
 
   private void initStore(String hostPort) {
@@ -99,8 +101,9 @@ public class TestZKRMStateStorePerf extends 
RMStateStoreTestBase
 RMContext rmContext = mock(RMContext.class);
 
 conf = new YarnConfiguration();
-conf.set(YarnConfiguration.RM_ZK_ADDRESS,
-optHostPort.or(curatorTestingServer.getConnectString()));
+conf.set(YarnConfiguration.RM_ZK_ADDRESS, optHostPort
+.or((curatorTestingServer == null) ? "" : curatorTestingServer
+.getConnectString()));
 conf.set(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH, workingZnode);
 
 store = new ZKRMStateStore();
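
As a quick illustration of the guard above, a self-contained sketch, assuming Guava on the classpath; FakeServer is a hypothetical stand-in for Curator's TestingServer, which stays null when a real ZooKeeper cluster is supplied:

import com.google.common.base.Optional;

public class ConnectStringFallback {
  // Hypothetical stand-in for Curator's TestingServer.
  static class FakeServer {
    String getConnectString() { return "127.0.0.1:2181"; }
  }

  // Mirrors the patched logic: fall back to the embedded server only if it exists.
  static String resolve(Optional<String> optHostPort, FakeServer server) {
    return optHostPort.or((server == null) ? "" : server.getConnectString());
  }

  public static void main(String[] args) {
    System.out.println(resolve(Optional.of("zk1:2181"), null));               // real cluster
    System.out.println(resolve(Optional.<String>absent(), new FakeServer())); // embedded server
  }
}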



[23/50] [abbrv] hadoop git commit: YARN-4906. Capture container start/finish time in container metrics. Contributed by Jian He.

2016-04-07 Thread wangda
YARN-4906. Capture container start/finish time in container metrics. 
Contributed by Jian He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b41e65e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b41e65e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b41e65e5

Branch: refs/heads/YARN-3368
Commit: b41e65e5bc9459b4d950a2c53860a223f1a0d2ec
Parents: 21eb428
Author: Varun Vasudev 
Authored: Wed Apr 6 13:41:33 2016 +0530
Committer: Varun Vasudev 
Committed: Wed Apr 6 13:41:33 2016 +0530

--
 .../container/ContainerImpl.java| 22 
 .../monitor/ContainerMetrics.java   | 18 
 .../containermanager/TestAuxServices.java   |  2 +-
 .../container/TestContainer.java| 11 ++
 4 files changed, 52 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b41e65e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index da8a3a6..a43a005 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -65,6 +65,7 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.even
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache.SharedCacheUploadEvent;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache.SharedCacheUploadEventType;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainerMetrics;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainerStartMonitoringEvent;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainerStopMonitoringEvent;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
@@ -100,6 +101,7 @@ public class ContainerImpl implements Container {
   private boolean wasLaunched;
   private long containerLocalizationStartTime;
   private long containerLaunchStartTime;
+  private ContainerMetrics containerMetrics;
   private static Clock clock = SystemClock.getInstance();
 
   /** The NM-wide configuration - not specific to this container */
@@ -147,6 +149,21 @@ public class ContainerImpl implements Container {
 this.readLock = readWriteLock.readLock();
 this.writeLock = readWriteLock.writeLock();
 this.context = context;
+boolean containerMetricsEnabled =
+conf.getBoolean(YarnConfiguration.NM_CONTAINER_METRICS_ENABLE,
+YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_ENABLE);
+
+if (containerMetricsEnabled) {
+  long flushPeriod =
+  conf.getLong(YarnConfiguration.NM_CONTAINER_METRICS_PERIOD_MS,
+  YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_PERIOD_MS);
+  long unregisterDelay = conf.getLong(
+  YarnConfiguration.NM_CONTAINER_METRICS_UNREGISTER_DELAY_MS,
+  YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_UNREGISTER_DELAY_MS);
+  containerMetrics = ContainerMetrics
+  .forContainer(containerId, flushPeriod, unregisterDelay);
+  containerMetrics.recordStartTime(clock.getTime());
+}
 
 stateMachine = stateMachineFactory.make(this);
   }
@@ -989,6 +1006,11 @@ public class ContainerImpl implements Container {
 @SuppressWarnings("unchecked")
 public void transition(ContainerImpl container, ContainerEvent event) {
   container.metrics.releaseContainer(container.resource);
+  if (container.containerMetrics != null) {
+container.containerMetrics
+.recordFinishTimeAndExitCode(clock.getTime(), container.exitCode);
+container.containerMetrics.finished();
+  }
   container.sendFinishedEvents();
   //if the current state is NEW it means the CONTAINER_INIT was never 
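
For reference, a minimal sketch of reading the three knobs consumed by the constructor hunk above; the constants are the real YarnConfiguration keys quoted in the diff, while the probe class itself is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ContainerMetricsConfigProbe {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    boolean enabled = conf.getBoolean(
        YarnConfiguration.NM_CONTAINER_METRICS_ENABLE,
        YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_ENABLE);
    long flushPeriodMs = conf.getLong(
        YarnConfiguration.NM_CONTAINER_METRICS_PERIOD_MS,
        YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_PERIOD_MS);
    long unregisterDelayMs = conf.getLong(
        YarnConfiguration.NM_CONTAINER_METRICS_UNREGISTER_DELAY_MS,
        YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_UNREGISTER_DELAY_MS);
    System.out.println("container metrics enabled=" + enabled
        + ", flushPeriodMs=" + flushPeriodMs
        + ", unregisterDelayMs=" + unregisterDelayMs);
  }
}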
   

[03/50] [abbrv] hadoop git commit: HADOOP-12169 ListStatus on empty dir in S3A lists itself instead of returning an empty list. author: Pieter Reuse. - omitted new S3A subclass

2016-04-07 Thread wangda
HADOOP-12169 ListStatus on empty dir in S3A lists itself instead of returning 
an empty list. author: Pieter Reuse. - omitted new S3A subclass


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89c93475
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89c93475
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89c93475

Branch: refs/heads/YARN-3368
Commit: 89c93475116ee475645cf81cc80f155f830e61de
Parents: 5092c94
Author: Steve Loughran 
Authored: Mon Apr 4 17:00:35 2016 +0100
Committer: Steve Loughran 
Committed: Mon Apr 4 17:02:04 2016 +0100

--
 .../s3a/TestS3AContractGetFileStatus.java   | 31 
 1 file changed, 31 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89c93475/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractGetFileStatus.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractGetFileStatus.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractGetFileStatus.java
new file mode 100644
index 000..d7b8fe3
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractGetFileStatus.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.contract.s3a;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
+
+public class TestS3AContractGetFileStatus extends 
AbstractContractGetFileStatusTest {
+
+  @Override
+  protected AbstractFSContract createContract(Configuration conf) {
+return new S3AContract(conf);
+  }
+
+}



[17/50] [abbrv] hadoop git commit: HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)

2016-04-07 Thread wangda
HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85ec5573
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85ec5573
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85ec5573

Branch: refs/heads/YARN-3368
Commit: 85ec5573eb9fd746a9295ecc6fe1ae683073aaf5
Parents: 0005816
Author: Masatake Iwasaki 
Authored: Wed Apr 6 03:22:48 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 03:22:48 2016 +0900

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 57 +
 .../src/main/resources/core-default.xml |  9 ++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 67 ++--
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |  2 +-
 4 files changed, 108 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85ec5573/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 7e6c7e3..fb11cb7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -239,14 +239,33 @@ public class Client {
* 
* @param conf Configuration
* @return the timeout period in milliseconds. -1 if no timeout value is set
+   * @deprecated use {@link #getRpcTimeout(Configuration)} instead
*/
+  @Deprecated
   final public static int getTimeout(Configuration conf) {
+int timeout = getRpcTimeout(conf);
+if (timeout > 0)  {
+  return timeout;
+}
 if (!conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY,
 CommonConfigurationKeys.IPC_CLIENT_PING_DEFAULT)) {
   return getPingInterval(conf);
 }
 return -1;
   }
+
+  /**
+   * The time after which a RPC will timeout.
+   *
+   * @param conf Configuration
+   * @return the timeout period in milliseconds.
+   */
+  public static final int getRpcTimeout(Configuration conf) {
+int timeout =
+conf.getInt(CommonConfigurationKeys.IPC_CLIENT_RPC_TIMEOUT_KEY,
+CommonConfigurationKeys.IPC_CLIENT_RPC_TIMEOUT_DEFAULT);
+return (timeout < 0) ? 0 : timeout;
+  }
   /**
* set the connection timeout value in configuration
* 
@@ -386,7 +405,7 @@ public class Client {
 private Socket socket = null; // connected socket
 private DataInputStream in;
 private DataOutputStream out;
-private int rpcTimeout;
+private final int rpcTimeout;
 private int maxIdleTime; //connections will be culled if it was idle for 
 //maxIdleTime msecs
 private final RetryPolicy connectionRetryPolicy;
@@ -394,8 +413,9 @@ public class Client {
 private int maxRetriesOnSocketTimeouts;
 private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
 private final boolean tcpLowLatency; // if T then use low-delay QoS
-private boolean doPing; //do we need to send ping message
-private int pingInterval; // how often sends ping to the server in msecs
+private final boolean doPing; //do we need to send ping message
+private final int pingInterval; // how often sends ping to the server
+private final int soTimeout; // used by ipc ping and rpc timeout
 private ByteArrayOutputStream pingRequest; // ping message
 
 // currently active calls
@@ -434,6 +454,14 @@ public class Client {
 pingHeader.writeDelimitedTo(pingRequest);
   }
   this.pingInterval = remoteId.getPingInterval();
+  if (rpcTimeout > 0) {
+// effective rpc timeout is rounded up to multiple of pingInterval
+// if pingInterval < rpcTimeout.
+this.soTimeout = (doPing && pingInterval < rpcTimeout) ?
+pingInterval : rpcTimeout;
+  } else {
+this.soTimeout = pingInterval;
+  }
   this.serviceClass = serviceClass;
   if (LOG.isDebugEnabled()) {
 LOG.debug("The ping interval is " + this.pingInterval + " ms.");
@@ -484,12 +512,12 @@ public class Client {
 
   /* Process timeout exception
* if the connection is not going to be closed or 
-   * is not configured to have a RPC timeout, send a ping.
-   * (if rpcTimeout is not set to be 0, then RPC should timeout.
-   * otherwise, throw the timeout exception.
+   * the RPC is not timed out yet, send a ping.
*/
-  private void handleTimeout(SocketTimeoutException e) throws IOException {
-if 
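
A small standalone sketch of the soTimeout selection quoted above (hypothetical class, same arithmetic): the socket timeout becomes the ping interval whenever pinging is enabled and the interval is shorter than the RPC timeout, so pings keep flowing while the RPC deadline is still enforced across socket wakeups.

public class RpcSoTimeoutDemo {
  // Same selection logic as the Connection constructor above (sketch only).
  static int effectiveSoTimeout(boolean doPing, int pingInterval, int rpcTimeout) {
    if (rpcTimeout > 0) {
      // Round the RPC deadline into pingInterval-sized socket waits so pings
      // can still be sent while an RPC is outstanding.
      return (doPing && pingInterval < rpcTimeout) ? pingInterval : rpcTimeout;
    }
    return pingInterval;
  }

  public static void main(String[] args) {
    System.out.println(effectiveSoTimeout(true, 60000, 120000)); // 60000: ping interval wins
    System.out.println(effectiveSoTimeout(true, 60000, 15000));  // 15000: rpc timeout wins
    System.out.println(effectiveSoTimeout(false, 60000, 0));     // 60000: no rpc timeout set
  }
}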

[10/50] [abbrv] hadoop git commit: HDFS-9917. IBR accumulate more objects when SNN was down for sometime. (Contributed by Brahma Reddy Battula)

2016-04-07 Thread wangda
HDFS-9917. IBR accumulate more objects when SNN was down for sometime. 
(Contributed by Brahma Reddy Battula)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/818d6b79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/818d6b79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/818d6b79

Branch: refs/heads/YARN-3368
Commit: 818d6b799eead13a17a0214172df60a269b046fb
Parents: f6b1a81
Author: Vinayakumar B 
Authored: Tue Apr 5 09:49:39 2016 +0800
Committer: Vinayakumar B 
Committed: Tue Apr 5 09:49:39 2016 +0800

--
 .../hdfs/server/datanode/BPServiceActor.java|  5 +
 .../datanode/IncrementalBlockReportManager.java |  9 ++
 .../server/datanode/TestBPOfferService.java | 96 +++-
 3 files changed, 107 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/818d6b79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 49f64c2..39f8219 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -798,6 +798,11 @@ class BPServiceActor implements Runnable {
   // and re-register
   register(nsInfo);
   scheduler.scheduleHeartbeat();
+  // HDFS-9917,Standby NN IBR can be very huge if standby namenode is down
+  // for sometime.
+  if (state == HAServiceState.STANDBY) {
+ibrManager.clearIBRs();
+  }
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/818d6b79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
index b9b348a..e95142d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
@@ -258,4 +258,13 @@ class IncrementalBlockReportManager {
   }
 }
   }
+
+  void clearIBRs() {
+pendingIBRs.clear();
+  }
+
+  @VisibleForTesting
+  int getPendingIBRSize() {
+return pendingIBRs.size();
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/818d6b79/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index 95a103e..29db702 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -30,6 +30,7 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
@@ -48,10 +49,12 @@ import 
org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import 

[29/50] [abbrv] hadoop git commit: MAPREDUCE-6647. MR usage counters use the resources requested instead of the resources allocated (haibochen via rkanter)

2016-04-07 Thread wangda
MAPREDUCE-6647. MR usage counters use the resources requested instead of the 
resources allocated (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3be1ab48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3be1ab48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3be1ab48

Branch: refs/heads/YARN-3368
Commit: 3be1ab485f557c8a3c6a5066453f24d8d61f30be
Parents: 93bacda
Author: Robert Kanter 
Authored: Wed Apr 6 17:15:43 2016 -0700
Committer: Robert Kanter 
Committed: Wed Apr 6 17:15:43 2016 -0700

--
 .../v2/app/job/impl/TaskAttemptImpl.java| 41 
 .../apache/hadoop/mapreduce/v2/app/MRApp.java   | 10 -
 .../hadoop/mapreduce/v2/app/TestRecovery.java   | 29 --
 .../v2/app/job/impl/TestTaskAttempt.java| 37 +++---
 4 files changed, 74 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3be1ab48/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index 8fff7de..5f0a622 100755
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -1406,29 +1406,36 @@ public abstract class TaskAttemptImpl implements
   
   private static void updateMillisCounters(JobCounterUpdateEvent jce,
   TaskAttemptImpl taskAttempt) {
-TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
+// if container/resource is not allocated, do not update
+if (null == taskAttempt.container ||
+null == taskAttempt.container.getResource()) {
+  return;
+}
 long duration = (taskAttempt.getFinishTime() - 
taskAttempt.getLaunchTime());
-int mbRequired =
-taskAttempt.getMemoryRequired(taskAttempt.conf, taskType);
-int vcoresRequired = taskAttempt.getCpuRequired(taskAttempt.conf, 
taskType);
-
+Resource allocatedResource = taskAttempt.container.getResource();
+int mbAllocated = allocatedResource.getMemory();
+int vcoresAllocated = allocatedResource.getVirtualCores();
 int minSlotMemSize = taskAttempt.conf.getInt(
-  YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
-  YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
-
-int simSlotsRequired =
-minSlotMemSize == 0 ? 0 : (int) Math.ceil((float) mbRequired
-/ minSlotMemSize);
+YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
+YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
+int simSlotsAllocated = minSlotMemSize == 0 ? 0 :
+(int) Math.ceil((float) mbAllocated / minSlotMemSize);
 
+TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
 if (taskType == TaskType.MAP) {
-  jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, simSlotsRequired * 
duration);
-  jce.addCounterUpdate(JobCounter.MB_MILLIS_MAPS, duration * mbRequired);
-  jce.addCounterUpdate(JobCounter.VCORES_MILLIS_MAPS, duration * 
vcoresRequired);
+  jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS,
+  simSlotsAllocated * duration);
+  jce.addCounterUpdate(JobCounter.MB_MILLIS_MAPS, duration * mbAllocated);
+  jce.addCounterUpdate(JobCounter.VCORES_MILLIS_MAPS,
+  duration * vcoresAllocated);
   jce.addCounterUpdate(JobCounter.MILLIS_MAPS, duration);
 } else {
-  jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, simSlotsRequired * 
duration);
-  jce.addCounterUpdate(JobCounter.MB_MILLIS_REDUCES, duration * 
mbRequired);
-  jce.addCounterUpdate(JobCounter.VCORES_MILLIS_REDUCES, duration * 
vcoresRequired);
+  jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES,
+  simSlotsAllocated * duration);
+  jce.addCounterUpdate(JobCounter.MB_MILLIS_REDUCES,
+  duration * mbAllocated);
+  jce.addCounterUpdate(JobCounter.VCORES_MILLIS_REDUCES,
+  duration * vcoresAllocated);
   jce.addCounterUpdate(JobCounter.MILLIS_REDUCES, duration);
 }
   }
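
The counter arithmetic, extracted into a runnable sketch (hypothetical class; the numbers are illustrative): the change feeds allocated rather than requested resources into the same formula.

public class SlotMillisDemo {
  // Slots = allocated MB / scheduler minimum allocation, rounded up
  // (0 when the minimum is 0), exactly as in updateMillisCounters above.
  static int simSlots(int mbAllocated, int minSlotMemSize) {
    return minSlotMemSize == 0 ? 0
        : (int) Math.ceil((float) mbAllocated / minSlotMemSize);
  }

  public static void main(String[] args) {
    int mbAllocated = 1536;    // what the RM actually granted
    int minSlotMemSize = 1024; // yarn.scheduler.minimum-allocation-mb
    long durationMs = 42000L;  // finishTime - launchTime
    System.out.println("SLOTS_MILLIS = "
        + (long) simSlots(mbAllocated, minSlotMemSize) * durationMs);
    System.out.println("MB_MILLIS = " + durationMs * mbAllocated);
  }
}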


[15/50] [abbrv] hadoop git commit: HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination paths. Contributed by Kuhu Shukla.

2016-04-07 Thread wangda
HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination 
paths. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91746450
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91746450
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91746450

Branch: refs/heads/YARN-3368
Commit: 917464505c0e930ebeb4c775d829e51c56a48686
Parents: 6be28bc
Author: Kihwal Lee 
Authored: Tue Apr 5 09:07:24 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 09:07:24 2016 -0500

--
 .../apache/hadoop/fs/shell/MoveCommands.java|  6 +++-
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 31 
 2 files changed, 36 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91746450/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
index 02a3b25..d359282 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
@@ -100,7 +100,11 @@ class MoveCommands {
 
 @Override
 protected void processPath(PathData src, PathData target) throws 
IOException {
-  if (!src.fs.getUri().equals(target.fs.getUri())) {
+  String srcUri = src.fs.getUri().getScheme() + "://" +
+  src.fs.getUri().getHost();
+  String dstUri = target.fs.getUri().getScheme() + "://" +
+  target.fs.getUri().getHost();
+  if (!srcUri.equals(dstUri)) {
 throw new PathIOException(src.toString(),
 "Does not match target filesystem");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/91746450/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 41cd5c0..b75ac11 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -559,6 +559,37 @@ public class TestDFSShell {
 }
   }
 
+  @Test
+  public void testMoveWithTargetPortEmpty() throws Exception {
+Configuration conf = new HdfsConfiguration();
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf)
+  .format(true)
+  .numDataNodes(2)
+  .nameNodePort(8020)
+  .waitSafeMode(true)
+  .build();
+  FileSystem srcFs = cluster.getFileSystem();
+  FsShell shell = new FsShell();
+  shell.setConf(conf);
+  String[] argv = new String[2];
+  argv[0] = "-mkdir";
+  argv[1] = "/testfile";
+  ToolRunner.run(shell, argv);
+  argv = new String[3];
+  argv[0] = "-mv";
+  argv[1] = srcFs.getUri() + "/testfile";
+  argv[2] = "hdfs://localhost/testfile2";
+  int ret = ToolRunner.run(shell, argv);
+  assertEquals("mv should have succeeded", 0, ret);
+} finally {
+  if (cluster != null) {
+cluster.shutdown();
+  }
+}
+  }
+
  @Test (timeout = 30000)
   public void testURIPaths() throws Exception {
 Configuration srcConf = new HdfsConfiguration();
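
A standalone sketch of the relaxed comparison (hypothetical class): only scheme and host are compared, so a source URI with an explicit port and a target without one now count as the same filesystem.

import java.net.URI;

public class SameFsCheckDemo {
  // Compare only scheme and host, mirroring the patched processPath() above.
  static boolean sameFileSystem(URI src, URI dst) {
    String srcUri = src.getScheme() + "://" + src.getHost();
    String dstUri = dst.getScheme() + "://" + dst.getHost();
    return srcUri.equals(dstUri);
  }

  public static void main(String[] args) {
    URI src = URI.create("hdfs://localhost:8020/testfile");
    URI dst = URI.create("hdfs://localhost/testfile2");
    System.out.println(sameFileSystem(src, dst)); // true after HDFS-10239
  }
}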



[14/50] [abbrv] hadoop git commit: HADOOP-12967. Remove FileUtil#copyMerge. Contributed by Brahma Reddy Battula.

2016-04-07 Thread aengineer
HADOOP-12967. Remove FileUtil#copyMerge. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da614ca5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da614ca5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da614ca5

Branch: refs/heads/HDFS-7240
Commit: da614ca5dc26562d7ecd5d7c5743fa52c3c17342
Parents: 1e6f929
Author: Akira Ajisaka 
Authored: Mon Apr 4 17:46:56 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Apr 4 17:48:08 2016 +0900

--
 .../java/org/apache/hadoop/fs/FileUtil.java | 42 --
 .../java/org/apache/hadoop/fs/TestFileUtil.java | 58 
 2 files changed, 100 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da614ca5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index b855c48..e2d6ecd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -23,7 +23,6 @@ import java.net.InetAddress;
 import java.net.URI;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Enumeration;
 import java.util.List;
 import java.util.Map;
@@ -381,47 +380,6 @@ public class FileUtil {
 
   }
 
-  @Deprecated
-  /** Copy all files in a directory to one output file (merge). */
-  public static boolean copyMerge(FileSystem srcFS, Path srcDir,
-  FileSystem dstFS, Path dstFile,
-  boolean deleteSource,
-  Configuration conf, String addString) throws 
IOException {
-dstFile = checkDest(srcDir.getName(), dstFS, dstFile, false);
-
-if (!srcFS.getFileStatus(srcDir).isDirectory())
-  return false;
-
-OutputStream out = dstFS.create(dstFile);
-
-try {
-  FileStatus contents[] = srcFS.listStatus(srcDir);
-  Arrays.sort(contents);
-  for (int i = 0; i < contents.length; i++) {
-if (contents[i].isFile()) {
-  InputStream in = srcFS.open(contents[i].getPath());
-  try {
-IOUtils.copyBytes(in, out, conf, false);
-if (addString!=null)
-  out.write(addString.getBytes("UTF-8"));
-
-  } finally {
-in.close();
-  }
-}
-  }
-} finally {
-  out.close();
-}
-
-
-if (deleteSource) {
-  return srcFS.delete(srcDir, true);
-} else {
-  return true;
-}
-  }
-
   /** Copy local files to a FileSystem. */
   public static boolean copy(File src,
  FileSystem dstFS, Path dst,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da614ca5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
index f7464b7..a9ef5c0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
@@ -19,11 +19,9 @@ package org.apache.hadoop.fs;
 
 import org.junit.*;
 
-import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
-import java.io.FileReader;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.net.InetAddress;
@@ -49,7 +47,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.tools.tar.TarEntry;
 import org.apache.tools.tar.TarOutputStream;
 
-import javax.print.attribute.URISyntax;
 
 import static org.junit.Assert.*;
 import static org.mockito.Mockito.mock;
@@ -526,61 +523,6 @@ public class TestFileUtil {
 validateAndSetWritablePermissions(false, ret);
   }
   
-  @Test (timeout = 30000)
-  public void testCopyMergeSingleDirectory() throws IOException {
-setupDirs();
-boolean copyMergeResult = copyMerge("partitioned", "tmp/merged");
-Assert.assertTrue("Expected successful copyMerge result.", 
copyMergeResult);
-File merged = new File(TEST_DIR, "tmp/merged");
-Assert.assertTrue("File tmp/merged must exist after copyMerge.",
-merged.exists());
-BufferedReader rdr = new BufferedReader(new 
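
Since the method is removed with no in-tree replacement, callers can inline something like the following. This is a sketch reconstructed from the deleted body above, not a Hadoop API; it drops the addString and destination-checking behavior of the original:

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class CopyMergeReplacement {
  // Hypothetical helper: concatenate every file in srcDir into dstFile.
  static boolean copyMerge(FileSystem srcFS, Path srcDir, FileSystem dstFS,
      Path dstFile, boolean deleteSource, Configuration conf)
      throws IOException {
    if (!srcFS.getFileStatus(srcDir).isDirectory()) {
      return false;
    }
    try (OutputStream out = dstFS.create(dstFile)) {
      FileStatus[] contents = srcFS.listStatus(srcDir);
      Arrays.sort(contents);
      for (FileStatus status : contents) {
        if (status.isFile()) {
          try (InputStream in = srcFS.open(status.getPath())) {
            IOUtils.copyBytes(in, out, conf, false); // append this file's bytes
          }
        }
      }
    }
    return deleteSource ? srcFS.delete(srcDir, true) : true;
  }
}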

[05/50] [abbrv] hadoop git commit: HADOOP-11661. Deprecate FileUtil#copyMerge. Contributed by Brahma Reddy Battula.

2016-04-07 Thread aengineer
HADOOP-11661. Deprecate FileUtil#copyMerge. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a8d8b80a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a8d8b80a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a8d8b80a

Branch: refs/heads/HDFS-7240
Commit: a8d8b80a205c78bf0aa65d32a6a83c16d3ea3f0b
Parents: aac4d65
Author: Akira Ajisaka 
Authored: Fri Apr 1 13:59:14 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Apr 1 13:59:14 2016 +0900

--
 .../hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java  | 1 +
 .../src/test/java/org/apache/hadoop/fs/TestFileUtil.java| 1 +
 2 files changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8d8b80a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index e74c41c..b855c48 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -381,6 +381,7 @@ public class FileUtil {
 
   }
 
+  @Deprecated
   /** Copy all files in a directory to one output file (merge). */
   public static boolean copyMerge(FileSystem srcFS, Path srcDir,
   FileSystem dstFS, Path dstFile,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8d8b80a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
index 5fc0b2d..f7464b7 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
@@ -559,6 +559,7 @@ public class TestFileUtil {
* @return boolean true if the call to FileUtil.copyMerge was successful.
* @throws IOException if an I/O error occurs.
*/
+  @SuppressWarnings("deprecation")
   private boolean copyMerge(String src, String dst)
   throws IOException {
 Configuration conf = new Configuration();



[06/50] [abbrv] hadoop git commit: Revert "YARN-4857. Add missing default configuration regarding preemption of CapacityScheduler. Contributed by Kai Sasaki."

2016-04-07 Thread aengineer
Revert "YARN-4857. Add missing default configuration regarding preemption of 
CapacityScheduler. Contributed by Kai Sasaki."

This reverts commit 0064cba169d1bb761f6e81ee86830be598d7c500.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3488c4f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3488c4f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3488c4f2

Branch: refs/heads/HDFS-7240
Commit: 3488c4f2c9767684eb1007bb00250f474c06d5d8
Parents: a8d8b80
Author: Varun Vasudev 
Authored: Fri Apr 1 12:20:40 2016 +0530
Committer: Varun Vasudev 
Committed: Fri Apr 1 12:20:40 2016 +0530

--
 .../src/main/resources/yarn-default.xml | 58 
 1 file changed, 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3488c4f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index cb3c73a..506cf3d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -908,64 +908,6 @@
     <value>60</value>
   </property>
 
-  <property>
-    <description>
-    If true, run the policy but do not affect the cluster with preemption and kill events.
-    </description>
-    <name>yarn.resourcemanager.monitor.capacity.preemption.observe_only</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <description>
-    Time in milliseconds between invocations of this ProportionalCapacityPreemptionPolicy
-    policy.
-    </description>
-    <name>yarn.resourcemanager.monitor.capacity.preemption.monitoring_interval</name>
-    <value>3000</value>
-  </property>
-
-  <property>
-    <description>
-    Time in milliseconds between requesting a preemption from an application and killing
-    the container.
-    </description>
-    <name>yarn.resourcemanager.monitor.capacity.preemption.max_wait_before_kill</name>
-    <value>15000</value>
-  </property>
-
-  <property>
-    <description>
-    Maximum percentage of resources preempted in a single round. By controlling this value one
-    can throttle the pace at which containers are reclaimed from the cluster. After computing
-    the total desired preemption, the policy scales it back within this limit.
-    </description>
-    <name>yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round</name>
-    <value>0.1</value>
-  </property>
-
-  <property>
-    <description>
-    Maximum amount of resources above the target capacity ignored for preemption.
-    This defines a deadzone around the target capacity that helps prevent thrashing and
-    oscillations around the computed target balance. High values would slow the time to capacity
-    and (absent natural.completions) it might prevent convergence to guaranteed capacity.
-    </description>
-    <name>yarn.resourcemanager.monitor.capacity.preemption.max_ignored_over_capacity</name>
-    <value>0.1</value>
-  </property>
-
-  <property>
-    <description>
-    Given a computed preemption target, account for containers naturally expiring and preempt
-    only this percentage of the delta. This determines the rate of geometric convergence into
-    the deadzone (MAX_IGNORED_OVER_CAPACITY). For example, a termination factor of 0.5 will reclaim
-    almost 95% of resources within 5 * #WAIT_TIME_BEFORE_KILL, even absent natural termination.
-    </description>
-    <name>yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor</name>
-    <value>0.2</value>
-  </property>
   
 
   



[27/50] [abbrv] hadoop git commit: YARN-4893. Fix some intermittent test failures in TestRMAdminService. Contributed by Brahma Reddy Battula.

2016-04-07 Thread aengineer
YARN-4893. Fix some intermittent test failures in TestRMAdminService. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6be28bcc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6be28bcc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6be28bcc

Branch: refs/heads/HDFS-7240
Commit: 6be28bcc461292b24589dae17a235b3eaadc07ed
Parents: 1cbcd4a
Author: Junping Du 
Authored: Tue Apr 5 06:57:26 2016 -0700
Committer: Junping Du 
Committed: Tue Apr 5 06:57:54 2016 -0700

--
 .../org/apache/hadoop/yarn/server/resourcemanager/MockRM.java | 7 +--
 .../yarn/server/resourcemanager/TestRMAdminService.java   | 3 ---
 .../hadoop/yarn/server/resourcemanager/TestRMRestart.java | 2 --
 .../server/resourcemanager/TestResourceTrackerService.java| 6 --
 .../server/resourcemanager/rmapp/TestNodesListManager.java| 5 ++---
 5 files changed, 7 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6be28bcc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index d5b64c1..25c558f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -56,12 +56,12 @@ import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationContext;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
@@ -603,6 +603,7 @@ public class MockRM extends ResourceManager {
   public MockNM registerNode(String nodeIdStr, int memory) throws Exception {
 MockNM nm = new MockNM(nodeIdStr, memory, getResourceTrackerService());
 nm.registerNode();
+drainEvents();
 return nm;
   }
 
@@ -611,6 +612,7 @@ public class MockRM extends ResourceManager {
 MockNM nm =
 new MockNM(nodeIdStr, memory, vCores, getResourceTrackerService());
 nm.registerNode();
+drainEvents();
 return nm;
   }
   
@@ -620,6 +622,7 @@ public class MockRM extends ResourceManager {
 new MockNM(nodeIdStr, memory, vCores, getResourceTrackerService(),
 YarnVersionInfo.getVersion());
 nm.registerNode(runningApplications);
+drainEvents();
 return nm;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6be28bcc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
index 4513cbb..5c69411 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
@@ -27,9 +27,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import 
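
The race being closed, sketched with the test-only MockRM API quoted above: before this change a test could assert on scheduler state before the async dispatcher had processed the node-added event; registerNode() now drains events first. A minimal sketch, assuming the hadoop-yarn-server-resourcemanager test classes are on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;

public class RegisterNodeDrainDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new YarnConfiguration();
    MockRM rm = new MockRM(conf);
    rm.start();
    // After YARN-4893 this call drains the dispatcher before returning,
    // so the new node is immediately visible to the scheduler.
    MockNM nm = rm.registerNode("127.0.0.1:1234", 8192);
    System.out.println("registered " + nm.getNodeId());
    rm.stop();
  }
}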

[01/50] [abbrv] hadoop git commit: YARN-4811. Generate histograms in ContainerMetrics for actual container resource usage

2016-04-07 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 fedb22d9b -> 3f62ba558


YARN-4811. Generate histograms in ContainerMetrics for actual container 
resource usage


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0dd9bcab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0dd9bcab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0dd9bcab

Branch: refs/heads/HDFS-7240
Commit: 0dd9bcab97ccdf24a2174636604110b74664cf80
Parents: 7a02147
Author: Jian He 
Authored: Thu Mar 31 14:28:13 2016 -0700
Committer: Jian He 
Committed: Thu Mar 31 14:28:13 2016 -0700

--
 .../hadoop/metrics2/lib/MutableQuantiles.java   |  7 +-
 .../hadoop/metrics2/util/QuantileEstimator.java | 32 +
 .../hadoop/metrics2/util/SampleQuantiles.java   |  2 +-
 .../hadoop-yarn-server-nodemanager/pom.xml  |  5 ++
 .../monitor/ContainerMetrics.java   | 69 
 .../monitor/TestContainerMetrics.java   | 58 +++-
 6 files changed, 170 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0dd9bcab/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
index 2e6053f..a4711db 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.util.Quantile;
+import org.apache.hadoop.metrics2.util.QuantileEstimator;
 import org.apache.hadoop.metrics2.util.SampleQuantiles;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -54,7 +55,7 @@ public class MutableQuantiles extends MutableMetric {
   private final MetricsInfo[] quantileInfos;
   private final int interval;
 
-  private SampleQuantiles estimator;
+  private QuantileEstimator estimator;
   private long previousCount = 0;
 
   @VisibleForTesting
@@ -134,6 +135,10 @@ public class MutableQuantiles extends MutableMetric {
 return interval;
   }
 
+  public synchronized void setEstimator(QuantileEstimator quantileEstimator) {
+this.estimator = quantileEstimator;
+  }
+
   /**
* Runnable used to periodically roll over the internal
* {@link SampleQuantiles} every interval.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0dd9bcab/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/QuantileEstimator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/QuantileEstimator.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/QuantileEstimator.java
new file mode 100644
index 000..075b879
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/QuantileEstimator.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.util;
+
+import java.util.Map;
+
+public interface QuantileEstimator {
+
+  void insert(long value);
+
+  Map<Quantile, Long> snapshot();
+
+  long getCount();
+
+  void clear();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0dd9bcab/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java
--
diff --git 
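
To show what the new extension point admits, here is a toy QuantileEstimator (hypothetical class, not part of the patch) that could be installed on a MutableQuantiles via the setEstimator() hook added above:

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.metrics2.util.Quantile;
import org.apache.hadoop.metrics2.util.QuantileEstimator;

// Toy estimator: tracks only the running maximum, reported as the 1.0 quantile.
public class MaxOnlyEstimator implements QuantileEstimator {
  private long max = Long.MIN_VALUE;
  private long count = 0;

  @Override
  public synchronized void insert(long value) {
    max = Math.max(max, value);
    count++;
  }

  @Override
  public synchronized Map<Quantile, Long> snapshot() {
    Map<Quantile, Long> snap = new HashMap<>();
    snap.put(new Quantile(1.0, 0.0), count == 0 ? 0L : max);
    return snap;
  }

  @Override
  public synchronized long getCount() {
    return count;
  }

  @Override
  public synchronized void clear() {
    max = Long.MIN_VALUE;
    count = 0;
  }
}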

[10/50] [abbrv] hadoop git commit: YARN-4657. Javadoc comment is broken for Resources.multiplyByAndAddTo(). (Daniel Templeton via kasha)

2016-04-07 Thread aengineer
YARN-4657. Javadoc comment is broken for Resources.multiplyByAndAddTo(). 
(Daniel Templeton via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81d04cae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81d04cae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81d04cae

Branch: refs/heads/HDFS-7240
Commit: 81d04cae41182808ace5d86cdac7e4d71871eb1e
Parents: 5686caa
Author: Karthik Kambatla 
Authored: Fri Apr 1 16:19:54 2016 -0700
Committer: Karthik Kambatla 
Committed: Fri Apr 1 16:20:00 2016 -0700

--
 .../main/java/org/apache/hadoop/yarn/util/resource/Resources.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81d04cae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index b05d021..558f96c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -152,7 +152,7 @@ public class Resources {
   }
 
   /**
-   * Multiply @param rhs by @param by, and add the result to @param lhs
+   * Multiply {@code rhs} by {@code by}, and add the result to {@code lhs}
* without creating any new {@link Resource} object
*/
   public static Resource multiplyAndAddTo(



[12/50] [abbrv] hadoop git commit: HADOOP-12169 ListStatus on empty dir in S3A lists itself instead of returning an empty list. author: Pieter Reuse.

2016-04-07 Thread aengineer
HADOOP-12169 ListStatus on empty dir in S3A lists itself instead of returning 
an empty list. author: Pieter Reuse.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ecdd4cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ecdd4cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ecdd4cf

Branch: refs/heads/HDFS-7240
Commit: 0ecdd4cffa51e34997321c384496efc249e3d8ff
Parents: 54b2e78
Author: Steve Loughran 
Authored: Sun Apr 3 16:39:14 2016 +0100
Committer: Steve Loughran 
Committed: Sun Apr 3 16:40:19 2016 +0100

--
 .../AbstractContractGetFileStatusTest.java  | 23 
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  7 --
 .../src/test/resources/contract/s3a.xml |  5 +
 3 files changed, 33 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ecdd4cf/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
index 7ed375e..3e5bb12 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
@@ -19,10 +19,14 @@
 package org.apache.hadoop.fs.contract;
 
 import java.io.FileNotFoundException;
+import java.io.IOException;
 
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -58,4 +62,23 @@ public abstract class AbstractContractGetFileStatusTest 
extends
   handleExpectedException(e);
 }
   }
+
+  @Test
+  public void testListStatusEmptyDirectory() throws IOException {
+// remove the test directory
+FileSystem fs = getFileSystem();
+assertTrue(fs.delete(getContract().getTestPath(), true));
+
+// create a - non-qualified - Path for a subdir
+Path subfolder = 
getContract().getTestPath().suffix("/"+testPath.getName());
+assertTrue(fs.mkdirs(subfolder));
+
+// assert empty ls on the empty dir
+assertEquals("ls on an empty directory not of length 0", 0,
+fs.listStatus(subfolder).length);
+
+// assert non-empty ls on parent dir
+assertTrue("ls on a non-empty directory of length 0",
+fs.listStatus(getContract().getTestPath()).length > 0);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ecdd4cf/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 6afb05d..fe705ce 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -788,11 +788,14 @@ public class S3AFileSystem extends FileSystem {
   ObjectListing objects = s3.listObjects(request);
   statistics.incrementReadOps(1);
 
+  Path fQualified = f.makeQualified(uri, workingDir);
+
   while (true) {
 for (S3ObjectSummary summary : objects.getObjectSummaries()) {
   Path keyPath = keyToPath(summary.getKey()).makeQualified(uri, 
workingDir);
   // Skip over keys that are ourselves and old S3N _$folder$ files
-  if (keyPath.equals(f) || 
summary.getKey().endsWith(S3N_FOLDER_SUFFIX)) {
+  if (keyPath.equals(fQualified) ||
+  summary.getKey().endsWith(S3N_FOLDER_SUFFIX)) {
 if (LOG.isDebugEnabled()) {
   LOG.debug("Ignoring: " + keyPath);
 }
@@ -807,7 +810,7 @@ public class S3AFileSystem extends FileSystem {
   } else {
 result.add(new S3AFileStatus(summary.getSize(),
 dateToLong(summary.getLastModified()), keyPath,
-getDefaultBlockSize(f.makeQualified(uri, workingDir;
+getDefaultBlockSize(fQualified)));
 if (LOG.isDebugEnabled()) {
   LOG.debug("Adding: fi: " + keyPath);
 }
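
The root cause in one runnable sketch (hypothetical class): an unqualified Path never equals the fully qualified key path built from each S3 object summary, so the directory's own marker key survived the skip check and was listed.

import java.net.URI;

import org.apache.hadoop.fs.Path;

public class QualifiedPathDemo {
  public static void main(String[] args) {
    URI fsUri = URI.create("s3a://bucket");
    Path workingDir = new Path("/user/test");
    Path raw = new Path("/empty-dir");
    Path qualified = raw.makeQualified(fsUri, workingDir);
    System.out.println(raw);                   // /empty-dir
    System.out.println(qualified);             // s3a://bucket/empty-dir
    System.out.println(raw.equals(qualified)); // false: the bug's root cause
  }
}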


[26/50] [abbrv] hadoop git commit: YARN-4311. Removing nodes from include and exclude lists will not remove them from decommissioned nodes list. Contributed by Kuhu Shukla

2016-04-07 Thread aengineer
YARN-4311. Removing nodes from include and exclude lists will not remove them 
from decommissioned nodes list. Contributed by Kuhu Shukla


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1cbcd4a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1cbcd4a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1cbcd4a4

Branch: refs/heads/HDFS-7240
Commit: 1cbcd4a491e6a57d466c2897335614dc6770b475
Parents: 776b549
Author: Jason Lowe 
Authored: Tue Apr 5 13:40:19 2016 +
Committer: Jason Lowe 
Committed: Tue Apr 5 13:40:19 2016 +

--
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |   9 +
 .../yarn/sls/scheduler/RMNodeWrapper.java   |   9 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   9 +
 .../src/main/resources/yarn-default.xml |  13 ++
 .../resourcemanager/NodesListManager.java   | 104 -
 .../server/resourcemanager/RMServerUtils.java   |   2 +-
 .../resourcemanager/ResourceTrackerService.java |   8 +-
 .../server/resourcemanager/rmnode/RMNode.java   |   4 +
 .../resourcemanager/rmnode/RMNodeImpl.java  |  22 +-
 .../yarn/server/resourcemanager/MockNodes.java  |   9 +
 .../TestResourceTrackerService.java | 216 +--
 .../webapp/TestRMWebServicesNodes.java  |  12 +-
 12 files changed, 387 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cbcd4a4/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index 92d586b..951f5a8 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -199,6 +199,15 @@ public class NodeInfo {
 public ResourceUtilization getNodeUtilization() {
   return null;
 }
+
+@Override
+public long getUntrackedTimeStamp() {
+  return 0;
+}
+
+@Override
+public void setUntrackedTimeStamp(long timeStamp) {
+}
   }
 
   public static RMNode newNodeInfo(String rackName, String hostName,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cbcd4a4/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index 2e9cccb..e5013c4 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -188,4 +188,13 @@ public class RMNodeWrapper implements RMNode {
   public ResourceUtilization getNodeUtilization() {
 return node.getNodeUtilization();
   }
+
+  @Override
+  public long getUntrackedTimeStamp() {
+return 0;
+  }
+
+  @Override
+  public void setUntrackedTimeStamp(long timeStamp) {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cbcd4a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8acee57..66b293f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -648,6 +648,15 @@ public class YarnConfiguration extends Configuration {
   "NONE";
 
   /**
+   * Timeout(msec) for an untracked node to remain in shutdown or decommissioned
+   * state.
+   */
+  public static final String RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC =
+  RM_PREFIX + "node-removal-untracked.timeout-ms";
+  public static final int
+  DEFAULT_RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC = 60000;
+
+  /**
* RM proxy users' prefix
*/
   public static final String RM_PROXY_USER_PREFIX = RM_PREFIX + "proxyuser.";
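
For illustration, a minimal sketch of reading the new property back through the constants added above (the demo class name is hypothetical; getInt is inherited from Configuration):

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class UntrackedTimeoutDemo {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Falls back to the default above when yarn-site.xml does not set
    // yarn.resourcemanager.node-removal-untracked.timeout-ms.
    int timeoutMs = conf.getInt(
        YarnConfiguration.RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC,
        YarnConfiguration.DEFAULT_RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC);
    System.out.println("untracked node removal timeout: " + timeoutMs + " ms");
  }
}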


[09/50] [abbrv] hadoop git commit: Missing file for YARN-4895.

2016-04-07 Thread aengineer
Missing file for YARN-4895.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5686caa9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5686caa9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5686caa9

Branch: refs/heads/HDFS-7240
Commit: 5686caa9fcb59759c9286385575f31e407a97c16
Parents: 82621e3
Author: Arun Suresh 
Authored: Fri Apr 1 15:58:13 2016 -0700
Committer: Arun Suresh 
Committed: Fri Apr 1 15:58:13 2016 -0700

--
 .../api/records/TestResourceUtilization.java| 63 
 1 file changed, 63 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5686caa9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/TestResourceUtilization.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/TestResourceUtilization.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/TestResourceUtilization.java
new file mode 100644
index 0000000..5934846
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/TestResourceUtilization.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestResourceUtilization {
+
+  @Test
+  public void testResourceUtilization() {
+ResourceUtilization u1 = ResourceUtilization.newInstance(10, 20, 0.5f);
+ResourceUtilization u2 = ResourceUtilization.newInstance(u1);
+ResourceUtilization u3 = ResourceUtilization.newInstance(10, 20, 0.5f);
+ResourceUtilization u4 = ResourceUtilization.newInstance(20, 20, 0.5f);
+ResourceUtilization u5 = ResourceUtilization.newInstance(30, 40, 0.8f);
+
+Assert.assertEquals(u1, u2);
+Assert.assertEquals(u1, u3);
+Assert.assertNotEquals(u1, u4);
+Assert.assertNotEquals(u2, u5);
+Assert.assertNotEquals(u4, u5);
+
+Assert.assertTrue(u1.hashCode() == u2.hashCode());
+Assert.assertTrue(u1.hashCode() == u3.hashCode());
+Assert.assertFalse(u1.hashCode() == u4.hashCode());
+Assert.assertFalse(u2.hashCode() == u5.hashCode());
+Assert.assertFalse(u4.hashCode() == u5.hashCode());
+
+Assert.assertTrue(u1.getPhysicalMemory() == 10);
+Assert.assertFalse(u1.getVirtualMemory() == 10);
+Assert.assertTrue(u1.getCPU() == 0.5f);
+
+Assert.assertEquals("", u1.toString());
+
+u1.addTo(10, 0, 0.0f);
+Assert.assertNotEquals(u1, u2);
+Assert.assertEquals(u1, u4);
+u1.addTo(10, 20, 0.3f);
+Assert.assertEquals(u1, u5);
+u1.subtractFrom(10, 20, 0.3f);
+Assert.assertEquals(u1, u4);
+u1.subtractFrom(10, 0, 0.0f);
+Assert.assertEquals(u1, u3);
+  }
+}
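
The add/subtract round trip above is the heart of the API; a minimal standalone sketch using only the calls exercised by the test (the demo class name is hypothetical):

import org.apache.hadoop.yarn.api.records.ResourceUtilization;

public class UtilizationDemo {
  public static void main(String[] args) {
    // (pmem MB, vmem MB, cpu fraction), as in the test above.
    ResourceUtilization u = ResourceUtilization.newInstance(10, 20, 0.5f);
    u.addTo(10, 0, 0.0f);        // pmem 10 -> 20
    u.subtractFrom(10, 0, 0.0f); // pmem 20 -> 10
    System.out.println(u.getPhysicalMemory()); // prints 10
  }
}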



[29/50] [abbrv] hadoop git commit: YARN-4916. TestNMProxy.tesNMProxyRPCRetry fails. Contributed by Tibor Kiss.

2016-04-07 Thread aengineer
YARN-4916. TestNMProxy.tesNMProxyRPCRetry fails. Contributed by Tibor Kiss.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00058167
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00058167
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00058167

Branch: refs/heads/HDFS-7240
Commit: 00058167431475c6e63c80207424f1d365569e3a
Parents: 9174645
Author: Junping Du 
Authored: Tue Apr 5 09:01:08 2016 -0700
Committer: Junping Du 
Committed: Tue Apr 5 09:01:08 2016 -0700

--
 .../yarn/server/nodemanager/containermanager/TestNMProxy.java | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00058167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
index 7ce15c5..46b32de 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
@@ -188,8 +188,7 @@ public class TestNMProxy extends BaseContainerManagerTest {
   Assert.fail("should get socket exception");
 } catch (IOException e) {
   // socket exception should be thrown immediately, without RPC retries.
-  Assert.assertTrue(e.toString().
-  contains("Failed on local exception: java.net.SocketException"));
+  Assert.assertTrue(e instanceof java.net.SocketException);
 }
   }
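
The one-line change swaps a brittle message-text match for a type check; a small self-contained sketch of why that is more robust (class and strings hypothetical):

import java.io.IOException;
import java.net.SocketException;

public class AssertStyleDemo {
  public static void main(String[] args) {
    IOException e = new SocketException("Invalid argument");
    // Robust: survives changes to exception message wording across JDKs.
    System.out.println(e instanceof SocketException);                       // true
    // Brittle: depends on the exact wrapped-message format.
    System.out.println(e.toString().contains("Failed on local exception")); // false
  }
}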
 



[46/50] [abbrv] hadoop git commit: HADOOP-12984. Add GenericTestUtils.getTestDir method and use it for temporary directory in tests (Contributed by Steve Loughran and Vinayakumar B)

2016-04-07 Thread aengineer
HADOOP-12984. Add GenericTestUtils.getTestDir method and use it for temporary
directory in tests (Contributed by Steve Loughran and Vinayakumar B)

This closes #89


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d29e245
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d29e245
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d29e245

Branch: refs/heads/HDFS-7240
Commit: 8d29e2451f5ca60f864c7ece16722c0abdd1c657
Parents: 654cd1d
Author: Vinayakumar B 
Authored: Thu Apr 7 10:12:00 2016 +0530
Committer: Vinayakumar B 
Committed: Thu Apr 7 10:15:24 2016 +0530

--
 .../apache/hadoop/conf/TestConfiguration.java   |  8 +-
 .../crypto/TestCryptoStreamsForLocalFS.java |  5 +-
 .../apache/hadoop/crypto/key/TestKeyShell.java  |  5 +-
 .../org/apache/hadoop/fs/FSTestWrapper.java |  3 +-
 .../fs/FileContextMainOperationsBaseTest.java   |  4 +-
 .../apache/hadoop/fs/FileContextTestHelper.java |  6 +-
 .../apache/hadoop/fs/FileContextURIBase.java|  6 +-
 .../apache/hadoop/fs/FileSystemTestHelper.java  |  4 +-
 .../org/apache/hadoop/fs/TestAvroFSInput.java   |  9 +--
 .../hadoop/fs/TestChecksumFileSystem.java   |  5 +-
 .../org/apache/hadoop/fs/TestDFVariations.java  |  2 +-
 .../test/java/org/apache/hadoop/fs/TestDU.java  |  4 +-
 .../hadoop/fs/TestFileContextResolveAfs.java|  8 +-
 .../java/org/apache/hadoop/fs/TestFileUtil.java |  9 +--
 .../org/apache/hadoop/fs/TestFsShellCopy.java   |  6 +-
 .../apache/hadoop/fs/TestFsShellReturnCode.java |  8 +-
 .../org/apache/hadoop/fs/TestFsShellTouch.java  |  6 +-
 .../hadoop/fs/TestGetFileBlockLocations.java|  5 +-
 .../hadoop/fs/TestHarFileSystemBasics.java  |  5 +-
 .../java/org/apache/hadoop/fs/TestHardLink.java |  5 +-
 .../org/apache/hadoop/fs/TestListFiles.java | 14 ++--
 .../apache/hadoop/fs/TestLocalFileSystem.java   |  7 +-
 .../fs/TestLocalFileSystemPermission.java   | 18 ++---
 .../java/org/apache/hadoop/fs/TestPath.java |  6 +-
 .../java/org/apache/hadoop/fs/TestTrash.java|  6 +-
 .../apache/hadoop/fs/TestTruncatedInputBug.java |  4 +-
 .../hadoop/fs/sftp/TestSFTPFileSystem.java  |  3 +-
 .../apache/hadoop/fs/shell/TestPathData.java|  5 +-
 .../apache/hadoop/fs/shell/TestTextCommand.java |  4 +-
 .../hadoop/fs/viewfs/TestViewfsFileStatus.java  |  7 +-
 .../apache/hadoop/ha/ClientBaseWithFixes.java   |  4 +-
 .../http/TestAuthenticationSessionCookie.java   |  5 +-
 .../apache/hadoop/http/TestHttpCookieFlag.java  |  5 +-
 .../hadoop/http/TestHttpServerLifecycle.java|  5 +-
 .../apache/hadoop/http/TestSSLHttpServer.java   |  5 +-
 .../org/apache/hadoop/io/TestArrayFile.java |  6 +-
 .../org/apache/hadoop/io/TestBloomMapFile.java  |  6 +-
 .../java/org/apache/hadoop/io/TestMapFile.java  |  6 +-
 .../org/apache/hadoop/io/TestSequenceFile.java  | 48 +--
 .../hadoop/io/TestSequenceFileAppend.java   |  4 +-
 .../io/TestSequenceFileSerialization.java   |  4 +-
 .../apache/hadoop/io/TestSequenceFileSync.java  |  5 +-
 .../java/org/apache/hadoop/io/TestSetFile.java  |  5 +-
 .../apache/hadoop/io/compress/TestCodec.java| 23 +++---
 .../apache/hadoop/io/file/tfile/TestTFile.java  |  4 +-
 .../io/file/tfile/TestTFileByteArrays.java  |  4 +-
 .../io/file/tfile/TestTFileComparator2.java |  4 +-
 .../io/file/tfile/TestTFileComparators.java |  5 +-
 .../hadoop/io/file/tfile/TestTFileSeek.java |  4 +-
 .../file/tfile/TestTFileSeqFileComparison.java  |  5 +-
 .../hadoop/io/file/tfile/TestTFileSplit.java|  4 +-
 .../hadoop/io/file/tfile/TestTFileStreams.java  |  4 +-
 .../file/tfile/TestTFileUnsortedByteArrays.java |  5 +-
 .../apache/hadoop/io/file/tfile/TestVLong.java  |  4 +-
 .../apache/hadoop/io/nativeio/TestNativeIO.java | 10 +--
 .../TestSharedFileDescriptorFactory.java|  4 +-
 .../sink/RollingFileSystemSinkTestBase.java |  6 +-
 .../apache/hadoop/security/TestCredentials.java |  4 +-
 .../hadoop/security/TestLdapGroupsMapping.java  |  7 +-
 .../hadoop/security/alias/TestCredShell.java|  4 +-
 .../alias/TestCredentialProviderFactory.java|  5 +-
 .../hadoop/security/ssl/KeyStoreTestUtil.java   |  4 +-
 .../ssl/TestReloadingX509TrustManager.java  |  6 +-
 .../hadoop/security/ssl/TestSSLFactory.java |  6 +-
 .../apache/hadoop/test/GenericTestUtils.java| 84 +++-
 .../java/org/apache/hadoop/util/JarFinder.java  |  4 +-
 .../hadoop/util/TestApplicationClassLoader.java |  4 +-
 .../org/apache/hadoop/util/TestClasspath.java   |  5 +-
 .../hadoop/util/TestGenericOptionsParser.java   |  2 +-
 .../apache/hadoop/util/TestHostsFileReader.java |  4 +-
 .../org/apache/hadoop/util/TestJarFinder.java   |  9 ++-
 .../java/org/apache/hadoop/util/TestRunJar.java |  8 +-
 .../java/org/apache/hadoop/util/TestShell.java  |  4 +-
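
A minimal usage sketch of the helper this commit introduces, assuming the updated GenericTestUtils is on the test classpath (overloads inferred from the commit title; the demo class is hypothetical):

import java.io.File;
import org.apache.hadoop.test.GenericTestUtils;

public class TestDirDemo {
  public static void main(String[] args) {
    // Resolves under the build's test data root rather than a path
    // hard-coded relative to the current working directory.
    File root = GenericTestUtils.getTestDir();
    File scratch = GenericTestUtils.getTestDir("test-dir-demo");
    System.out.println(root + " , " + scratch);
  }
}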
 

[42/50] [abbrv] hadoop git commit: MAPREDUCE-6647. MR usage counters use the resources requested instead of the resources allocated (haibochen via rkanter)

2016-04-07 Thread aengineer
MAPREDUCE-6647. MR usage counters use the resources requested instead of the 
resources allocated (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3be1ab48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3be1ab48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3be1ab48

Branch: refs/heads/HDFS-7240
Commit: 3be1ab485f557c8a3c6a5066453f24d8d61f30be
Parents: 93bacda
Author: Robert Kanter 
Authored: Wed Apr 6 17:15:43 2016 -0700
Committer: Robert Kanter 
Committed: Wed Apr 6 17:15:43 2016 -0700

--
 .../v2/app/job/impl/TaskAttemptImpl.java| 41 
 .../apache/hadoop/mapreduce/v2/app/MRApp.java   | 10 -
 .../hadoop/mapreduce/v2/app/TestRecovery.java   | 29 --
 .../v2/app/job/impl/TestTaskAttempt.java| 37 +++---
 4 files changed, 74 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3be1ab48/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index 8fff7de..5f0a622 100755
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -1406,29 +1406,36 @@ public abstract class TaskAttemptImpl implements
   
   private static void updateMillisCounters(JobCounterUpdateEvent jce,
   TaskAttemptImpl taskAttempt) {
-TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
+// if container/resource is not allocated, do not update
+if (null == taskAttempt.container ||
+null == taskAttempt.container.getResource()) {
+  return;
+}
 long duration = (taskAttempt.getFinishTime() - taskAttempt.getLaunchTime());
-int mbRequired =
-taskAttempt.getMemoryRequired(taskAttempt.conf, taskType);
-int vcoresRequired = taskAttempt.getCpuRequired(taskAttempt.conf, taskType);
-
+Resource allocatedResource = taskAttempt.container.getResource();
+int mbAllocated = allocatedResource.getMemory();
+int vcoresAllocated = allocatedResource.getVirtualCores();
 int minSlotMemSize = taskAttempt.conf.getInt(
-  YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
-  YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
-
-int simSlotsRequired =
-minSlotMemSize == 0 ? 0 : (int) Math.ceil((float) mbRequired
-/ minSlotMemSize);
+YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
+YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
+int simSlotsAllocated = minSlotMemSize == 0 ? 0 :
+(int) Math.ceil((float) mbAllocated / minSlotMemSize);
 
+TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
 if (taskType == TaskType.MAP) {
-  jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, simSlotsRequired * duration);
-  jce.addCounterUpdate(JobCounter.MB_MILLIS_MAPS, duration * mbRequired);
-  jce.addCounterUpdate(JobCounter.VCORES_MILLIS_MAPS, duration * vcoresRequired);
+  jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS,
+  simSlotsAllocated * duration);
+  jce.addCounterUpdate(JobCounter.MB_MILLIS_MAPS, duration * mbAllocated);
+  jce.addCounterUpdate(JobCounter.VCORES_MILLIS_MAPS,
+  duration * vcoresAllocated);
   jce.addCounterUpdate(JobCounter.MILLIS_MAPS, duration);
 } else {
-  jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, simSlotsRequired * duration);
-  jce.addCounterUpdate(JobCounter.MB_MILLIS_REDUCES, duration * mbRequired);
-  jce.addCounterUpdate(JobCounter.VCORES_MILLIS_REDUCES, duration * vcoresRequired);
+  jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES,
+  simSlotsAllocated * duration);
+  jce.addCounterUpdate(JobCounter.MB_MILLIS_REDUCES,
+  duration * mbAllocated);
+  jce.addCounterUpdate(JobCounter.VCORES_MILLIS_REDUCES,
+  duration * vcoresAllocated);
   jce.addCounterUpdate(JobCounter.MILLIS_REDUCES, duration);
 }
   }
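
The slot arithmetic is unchanged except for its input; a worked sketch of the ceiling division with illustrative numbers (values hypothetical):

public class SlotMathDemo {
  public static void main(String[] args) {
    int minSlotMemSize = 1024;  // yarn.scheduler.minimum-allocation-mb
    int mbAllocated = 1536;     // what the RM actually granted
    int simSlotsAllocated = minSlotMemSize == 0 ? 0 :
        (int) Math.ceil((float) mbAllocated / minSlotMemSize);
    System.out.println(simSlotsAllocated); // 2: counters now reflect the grant
  }
}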


[18/50] [abbrv] hadoop git commit: HDFS-9599. TestDecommissioningStatus.testDecommissionStatus occasionally fails (Lin Yiqun via iwasakims)

2016-04-07 Thread aengineer
HDFS-9599. TestDecommissioningStatus.testDecommissionStatus occasionally fails 
(Lin Yiqun via iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/154d2532
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/154d2532
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/154d2532

Branch: refs/heads/HDFS-7240
Commit: 154d2532cf015e9ab9141864bd3ab0d6100ef597
Parents: 7280550
Author: Masatake Iwasaki 
Authored: Tue Apr 5 03:19:48 2016 +0900
Committer: Masatake Iwasaki 
Committed: Tue Apr 5 03:19:48 2016 +0900

--
 .../hdfs/server/namenode/TestDecommissioningStatus.java | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/154d2532/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index 789ee6f..1e7312a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -57,8 +57,8 @@ import 
org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 
 /**
@@ -78,8 +78,8 @@ public class TestDecommissioningStatus {
 
  final ArrayList<String> decommissionedNodes = new ArrayList<String>(numDatanodes);
   
-  @BeforeClass
-  public static void setUp() throws Exception {
+  @Before
+  public void setUp() throws Exception {
 conf = new HdfsConfiguration();
 conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
 false);
@@ -113,8 +113,8 @@ public class TestDecommissioningStatus {
 Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
   }
 
-  @AfterClass
-  public static void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
 if (localFileSys != null ) cleanupFile(localFileSys, dir);
 if(fileSys != null) fileSys.close();
 if(cluster != null) cluster.shutdown();
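
The switch from @BeforeClass/@AfterClass to @Before/@After rebuilds the cluster for every test method; a minimal JUnit 4 sketch of the difference (class hypothetical, with a counter standing in for the MiniDFSCluster):

import static org.junit.Assert.assertEquals;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class PerTestSetupSketch {
  private int clusterState; // stands in for the shared cluster

  @Before
  public void setUp() { clusterState = 1; }    // fresh state before each test

  @After
  public void tearDown() { clusterState = 0; } // torn down after each test

  @Test
  public void first() { clusterState++; assertEquals(2, clusterState); }

  @Test
  public void second() { assertEquals(1, clusterState); } // no leakage from first()
}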



[28/50] [abbrv] hadoop git commit: HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination paths. Contributed by Kuhu Shukla.

2016-04-07 Thread aengineer
HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination 
paths. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91746450
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91746450
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91746450

Branch: refs/heads/HDFS-7240
Commit: 917464505c0e930ebeb4c775d829e51c56a48686
Parents: 6be28bc
Author: Kihwal Lee 
Authored: Tue Apr 5 09:07:24 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 09:07:24 2016 -0500

--
 .../apache/hadoop/fs/shell/MoveCommands.java|  6 +++-
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 31 
 2 files changed, 36 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91746450/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
index 02a3b25..d359282 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
@@ -100,7 +100,11 @@ class MoveCommands {
 
 @Override
 protected void processPath(PathData src, PathData target) throws IOException {
-  if (!src.fs.getUri().equals(target.fs.getUri())) {
+  String srcUri = src.fs.getUri().getScheme() + "://" +
+  src.fs.getUri().getHost();
+  String dstUri = target.fs.getUri().getScheme() + "://" +
+  target.fs.getUri().getHost();
+  if (!srcUri.equals(dstUri)) {
 throw new PathIOException(src.toString(),
 "Does not match target filesystem");
   }
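
A standalone sketch of the scheme-plus-host comparison the patch switches to, showing how it tolerates a port on only one side (URIs illustrative):

import java.net.URI;

public class SchemeHostCompareDemo {
  public static void main(String[] args) {
    URI src = URI.create("hdfs://localhost:8020");
    URI dst = URI.create("hdfs://localhost");
    // Full-URI equality fails on the port mismatch alone.
    System.out.println(src.equals(dst));            // false
    String srcUri = src.getScheme() + "://" + src.getHost();
    String dstUri = dst.getScheme() + "://" + dst.getHost();
    System.out.println(srcUri.equals(dstUri));      // true: same filesystem
  }
}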

http://git-wip-us.apache.org/repos/asf/hadoop/blob/91746450/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 41cd5c0..b75ac11 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -559,6 +559,37 @@ public class TestDFSShell {
 }
   }
 
+  @Test
+  public void testMoveWithTargetPortEmpty() throws Exception {
+Configuration conf = new HdfsConfiguration();
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf)
+  .format(true)
+  .numDataNodes(2)
+  .nameNodePort(8020)
+  .waitSafeMode(true)
+  .build();
+  FileSystem srcFs = cluster.getFileSystem();
+  FsShell shell = new FsShell();
+  shell.setConf(conf);
+  String[] argv = new String[2];
+  argv[0] = "-mkdir";
+  argv[1] = "/testfile";
+  ToolRunner.run(shell, argv);
+  argv = new String[3];
+  argv[0] = "-mv";
+  argv[1] = srcFs.getUri() + "/testfile";
+  argv[2] = "hdfs://localhost/testfile2";
+  int ret = ToolRunner.run(shell, argv);
+  assertEquals("mv should have succeeded", 0, ret);
+} finally {
+  if (cluster != null) {
+cluster.shutdown();
+  }
+}
+  }
+
  @Test (timeout = 30000)
   public void testURIPaths() throws Exception {
 Configuration srcConf = new HdfsConfiguration();



[39/50] [abbrv] hadoop git commit: HDFS-6520. hdfs fsck passes invalid length value when creating BlockReader (Xiao Chen via cmccabe)

2016-04-07 Thread aengineer
HDFS-6520. hdfs fsck passes invalid length value when creating BlockReader 
(Xiao Chen via cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/188f6528
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/188f6528
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/188f6528

Branch: refs/heads/HDFS-7240
Commit: 188f65287d5b2f26a8862c88198f83ac59035016
Parents: 221b3a8
Author: Colin Patrick Mccabe 
Authored: Wed Apr 6 11:28:34 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Wed Apr 6 11:28:34 2016 -0700

--
 .../apache/hadoop/hdfs/BlockReaderFactory.java  |   4 +-
 .../hdfs/server/namenode/NamenodeFsck.java  |   2 +-
 .../blockmanagement/TestBlockTokenWithDFS.java  |   2 +-
 .../datanode/TestDataNodeVolumeFailure.java |   2 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 122 ++-
 5 files changed, 126 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/188f6528/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index 8a0050f..7af4609 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -150,7 +150,7 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   private ClientContext clientContext;
 
   /**
-   * Number of bytes to read.  -1 indicates no limit.
+   * Number of bytes to read. Must be set to a non-negative value.
*/
   private long length = -1;
 
@@ -341,6 +341,8 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
*/
   public BlockReader build() throws IOException {
 Preconditions.checkNotNull(configuration);
+Preconditions
+.checkState(length >= 0, "Length must be set to a non-negative value");
 BlockReader reader = tryToCreateExternalBlockReader();
 if (reader != null) {
   return reader;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/188f6528/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 291ba56..80f510c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -931,7 +931,7 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
 setBlock(block).
 setBlockToken(lblock.getBlockToken()).
 setStartOffset(0).
-setLength(-1).
+setLength(block.getNumBytes()).
 setVerifyChecksum(true).
 setClientName("fsck").
 setDatanodeInfo(chosenNode).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/188f6528/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index 8e65ff6..aa46de2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -158,7 +158,7 @@ public class TestBlockTokenWithDFS {
   setBlockToken(lblock.getBlockToken()).
   setInetSocketAddress(targetAddr).
   setStartOffset(0).
-  setLength(-1).
+  setLength(0).
   setVerifyChecksum(true).
   setClientName("TestBlockTokenWithDFS").
   setDatanodeInfo(nodes[0]).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/188f6528/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java

[40/50] [abbrv] hadoop git commit: HDFS-9945. Datanode command for evicting writers. Contributed by Kihwal Lee

2016-04-07 Thread aengineer
HDFS-9945. Datanode command for evicting writers. Contributed by Kihwal Lee


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aede8c10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aede8c10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aede8c10

Branch: refs/heads/HDFS-7240
Commit: aede8c10ecad4f2a8802a834e4bd0b8286cebade
Parents: 188f652
Author: Eric Payne 
Authored: Wed Apr 6 20:20:14 2016 +
Committer: Eric Payne 
Committed: Wed Apr 6 20:20:14 2016 +

--
 .../hdfs/protocol/ClientDatanodeProtocol.java   |  7 +++
 .../ClientDatanodeProtocolTranslatorPB.java | 12 +
 .../src/main/proto/ClientDatanodeProtocol.proto | 10 
 ...tDatanodeProtocolServerSideTranslatorPB.java | 15 ++
 .../hdfs/server/datanode/BlockReceiver.java |  3 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  7 +++
 .../hdfs/server/datanode/DataXceiver.java   | 48 +++
 .../hdfs/server/datanode/DataXceiverServer.java |  6 +++
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 21 +
 .../TestClientProtocolForPipelineRecovery.java  | 49 
 10 files changed, 170 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aede8c10/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index 08547c1..e541388 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -122,6 +122,13 @@ public interface ClientDatanodeProtocol {
   void shutdownDatanode(boolean forUpgrade) throws IOException;
 
   /**
+   * Evict clients that are writing to a datanode.
+   *
+   * @throws IOException
+   */
+  void evictWriters() throws IOException;
+
+  /**
* Obtains datanode info
*
* @return software/config version and uptime of the datanode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aede8c10/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index 2fd..6aaa025 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto;
@@ -97,6 +98,8 @@ public class ClientDatanodeProtocolTranslatorPB implements
   private static final GetBalancerBandwidthRequestProto
   VOID_GET_BALANCER_BANDWIDTH =
   GetBalancerBandwidthRequestProto.newBuilder().build();
+  private final static EvictWritersRequestProto VOID_EVICT_WRITERS =
+  EvictWritersRequestProto.newBuilder().build();
 
   public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
   Configuration conf, int socketTimeout, boolean connectToDnViaHostname,
@@ -244,6 +247,15 @@ public class ClientDatanodeProtocolTranslatorPB implements
   }
 
   @Override
+  public void evictWriters() throws IOException {
+try {
+  rpcProxy.evictWriters(NULL_CONTROLLER, VOID_EVICT_WRITERS);
+} catch (ServiceException e) {
+  throw ProtobufHelper.getRemoteException(e);
+}
+  }
+
+  @Override
   public DatanodeLocalInfo 
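
Per the DFSAdmin entry in the file list, the feature is also exposed as a dfsadmin subcommand; a hedged sketch of driving it programmatically (the flag spelling follows the commit, the host:port is illustrative):

import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class EvictWritersDemo {
  public static void main(String[] args) throws Exception {
    // Equivalent to: hdfs dfsadmin -evictWriters <datanode_host:ipc_port>
    int ret = ToolRunner.run(new DFSAdmin(),
        new String[] { "-evictWriters", "127.0.0.1:50020" });
    System.exit(ret);
  }
}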

[32/50] [abbrv] hadoop git commit: YARN-4917. Fix typos in documentation of Capacity Scheduler. (Takashi Ohnishi via iwasakims)

2016-04-07 Thread aengineer
YARN-4917. Fix typos in documentation of Capacity Scheduler. (Takashi Ohnishi 
via iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/500e5a59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/500e5a59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/500e5a59

Branch: refs/heads/HDFS-7240
Commit: 500e5a5952f8f34bf0e1e2653fa01b357d68cc8f
Parents: 3020634
Author: Masatake Iwasaki 
Authored: Wed Apr 6 04:00:31 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 04:00:31 2016 +0900

--
 .../src/site/markdown/CapacityScheduler.md| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/500e5a59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index e86c4f9..8c0b8c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -55,11 +55,11 @@ The `CapacityScheduler` supports the following features:
 
 * **Hierarchical Queues** - Hierarchy of queues is supported to ensure 
resources are shared among the sub-queues of an organization before other 
queues are allowed to use free resources, there-by providing more control and 
predictability.
 
-* **Capacity Guarantees** - Queues are allocated a fraction of the capacity of 
the grid in the sense that a certain capacity of resources will be at their 
disposal. All applications submitted to a queue will have access to the 
capacity allocated to the queue. Adminstrators can configure soft limits and 
optional hard limits on the capacity allocated to each queue.
+* **Capacity Guarantees** - Queues are allocated a fraction of the capacity of 
the grid in the sense that a certain capacity of resources will be at their 
disposal. All applications submitted to a queue will have access to the 
capacity allocated to the queue. Administrators can configure soft limits and 
optional hard limits on the capacity allocated to each queue.
 
 * **Security** - Each queue has strict ACLs which controls which users can 
submit applications to individual queues. Also, there are safe-guards to ensure 
that users cannot view and/or modify applications from other users. Also, 
per-queue and system administrator roles are supported.
 
-* **Elasticity** - Free resources can be allocated to any queue beyond its 
capacity. When there is demand for these resources from queues running below 
capacity at a future point in time, as tasks scheduled on these resources 
complete, they will be assigned to applications on queues running below the 
capacity (pre-emption is also supported). This ensures that resources are 
available in a predictable and elastic manner to queues, thus preventing 
artifical silos of resources in the cluster which helps utilization.
+* **Elasticity** - Free resources can be allocated to any queue beyond its 
capacity. When there is demand for these resources from queues running below 
capacity at a future point in time, as tasks scheduled on these resources 
complete, they will be assigned to applications on queues running below the 
capacity (pre-emption is also supported). This ensures that resources are 
available in a predictable and elastic manner to queues, thus preventing 
artificial silos of resources in the cluster which helps utilization.
 
 * **Multi-tenancy** - Comprehensive set of limits are provided to prevent a 
single application, user and queue from monopolizing resources of the queue or 
the cluster as a whole to ensure that the cluster isn't overwhelmed.
 
@@ -67,9 +67,9 @@ The `CapacityScheduler` supports the following features:
 
 * Runtime Configuration - The queue definitions and properties such as 
capacity, ACLs can be changed, at runtime, by administrators in a secure manner 
to minimize disruption to users. Also, a console is provided for users and 
administrators to view current allocation of resources to various queues in the 
system. Administrators can *add additional queues* at runtime, but queues 
cannot be *deleted* at runtime.
 
-* Drain applications - Administrators can *stop* queues at runtime to 
ensure that while existing applications run to completion, no new applications 
can be submitted. If a queue is in `STOPPED` state, new applications cannot be 
submitted to *itself* or *any of its child queueus*. Existing applications 

[16/50] [abbrv] hadoop git commit: HADOOP-12169 ListStatus on empty dir in S3A lists itself instead of returning an empty list. author: Pieter Reuse. - omitted new S3A subclass

2016-04-07 Thread aengineer
HADOOP-12169 ListStatus on empty dir in S3A lists itself instead of returning 
an empty list. author: Pieter Reuse. - omitted new S3A subclass


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89c93475
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89c93475
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89c93475

Branch: refs/heads/HDFS-7240
Commit: 89c93475116ee475645cf81cc80f155f830e61de
Parents: 5092c94
Author: Steve Loughran 
Authored: Mon Apr 4 17:00:35 2016 +0100
Committer: Steve Loughran 
Committed: Mon Apr 4 17:02:04 2016 +0100

--
 .../s3a/TestS3AContractGetFileStatus.java   | 31 
 1 file changed, 31 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89c93475/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractGetFileStatus.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractGetFileStatus.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractGetFileStatus.java
new file mode 100644
index 0000000..d7b8fe3
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractGetFileStatus.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.contract.s3a;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
+
+public class TestS3AContractGetFileStatus extends 
AbstractContractGetFileStatusTest {
+
+  @Override
+  protected AbstractFSContract createContract(Configuration conf) {
+return new S3AContract(conf);
+  }
+
+}



[37/50] [abbrv] hadoop git commit: MAPREDUCE-6670. TestJobListCache#testEviction sometimes fails on Windows with timeout. Contributed by Gergely Novák.

2016-04-07 Thread aengineer
MAPREDUCE-6670. TestJobListCache#testEviction sometimes fails on Windows with 
timeout. Contributed by Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de96d7c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de96d7c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de96d7c8

Branch: refs/heads/HDFS-7240
Commit: de96d7c88a42cd54bd88ce2de63122998e967efa
Parents: b41e65e
Author: Junping Du 
Authored: Wed Apr 6 08:32:35 2016 -0700
Committer: Junping Du 
Committed: Wed Apr 6 08:32:35 2016 -0700

--
 .../java/org/apache/hadoop/mapreduce/v2/hs/TestJobListCache.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de96d7c8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobListCache.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobListCache.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobListCache.java
index 6ebbb7c..3ccc222 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobListCache.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobListCache.java
@@ -45,7 +45,7 @@ public class TestJobListCache {
 cache.values().size());
   }
 
-  @Test (timeout = 1000)
+  @Test (timeout = 5000)
   public void testEviction() throws InterruptedException {
 int maxSize = 2;
 JobListCache cache = new JobListCache(maxSize, 1000);



[34/50] [abbrv] hadoop git commit: HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. Contributed by Eric Badger.

2016-04-07 Thread aengineer
HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. 
Contributed by Eric Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ba1e5af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ba1e5af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ba1e5af

Branch: refs/heads/HDFS-7240
Commit: 9ba1e5af06070ba01dcf46e1a4c66713a1d43352
Parents: 0cd320a
Author: Kihwal Lee 
Authored: Tue Apr 5 16:26:18 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 16:26:18 2016 -0500

--
 .../bkjournal/TestBookKeeperHACheckpoints.java  | 46 ++--
 1 file changed, 33 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ba1e5af/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
index ed53512..b8fc30d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.contrib.bkjournal;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -27,6 +29,9 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
+import java.net.BindException;
+import java.util.Random;
+
 /**
  * Runs the same tests as TestStandbyCheckpoints, but
  * using a bookkeeper journal manager as the shared directory
@@ -39,6 +44,9 @@ public class TestBookKeeperHACheckpoints extends 
TestStandbyCheckpoints {
   private static BKJMUtil bkutil = null;
   static int numBookies = 3;
   static int journalCount = 0;
+  private final Random random = new Random();
+
+  private static final Log LOG =
+      LogFactory.getLog(TestStandbyCheckpoints.class);
 
   @SuppressWarnings("rawtypes")
   @Override
@@ -49,22 +57,34 @@ public class TestBookKeeperHACheckpoints extends 
TestStandbyCheckpoints {
  BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
  .toString());
 BKJMUtil.addJournalManagerDefinition(conf);
-MiniDFSNNTopology topology = new MiniDFSNNTopology()
-  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
 
-cluster = new MiniDFSCluster.Builder(conf)
-  .nnTopology(topology)
-  .numDataNodes(1)
-  .manageNameDfsSharedDirs(false)
-  .build();
-cluster.waitActive();
+int retryCount = 0;
+while (true) {
+  try {
+int basePort = 10060 + random.nextInt(100) * 2;
+MiniDFSNNTopology topology = new MiniDFSNNTopology()
+  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 
1)));
 
-setNNs();
-fs = HATestUtil.configureFailoverFs(cluster, conf);
+cluster = new MiniDFSCluster.Builder(conf)
+  .nnTopology(topology)
+  .numDataNodes(1)
+  .manageNameDfsSharedDirs(false)
+  .build();
+cluster.waitActive();
 
-cluster.transitionToActive(0);
+setNNs();
+fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+cluster.transitionToActive(0);
+++retryCount;
+break;
+  } catch (BindException e) {
+LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
++ retryCount + " times");
+  }
+}
   }
 
   @BeforeClass
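
The retry loop above is a general pattern for flaky fixed ports; a self-contained sketch with a plain ServerSocket standing in for the NameNode HTTP server (ports illustrative):

import java.io.IOException;
import java.net.BindException;
import java.net.ServerSocket;
import java.util.Random;

public class BindRetrySketch {
  public static void main(String[] args) throws IOException {
    Random random = new Random();
    int retryCount = 0;
    while (true) {
      int basePort = 10060 + random.nextInt(100) * 2; // same scheme as the test
      try (ServerSocket s = new ServerSocket(basePort)) {
        System.out.println("bound to " + s.getLocalPort());
        break;
      } catch (BindException e) {
        ++retryCount;
        System.out.println("port conflict, retry " + retryCount);
      }
    }
  }
}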



[49/50] [abbrv] hadoop git commit: HADOOP-12909. Change ipc.Client to support asynchronous calls. Contributed by Xiaobing Zhou

2016-04-07 Thread aengineer
HADOOP-12909. Change ipc.Client to support asynchronous calls. Contributed by Xiaobing Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a62637a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a62637a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a62637a4

Branch: refs/heads/HDFS-7240
Commit: a62637a413ad88c4273d3251892b8fc1c05afa34
Parents: 3c18a53
Author: Tsz-Wo Nicholas Sze 
Authored: Thu Apr 7 14:01:33 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Thu Apr 7 14:02:51 2016 +0800

--
 .../main/java/org/apache/hadoop/ipc/Client.java |  73 +++-
 .../org/apache/hadoop/ipc/TestAsyncIPC.java | 346 +++
 .../java/org/apache/hadoop/ipc/TestIPC.java |  29 +-
 3 files changed, 436 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a62637a4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index fb11cb7..489c354 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -62,6 +62,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -96,6 +97,7 @@ import org.apache.htrace.core.Tracer;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.AbstractFuture;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.google.protobuf.CodedOutputStream;
 
@@ -107,7 +109,7 @@ import com.google.protobuf.CodedOutputStream;
  */
 @InterfaceAudience.LimitedPrivate(value = { "Common", "HDFS", "MapReduce", 
"Yarn" })
 @InterfaceStability.Evolving
-public class Client {
+public class Client implements AutoCloseable {
   
   public static final Log LOG = LogFactory.getLog(Client.class);
 
@@ -116,6 +118,20 @@ public class Client {
 
  private static final ThreadLocal<Integer> callId = new ThreadLocal<Integer>();
  private static final ThreadLocal<Integer> retryCount = new ThreadLocal<Integer>();
+  private static final ThreadLocal<Future<?>> returnValue = new ThreadLocal<>();
+  private static final ThreadLocal<Boolean> asynchronousMode =
+  new ThreadLocal<Boolean>() {
+@Override
+protected Boolean initialValue() {
+  return false;
+}
+  };
+
+  @SuppressWarnings("unchecked")
+  @Unstable
+  public static <T> Future<T> getReturnValue() {
+    return (Future<T>) returnValue.get();
+  }
 
   /** Set call id and retry count for the next call. */
   public static void setCallIdAndRetryCount(int cid, int rc) {
@@ -1354,8 +1370,8 @@ public class Client {
   ConnectionId remoteId, int serviceClass,
   AtomicBoolean fallbackToSimpleAuth) throws IOException {
 final Call call = createCall(rpcKind, rpcRequest);
-Connection connection = getConnection(remoteId, call, serviceClass,
-  fallbackToSimpleAuth);
+final Connection connection = getConnection(remoteId, call, serviceClass,
+fallbackToSimpleAuth);
 try {
   connection.sendRpcRequest(call); // send the rpc request
 } catch (RejectedExecutionException e) {
@@ -1366,6 +1382,51 @@ public class Client {
   throw new IOException(e);
 }
 
+if (isAsynchronousMode()) {
+  Future<Writable> returnFuture = new AbstractFuture<Writable>() {
+@Override
+public Writable get() throws InterruptedException, ExecutionException {
+  try {
+set(getRpcResponse(call, connection));
+  } catch (IOException ie) {
+setException(ie);
+  }
+  return super.get();
+}
+  };
+
+  returnValue.set(returnFuture);
+  return null;
+} else {
+  return getRpcResponse(call, connection);
+}
+  }
+
+  /**
+   * Check if RPC is in asynchronous mode or not.
+   *
+   * @returns true, if RPC is in asynchronous mode, otherwise false for
+   *  synchronous mode.
+   */
+  @Unstable
+  static boolean isAsynchronousMode() {
+return asynchronousMode.get();
+  }
+
+  /**
+   * Set RPC to asynchronous or synchronous mode.
+   *
+   
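
A self-contained sketch of the thread-local async calling convention this patch introduces, with CompletableFuture standing in for the Guava AbstractFuture used above (all names hypothetical):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;

public class AsyncCallSketch {
  private static final ThreadLocal<Boolean> asynchronousMode =
      ThreadLocal.withInitial(() -> Boolean.FALSE);
  private static final ThreadLocal<Future<String>> returnValue =
      new ThreadLocal<>();

  // In async mode the call returns null and parks a Future in a thread-local,
  // mirroring Client.call() above; otherwise it answers inline.
  static String call(String payload) {
    String response = "echo:" + payload;
    if (asynchronousMode.get()) {
      returnValue.set(CompletableFuture.completedFuture(response));
      return null;
    }
    return response;
  }

  public static void main(String[] args) throws Exception {
    asynchronousMode.set(true);
    call("hello");
    System.out.println(returnValue.get().get()); // echo:hello
  }
}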

[24/50] [abbrv] hadoop git commit: YARN-4880. Running TestZKRMStateStorePerf with real zookeeper cluster throws NPE. Contributed by Sunil G

2016-04-07 Thread aengineer
YARN-4880. Running TestZKRMStateStorePerf with real zookeeper cluster throws 
NPE. Contributed by Sunil G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/552237d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/552237d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/552237d4

Branch: refs/heads/HDFS-7240
Commit: 552237d4a34ab10fa5f9ec7aad7942f2a110993e
Parents: 818d6b7
Author: Rohith Sharma K S 
Authored: Tue Apr 5 14:25:32 2016 +0530
Committer: Rohith Sharma K S 
Committed: Tue Apr 5 14:26:19 2016 +0530

--
 .../resourcemanager/recovery/TestZKRMStateStorePerf.java| 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/552237d4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
index 4b0b06a..bd25def 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
@@ -91,7 +91,9 @@ public class TestZKRMStateStorePerf extends 
RMStateStoreTestBase
 if (appTokenMgr != null) {
   appTokenMgr.stop();
 }
-curatorTestingServer.stop();
+if (curatorTestingServer != null) {
+  curatorTestingServer.stop();
+}
   }
 
   private void initStore(String hostPort) {
@@ -99,8 +101,9 @@ public class TestZKRMStateStorePerf extends 
RMStateStoreTestBase
 RMContext rmContext = mock(RMContext.class);
 
 conf = new YarnConfiguration();
-conf.set(YarnConfiguration.RM_ZK_ADDRESS,
-optHostPort.or(curatorTestingServer.getConnectString()));
+conf.set(YarnConfiguration.RM_ZK_ADDRESS, optHostPort
+.or((curatorTestingServer == null) ? "" : curatorTestingServer
+.getConnectString()));
 conf.set(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH, workingZnode);
 
 store = new ZKRMStateStore();
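
The null guard hinges on Guava's Optional.or() fallback; a minimal sketch of that behavior (assumes Guava; values illustrative):

import com.google.common.base.Optional;

public class OptionalOrDemo {
  public static void main(String[] args) {
    Optional<String> optHostPort = Optional.absent(); // no -hostPort argument
    // With no real ZK address and no curator test server, fall back to "".
    String zkAddress = optHostPort.or("");
    System.out.println("ZK address: '" + zkAddress + "'");
  }
}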



[17/50] [abbrv] hadoop git commit: HADOOP-11212. NetUtils.wrapException to handle SocketException explicitly. (Contributed by Steve Loughran)

2016-04-07 Thread aengineer
HADOOP-11212. NetUtils.wrapException to handle SocketException explicitly. 
(Contributed by Steve Loughran)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7280550a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7280550a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7280550a

Branch: refs/heads/HDFS-7240
Commit: 7280550a8f668df8aa32e4630db4ead49e9b8b6d
Parents: 89c9347
Author: Arpit Agarwal 
Authored: Mon Apr 4 10:50:11 2016 -0700
Committer: Arpit Agarwal 
Committed: Mon Apr 4 10:50:11 2016 -0700

--
 .../java/org/apache/hadoop/net/NetUtils.java| 15 +--
 .../org/apache/hadoop/net/TestNetUtils.java | 47 +++-
 2 files changed, 38 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7280550a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
index 2c3661a..4050107 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
@@ -782,12 +782,21 @@ public class NetUtils {
   + ": " + exception
   + ";"
   + see("EOFException"));
+} else if (exception instanceof SocketException) {
+  // Many of the predecessor exceptions are subclasses of SocketException,
+  // so must be handled before this
+  return wrapWithMessage(exception,
+  "Call From "
+  + localHost + " to " + destHost + ":" + destPort
+  + " failed on socket exception: " + exception
+  + ";"
+  + see("SocketException"));
 }
 else {
   return (IOException) new IOException("Failed on local exception: "
-   + exception
-   + "; Host Details : "
-   + 
getHostDetailsAsString(destHost, destPort, localHost))
+ + exception
+ + "; Host Details : "
+ + getHostDetailsAsString(destHost, destPort, localHost))
   .initCause(exception);
 
 }
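
The "must be handled before this" comment matters because several earlier branches handle subclasses; a minimal sketch of the ordering (classification strings hypothetical):

import java.net.ConnectException;
import java.net.NoRouteToHostException;
import java.net.SocketException;

public class WrapOrderSketch {
  static String classify(Exception e) {
    // Subclasses first, as in NetUtils.wrapException: ConnectException and
    // NoRouteToHostException both extend SocketException.
    if (e instanceof ConnectException) return "connection refused";
    if (e instanceof NoRouteToHostException) return "no route to host";
    if (e instanceof SocketException) return "socket exception";
    return "other";
  }

  public static void main(String[] args) {
    System.out.println(classify(new ConnectException("refused"))); // connection refused
    System.out.println(classify(new SocketException("reset")));    // socket exception
  }
}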

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7280550a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
index c93ede8..e59ac77 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
@@ -72,7 +72,7 @@ public class TestNetUtils {
* This is a regression test for HADOOP-6722.
*/
   @Test
-  public void testAvoidLoopbackTcpSockets() throws Exception {
+  public void testAvoidLoopbackTcpSockets() throws Throwable {
 Configuration conf = new Configuration();
 
 Socket socket = NetUtils.getDefaultSocketFactory(conf)
@@ -88,11 +88,11 @@ public class TestNetUtils {
   fail("Should not have connected");
 } catch (ConnectException ce) {
   System.err.println("Got exception: " + ce);
-  assertTrue(ce.getMessage().contains("resulted in a loopback"));
+  assertInException(ce, "resulted in a loopback");
 } catch (SocketException se) {
   // Some TCP stacks will actually throw their own Invalid argument 
exception
   // here. This is also OK.
-  assertTrue(se.getMessage().contains("Invalid argument"));
+  assertInException(se, "Invalid argument");
 }
   }
   
@@ -188,15 +188,11 @@ public class TestNetUtils {
   }  
 
   @Test
-  public void testVerifyHostnamesNoException() {
+  public void testVerifyHostnamesNoException() throws UnknownHostException {
 String[] names = {"valid.host.com", "1.com"};
-try {
-  NetUtils.verifyHostnames(names);
-} catch (UnknownHostException e) {
-  fail("NetUtils.verifyHostnames threw unexpected UnknownHostException");
-}
+NetUtils.verifyHostnames(names);
   }
-  
+
   /** 
* Test for {@link NetUtils#isLocalAddress(java.net.InetAddress)}
*/
@@ -267,7 +263,18 @@ public class TestNetUtils {
 assertRemoteDetailsIncluded(wrapped);
 assertInException(wrapped, "/EOFException");
   }
-  
+
+  @Test
+  public void 

[35/50] [abbrv] hadoop git commit: YARN-4699. Scheduler UI and REST o/p is not in sync when -replaceLabelsOnNode is used to change label of a node. (Sunil G via wangda)

2016-04-07 Thread aengineer
YARN-4699. Scheduler UI and REST o/p is not in sync when -replaceLabelsOnNode 
is used to change label of a node. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21eb4284
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21eb4284
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21eb4284

Branch: refs/heads/HDFS-7240
Commit: 21eb4284487d6f8e4beedb8a0c3168e952f224fc
Parents: 9ba1e5a
Author: Wangda Tan 
Authored: Tue Apr 5 16:24:11 2016 -0700
Committer: Wangda Tan 
Committed: Tue Apr 5 16:24:11 2016 -0700

--
 .../scheduler/capacity/AbstractCSQueue.java |  6 +++
 .../scheduler/capacity/CSQueueUtils.java|  2 +-
 .../TestCapacitySchedulerNodeLabelUpdate.java   | 40 +++-
 3 files changed, 46 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21eb4284/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 6e715fb..c7d6d02 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -591,6 +591,9 @@ public abstract class AbstractCSQueue implements CSQueue {
 }
 // ResourceUsage has its own lock, no addition lock needs here.
 queueUsage.incUsed(nodeLabel, resourceToInc);
+CSQueueUtils.updateUsedCapacity(resourceCalculator,
+labelManager.getResourceByLabel(nodeLabel, Resources.none()),
+minimumAllocation, queueUsage, queueCapacities, nodeLabel);
 if (null != parent) {
   parent.incUsedResource(nodeLabel, resourceToInc, null);
 }
@@ -604,6 +607,9 @@ public abstract class AbstractCSQueue implements CSQueue {
 }
 // ResourceUsage has its own lock, no addition lock needs here.
 queueUsage.decUsed(nodeLabel, resourceToDec);
+CSQueueUtils.updateUsedCapacity(resourceCalculator,
+labelManager.getResourceByLabel(nodeLabel, Resources.none()),
+minimumAllocation, queueUsage, queueCapacities, nodeLabel);
 if (null != parent) {
   parent.decUsedResource(nodeLabel, resourceToDec, null);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21eb4284/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
index 9cdcb72..0166d83 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
@@ -180,7 +180,7 @@ class CSQueueUtils {
* Update partitioned resource usage, if nodePartition == null, will update
* used resource for all partitions of this queue.
*/
-  private static void updateUsedCapacity(final ResourceCalculator rc,
+  public static void updateUsedCapacity(final ResourceCalculator rc,
   final Resource totalPartitionResource, final Resource minimumAllocation,
   ResourceUsage queueResourceUsage, QueueCapacities queueCapacities,
   String nodePartition) {
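
Note: incUsedResource/decUsedResource are the paths taken when -replaceLabelsOnNode
moves a live node between partitions; before this change they adjusted raw usage but
never the derived used-capacity ratio that the UI and REST endpoints render. A
condensed view of the invariant now maintained on both paths (the formula is an
assumption from the method's parameters, not verbatim code):

  // usedCapacity(label) ~ used(label) / partitionResource(label), so it must
  // be refreshed whenever queueUsage.incUsed/decUsed changes the numerator:
  queueUsage.incUsed(nodeLabel, resourceToInc);
  CSQueueUtils.updateUsedCapacity(resourceCalculator,
      labelManager.getResourceByLabel(nodeLabel, Resources.none()),
      minimumAllocation, queueUsage, queueCapacities, nodeLabel);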


[31/50] [abbrv] hadoop git commit: YARN-4915. Fix typo in YARN Secure Containers documentation (Takashi Ohnishi via iwasakims)

2016-04-07 Thread aengineer
YARN-4915. Fix typo in YARN Secure Containers documentation (Takashi Ohnishi 
via iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30206346
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30206346
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30206346

Branch: refs/heads/HDFS-7240
Commit: 30206346cf13fe1b7267f86e7c210b77c86b88c9
Parents: 85ec557
Author: Masatake Iwasaki 
Authored: Wed Apr 6 03:47:22 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 03:47:22 2016 +0900

--
 .../hadoop-yarn-site/src/site/markdown/SecureContainer.md  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30206346/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
index cd4f913..f7706c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
@@ -114,7 +114,7 @@ min.user.id=1000#Prevent other super-users
 
   `yarn.nodemanager.windows-secure-container-executor.impersonate.allowed` 
should contain the users that are allowed to create containers in the cluster. 
These users will be allowed to be impersonated by hadoopwinutilsvc.
 
-  `yarn.nodemanager.windows-secure-container-executor.impersonate.denied` 
should contain users that are explictly forbiden from creating containers. 
hadoopwinutilsvc will refuse to impersonate these users.
+  `yarn.nodemanager.windows-secure-container-executor.impersonate.denied` 
should contain users that are explicitly forbidden from creating containers. 
hadoopwinutilsvc will refuse to impersonate these users.
 
   `yarn.nodemanager.windows-secure-container-executor.local-dirs` should 
contain the nodemanager local dirs. hadoopwinutilsvc will allow only file 
operations under these directories. This should contain the same values as 
`$yarn.nodemanager.local-dirs, $yarn.nodemanager.log-dirs` but note that 
hadoopwinutilsvc XML configuration processing does not do substitutions so the 
value must be the final value. All paths must be absolute and no environment 
variable substitution will be performed. The paths are compared 
LOCAL\_INVARIANT case insensitive string comparison, the file path validated 
must start with one of the paths listed in local-dirs configuration. Use comma 
as path separator:`,`
 



[23/50] [abbrv] hadoop git commit: HDFS-9917. IBR accumulate more objects when SNN was down for sometime. (Contributed by Brahma Reddy Battula)

2016-04-07 Thread aengineer
HDFS-9917. IBR accumulate more objects when SNN was down for sometime. 
(Contributed by Brahma Reddy Battula)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/818d6b79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/818d6b79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/818d6b79

Branch: refs/heads/HDFS-7240
Commit: 818d6b799eead13a17a0214172df60a269b046fb
Parents: f6b1a81
Author: Vinayakumar B 
Authored: Tue Apr 5 09:49:39 2016 +0800
Committer: Vinayakumar B 
Committed: Tue Apr 5 09:49:39 2016 +0800

--
 .../hdfs/server/datanode/BPServiceActor.java|  5 +
 .../datanode/IncrementalBlockReportManager.java |  9 ++
 .../server/datanode/TestBPOfferService.java | 96 +++-
 3 files changed, 107 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/818d6b79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 49f64c2..39f8219 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -798,6 +798,11 @@ class BPServiceActor implements Runnable {
   // and re-register
   register(nsInfo);
   scheduler.scheduleHeartbeat();
+  // HDFS-9917,Standby NN IBR can be very huge if standby namenode is down
+  // for sometime.
+  if (state == HAServiceState.STANDBY) {
+ibrManager.clearIBRs();
+  }
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/818d6b79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
index b9b348a..e95142d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
@@ -258,4 +258,13 @@ class IncrementalBlockReportManager {
   }
 }
   }
+
+  void clearIBRs() {
+pendingIBRs.clear();
+  }
+
+  @VisibleForTesting
+  int getPendingIBRSize() {
+return pendingIBRs.size();
+  }
 }
\ No newline at end of file
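
Note: a condensed, illustrative view of the re-registration path this patches (see
the BPServiceActor hunk above). While the standby NN is down, IBRs keep queuing; on
re-registration the standby receives a full block report anyway, so the backlog can
be dropped:

  register(nsInfo);                     // re-register after the NN comes back
  scheduler.scheduleHeartbeat();
  if (state == HAServiceState.STANDBY) {
    ibrManager.clearIBRs();             // full block report will resync it
  }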

http://git-wip-us.apache.org/repos/asf/hadoop/blob/818d6b79/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index 95a103e..29db702 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -30,6 +30,7 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
@@ -48,10 +49,12 @@ import 
org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import 

[13/50] [abbrv] hadoop git commit: YARN-4607. Pagination support for AppAttempt page TotalOutstandingResource Requests table. Contributed by Bibin A Chundatt

2016-04-07 Thread aengineer
YARN-4607. Pagination support for AppAttempt page TotalOutstandingResource 
Requests table. Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e6f9297
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e6f9297
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e6f9297

Branch: refs/heads/HDFS-7240
Commit: 1e6f92977dc5431b117745feb5a3491e88a559c0
Parents: 0ecdd4c
Author: Rohith Sharma K S 
Authored: Mon Apr 4 08:09:29 2016 +0530
Committer: Rohith Sharma K S 
Committed: Mon Apr 4 08:09:29 2016 +0530

--
 .../hadoop/yarn/server/webapp/WebPageUtils.java |  6 ++
 .../resourcemanager/webapp/AppAttemptPage.java  |  4 +-
 .../webapp/RMAppAttemptBlock.java   | 61 
 3 files changed, 45 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e6f9297/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
index a07baa2..3a26ae5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
@@ -95,4 +95,10 @@ public class WebPageUtils {
   .append(", 'mRender': parseHadoopID }]").toString();
   }
 
+  public static String resourceRequestsTableInit() {
+return tableInit().append(", 'aaData': resourceRequestsTableData")
+.append(", bDeferRender: true").append(", bProcessing: true}")
+.toString();
+  }
+
 }
\ No newline at end of file
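
Note: a hedged sketch of the contract the new helper encodes. The returned DataTables
options bind the table to a JavaScript array named resourceRequestsTableData and
enable deferred rendering, which is what keeps a large outstanding-requests table
cheap to paginate (the leading portion comes from tableInit() and is elided here):

  String init = WebPageUtils.resourceRequestsTableInit();
  // -> "..., 'aaData': resourceRequestsTableData, bDeferRender: true, bProcessing: true}"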

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e6f9297/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
index df5fb9e..45f1887 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
@@ -41,8 +41,10 @@ public class AppAttemptPage extends RmView {
   : join("Application Attempt ",
 $(YarnWebParams.APPLICATION_ATTEMPT_ID)));
 
-set(DATATABLES_ID, "containers");
+set(DATATABLES_ID, "containers resourceRequests");
 set(initID(DATATABLES, "containers"), WebPageUtils.containersTableInit());
+set(initID(DATATABLES, "resourceRequests"),
+WebPageUtils.resourceRequestsTableInit());
 setTableStyles(html, "containers", ".queue {width:6em}", ".ui 
{width:8em}");
 
 set(YarnWebParams.WEB_UI_TYPE, YarnWebParams.RM_WEB_UI);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e6f9297/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
index 668269e..6fef367 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
+++ 

[02/50] [abbrv] hadoop git commit: YARN-4634. Scheduler UI/Metrics need to consider cases like non-queue label mappings. (Sunil G via wangda)

2016-04-07 Thread aengineer
YARN-4634. Scheduler UI/Metrics need to consider cases like non-queue label 
mappings. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12b11e2e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12b11e2e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12b11e2e

Branch: refs/heads/HDFS-7240
Commit: 12b11e2e688158404feeb3ded37eb6cccad4ea5c
Parents: 0dd9bca
Author: Wangda Tan 
Authored: Thu Mar 31 14:32:37 2016 -0700
Committer: Wangda Tan 
Committed: Thu Mar 31 14:35:18 2016 -0700

--
 .../webapp/CapacitySchedulerPage.java   | 16 ++--
 1 file changed, 14 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12b11e2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index 033152a..5abc250 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -405,8 +405,20 @@ class CapacitySchedulerPage extends RmView {
 CapacitySchedulerInfo sinfo = new CapacitySchedulerInfo(root, cs);
 csqinfo.csinfo = sinfo;
 
-if (null == nodeLabelsInfo || (nodeLabelsInfo.size() == 1
-&& nodeLabelsInfo.get(0).getLabelName().isEmpty())) {
+boolean hasAnyLabelLinkedToNM = false;
+if (null != nodeLabelsInfo) {
+  for (RMNodeLabel label : nodeLabelsInfo) {
+if (label.getLabelName().length() == 0) {
+  // Skip DEFAULT_LABEL
+  continue;
+}
+if (label.getNumActiveNMs() > 0) {
+  hasAnyLabelLinkedToNM = true;
+  break;
+}
+  }
+}
+if (!hasAnyLabelLinkedToNM) {
   used = sinfo.getUsedCapacity() / 100;
   //label is not enabled in the cluster or there's only "default" 
label,
   ul.li().
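
Note: the guard above reduces to "render per-partition scheduler info only when some
non-default label has at least one active NodeManager"; otherwise the page falls back
to the single aggregate view. A behaviorally equivalent condensed form:

  boolean hasAnyLabelLinkedToNM = false;
  if (nodeLabelsInfo != null) {
    for (RMNodeLabel label : nodeLabelsInfo) {
      // ignore the default (empty-name) partition
      if (!label.getLabelName().isEmpty() && label.getNumActiveNMs() > 0) {
        hasAnyLabelLinkedToNM = true;
        break;
      }
    }
  }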



[11/50] [abbrv] hadoop git commit: HDFS-10253. Fix TestRefreshCallQueue failure (Contributed by Xiaoyu Yao)

2016-04-07 Thread aengineer
HDFS-10253. Fix TestRefreshCallQueue failure (Contributed by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54b2e78f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54b2e78f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54b2e78f

Branch: refs/heads/HDFS-7240
Commit: 54b2e78fd28c9def42bec7f0418833bad352686c
Parents: 81d04ca
Author: Vinayakumar B 
Authored: Sun Apr 3 13:27:49 2016 +0530
Committer: Vinayakumar B 
Committed: Sun Apr 3 13:27:49 2016 +0530

--
 .../src/test/java/org/apache/hadoop/TestRefreshCallQueue.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54b2e78f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
index 1be2752..5cb7def 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
@@ -92,7 +92,7 @@ public class TestRefreshCallQueue {
 
   @SuppressWarnings("serial")
  public static class MockCallQueue<E> extends LinkedBlockingQueue<E> {
-public MockCallQueue(int cap, String ns, Configuration conf) {
+public MockCallQueue(int levels, int cap, String ns, Configuration conf) {
   super(cap);
   mockQueueConstructions++;
 }
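
Note: why the extra argument fixes the test, stated as an assumption from this change:
the call-queue manager instantiates the configured queue class reflectively through a
(int, int, String, Configuration) constructor — scheduler priority levels, capacity,
namespace, conf — so a queue class without that shape fails to load. A hedged sketch
of that lookup (names illustrative, not verbatim Hadoop code):

  Constructor<?> ctor = queueClass.getDeclaredConstructor(
      int.class, int.class, String.class, Configuration.class);
  Object queue = ctor.newInstance(levels, capacity, namespace, conf);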



[30/50] [abbrv] hadoop git commit: HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)

2016-04-07 Thread aengineer
HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85ec5573
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85ec5573
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85ec5573

Branch: refs/heads/HDFS-7240
Commit: 85ec5573eb9fd746a9295ecc6fe1ae683073aaf5
Parents: 0005816
Author: Masatake Iwasaki 
Authored: Wed Apr 6 03:22:48 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 03:22:48 2016 +0900

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 57 +
 .../src/main/resources/core-default.xml |  9 ++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 67 ++--
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |  2 +-
 4 files changed, 108 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85ec5573/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 7e6c7e3..fb11cb7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -239,14 +239,33 @@ public class Client {
* 
* @param conf Configuration
* @return the timeout period in milliseconds. -1 if no timeout value is set
+   * @deprecated use {@link #getRpcTimeout(Configuration)} instead
*/
+  @Deprecated
   final public static int getTimeout(Configuration conf) {
+int timeout = getRpcTimeout(conf);
+if (timeout > 0)  {
+  return timeout;
+}
 if (!conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY,
 CommonConfigurationKeys.IPC_CLIENT_PING_DEFAULT)) {
   return getPingInterval(conf);
 }
 return -1;
   }
+
+  /**
+   * The time after which a RPC will timeout.
+   *
+   * @param conf Configuration
+   * @return the timeout period in milliseconds.
+   */
+  public static final int getRpcTimeout(Configuration conf) {
+int timeout =
+conf.getInt(CommonConfigurationKeys.IPC_CLIENT_RPC_TIMEOUT_KEY,
+CommonConfigurationKeys.IPC_CLIENT_RPC_TIMEOUT_DEFAULT);
+return (timeout < 0) ? 0 : timeout;
+  }
   /**
* set the connection timeout value in configuration
* 
@@ -386,7 +405,7 @@ public class Client {
 private Socket socket = null; // connected socket
 private DataInputStream in;
 private DataOutputStream out;
-private int rpcTimeout;
+private final int rpcTimeout;
 private int maxIdleTime; //connections will be culled if it was idle for 
 //maxIdleTime msecs
 private final RetryPolicy connectionRetryPolicy;
@@ -394,8 +413,9 @@ public class Client {
 private int maxRetriesOnSocketTimeouts;
 private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
 private final boolean tcpLowLatency; // if T then use low-delay QoS
-private boolean doPing; //do we need to send ping message
-private int pingInterval; // how often sends ping to the server in msecs
+private final boolean doPing; //do we need to send ping message
+private final int pingInterval; // how often sends ping to the server
+private final int soTimeout; // used by ipc ping and rpc timeout
 private ByteArrayOutputStream pingRequest; // ping message
 
 // currently active calls
@@ -434,6 +454,14 @@ public class Client {
 pingHeader.writeDelimitedTo(pingRequest);
   }
   this.pingInterval = remoteId.getPingInterval();
+  if (rpcTimeout > 0) {
+// effective rpc timeout is rounded up to multiple of pingInterval
+// if pingInterval < rpcTimeout.
+this.soTimeout = (doPing && pingInterval < rpcTimeout) ?
+pingInterval : rpcTimeout;
+  } else {
+this.soTimeout = pingInterval;
+  }
   this.serviceClass = serviceClass;
   if (LOG.isDebugEnabled()) {
 LOG.debug("The ping interval is " + this.pingInterval + " ms.");
@@ -484,12 +512,12 @@ public class Client {
 
   /* Process timeout exception
* if the connection is not going to be closed or 
-   * is not configured to have a RPC timeout, send a ping.
-   * (if rpcTimeout is not set to be 0, then RPC should timeout.
-   * otherwise, throw the timeout exception.
+   * the RPC is not timed out yet, send a ping.
*/
-  private void handleTimeout(SocketTimeoutException e) throws IOException {
-if 
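
Note: the constructor hunk above is the heart of the change. Extracted as a standalone
helper for clarity (a sketch, not actual Hadoop code), the effective socket timeout
becomes the shorter of ping interval and RPC timeout, so pings keep flowing while an
overall RPC deadline is still enforced:

  static int effectiveSoTimeout(boolean doPing, int pingInterval, int rpcTimeout) {
    if (rpcTimeout > 0) {
      // wake at pingInterval to send pings, but never sleep past rpcTimeout
      return (doPing && pingInterval < rpcTimeout) ? pingInterval : rpcTimeout;
    }
    return pingInterval;  // no RPC timeout configured: rely on ping alone
  }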

[36/50] [abbrv] hadoop git commit: YARN-4906. Capture container start/finish time in container metrics. Contributed by Jian He.

2016-04-07 Thread aengineer
YARN-4906. Capture container start/finish time in container metrics. 
Contributed by Jian He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b41e65e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b41e65e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b41e65e5

Branch: refs/heads/HDFS-7240
Commit: b41e65e5bc9459b4d950a2c53860a223f1a0d2ec
Parents: 21eb428
Author: Varun Vasudev 
Authored: Wed Apr 6 13:41:33 2016 +0530
Committer: Varun Vasudev 
Committed: Wed Apr 6 13:41:33 2016 +0530

--
 .../container/ContainerImpl.java| 22 
 .../monitor/ContainerMetrics.java   | 18 
 .../containermanager/TestAuxServices.java   |  2 +-
 .../container/TestContainer.java| 11 ++
 4 files changed, 52 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b41e65e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index da8a3a6..a43a005 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -65,6 +65,7 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.even
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache.SharedCacheUploadEvent;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache.SharedCacheUploadEventType;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainerMetrics;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainerStartMonitoringEvent;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainerStopMonitoringEvent;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
@@ -100,6 +101,7 @@ public class ContainerImpl implements Container {
   private boolean wasLaunched;
   private long containerLocalizationStartTime;
   private long containerLaunchStartTime;
+  private ContainerMetrics containerMetrics;
   private static Clock clock = SystemClock.getInstance();
 
   /** The NM-wide configuration - not specific to this container */
@@ -147,6 +149,21 @@ public class ContainerImpl implements Container {
 this.readLock = readWriteLock.readLock();
 this.writeLock = readWriteLock.writeLock();
 this.context = context;
+boolean containerMetricsEnabled =
+conf.getBoolean(YarnConfiguration.NM_CONTAINER_METRICS_ENABLE,
+YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_ENABLE);
+
+if (containerMetricsEnabled) {
+  long flushPeriod =
+  conf.getLong(YarnConfiguration.NM_CONTAINER_METRICS_PERIOD_MS,
+  YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_PERIOD_MS);
+  long unregisterDelay = conf.getLong(
+  YarnConfiguration.NM_CONTAINER_METRICS_UNREGISTER_DELAY_MS,
+  YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_UNREGISTER_DELAY_MS);
+  containerMetrics = ContainerMetrics
+  .forContainer(containerId, flushPeriod, unregisterDelay);
+  containerMetrics.recordStartTime(clock.getTime());
+}
 
 stateMachine = stateMachineFactory.make(this);
   }
@@ -989,6 +1006,11 @@ public class ContainerImpl implements Container {
 @SuppressWarnings("unchecked")
 public void transition(ContainerImpl container, ContainerEvent event) {
   container.metrics.releaseContainer(container.resource);
+  if (container.containerMetrics != null) {
+container.containerMetrics
+.recordFinishTimeAndExitCode(clock.getTime(), container.exitCode);
+container.containerMetrics.finished();
+  }
   container.sendFinishedEvents();
   //if the current state is NEW it means the CONTAINER_INIT was never 
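
Note: putting the two hunks together, the per-container metrics lifecycle introduced
here is, schematically (method names as in the diff; flush and unregister periods come
from YarnConfiguration):

  ContainerMetrics m =
      ContainerMetrics.forContainer(containerId, flushPeriod, unregisterDelay);
  m.recordStartTime(clock.getTime());        // at container creation
  // ... container runs ...
  m.recordFinishTimeAndExitCode(clock.getTime(), exitCode);
  m.finished();                              // unregisters after the delay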
   

[07/50] [abbrv] hadoop git commit: HADOOP-11687. Ignore x-* and response headers when copying an Amazon S3 object. Contributed by Aaron Peterson and harsh.

2016-04-07 Thread aengineer
HADOOP-11687. Ignore x-* and response headers when copying an Amazon S3 object. 
Contributed by Aaron Peterson and harsh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/256c82fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/256c82fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/256c82fe

Branch: refs/heads/HDFS-7240
Commit: 256c82fe2981748cd0befc5490d8118d139908f9
Parents: 3488c4f
Author: Harsh J 
Authored: Fri Apr 1 14:18:10 2016 +0530
Committer: Harsh J 
Committed: Fri Apr 1 14:18:10 2016 +0530

--
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 70 +++-
 .../src/site/markdown/tools/hadoop-aws/index.md |  7 ++
 2 files changed, 76 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/256c82fe/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 7ab6c79..6afb05d 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -26,6 +26,7 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 
@@ -1128,7 +1129,7 @@ public class S3AFileSystem extends FileSystem {
 }
 
 ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
-final ObjectMetadata dstom = srcom.clone();
+ObjectMetadata dstom = cloneObjectMetadata(srcom);
 if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
   dstom.setSSEAlgorithm(serverSideEncryptionAlgorithm);
 }
@@ -1235,6 +1236,73 @@ public class S3AFileSystem extends FileSystem {
   }
 
   /**
+   * Creates a copy of the passed {@link ObjectMetadata}.
+   * Does so without using the {@link ObjectMetadata#clone()} method,
+   * to avoid copying unnecessary headers.
+   * @param source the {@link ObjectMetadata} to copy
+   * @return a copy of {@link ObjectMetadata} with only relevant attributes
+   */
+  private ObjectMetadata cloneObjectMetadata(ObjectMetadata source) {
+// This approach may be too brittle, especially if
+// in future there are new attributes added to ObjectMetadata
+// that we do not explicitly call to set here
+ObjectMetadata ret = new ObjectMetadata();
+
+// Non null attributes
+ret.setContentLength(source.getContentLength());
+
+// Possibly null attributes
+// Allowing nulls to pass breaks it during later use
+if (source.getCacheControl() != null) {
+  ret.setCacheControl(source.getCacheControl());
+}
+if (source.getContentDisposition() != null) {
+  ret.setContentDisposition(source.getContentDisposition());
+}
+if (source.getContentEncoding() != null) {
+  ret.setContentEncoding(source.getContentEncoding());
+}
+if (source.getContentMD5() != null) {
+  ret.setContentMD5(source.getContentMD5());
+}
+if (source.getContentType() != null) {
+  ret.setContentType(source.getContentType());
+}
+if (source.getExpirationTime() != null) {
+  ret.setExpirationTime(source.getExpirationTime());
+}
+if (source.getExpirationTimeRuleId() != null) {
+  ret.setExpirationTimeRuleId(source.getExpirationTimeRuleId());
+}
+if (source.getHttpExpiresDate() != null) {
+  ret.setHttpExpiresDate(source.getHttpExpiresDate());
+}
+if (source.getLastModified() != null) {
+  ret.setLastModified(source.getLastModified());
+}
+if (source.getOngoingRestore() != null) {
+  ret.setOngoingRestore(source.getOngoingRestore());
+}
+if (source.getRestoreExpirationTime() != null) {
+  ret.setRestoreExpirationTime(source.getRestoreExpirationTime());
+}
+if (source.getSSEAlgorithm() != null) {
+  ret.setSSEAlgorithm(source.getSSEAlgorithm());
+}
+if (source.getSSECustomerAlgorithm() != null) {
+  ret.setSSECustomerAlgorithm(source.getSSECustomerAlgorithm());
+}
+if (source.getSSECustomerKeyMd5() != null) {
+  ret.setSSECustomerKeyMd5(source.getSSECustomerKeyMd5());
+}
+
+for (Map.Entry<String, String> e : source.getUserMetadata().entrySet()) {
+  ret.addUserMetadata(e.getKey(), e.getValue());
+}
+return ret;
+  }
+
+  /**
* Return the number of bytes that large input files should be optimally
* be split into to minimize i/o time.
* @deprecated use {@link 
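
Note: the call site for the selective copy is in the copy path shown in the first
hunk: the destination metadata must not inherit Amazon's x-* and response headers,
which ObjectMetadata.clone() would carry over. Condensed:

  ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
  ObjectMetadata dstom = cloneObjectMetadata(srcom);  // relevant attributes only
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    dstom.setSSEAlgorithm(serverSideEncryptionAlgorithm);
  }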

[45/50] [abbrv] hadoop git commit: HADOOP-12984. Add GenericTestUtils.getTestDir method and use it for emporary directory in tests (Contributed by Steve Loughran and Vinayakumar B

2016-04-07 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d29e245/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileUnsortedByteArrays.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileUnsortedByteArrays.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileUnsortedByteArrays.java
index 235e5e4..f243b2a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileUnsortedByteArrays.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileUnsortedByteArrays.java
@@ -29,13 +29,12 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.file.tfile.TFile.Reader;
 import org.apache.hadoop.io.file.tfile.TFile.Writer;
 import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
 import org.junit.Test;
 
 public class TestTFileUnsortedByteArrays {
-  private static String ROOT =
-  System.getProperty("test.build.data", "/tmp/tfile-test");
-
+  private static String ROOT = GenericTestUtils.getTestDir().getAbsolutePath();
 
   private final static int BLOCK_SIZE = 512;
   private final static int BUF_SIZE = 64;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d29e245/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestVLong.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestVLong.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestVLong.java
index 9efd271..69e6eb8 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestVLong.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestVLong.java
@@ -29,12 +29,12 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
 import org.junit.Test;
 
 public class TestVLong {
-  private static String ROOT =
-  System.getProperty("test.build.data", "/tmp/tfile-test");
+  private static String ROOT = GenericTestUtils.getTestDir().getAbsolutePath();
   private Configuration conf;
   private FileSystem fs;
   private Path path;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d29e245/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
index 13fdbc1..e6f25dc 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
@@ -61,8 +61,7 @@ import static 
org.apache.hadoop.io.nativeio.NativeIO.POSIX.Stat.*;
 public class TestNativeIO {
   static final Log LOG = LogFactory.getLog(TestNativeIO.class);
 
-  static final File TEST_DIR = new File(
-System.getProperty("test.build.data"), "testnativeio");
+  static final File TEST_DIR = GenericTestUtils.getTestDir("testnativeio");
 
   @Before
   public void checkLoaded() {
@@ -516,8 +515,7 @@ public class TestNativeIO {
 
   @Test (timeout = 3)
   public void testRenameTo() throws Exception {
-final File TEST_DIR = new File(new File(
-System.getProperty("test.build.data","build/test/data")), 
"renameTest");
+final File TEST_DIR = GenericTestUtils.getTestDir("renameTest");
 assumeTrue(TEST_DIR.mkdirs());
 File nonExistentFile = new File(TEST_DIR, "nonexistent");
 File targetFile = new File(TEST_DIR, "target");
@@ -566,9 +564,7 @@ public class TestNativeIO {
   @Test(timeout=1)
   public void testMlock() throws Exception {
 assumeTrue(NativeIO.isAvailable());
-final File TEST_FILE = new File(new File(
-System.getProperty("test.build.data","build/test/data")),
-"testMlockFile");
+final File TEST_FILE = GenericTestUtils.getTestDir("testMlockFile");
 final int BUF_LEN = 12289;
 byte buf[] = new byte[BUF_LEN];
 int bufSum = 0;
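
Note: assumed semantics of the new helper, inferred from these call sites:
GenericTestUtils.getTestDir() resolves the test-data root from the test.build.data
system property (with a build-tree default), and getTestDir(String) returns a named
subdirectory of it, replacing the hand-rolled System.getProperty lookups:

  File root = GenericTestUtils.getTestDir();               // test-data root
  File dir  = GenericTestUtils.getTestDir("testnativeio"); // named subdir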

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d29e245/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java
--
diff --git 

[03/50] [abbrv] hadoop git commit: HADOOP-12955. Fix bugs in the initialization of the ISA-L library JNI bindings (Kai Zheng via cmccabe)

2016-04-07 Thread aengineer
HADOOP-12955. Fix bugs in the initialization of the ISA-L library JNI bindings 
(Kai Zheng via cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19639785
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19639785
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19639785

Branch: refs/heads/HDFS-7240
Commit: 19639785f5e9c483558ce585287b9dda9d626263
Parents: 12b11e2
Author: Colin Patrick Mccabe 
Authored: Thu Mar 31 15:09:11 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Thu Mar 31 15:09:11 2016 -0700

--
 .../hadoop/util/NativeLibraryChecker.java   |  8 ++--
 .../hadoop/io/erasurecode/erasure_coder.c   |  1 +
 .../apache/hadoop/io/erasurecode/isal_load.c| 46 +---
 .../apache/hadoop/io/erasurecode/isal_load.h|  6 +--
 .../io/erasurecode/jni_erasure_code_native.c| 11 +++--
 5 files changed, 34 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19639785/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
index 46f0897..e166bec 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
@@ -95,12 +95,12 @@ public class NativeLibraryChecker {
 snappyLibraryName = SnappyCodec.getLibraryName();
   }
 
-  try {
-isalDetail = ErasureCodeNative.getLoadingFailureReason();
+  isalDetail = ErasureCodeNative.getLoadingFailureReason();
+  if (isalDetail != null) {
+isalLoaded = false;
+  } else {
 isalDetail = ErasureCodeNative.getLibraryName();
 isalLoaded = true;
-  } catch (UnsatisfiedLinkError e) {
-isalLoaded = false;
   }
 
   openSslDetail = OpensslCipher.getLoadingFailureReason();
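
Note: the Java-side hunk changes the detection contract: ErasureCodeNative records any
ISA-L load failure as a reason string at class-initialization time, so callers probe
it instead of catching UnsatisfiedLinkError themselves. Minimal sketch:

  String reason = ErasureCodeNative.getLoadingFailureReason();
  boolean isalLoaded = (reason == null);
  String isalDetail = isalLoaded ? ErasureCodeNative.getLibraryName() : reason;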

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19639785/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/erasure_coder.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/erasure_coder.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/erasure_coder.c
index b3479bb..b2d856b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/erasure_coder.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/erasure_coder.c
@@ -19,6 +19,7 @@
 #include "erasure_code.h"
 #include "gf_util.h"
 #include "erasure_coder.h"
+#include "dump.h"
 
 #include 
 #include 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19639785/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.c
index 55e8efd..26d8e1a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.c
@@ -78,6 +78,12 @@ static const char* load_functions() {
 
 void load_erasurecode_lib(char* err, size_t err_len) {
   const char* errMsg;
+  const char* library = NULL;
+#ifdef UNIX
+  Dl_info dl_info;
+#else
+  LPTSTR filename = NULL;
+#endif
 
   err[0] = '\0';
 
@@ -111,38 +117,28 @@ void load_erasurecode_lib(char* err, size_t err_len) {
   if (errMsg != NULL) {
 snprintf(err, err_len, "Loading functions from ISA-L failed: %s", errMsg);
   }
-}
 
-int build_support_erasurecode() {
-#ifdef HADOOP_ISAL_LIBRARY
-  return 1;
-#else
-  return 0;
-#endif
-}
-
-const char* get_library_name() {
 #ifdef UNIX
-  Dl_info dl_info;
-
-  if (isaLoader->ec_encode_data == NULL) {
-return HADOOP_ISAL_LIBRARY;
-  }
-
  if(dladdr(isaLoader->ec_encode_data, &dl_info)) {
-return dl_info.dli_fname;
+library = dl_info.dli_fname;
   }
 #else
-  LPTSTR filename = NULL;
-
-  if (isaLoader->libec == NULL) {
-return HADOOP_ISAL_LIBRARY;
-  }
-
   if (GetModuleFileName(isaLoader->libec, filename, 256) > 0) 

[33/50] [abbrv] hadoop git commit: HDFS-10235. Last contact for Live Nodes should be relative time. Contributed by Brahma Reddy Battula.

2016-04-07 Thread aengineer
HDFS-10235. Last contact for Live Nodes should be relative time. Contributed by 
Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0cd320a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0cd320a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0cd320a8

Branch: refs/heads/HDFS-7240
Commit: 0cd320a8463efe19a6228f9fe14693aa37ac8a10
Parents: 500e5a5
Author: Ravi Prakash 
Authored: Tue Apr 5 13:41:19 2016 -0700
Committer: Ravi Prakash 
Committed: Tue Apr 5 13:41:19 2016 -0700

--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd320a8/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 3d9ca42..a9c3304 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -310,7 +310,7 @@
   {#LiveNodes}
   
 {name} ({xferaddr})
-{#helper_relative_time 
value="{lastContact}"/}
+{lastContact}s
 
   
 {capacity|fmt_bytes}



[48/50] [abbrv] hadoop git commit: HDFS-9719. Refactoring ErasureCodingWorker into smaller reusable constructs. Contributed by Kai Zheng.

2016-04-07 Thread aengineer
HDFS-9719. Refactoring ErasureCodingWorker into smaller reusable constructs. 
Contributed by Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c18a53c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c18a53c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c18a53c

Branch: refs/heads/HDFS-7240
Commit: 3c18a53cbd2efabb2ad108d63a0b0b558424115f
Parents: 8d29e24
Author: Uma Maheswara Rao G 
Authored: Wed Apr 6 22:50:24 2016 -0700
Committer: Uma Maheswara Rao G 
Committed: Wed Apr 6 22:50:24 2016 -0700

--
 .../hadoop/hdfs/util/StripedBlockUtil.java  |   22 +-
 .../erasurecode/ErasureCodingWorker.java| 1016 +-
 .../erasurecode/StripedBlockReader.java |  202 
 .../erasurecode/StripedBlockWriter.java |  196 
 .../datanode/erasurecode/StripedReader.java |  466 
 .../erasurecode/StripedReconstructor.java   |  273 +
 .../datanode/erasurecode/StripedWriter.java |  313 ++
 .../datanode/erasurecode/package-info.java  |   26 +
 .../hadoop/hdfs/TestReconstructStripedFile.java |   11 +-
 9 files changed, 1555 insertions(+), 970 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c18a53c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
index 0819376..c8827d9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
@@ -141,6 +141,12 @@ public class StripedBlockUtil {
 return locatedBlock;
   }
 
+  public static ExtendedBlock constructInternalBlock(
+  ExtendedBlock blockGroup, ErasureCodingPolicy ecPolicy,
+  int idxInBlockGroup) {
+return constructInternalBlock(blockGroup, ecPolicy.getCellSize(),
+ecPolicy.getNumDataUnits(), idxInBlockGroup);
+  }
   /**
* This method creates an internal {@link ExtendedBlock} at the given index
* of a block group.
@@ -154,21 +160,28 @@ public class StripedBlockUtil {
 return block;
   }
 
+  public static long getInternalBlockLength(long dataSize,
+ErasureCodingPolicy ecPolicy,
+int idxInBlockGroup) {
+return getInternalBlockLength(dataSize, ecPolicy.getCellSize(),
+ecPolicy.getNumDataUnits(), idxInBlockGroup);
+  }
+
   /**
* Get the size of an internal block at the given index of a block group
*
* @param dataSize Size of the block group only counting data blocks
* @param cellSize The size of a striping cell
* @param numDataBlocks The number of data blocks
-   * @param i The logical index in the striped block group
+   * @param idxInBlockGroup The logical index in the striped block group
* @return The size of the internal block at the specified index
*/
   public static long getInternalBlockLength(long dataSize,
-  int cellSize, int numDataBlocks, int i) {
+  int cellSize, int numDataBlocks, int idxInBlockGroup) {
 Preconditions.checkArgument(dataSize >= 0);
 Preconditions.checkArgument(cellSize > 0);
 Preconditions.checkArgument(numDataBlocks > 0);
-Preconditions.checkArgument(i >= 0);
+Preconditions.checkArgument(idxInBlockGroup >= 0);
 // Size of each stripe (only counting data blocks)
 final int stripeSize = cellSize * numDataBlocks;
 // If block group ends at stripe boundary, each internal block has an equal
@@ -180,7 +193,8 @@ public class StripedBlockUtil {
 
 final int numStripes = (int) ((dataSize - 1) / stripeSize + 1);
 return (numStripes - 1L)*cellSize
-+ lastCellSize(lastStripeDataLen, cellSize, numDataBlocks, i);
++ lastCellSize(lastStripeDataLen, cellSize,
+numDataBlocks, idxInBlockGroup);
   }
 
   /**
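
Note: illustrative use of the new ErasureCodingPolicy-based overloads;
reconstruction-path callers no longer unpack cellSize and numDataUnits at every
call site:

  ExtendedBlock internal = StripedBlockUtil.constructInternalBlock(
      blockGroup, ecPolicy, idxInBlockGroup);
  long length = StripedBlockUtil.getInternalBlockLength(
      blockGroup.getNumBytes(), ecPolicy, idxInBlockGroup);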

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c18a53c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index 4bcb291..e7c5abc 100644
--- 

[19/50] [abbrv] hadoop git commit: HDFS-10178. Permanent write failures can happen if pipeline recoveries occur for the first packet. Contributed by Kihwal Lee.

2016-04-07 Thread aengineer
HDFS-10178. Permanent write failures can happen if pipeline recoveries occur 
for the first packet. Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7d1fb0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7d1fb0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7d1fb0c

Branch: refs/heads/HDFS-7240
Commit: a7d1fb0cd2fdbf830602eb4dbbd9bbe62f4d5584
Parents: 154d253
Author: Kihwal Lee 
Authored: Mon Apr 4 16:39:23 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Apr 4 16:40:00 2016 -0500

--
 .../hdfs/server/datanode/BlockReceiver.java |  2 +
 .../hdfs/server/datanode/BlockSender.java   |  6 ++-
 .../server/datanode/DataNodeFaultInjector.java  |  2 +
 .../TestClientProtocolForPipelineRecovery.java  | 53 
 4 files changed, 62 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7d1fb0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 2e4ee02..fb0c1c5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -574,6 +574,8 @@ class BlockReceiver implements Closeable {
 if (mirrorOut != null && !mirrorError) {
   try {
 long begin = Time.monotonicNow();
+// For testing. Normally no-op.
+DataNodeFaultInjector.get().stopSendingPacketDownstream();
 packetReceiver.mirrorPacketTo(mirrorOut);
 mirrorOut.flush();
 long now = Time.monotonicNow();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7d1fb0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index 773a64c..398935d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -300,11 +300,15 @@ class BlockSender implements java.io.Closeable {
 
 // The meta file will contain only the header if the NULL checksum
 // type was used, or if the replica was written to transient 
storage.
+// Also, when only header portion of a data packet was transferred
+// and then pipeline breaks, the meta file can contain only the
+// header and 0 byte in the block data file.
 // Checksum verification is not performed for replicas on transient
 // storage.  The header is important for determining the checksum
 // type later when lazy persistence copies the block to 
non-transient
 // storage and computes the checksum.
-if (metaIn.getLength() > BlockMetadataHeader.getHeaderSize()) {
+if (!replica.isOnTransientStorage() &&
+metaIn.getLength() >= BlockMetadataHeader.getHeaderSize()) {
   checksumIn = new DataInputStream(new BufferedInputStream(
   metaIn, IO_FILE_BUFFER_SIZE));
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7d1fb0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index 0e38694..7327420 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -50,5 +50,7 @@ public class DataNodeFaultInjector {
 return false;
   }
 
+  public void stopSendingPacketDownstream() throws IOException {}
+
   public void noRegistration() throws IOException { }
 }
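
Note: a test-side sketch of the new hook (how the injector instance is installed is
elided here): overriding stopSendingPacketDownstream lets a test break the pipeline
right after only the packet header has reached the mirror, reproducing the
zero-length-block-with-valid-meta case handled in BlockSender above:

  DataNodeFaultInjector injector = new DataNodeFaultInjector() {
    @Override
    public void stopSendingPacketDownstream() throws IOException {
      throw new IOException("injected fault: pipeline broken after header");
    }
  };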


[43/50] [abbrv] hadoop git commit: HDFS-10267. Extra "synchronized" on FsDatasetImpl#recoverAppend and FsDatasetImpl#recoverClose

2016-04-07 Thread aengineer
HDFS-10267. Extra "synchronized" on FsDatasetImpl#recoverAppend and 
FsDatasetImpl#recoverClose


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4bd7cbc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4bd7cbc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4bd7cbc2

Branch: refs/heads/HDFS-7240
Commit: 4bd7cbc29d142fc56324156333b9a8a7d7b68042
Parents: 3be1ab4
Author: Colin Patrick Mccabe 
Authored: Wed Apr 6 12:36:54 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Wed Apr 6 21:07:31 2016 -0700

--
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   4 +-
 .../hdfs/server/datanode/TestBlockRecovery.java | 234 ++-
 2 files changed, 180 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bd7cbc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 240345c..7e4e8eb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1268,7 +1268,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   }
 
   @Override  // FsDatasetSpi
-  public synchronized ReplicaHandler recoverAppend(
+  public ReplicaHandler recoverAppend(
   ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
 LOG.info("Recover failed append to " + b);
 
@@ -1301,7 +1301,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   }
 
   @Override // FsDatasetSpi
-  public synchronized Replica recoverClose(ExtendedBlock b, long newGS,
+  public Replica recoverClose(ExtendedBlock b, long newGS,
   long expectedBlockLen) throws IOException {
 LOG.info("Recover failed close " + b);
 while (true) {
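
Removing the method-level synchronized narrows the lock scope: state lookups
stay guarded while the long-running recovery I/O proceeds outside the dataset
monitor. A rough sketch of that pattern (helper and exception names here are
hypothetical, not the exact FsDatasetImpl code):

    public Replica recoverClose(ExtendedBlock b, long newGS,
        long expectedBlockLen) throws IOException {
      while (true) {
        try {
          ReplicaInfo replica;
          synchronized (this) {
            // short critical section: look up and validate the replica
            replica = getReplicaInfo(b); // hypothetical helper
          }
          // heavy disk work (truncate, fsync, rename) runs unlocked
          return finalizeReplica(b, replica); // hypothetical helper
        } catch (RetryRecoveryException e) { // hypothetical: a writer raced us
          // stop the conflicting writer, then retry the loop
        }
      }
    }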

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bd7cbc2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index 751089f..42e80fa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -44,8 +44,10 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
 
 import com.google.common.collect.Iterators;
 import org.apache.commons.logging.Log;
@@ -90,6 +92,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
@@ -161,7 +164,7 @@ public class TestBlockRecovery {
   }
 
   private final long
-  TEST_LOCK_HOG_DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS = 10L;
+  TEST_STOP_WORKER_XCEIVER_STOP_TIMEOUT_MILLIS = 10L;
 
   /**
* Starts an instance of DataNode
@@ -175,11 +178,10 @@ public class TestBlockRecovery {
 conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
 conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
 conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
-if (currentTestName.getMethodName().equals(
-"testInitReplicaRecoveryDoesNotHogLock")) {
+if (currentTestName.getMethodName().contains("DoesNotHoldLock")) {
   // This test requires a very long value for the xceiver stop timeout.
   conf.setLong(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY,
-  TEST_LOCK_HOG_DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS);
+  TEST_STOP_WORKER_XCEIVER_STOP_TIMEOUT_MILLIS);
 }
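
The renamed constant pairs with the looser method-name match above, which keys
per-test configuration off JUnit's TestName rule. A self-contained sketch of
that pattern:

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.TestName;

    public class TimeoutPerMethod {
      @Rule
      public TestName currentTestName = new TestName();

      @Test
      public void testRecoveryDoesNotHoldLock() throws Exception {
        // mirrors the conf switch in the setup code above
        long stopTimeoutMs =
            currentTestName.getMethodName().contains("DoesNotHoldLock")
                ? 10L : 60_000L;
        // ... configure the DataNode with stopTimeoutMs and run the test ...
      }
    }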
 

[41/50] [abbrv] hadoop git commit: YARN-4769. Add support for CSRF header in the dump capacity scheduler logs and kill app buttons in RM web UI. Contributed by Varun Vasudev

2016-04-07 Thread aengineer
YARN-4769. Add support for CSRF header in the dump capacity scheduler logs and 
kill app buttons in RM web UI. Contributed by Varun Vasudev


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93bacda0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93bacda0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93bacda0

Branch: refs/heads/HDFS-7240
Commit: 93bacda08bc546612f9278b31f5c38107867630a
Parents: aede8c1
Author: Jian He 
Authored: Wed Apr 6 16:13:47 2016 -0700
Committer: Jian He 
Committed: Wed Apr 6 16:13:47 2016 -0700

--
 .../security/http/RestCsrfPreventionFilter.java |  2 +-
 .../hadoop/yarn/server/webapp/AppBlock.java | 20 
 .../webapp/CapacitySchedulerPage.java   |  2 ++
 3 files changed, 23 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93bacda0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java
index c0f7e39..33579b4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java
@@ -62,7 +62,7 @@ public class RestCsrfPreventionFilter implements Filter {
   public static final String CUSTOM_METHODS_TO_IGNORE_PARAM =
   "methods-to-ignore";
   static final String  BROWSER_USER_AGENTS_DEFAULT = "^Mozilla.*,^Opera.*";
-  static final String HEADER_DEFAULT = "X-XSRF-HEADER";
+  public static final String HEADER_DEFAULT = "X-XSRF-HEADER";
   static final String  METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE";
   private String  headerName = HEADER_DEFAULT;
  private Set<String> methodsToIgnore = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93bacda0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index 44ed223..69beef2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -24,12 +24,14 @@ import static 
org.apache.hadoop.yarn.webapp.YarnWebParams.WEB_UI_TYPE;
 
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
+import java.util.Map;
 
 import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.http.RestCsrfPreventionFilter;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import 
org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
@@ -143,6 +145,7 @@ public class AppBlock extends HtmlBlock {
   .append(" type: 'PUT',")
   .append(" url: '/ws/v1/cluster/apps/").append(aid).append("/state',")
   .append(" contentType: 'application/json',")
+  .append(getCSRFHeaderString(conf))
   .append(" data: '{\"state\":\"KILLED\"}',")
   .append(" dataType: 'json'")
   .append(" }).done(function(data){")
@@ -369,4 +372,21 @@ public class AppBlock extends HtmlBlock {
   protected LogAggregationStatus getLogAggregationStatus() {
 return null;
   }
+
+  public static String getCSRFHeaderString(Configuration conf) {
+String ret = "";
+if (conf.getBoolean(YarnConfiguration.RM_CSRF_ENABLED, false)) {
+  ret = " headers : { '";
+  Map<String, String> filterParams = RestCsrfPreventionFilter
+  .getFilterParams(conf, YarnConfiguration.RM_CSRF_PREFIX);
+  if (filterParams
+  .containsKey(RestCsrfPreventionFilter.CUSTOM_HEADER_PARAM)) {
+ret += filterParams.get(RestCsrfPreventionFilter.CUSTOM_HEADER_PARAM);
+
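
The message is truncated here, but the intent is visible: the helper emits a
jQuery "headers : { ... }" clause only when RM CSRF protection is on, so the
kill-app PUT carries the custom header the filter expects. A hedged usage
sketch (the exact string shape depends on the truncated tail of the method):

    Configuration conf = new Configuration();
    conf.setBoolean(YarnConfiguration.RM_CSRF_ENABLED, true);
    // e.g. " headers : { 'X-XSRF-HEADER' : '' }," when no custom
    // header parameter is configured
    String headerJs = AppBlock.getCSRFHeaderString(conf);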

[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2016-04-07 Thread aengineer
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f62ba55
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f62ba55
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f62ba55

Branch: refs/heads/HDFS-7240
Commit: 3f62ba558d33a3d385359e04a445a54441d6d53b
Parents: fedb22d a62637a
Author: Anu Engineer 
Authored: Thu Apr 7 14:43:39 2016 -0700
Committer: Anu Engineer 
Committed: Thu Apr 7 14:43:39 2016 -0700

--
 BUILDING.txt|4 +-
 .../server/AuthenticationFilter.java|   33 +-
 .../hadoop-common/src/main/bin/hadoop   |   12 +-
 .../hadoop-common/src/main/bin/hadoop-config.sh |6 +-
 .../hadoop-common/src/main/bin/hadoop-daemon.sh |6 +-
 .../src/main/bin/hadoop-daemons.sh  |6 +-
 .../src/main/bin/hadoop-functions.sh|   37 +-
 .../src/main/bin/hadoop-layout.sh.example   |   14 +-
 .../hadoop-common/src/main/bin/slaves.sh|6 +-
 .../hadoop-common/src/main/bin/start-all.sh |4 +-
 .../hadoop-common/src/main/bin/stop-all.sh  |4 +-
 .../hadoop-common/src/main/conf/hadoop-env.sh   |   10 +-
 .../org/apache/hadoop/conf/Configuration.java   |   13 +
 .../hadoop/fs/CommonConfigurationKeys.java  |   14 +-
 .../java/org/apache/hadoop/fs/FileUtil.java |   41 -
 .../org/apache/hadoop/fs/PathIOException.java   |9 +
 .../hadoop/fs/shell/CommandWithDestination.java |3 +-
 .../apache/hadoop/fs/shell/MoveCommands.java|6 +-
 .../java/org/apache/hadoop/fs/shell/Touch.java  |3 +-
 .../org/apache/hadoop/ipc/CallQueueManager.java |  134 ++-
 .../main/java/org/apache/hadoop/ipc/Client.java |  130 ++-
 .../apache/hadoop/ipc/DecayRpcScheduler.java|  396 +--
 .../hadoop/ipc/DecayRpcSchedulerMXBean.java |2 +
 .../apache/hadoop/ipc/DefaultRpcScheduler.java  |   45 +
 .../org/apache/hadoop/ipc/FairCallQueue.java|   45 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|8 +-
 .../org/apache/hadoop/ipc/RpcScheduler.java |8 +-
 .../java/org/apache/hadoop/ipc/Schedulable.java |5 +-
 .../main/java/org/apache/hadoop/ipc/Server.java |   77 +-
 .../apache/hadoop/ipc/WritableRpcEngine.java|   45 +-
 .../hadoop/metrics2/lib/MutableQuantiles.java   |7 +-
 .../hadoop/metrics2/util/QuantileEstimator.java |   32 +
 .../hadoop/metrics2/util/SampleQuantiles.java   |2 +-
 .../java/org/apache/hadoop/net/NetUtils.java|   15 +-
 .../apache/hadoop/security/SecurityUtil.java|   53 +-
 .../security/http/RestCsrfPreventionFilter.java |2 +-
 .../apache/hadoop/security/ssl/SSLFactory.java  |   42 +-
 .../org/apache/hadoop/tracing/TraceUtils.java   |4 +-
 .../hadoop/util/NativeLibraryChecker.java   |8 +-
 .../apache/hadoop/util/ShutdownHookManager.java |  116 +-
 .../hadoop/io/compress/lz4/Lz4Compressor.c  |4 +-
 .../hadoop/io/erasurecode/erasure_coder.c   |1 +
 .../apache/hadoop/io/erasurecode/isal_load.c|   46 +-
 .../apache/hadoop/io/erasurecode/isal_load.h|6 +-
 .../io/erasurecode/jni_erasure_code_native.c|   11 +-
 .../src/main/resources/core-default.xml |9 +-
 .../src/site/markdown/ClusterSetup.md   |   40 +-
 .../src/site/markdown/CommandsManual.md |2 +-
 .../src/site/markdown/UnixShellGuide.md |2 +-
 .../apache/hadoop/conf/TestConfiguration.java   |8 +-
 .../crypto/TestCryptoStreamsForLocalFS.java |5 +-
 .../apache/hadoop/crypto/key/TestKeyShell.java  |5 +-
 .../org/apache/hadoop/fs/FSTestWrapper.java |3 +-
 .../fs/FileContextMainOperationsBaseTest.java   |4 +-
 .../apache/hadoop/fs/FileContextTestHelper.java |6 +-
 .../apache/hadoop/fs/FileContextURIBase.java|6 +-
 .../apache/hadoop/fs/FileSystemTestHelper.java  |4 +-
 .../org/apache/hadoop/fs/TestAvroFSInput.java   |9 +-
 .../hadoop/fs/TestChecksumFileSystem.java   |5 +-
 .../org/apache/hadoop/fs/TestDFVariations.java  |2 +-
 .../test/java/org/apache/hadoop/fs/TestDU.java  |4 +-
 .../hadoop/fs/TestFileContextResolveAfs.java|8 +-
 .../java/org/apache/hadoop/fs/TestFileUtil.java |   66 +-
 .../org/apache/hadoop/fs/TestFsShellCopy.java   |   56 +-
 .../apache/hadoop/fs/TestFsShellReturnCode.java |8 +-
 .../org/apache/hadoop/fs/TestFsShellTouch.java  |   88 ++
 .../hadoop/fs/TestGetFileBlockLocations.java|5 +-
 .../hadoop/fs/TestHarFileSystemBasics.java  |5 +-
 .../java/org/apache/hadoop/fs/TestHardLink.java |5 +-
 .../org/apache/hadoop/fs/TestListFiles.java |   14 +-
 .../apache/hadoop/fs/TestLocalFileSystem.java   |7 +-
 .../fs/TestLocalFileSystemPermission.java   |   18 +-
 .../java/org/apache/hadoop/fs/TestPath.java |6 +-
 

[25/50] [abbrv] hadoop git commit: YARN-4609. RM Nodes list page takes too much time to load. Contributed by Bibin A Chundatt

2016-04-07 Thread aengineer
YARN-4609. RM Nodes list page takes too much time to load. Contributed by Bibin 
A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/776b549e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/776b549e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/776b549e

Branch: refs/heads/HDFS-7240
Commit: 776b549e2ac20a68a5513cbcaac0edc33233dc03
Parents: 552237d
Author: Rohith Sharma K S 
Authored: Tue Apr 5 14:47:25 2016 +0530
Committer: Rohith Sharma K S 
Committed: Tue Apr 5 14:47:25 2016 +0530

--
 .../resourcemanager/webapp/NodesPage.java   | 53 +---
 .../resourcemanager/webapp/TestNodesPage.java   | 37 --
 2 files changed, 45 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/776b549e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
index 9603468..7063421 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
-import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_STATE;
 import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_LABEL;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_STATE;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
@@ -40,7 +40,6 @@ import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -101,6 +100,7 @@ class NodesPage extends RmView {
   LOG.debug("Unexpected state filter for inactive RM node");
 }
   }
+  StringBuilder nodeTableData = new StringBuilder("[\n");
   for (RMNode ni : rmNodes) {
 if (stateFilter != null) {
   NodeState state = ni.getState();
@@ -129,27 +129,40 @@ class NodesPage extends RmView {
 NodeInfo info = new NodeInfo(ni, sched);
 int usedMemory = (int) info.getUsedMemory();
 int availableMemory = (int) info.getAvailableMemory();
-TR<TBODY<TABLE<Hamlet>>> row =
-tbody.tr().td(StringUtils.join(",", info.getNodeLabels()))
-.td(info.getRack()).td(info.getState()).td(info.getNodeId());
+nodeTableData.append("[\"")
+.append(StringUtils.join(",", 
info.getNodeLabels())).append("\",\"")
+.append(info.getRack()).append("\",\"").append(info.getState())
+.append("\",\"").append(info.getNodeId());
 if (isInactive) {
-  row.td()._("N/A")._();
+  nodeTableData.append("\",\"").append("N/A").append("\",\"");
 } else {
   String httpAddress = info.getNodeHTTPAddress();
-  row.td().a("//" + httpAddress, httpAddress)._();
+  nodeTableData.append("\",\"").append(httpAddress).append("\",").append("\"");
 }
-row.td().br().$title(String.valueOf(info.getLastHealthUpdate()))._()
-._(Times.format(info.getLastHealthUpdate()))._()
-.td(info.getHealthReport())
-.td(String.valueOf(info.getNumContainers())).td().br()
-.$title(String.valueOf(usedMemory))._()
-._(StringUtils.byteDesc(usedMemory * BYTES_IN_MB))._().td().br()
-.$title(String.valueOf(availableMemory))._()
-._(StringUtils.byteDesc(availableMemory * BYTES_IN_MB))._()
-.td(String.valueOf(info.getUsedVirtualCores()))
-.td(String.valueOf(info.getAvailableVirtualCores()))
-.td(ni.getNodeManagerVersion())._();
+nodeTableData.append("")
+
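
The hunk is cut off here, but the pattern is clear: instead of building table
rows through Hamlet's typed DOM, the page accumulates one JavaScript array
literal that the DataTables widget renders client side, which is what cuts the
load time for large clusters. Roughly, each node becomes a quoted row (values
illustrative):

    StringBuilder nodeTableData = new StringBuilder("[\n");
    nodeTableData.append("[\"").append("gpu")         // node labels
        .append("\",\"").append("/default-rack")     // rack
        .append("\",\"").append("RUNNING")           // state
        .append("\",\"").append("host1:45454")       // node id
        .append("\"]");
    // ... one such row per RMNode, then close the array and hand it to JS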

[38/50] [abbrv] hadoop git commit: HDFS-10192. Namenode safemode not coming out during failover. Contributed by Brahma Reddy Battula.

2016-04-07 Thread aengineer
HDFS-10192. Namenode safemode not coming out during failover. Contributed by 
Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/221b3a87
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/221b3a87
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/221b3a87

Branch: refs/heads/HDFS-7240
Commit: 221b3a8722f84f8e9ad0a98eea38a12cc4ad2f24
Parents: de96d7c
Author: Jing Zhao 
Authored: Wed Apr 6 10:42:59 2016 -0700
Committer: Jing Zhao 
Committed: Wed Apr 6 10:42:59 2016 -0700

--
 .../server/blockmanagement/BlockManager.java|  2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  1 +
 .../TestBlockManagerSafeMode.java   | 14 +++-
 .../hdfs/server/namenode/ha/TestHASafeMode.java | 35 
 4 files changed, 50 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/221b3a87/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 66ab789..104d723 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1974,7 +1974,7 @@ public class BlockManager implements BlockStatsMXBean {
 return bmSafeMode.leaveSafeMode(force);
   }
 
-  void checkSafeMode() {
+  public void checkSafeMode() {
 bmSafeMode.checkSafeMode();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/221b3a87/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9ff4be6..681fc96 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1154,6 +1154,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   }
 } finally {
   startingActiveService = false;
+  blockManager.checkSafeMode();
   writeUnlock();
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/221b3a87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
index cb749c7..a347669 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
@@ -66,6 +66,7 @@ public class TestBlockManagerSafeMode {
   private static final long BLOCK_THRESHOLD = (long)(BLOCK_TOTAL * THRESHOLD);
   private static final int EXTENSION = 1000; // 1 second
 
+  private FSNamesystem fsn;
   private BlockManager bm;
   private DatanodeManager dn;
   private BlockManagerSafeMode bmSafeMode;
@@ -90,7 +91,7 @@ public class TestBlockManagerSafeMode {
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY,
 DATANODE_NUM);
 
-FSNamesystem fsn = mock(FSNamesystem.class);
+fsn = mock(FSNamesystem.class);
 doReturn(true).when(fsn).hasWriteLock();
 doReturn(true).when(fsn).hasReadLock();
 doReturn(true).when(fsn).isRunning();
@@ -163,6 +164,17 @@ public class TestBlockManagerSafeMode {
 setBlockSafe(BLOCK_THRESHOLD);
 bmSafeMode.checkSafeMode();
 assertEquals(BMSafeModeStatus.EXTENSION, getSafeModeStatus());
+
+// should stay in PENDING_THRESHOLD during transitionToActive
+doReturn(true).when(fsn).inTransitionToActive();
+Whitebox.setInternalState(bmSafeMode, "extension", 0);
+setSafeModeStatus(BMSafeModeStatus.PENDING_THRESHOLD);
+setBlockSafe(BLOCK_THRESHOLD);
+bmSafeMode.checkSafeMode();
+

[04/50] [abbrv] hadoop git commit: HADOOP-12950. ShutdownHookManager should have a timeout for each of the Registered shutdown hook. Contributed by Xiaoyu Yao.

2016-04-07 Thread aengineer
HADOOP-12950. ShutdownHookManager should have a timeout for each of the 
Registered shutdown hook. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aac4d65b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aac4d65b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aac4d65b

Branch: refs/heads/HDFS-7240
Commit: aac4d65bf9c6d68f53610e5fe9997a391e3fa053
Parents: 1963978
Author: Xiaoyu Yao 
Authored: Thu Mar 31 15:20:09 2016 -0700
Committer: Xiaoyu Yao 
Committed: Thu Mar 31 15:22:24 2016 -0700

--
 .../apache/hadoop/util/ShutdownHookManager.java | 116 +++
 .../hadoop/util/TestShutdownHookManager.java|  57 -
 2 files changed, 150 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aac4d65b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
index 843454b..33f942f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
@@ -17,8 +17,10 @@
  */
 package org.apache.hadoop.util;
 
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.concurrent.HadoopExecutors;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -26,6 +28,10 @@ import java.util.Comparator;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
@@ -42,7 +48,12 @@ public class ShutdownHookManager {
   private static final ShutdownHookManager MGR = new ShutdownHookManager();
 
   private static final Log LOG = LogFactory.getLog(ShutdownHookManager.class);
+  private static final long TIMEOUT_DEFAULT = 10;
+  private static final TimeUnit TIME_UNIT_DEFAULT = TimeUnit.SECONDS;
 
+  private static final ExecutorService EXECUTOR =
+  HadoopExecutors.newSingleThreadExecutor(new ThreadFactoryBuilder()
+  .setDaemon(true).build());
   static {
 try {
   Runtime.getRuntime().addShutdownHook(
@@ -50,14 +61,33 @@ public class ShutdownHookManager {
   @Override
   public void run() {
 MGR.shutdownInProgress.set(true);
-for (Runnable hook: MGR.getShutdownHooksInOrder()) {
+for (HookEntry entry: MGR.getShutdownHooksInOrder()) {
+  Future<?> future = EXECUTOR.submit(entry.getHook());
   try {
-hook.run();
+future.get(entry.getTimeout(), entry.getTimeUnit());
+  } catch (TimeoutException ex) {
+future.cancel(true);
+LOG.warn("ShutdownHook '" + entry.getHook().getClass().
+getSimpleName() + "' timeout, " + ex.toString(), ex);
   } catch (Throwable ex) {
-LOG.warn("ShutdownHook '" + hook.getClass().getSimpleName() +
- "' failed, " + ex.toString(), ex);
+LOG.warn("ShutdownHook '" + entry.getHook().getClass().
+getSimpleName() + "' failed, " + ex.toString(), ex);
   }
 }
+try {
+  EXECUTOR.shutdown();
+  if (!EXECUTOR.awaitTermination(TIMEOUT_DEFAULT,
+  TIME_UNIT_DEFAULT)) {
+LOG.error("ShutdownHookManger shutdown forcefully.");
+EXECUTOR.shutdownNow();
+  }
+  LOG.info("ShutdownHookManger complete shutdown.");
+} catch (InterruptedException ex) {
+  LOG.error("ShutdownHookManger interrupted while waiting for " +
+  "termination.", ex);
+  EXECUTOR.shutdownNow();
+  Thread.currentThread().interrupt();
+}
   }
 }
   );
@@ -77,15 +107,24 @@ public class ShutdownHookManager {
   }
 
   /**
-   * Private structure to store ShutdownHook and its priority.
+   * Private structure to store ShutdownHook, its priority and timeout
+   * settings.
*/
-  private static class HookEntry {
-Runnable hook;
-int priority;
+  static class HookEntry {
+private final 
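
The HookEntry fields are truncated above, but the registration side of the
change is worth showing. A minimal usage sketch, assuming this commit also adds
an addShutdownHook overload carrying the timeout (alongside the existing
priority-only variant):

    ShutdownHookManager.get().addShutdownHook(new Runnable() {
      @Override
      public void run() {
        // flush buffers, close clients, etc.
      }
    }, /* priority */ 10, /* timeout */ 30, TimeUnit.SECONDS);

    // A hook that exceeds 30 seconds is cancelled and logged as a timeout
    // instead of wedging JVM shutdown.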

[20/50] [abbrv] hadoop git commit: YARN-4706. UI Hosting Configuration in TimelineServer doc is broken. (Akira AJISAKA via gtcarrera9)

2016-04-07 Thread aengineer
YARN-4706. UI Hosting Configuration in TimelineServer doc is broken. (Akira 
AJISAKA via gtcarrera9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f61de417
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f61de417
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f61de417

Branch: refs/heads/HDFS-7240
Commit: f61de4173684aa1767cef20b3cb4d54df20273cd
Parents: a7d1fb0
Author: Li Lu 
Authored: Mon Apr 4 14:39:47 2016 -0700
Committer: Li Lu 
Committed: Mon Apr 4 14:40:27 2016 -0700

--
 .../hadoop-yarn-site/src/site/markdown/TimelineServer.md   | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f61de417/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
index 9283e58..f20bd2c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
@@ -56,6 +56,7 @@ With the introduction of the timeline server, the Application 
History Server bec
 the Timeline Server.
 
 Generic information includes application level data such as 
+
 * queue-name, 
 * user information and the like set in the `ApplicationSubmissionContext`,
 * a list of application-attempts that ran for an application
@@ -192,6 +193,7 @@ selected if this policy is `HTTPS_ONLY`.
  UI Hosting Configuration
 
 The timeline service can host multiple UIs if enabled. The service can support 
both static web sites hosted in a directory or war files bundled. The web UI is 
then hosted on the timeline service HTTP port under the path configured.
+
 | Configuration Property | Description |
 |: |: |
 | `yarn.timeline-service.ui-names` | Comma separated list of UIs that will be 
hosted. Defaults to `none`. |



[44/50] [abbrv] hadoop git commit: HDFS-10186. DirectoryScanner: Improve logs by adding full path of both actual and expected block directories. Contributed by Rakesh R

2016-04-07 Thread aengineer
HDFS-10186. DirectoryScanner: Improve logs by adding full path of both actual 
and expected block directories.  Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/654cd1d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/654cd1d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/654cd1d0

Branch: refs/heads/HDFS-7240
Commit: 654cd1d0c0427c23e73804fc9d87208f76bbf6aa
Parents: 4bd7cbc
Author: Tsz-Wo Nicholas Sze 
Authored: Thu Apr 7 12:36:29 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Thu Apr 7 12:38:42 2016 +0800

--
 .../hadoop/hdfs/server/datanode/DirectoryScanner.java   | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/654cd1d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 083ca31..0e51cec 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -902,8 +902,7 @@ public class DirectoryScanner implements Runnable {
 break;
   }
 }
-verifyFileLocation(blockFile.getParentFile(), bpFinalizedDir,
-blockId);
+verifyFileLocation(blockFile, bpFinalizedDir, blockId);
 report.add(new ScanInfo(blockId, blockFile, metaFile, vol));
   }
   return report;
@@ -913,12 +912,15 @@ public class DirectoryScanner implements Runnable {
  * Verify whether the actual directory location of block file has the
  * expected directory path computed using its block ID.
  */
-private void verifyFileLocation(File actualBlockDir,
+private void verifyFileLocation(File actualBlockFile,
 File bpFinalizedDir, long blockId) {
   File blockDir = DatanodeUtil.idToBlockDir(bpFinalizedDir, blockId);
-  if (actualBlockDir.compareTo(blockDir) != 0) {
+  if (actualBlockFile.getParentFile().compareTo(blockDir) != 0) {
+File expBlockFile = new File(blockDir, actualBlockFile.getName());
 LOG.warn("Block: " + blockId
-+ " has to be upgraded to block ID-based layout");
++ " has to be upgraded to block ID-based layout. "
++ "Actual block file path: " + actualBlockFile
++ ", expected block file path: " + expBlockFile);
   }
 }
 



[47/50] [abbrv] hadoop git commit: HDFS-9719. Refactoring ErasureCodingWorker into smaller reusable constructs. Contributed by Kai Zheng.

2016-04-07 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c18a53c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
new file mode 100644
index 000..a0a5f83
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
@@ -0,0 +1,273 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.erasurecode;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import 
org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.io.erasurecode.CodecUtil;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.DataChecksum;
+import org.slf4j.Logger;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.util.BitSet;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutorCompletionService;
+
+/**
+ * StripedReconstructor reconstruct one or more missed striped block in the
+ * striped block group, the minimum number of live striped blocks should be
+ * no less than data block number.
+ *
+ * | <- Striped Block Group -> |
+ *  blk_0  blk_1   blk_2(*)   blk_3   ...   <- A striped block group
+ *|  |   |  |
+ *v  v   v  v
+ * +--+   +--+   +--+   +--+
+ * |cell_0|   |cell_1|   |cell_2|   |cell_3|  ...
+ * +--+   +--+   +--+   +--+
+ * |cell_4|   |cell_5|   |cell_6|   |cell_7|  ...
+ * +--+   +--+   +--+   +--+
+ * |cell_8|   |cell_9|   |cell10|   |cell11|  ...
+ * +--+   +--+   +--+   +--+
+ *  ... ...   ... ...
+ *
+ *
+ * We use following steps to reconstruct striped block group, in each round, we
+ * reconstruct bufferSize data until finish, the
+ * bufferSize is configurable and may be less or larger than
+ * cell size:
+ * step1: read bufferSize data from minimum number of sources
+ *required by reconstruction.
+ * step2: decode data for targets.
+ * step3: transfer data to targets.
+ *
+ * In step1, try to read bufferSize data from minimum number
+ * of sources , if there is corrupt or stale sources, read from new source
+ * will be scheduled. The best sources are remembered for next round and
+ * may be updated in each round.
+ *
+ * In step2, typically if source blocks we read are all data blocks, we
+ * need to call encode, and if there is one parity block, we need to call
+ * decode. Notice we only read once and reconstruct all missed striped block
+ * if they are more than one.
+ *
+ * In step3, send the reconstructed data to targets by constructing packet
+ * and send them directly. Same as continuous block replication, we
+ * don't check the packet ack. Since the datanode doing the reconstruction work
+ * are one of the source datanodes, so the reconstructed data are sent
+ * remotely.
+ *
+ * There are some points we can do further improvements in next phase:
+ * 1. we can read the block file directly on the local datanode,
+ *currently we use remote block reader. (Notice short-circuit is not
+ *a good choice, see inline comments).
+ * 2. We need to check the packet ack for EC reconstruction? Since EC
+ *reconstruction is more expensive than 
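
The three steps in the class comment map onto a simple per-round loop. A
schematic sketch (all helpers hypothetical; only the decode call reflects the
imported RawErasureDecoder API):

    long pos = 0;
    while (pos < maxTargetLength) {
      ByteBuffer[] inputs = readFromMinimumSources(bufferSize);  // step 1
      ByteBuffer[] outputs = allocateTargetBuffers();
      decoder.decode(inputs, erasedIndexes, outputs);            // step 2
      transferToTargets(outputs);                                // step 3
      pos += bufferSize;
    }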

[08/50] [abbrv] hadoop git commit: YARN-4895. Add subtractFrom method to ResourceUtilization class. Contributed by Konstantinos Karanasos.

2016-04-07 Thread aengineer
YARN-4895. Add subtractFrom method to ResourceUtilization class. Contributed by 
Konstantinos Karanasos.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82621e38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82621e38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82621e38

Branch: refs/heads/HDFS-7240
Commit: 82621e38a0445832998bc00693279e23a98605c1
Parents: 256c82f
Author: Arun Suresh 
Authored: Fri Apr 1 14:57:06 2016 -0700
Committer: Arun Suresh 
Committed: Fri Apr 1 14:57:06 2016 -0700

--
 .../yarn/api/records/ResourceUtilization.java   | 22 
 1 file changed, 22 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82621e38/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
index 5f52f85..2ae4872 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
@@ -44,6 +44,14 @@ public abstract class ResourceUtilization implements Comparable<ResourceUtilization>
 return utilization;
   }
 
+  @Public
+  @Unstable
+  public static ResourceUtilization newInstance(
+  ResourceUtilization resourceUtil) {
+return newInstance(resourceUtil.getPhysicalMemory(),
+resourceUtil.getVirtualMemory(), resourceUtil.getCPU());
+  }
+
   /**
* Get used virtual memory.
*
@@ -147,4 +155,18 @@ public abstract class ResourceUtilization implements Comparable<ResourceUtilization>
 this.setVirtualMemory(this.getVirtualMemory() + vmem);
 this.setCPU(this.getCPU() + cpu);
   }
+
+  /**
+   * Subtract utilization from the current one.
+   * @param pmem Physical memory to be subtracted.
+   * @param vmem Virtual memory to be subtracted.
+   * @param cpu CPU utilization to be subtracted.
+   */
+  @Public
+  @Unstable
+  public void subtractFrom(int pmem, int vmem, float cpu) {
+this.setPhysicalMemory(this.getPhysicalMemory() - pmem);
+this.setVirtualMemory(this.getVirtualMemory() - vmem);
+this.setCPU(this.getCPU() - cpu);
+  }
 }
\ No newline at end of file
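
Together with the existing addTo, the new copy-style newInstance and
subtractFrom make utilization tracking symmetric. A small usage sketch:

    ResourceUtilization nodeUtil =
        ResourceUtilization.newInstance(4096, 8192, 0.50f);
    ResourceUtilization snapshot = ResourceUtilization.newInstance(nodeUtil);
    nodeUtil.addTo(1024, 2048, 0.25f);        // container allocated
    nodeUtil.subtractFrom(1024, 2048, 0.25f); // container released
    // nodeUtil now matches snapshot again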



[15/50] [abbrv] hadoop git commit: YARN-4746. yarn web services should convert parse failures of appId, appAttemptId and containerId to 400. Contributed by Bibin A Chundatt

2016-04-07 Thread aengineer
YARN-4746. yarn web services should convert parse failures of appId, 
appAttemptId and containerId to 400. Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5092c941
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5092c941
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5092c941

Branch: refs/heads/HDFS-7240
Commit: 5092c94195a63bd2c3e36d5a74b4c061cea1b847
Parents: da614ca
Author: naganarasimha 
Authored: Mon Apr 4 16:25:03 2016 +0530
Committer: naganarasimha 
Committed: Mon Apr 4 16:25:03 2016 +0530

--
 .../apache/hadoop/yarn/util/ConverterUtils.java | 16 --
 .../hadoop/yarn/webapp/util/WebAppUtils.java| 22 ++
 .../hadoop/yarn/server/webapp/WebServices.java  | 22 +++---
 .../nodemanager/webapp/NMWebServices.java   |  6 ++--
 .../webapp/TestNMWebServicesApps.java   |  9 --
 .../resourcemanager/webapp/RMWebServices.java   | 32 ++--
 .../webapp/TestRMWebServicesApps.java   | 24 +--
 .../TestRMWebServicesAppsModification.java  | 10 --
 8 files changed, 87 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5092c941/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
index e9674cf..acd29fb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
@@ -122,8 +122,20 @@ public class ConverterUtils {
   public static ApplicationId toApplicationId(RecordFactory recordFactory,
   String appIdStr) {
Iterator<String> it = _split(appIdStr).iterator();
-it.next(); // prefix. TODO: Validate application prefix
-return toApplicationId(recordFactory, it);
+if (!it.next().equals(APPLICATION_PREFIX)) {
+  throw new IllegalArgumentException("Invalid ApplicationId prefix: "
+  + appIdStr + ". The valid ApplicationId should start with prefix "
+  + APPLICATION_PREFIX);
+}
+try {
+  return toApplicationId(recordFactory, it);
+} catch (NumberFormatException n) {
+  throw new IllegalArgumentException("Invalid ApplicationId: " + appIdStr,
+  n);
+} catch (NoSuchElementException e) {
+  throw new IllegalArgumentException("Invalid ApplicationId: " + appIdStr,
+  e);
+}
   }
 
   private static ApplicationId toApplicationId(RecordFactory recordFactory,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5092c941/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index f8e67ee..faf4a77 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -33,9 +33,14 @@ import org.apache.hadoop.http.HttpConfig.Policy;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.RMHAUtils;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+import org.apache.hadoop.yarn.webapp.NotFoundException;
 
 @Private
 @Evolving
@@ -378,4 +383,21 @@ public class WebAppUtils {
 }
 return password;
   }
+
+  public static ApplicationId parseApplicationId(RecordFactory recordFactory,
+  String appId) {
+if (appId == null || appId.isEmpty()) {
+  throw new NotFoundException("appId, " + appId + ", is empty or null");
+}
+ApplicationId aid = null;
+try {
+  aid = 
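
parseApplicationId is truncated here, but the end-to-end effect is the point of
the patch: a malformed ID now surfaces as IllegalArgumentException from
ConverterUtils, which the web layer converts to BadRequestException (HTTP 400)
rather than leaking a 500. A rough sketch of that mapping:

    try {
      aid = ConverterUtils.toApplicationId(recordFactory, appId);
    } catch (IllegalArgumentException e) {
      // yarn web services now answer 400 for unparseable IDs
      throw new BadRequestException(e.getMessage());
    }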

[21/50] [abbrv] hadoop git commit: HADOOP-12959. Add additional github web site for ISA-L library (Li Bo via cmccabe)

2016-04-07 Thread aengineer
HADOOP-12959. Add additional github web site for ISA-L library (Li Bo via 
cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f65f5b18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f65f5b18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f65f5b18

Branch: refs/heads/HDFS-7240
Commit: f65f5b18fd4647e868b8d2a2c035a3b64dc16aa8
Parents: f61de41
Author: Colin Patrick Mccabe 
Authored: Mon Apr 4 16:30:32 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Mon Apr 4 16:30:32 2016 -0700

--
 BUILDING.txt | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f65f5b18/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 408cae1..c7a91da 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -75,6 +75,7 @@ Optional packages:
   $ sudo apt-get install snappy libsnappy-dev
 * Intel ISA-L library for erasure coding
   Please refer to 
https://01.org/intel%C2%AE-storage-acceleration-library-open-source-version
+  (OR https://github.com/01org/isa-l)
 * Bzip2
   $ sudo apt-get install bzip2 libbz2-dev
 * Jansson (C Library for JSON)
@@ -188,11 +189,12 @@ Maven build goals:
 
  Intel ISA-L build options:
 
-   Intel ISA-L is a erasure coding library that can be utilized by the native 
code.
+   Intel ISA-L is an erasure coding library that can be utilized by the native 
code.
It is currently an optional component, meaning that Hadoop can be built with
or without this dependency. Note the library is used via dynamic module. 
Please
reference the official site for the library details.
https://01.org/intel%C2%AE-storage-acceleration-library-open-source-version
+   (OR https://github.com/01org/isa-l)
 
   * Use -Drequire.isal to fail the build if libisal.so is not found.
 If this option is not specified and the isal library is missing,



[2/2] hadoop git commit: HDFS-10268. Ozone: end-to-end integration for create/get volumes, buckets and keys. Contributed by Chris Nauroth.

2016-04-07 Thread aengineer
HDFS-10268. Ozone: end-to-end integration for create/get volumes, buckets and 
keys. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fedb22d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fedb22d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fedb22d9

Branch: refs/heads/HDFS-7240
Commit: fedb22d9b642da94f6cd3fb79239924708ec34eb
Parents: b3044db
Author: Anu Engineer 
Authored: Thu Apr 7 14:38:54 2016 -0700
Committer: Anu Engineer 
Committed: Thu Apr 7 14:38:54 2016 -0700

--
 .../hadoop/hdfs/server/datanode/DataNode.java   |  25 +-
 .../server/datanode/ObjectStoreHandler.java |  60 -
 .../org/apache/hadoop/ozone/OzoneConsts.java|   2 +
 .../container/common/helpers/ChunkUtils.java|   3 +-
 .../ozone/container/common/impl/Dispatcher.java |   6 +-
 .../common/transport/client/XceiverClient.java  |  14 +-
 .../transport/client/XceiverClientHandler.java  |   2 +-
 .../transport/client/XceiverClientManager.java  |  83 ++
 .../common/transport/client/package-info.java   |  24 ++
 .../common/transport/server/package-info.java   |  24 ++
 .../ozone/storage/StorageContainerManager.java  | 110 +++-
 .../hadoop/ozone/web/client/OzoneBucket.java|   9 +-
 .../ozone/web/exceptions/OzoneException.java|   2 +-
 .../hadoop/ozone/web/request/OzoneQuota.java|  10 +
 .../ozone/web/storage/ChunkInputStream.java | 193 ++
 .../ozone/web/storage/ChunkOutputStream.java| 193 ++
 .../web/storage/ContainerProtocolCalls.java | 198 ++
 .../web/storage/DistributedStorageHandler.java  | 266 +++
 .../web/storage/OzoneContainerTranslation.java  | 261 ++
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  71 +++--
 .../ozone/web/TestOzoneRestWithMiniCluster.java | 253 ++
 21 files changed, 1695 insertions(+), 114 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fedb22d9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index a5d5015..ff71653 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1846,16 +1846,6 @@ public class DataNode extends ReconfigurableBase
   public void shutdown() {
 stopMetricsLogger();
 
-if(this.ozoneEnabled) {
-  if(ozoneServer != null) {
-try {
-  ozoneServer.stop();
-} catch (Exception e) {
-  LOG.error("Error is ozone shutdown. ex {}", e.toString());
-}
-  }
-}
-
 if (plugins != null) {
   for (ServicePlugin p : plugins) {
 try {
@@ -1914,6 +1904,21 @@ public class DataNode extends ReconfigurableBase
   }
 }
 
+// Stop the object store handler
+if (this.objectStoreHandler != null) {
+  this.objectStoreHandler.close();
+}
+
+if(this.ozoneEnabled) {
+  if(ozoneServer != null) {
+try {
+  ozoneServer.stop();
+} catch (Exception e) {
+  LOG.error("Error is ozone shutdown. ex {}", e.toString());
+}
+  }
+}
+
 if (pauseMonitor != null) {
   pauseMonitor.stop();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fedb22d9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
index 6413ac0..b8c6a13 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
@@ -17,36 +17,58 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static 
com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS;
-import static com.sun.jersey.api.core.ResourceConfig.FEATURE_TRACE;
-import static 
org.apache.hadoop.ozone.OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_DEFAULT;
-import static 
org.apache.hadoop.ozone.OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY;
 import 

[1/2] hadoop git commit: HDFS-10268. Ozone: end-to-end integration for create/get volumes, buckets and keys. Contributed by Chris Nauroth.

2016-04-07 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 b3044db40 -> fedb22d9b


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fedb22d9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index 218058c..da6d9c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -23,6 +23,10 @@ import static 
org.apache.hadoop.ozone.OzoneConfigKeys.DFS_STORAGE_RPC_ADDRESS_KE
 import java.io.Closeable;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URISyntaxException;
+import java.util.Random;
+
+import com.google.common.base.Preconditions;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -37,6 +41,8 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.protocolPB.StorageContainerLocationProtocolPB;
 import 
org.apache.hadoop.ozone.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.storage.StorageContainerManager;
+import org.apache.hadoop.ozone.web.client.OzoneClient;
+import org.apache.hadoop.ozone.web.exceptions.OzoneException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 
@@ -53,6 +59,8 @@ public class MiniOzoneCluster extends MiniDFSCluster 
implements Closeable {
   private static final Logger LOG =
   LoggerFactory.getLogger(MiniOzoneCluster.class);
 
+  private static final String USER_AUTH = "hdfs";
+
   private final OzoneConfiguration conf;
   private final StorageContainerManager scm;
 
@@ -126,24 +134,26 @@ public class MiniOzoneCluster extends MiniDFSCluster 
implements Closeable {
   }
 
   /**
-   * Waits for the Ozone cluster to be ready for processing requests.
+   * Creates an {@link OzoneClient} connected to this cluster's REST service.
+   * Callers take ownership of the client and must close it when done.
+   *
+   * @return OzoneClient connected to this cluster's REST service
+   * @throws OzoneException if Ozone encounters an error creating the client
*/
-  public void waitOzoneReady() {
-long begin = Time.monotonicNow();
-while (scm.getDatanodeReport(DatanodeReportType.LIVE).length <
-numDataNodes) {
-  if (Time.monotonicNow() - begin > 2) {
-throw new IllegalStateException(
-"Timed out waiting for Ozone cluster to become ready.");
-  }
-  LOG.info("Waiting for Ozone cluster to become ready");
-  try {
-Thread.sleep(100);
-  } catch (InterruptedException e) {
-Thread.currentThread().interrupt();
-throw new IllegalStateException(
-"Interrupted while waiting for Ozone cluster to become ready.");
-  }
+  public OzoneClient createOzoneClient() throws OzoneException {
+Preconditions.checkState(!getDataNodes().isEmpty(),
+"Cannot create OzoneClient if the cluster has no DataNodes.");
+// An Ozone request may originate at any DataNode, so pick one at random.
+int dnIndex = new Random().nextInt(getDataNodes().size());
+String uri = String.format("http://127.0.0.1:%d",
+getDataNodes().get(dnIndex).getInfoPort());
+LOG.info("Creating Ozone client to DataNode {} with URI {} and user {}",
+dnIndex, uri, USER_AUTH);
+try {
+  return new OzoneClient(uri, USER_AUTH);
+} catch (URISyntaxException e) {
+  // We control the REST service URI, so it should never be invalid.
+  throw new IllegalStateException("Unexpected URISyntaxException", e);
 }
   }
 
@@ -155,14 +165,39 @@ public class MiniOzoneCluster extends MiniDFSCluster 
implements Closeable {
* @return RPC proxy for accessing container location information
* @throws IOException if there is an I/O error
*/
-  protected StorageContainerLocationProtocolClientSideTranslatorPB
+  public StorageContainerLocationProtocolClientSideTranslatorPB
   createStorageContainerLocationClient() throws IOException {
 long version = RPC.getProtocolVersion(
 StorageContainerLocationProtocolPB.class);
 InetSocketAddress address = scm.getStorageContainerLocationRpcAddress();
+LOG.info(
+"Creating StorageContainerLocationProtocol RPC client with address {}",
+address);
 return new StorageContainerLocationProtocolClientSideTranslatorPB(
 RPC.getProxy(StorageContainerLocationProtocolPB.class, version,
 address, UserGroupInformation.getCurrentUser(), conf,
 NetUtils.getDefaultSocketFactory(conf), Client.getTimeout(conf)));
   }
+
+  /**
+   * Waits for the Ozone cluster to be ready for processing 
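
With createOzoneClient public, a test can drive the REST surface end to end. A
hedged usage sketch (cluster construction elided; the javadoc's ownership note
implies the caller must close the client):

    static void smokeTest(MiniOzoneCluster cluster) throws Exception {
      OzoneClient client = cluster.createOzoneClient();
      try {
        // create a volume, bucket and key through the REST handler here
      } finally {
        client.close(); // callers take ownership and must close
      }
    }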

hadoop git commit: YARN-3461. Consolidate flow name/version/run defaults. (Sangjin Lee via Varun Saxena)

2016-04-07 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 f746c80b3 -> fb0acd08e


YARN-3461. Consolidate flow name/version/run defaults. (Sangjin Lee via Varun 
Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb0acd08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb0acd08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb0acd08

Branch: refs/heads/YARN-2928
Commit: fb0acd08e6f0b030d82eeb7cbfa5404376313e60
Parents: f746c80
Author: Varun Saxena 
Authored: Thu Apr 7 22:10:11 2016 +0530
Committer: Varun Saxena 
Committed: Thu Apr 7 22:10:11 2016 +0530

--
 .../mapred/TestMRTimelineEventHandling.java | 46 +++---
 hadoop-yarn-project/CHANGES.txt |  2 +
 .../distributedshell/TestDistributedShell.java  | 18 --
 .../yarn/util/timeline/TimelineUtils.java   |  8 ++-
 .../resourcemanager/amlauncher/AMLauncher.java  | 67 +++-
 .../RMTimelineCollectorManager.java | 36 +--
 .../TestSystemMetricsPublisherForV2.java| 20 +++---
 .../collector/AppLevelTimelineCollector.java| 11 +---
 .../collector/NodeTimelineCollectorManager.java | 12 
 .../collector/TimelineCollectorContext.java |  5 +-
 10 files changed, 150 insertions(+), 75 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb0acd08/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
index a9bbdf5..0481b35 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
@@ -20,15 +20,12 @@ package org.apache.hadoop.mapred;
 
 import java.io.File;
 import java.io.IOException;
-
 import java.util.EnumSet;
 import java.util.List;
-import java.util.Set;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.MRJobConfig;
@@ -38,9 +35,9 @@ import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
@@ -48,7 +45,6 @@ import org.apache.hadoop.yarn.server.timeline.TimelineStore;
 import 
org.apache.hadoop.yarn.server.timelineservice.collector.PerNodeTimelineCollectorsAuxService;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineWriterImpl;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
-
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -204,7 +200,7 @@ public class TestMRTimelineEventHandling {
   ApplicationReport appReport = apps.get(0);
   firstAppId = appReport.getApplicationId();
 
-  checkNewTimelineEvent(firstAppId);
+  checkNewTimelineEvent(firstAppId, appReport);
 
   LOG.info("Run 2nd job which should be failed.");
   job = UtilsForTests.runJobFail(new JobConf(conf), inDir, outDir);
@@ -213,11 +209,10 @@ public class TestMRTimelineEventHandling {
   
   apps = yarnClient.getApplications(appStates);
   Assert.assertEquals(apps.size(), 2);
-  
-  ApplicationId secAppId = null;
-  secAppId = apps.get(0).getApplicationId() == firstAppId ? 
-  apps.get(1).getApplicationId() : apps.get(0).getApplicationId();
-  checkNewTimelineEvent(firstAppId);
+
+  appReport = apps.get(0).getApplicationId().equals(firstAppId) ?
+  apps.get(0) : apps.get(1);
+  checkNewTimelineEvent(firstAppId, appReport);
 
 } finally {
   if 
