[2/2] hadoop git commit: HDDS-13. Refactor StorageContainerManager into separate RPC endpoints. Contributed by Anu Engineer.

2018-04-30 Thread aengineer
HDDS-13. Refactor StorageContainerManager into separate RPC endpoints. 
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f0c3dc4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f0c3dc4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f0c3dc4c

Branch: refs/heads/trunk
Commit: f0c3dc4cf40575497ca6f29c037e43fa50e0ffdd
Parents: 2d319e3
Author: Anu Engineer 
Authored: Mon Apr 30 21:41:10 2018 -0700
Committer: Anu Engineer 
Committed: Mon Apr 30 21:41:10 2018 -0700

--
 .../org/apache/hadoop/hdds/scm/SCMMXBean.java   |   50 -
 .../org/apache/hadoop/hdds/scm/SCMStorage.java  |   73 -
 .../hdds/scm/StorageContainerManager.java   | 1290 --
 .../scm/StorageContainerManagerHttpServer.java  |   76 --
 .../hadoop/hdds/scm/node/SCMNodeManager.java|5 +-
 .../hdds/scm/server/SCMBlockProtocolServer.java |  222 +++
 .../scm/server/SCMClientProtocolServer.java |  314 +
 .../scm/server/SCMDatanodeProtocolServer.java   |  350 +
 .../hadoop/hdds/scm/server/SCMMXBean.java   |   50 +
 .../hadoop/hdds/scm/server/SCMStorage.java  |   73 +
 .../scm/server/StorageContainerManager.java |  722 ++
 .../StorageContainerManagerHttpServer.java  |   77 ++
 .../hadoop/hdds/scm/server/package-info.java|   22 +
 .../TestStorageContainerManagerHttpServer.java  |7 +-
 hadoop-ozone/common/src/main/bin/ozone  |2 +-
 .../container/TestContainerStateManager.java|   29 +-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |   34 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |   10 +-
 .../ozone/TestStorageContainerManager.java  |   20 +-
 .../TestStorageContainerManagerHelper.java  |2 +-
 .../ozone/ksm/TestContainerReportWithKeys.java  |2 +-
 .../hadoop/ozone/ksm/TestKeySpaceManager.java   |8 +-
 .../org/apache/hadoop/ozone/scm/TestSCMCli.java |   13 +-
 .../apache/hadoop/ozone/scm/TestSCMMXBean.java  |2 +-
 .../apache/hadoop/ozone/scm/TestSCMMetrics.java |   16 +-
 25 files changed, 1912 insertions(+), 1557 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMMXBean.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMMXBean.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMMXBean.java
deleted file mode 100644
index 17b6814..0000000
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMMXBean.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.server.ServiceRuntimeInfo;
-
-import java.util.Map;
-
-/**
- *
- * This is the JMX management interface for scm information.
- */
-@InterfaceAudience.Private
-public interface SCMMXBean extends ServiceRuntimeInfo {
-
-  /**
-   * Get the SCM RPC server port that used to listen to datanode requests.
-   * @return SCM datanode RPC server port
-   */
-  String getDatanodeRpcPort();
-
-  /**
-   * Get the SCM RPC server port that used to listen to client requests.
-   * @return SCM client RPC server port
-   */
-  String getClientRpcPort();
-
-  /**
-   * Get container report info that includes container IO stats of nodes.
-   * @return The datanodeUUid to report json string mapping
-   */
-  Map getContainerReport();
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMStorage.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMStorage.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMStorage.java
deleted file mode 100644
index 27e9363..0000000
--- 
a/hadoop-hdds/server-scm/src/mai

[1/2] hadoop git commit: HDDS-13. Refactor StorageContainerManager into separate RPC endpoints. Contributed by Anu Engineer.

2018-04-30 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2d319e379 -> f0c3dc4cf


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0c3dc4c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
new file mode 100644
index 0000000..e42b887
--- /dev/null
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -0,0 +1,350 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
+ * information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache
+ * License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.server;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.BlockingService;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SendContainerReportProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMReregisterCmdResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult;
+
+
+import static 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.versionCommand;
+import static 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.registeredCommand;
+import static 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.sendContainerReport;
+import static 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.reregisterCommand;
+import static 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.deleteBlocksCommand;
+import static 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.closeContainerCommand;
+
+
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
+import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
+import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
+import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
+import org.apache.hadoop.ozone.protocolPB
+.StorageContainerDatanodeProtocolServerSideTranslatorPB

hadoop git commit: YARN-8212. Pending backlog for async allocation threads should be configurable. Contributed by Tao Yang.

2018-04-30 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 0e31e014f -> f12c78120


YARN-8212. Pending backlog for async allocation threads should be configurable. 
Contributed by Tao Yang.

(Cherry picked from commit 2d319e37937c1e20c6a7dc4477ef88defd1f8464)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f12c7812
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f12c7812
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f12c7812

Branch: refs/heads/branch-3.0
Commit: f12c78120edf8679407a43857f5caa310bc556ca
Parents: 0e31e01
Author: Weiwei Yang 
Authored: Tue May 1 09:47:10 2018 +0800
Committer: Weiwei Yang 
Committed: Tue May 1 10:19:08 2018 +0800

--
 .../scheduler/capacity/CapacityScheduler.java   | 9 -
 .../scheduler/capacity/CapacitySchedulerConfiguration.java  | 8 
 2 files changed, 16 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f12c7812/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 5a29e5d..3587f30 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -245,6 +245,7 @@ public class CapacityScheduler extends
   CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_PREFIX
   + ".scheduling-interval-ms";
   private static final long DEFAULT_ASYNC_SCHEDULER_INTERVAL = 5;
+  private long asyncMaxPendingBacklogs;
 
   public CapacityScheduler() {
 super(CapacityScheduler.class.getName());
@@ -354,6 +355,11 @@ public class CapacityScheduler extends
   asyncSchedulerThreads.add(new AsyncScheduleThread(this));
 }
 resourceCommitterService = new ResourceCommitterService(this);
+asyncMaxPendingBacklogs = this.conf.getInt(
+CapacitySchedulerConfiguration.
+SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS,
+CapacitySchedulerConfiguration.
+DEFAULT_SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS);
   }
 
   // Setup how many containers we can allocate for each round
@@ -541,7 +547,8 @@ public class CapacityScheduler extends
 Thread.sleep(100);
   } else {
 // Don't run schedule if we have some pending backlogs already
-if (cs.getAsyncSchedulingPendingBacklogs() > 100) {
+if (cs.getAsyncSchedulingPendingBacklogs()
+> cs.asyncMaxPendingBacklogs) {
   Thread.sleep(1);
 } else{
   schedule(cs);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f12c7812/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 1e22e0b..c05b948 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -240,6 +240,14 @@ public class CapacitySchedulerConfiguration extends 
ReservationSchedulerConfigur
   SCHEDULE_ASYNCHRONOUSLY_PREFIX + ".maximum-threads";
 
   @Private
+  public static final St

hadoop git commit: YARN-8212. Pending backlog for async allocation threads should be configurable. Contributed by Tao Yang.

2018-04-30 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 0fc55a8f5 -> f34f4e9c1


YARN-8212. Pending backlog for async allocation threads should be configurable. 
Contributed by Tao Yang.

(cherry picked from commit 2d319e37937c1e20c6a7dc4477ef88defd1f8464)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f34f4e9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f34f4e9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f34f4e9c

Branch: refs/heads/branch-3.1
Commit: f34f4e9c117a4b4bf72c9c603c61d442e2b714fc
Parents: 0fc55a8
Author: Weiwei Yang 
Authored: Tue May 1 09:47:10 2018 +0800
Committer: Weiwei Yang 
Committed: Tue May 1 09:57:15 2018 +0800

--
 .../scheduler/capacity/CapacityScheduler.java   | 9 -
 .../scheduler/capacity/CapacitySchedulerConfiguration.java  | 8 
 2 files changed, 16 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f34f4e9c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 05ab318..e6e31a6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -260,6 +260,7 @@ public class CapacityScheduler extends
   CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_PREFIX
   + ".scheduling-interval-ms";
   private static final long DEFAULT_ASYNC_SCHEDULER_INTERVAL = 5;
+  private long asyncMaxPendingBacklogs;
 
   public CapacityScheduler() {
 super(CapacityScheduler.class.getName());
@@ -377,6 +378,11 @@ public class CapacityScheduler extends
   asyncSchedulerThreads.add(new AsyncScheduleThread(this));
 }
 resourceCommitterService = new ResourceCommitterService(this);
+asyncMaxPendingBacklogs = this.conf.getInt(
+CapacitySchedulerConfiguration.
+SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS,
+CapacitySchedulerConfiguration.
+DEFAULT_SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS);
   }
 
   // Setup how many containers we can allocate for each round
@@ -571,7 +577,8 @@ public class CapacityScheduler extends
 Thread.sleep(100);
   } else {
 // Don't run schedule if we have some pending backlogs already
-if (cs.getAsyncSchedulingPendingBacklogs() > 100) {
+if (cs.getAsyncSchedulingPendingBacklogs()
+> cs.asyncMaxPendingBacklogs) {
   Thread.sleep(1);
 } else{
   schedule(cs);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f34f4e9c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 37ca016..1fda5a9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -245,6 +245,14 @@ public class CapacitySchedulerConfiguration extends 
ReservationSchedulerConfigur
   SCHEDULE_ASYNCHRONOUSLY_PREFIX + ".maximum-threads";
 
   @Private
+  public static final St

hadoop git commit: YARN-8212. Pending backlog for async allocation threads should be configurable. Contributed by Tao Yang.

2018-04-30 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/trunk a966ec6e2 -> 2d319e379


YARN-8212. Pending backlog for async allocation threads should be configurable. 
Contributed by Tao Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d319e37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d319e37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d319e37

Branch: refs/heads/trunk
Commit: 2d319e37937c1e20c6a7dc4477ef88defd1f8464
Parents: a966ec6
Author: Weiwei Yang 
Authored: Tue May 1 09:47:10 2018 +0800
Committer: Weiwei Yang 
Committed: Tue May 1 09:47:10 2018 +0800

--
 .../scheduler/capacity/CapacityScheduler.java   | 9 -
 .../scheduler/capacity/CapacitySchedulerConfiguration.java  | 8 
 2 files changed, 16 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d319e37/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 776e512..1d6c104 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -261,6 +261,7 @@ public class CapacityScheduler extends
   CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_PREFIX
   + ".scheduling-interval-ms";
   private static final long DEFAULT_ASYNC_SCHEDULER_INTERVAL = 5;
+  private long asyncMaxPendingBacklogs;
 
   public CapacityScheduler() {
 super(CapacityScheduler.class.getName());
@@ -379,6 +380,11 @@ public class CapacityScheduler extends
   asyncSchedulerThreads.add(new AsyncScheduleThread(this));
 }
 resourceCommitterService = new ResourceCommitterService(this);
+asyncMaxPendingBacklogs = this.conf.getInt(
+CapacitySchedulerConfiguration.
+SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS,
+CapacitySchedulerConfiguration.
+DEFAULT_SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS);
   }
 
   // Setup how many containers we can allocate for each round
@@ -573,7 +579,8 @@ public class CapacityScheduler extends
 Thread.sleep(100);
   } else {
 // Don't run schedule if we have some pending backlogs already
-if (cs.getAsyncSchedulingPendingBacklogs() > 100) {
+if (cs.getAsyncSchedulingPendingBacklogs()
+> cs.asyncMaxPendingBacklogs) {
   Thread.sleep(1);
 } else{
   schedule(cs);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d319e37/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index c41bd96..76eaac0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -246,6 +246,14 @@ public class CapacitySchedulerConfiguration extends 
ReservationSchedulerConfigur
   SCHEDULE_ASYNCHRONOUSLY_PREFIX + ".maximum-threads";
 
   @Private
+  public static final String SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS =
+  SCHEDULE_ASYNCHRO

hadoop git commit: YARN-8228. Added hostname length check for docker container. Contributed by Shane Kumpf

2018-04-30 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 4c13e7e3a -> 0fc55a8f5


YARN-8228.  Added hostname length check for docker container.
Contributed by Shane Kumpf

(cherry picked from commit a966ec6e23b3ac8e233b2cf9b9ddaa6628a8c996)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0fc55a8f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0fc55a8f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0fc55a8f

Branch: refs/heads/branch-3.1
Commit: 0fc55a8f57ddfbe1191992d9bb2bb4e0e917cd48
Parents: 4c13e7e
Author: Eric Yang 
Authored: Mon Apr 30 19:12:53 2018 -0400
Committer: Eric Yang 
Committed: Mon Apr 30 19:14:37 2018 -0400

--
 .../linux/runtime/DockerLinuxContainerRuntime.java |  6 ++
 .../linux/runtime/TestDockerContainerRuntime.java  | 13 +
 2 files changed, 19 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fc55a8f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 4244946..dc63d2f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -197,6 +197,7 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   HOSTNAME_PATTERN);
   private static final Pattern USER_MOUNT_PATTERN = Pattern.compile(
   "(?<=^|,)([^:\\x00]+):([^:\\x00]+):([a-z]+)");
+  private static final int HOST_NAME_LENGTH = 64;
 
   @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_IMAGE =
@@ -534,6 +535,11 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 throw new ContainerExecutionException("Hostname '" + hostname
 + "' doesn't match docker hostname pattern");
   }
+  if (hostname.length() > HOST_NAME_LENGTH) {
+throw new ContainerExecutionException(
+"Hostname can not be greater than " + HOST_NAME_LENGTH
++ " characters: " + hostname);
+  }
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fc55a8f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index 70d0d50..0db2b56 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -1529,6 +1529,19 @@ public class TestDockerContainerRuntime {
 }
   }
 
+  @Test
+  public void testValidDockerHostnameLength() throws Exception {
+String validLength = "example.test.site";
+DockerLinuxContainerRuntime.validateHostname(validLength);
+  }
+
+  @Test(expected = ContainerExecutionException.class)
+  public void testInvalidDockerHostnameLength() throws Exception {
+String invalidLength =
+"exampleexampleexampleexampleexampleexampleexampleexample.test.site";
+DockerLinuxContainerRuntime.validateHostname(invalidLength);
+  }
+
   @SuppressWarnings("unchecked")
   private void checkVolumeCreateCommand()
   throws Pr

hadoop git commit: YARN-8228. Added hostname length check for docker container. Contributed by Shane Kumpf

2018-04-30 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 919865a34 -> a966ec6e2


YARN-8228.  Added hostname length check for docker container.
Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a966ec6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a966ec6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a966ec6e

Branch: refs/heads/trunk
Commit: a966ec6e23b3ac8e233b2cf9b9ddaa6628a8c996
Parents: 919865a
Author: Eric Yang 
Authored: Mon Apr 30 19:12:53 2018 -0400
Committer: Eric Yang 
Committed: Mon Apr 30 19:12:53 2018 -0400

--
 .../linux/runtime/DockerLinuxContainerRuntime.java |  6 ++
 .../linux/runtime/TestDockerContainerRuntime.java  | 13 +
 2 files changed, 19 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a966ec6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 999b343..9c05c59 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -199,6 +199,7 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   HOSTNAME_PATTERN);
   private static final Pattern USER_MOUNT_PATTERN = Pattern.compile(
   "(?<=^|,)([^:\\x00]+):([^:\\x00]+):([a-z]+)");
+  private static final int HOST_NAME_LENGTH = 64;
 
   @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_IMAGE =
@@ -541,6 +542,11 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 throw new ContainerExecutionException("Hostname '" + hostname
 + "' doesn't match docker hostname pattern");
   }
+  if (hostname.length() > HOST_NAME_LENGTH) {
+throw new ContainerExecutionException(
+"Hostname can not be greater than " + HOST_NAME_LENGTH
++ " characters: " + hostname);
+  }
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a966ec6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index a333bac..6ad35b2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -1539,6 +1539,19 @@ public class TestDockerContainerRuntime {
 }
   }
 
+  @Test
+  public void testValidDockerHostnameLength() throws Exception {
+String validLength = "example.test.site";
+DockerLinuxContainerRuntime.validateHostname(validLength);
+  }
+
+  @Test(expected = ContainerExecutionException.class)
+  public void testInvalidDockerHostnameLength() throws Exception {
+String invalidLength =
+"exampleexampleexampleexampleexampleexampleexampleexample.test.site";
+DockerLinuxContainerRuntime.validateHostname(invalidLength);
+  }
+
   @SuppressWarnings("unchecked")
   private void checkVolumeCreateCommand()
   throws PrivilegedOperationException, IOException {



hadoop git commit: HADOOP-15239 S3ABlockOutputStream.flush() be no-op when stream closed. Contributed by Gabor Bota.

2018-04-30 Thread fabbri
Repository: hadoop
Updated Branches:
  refs/heads/trunk fc074a359 -> 919865a34


HADOOP-15239 S3ABlockOutputStream.flush() be no-op when stream closed.  
Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/919865a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/919865a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/919865a3

Branch: refs/heads/trunk
Commit: 919865a34bd5c3c99603993a0410846a97975869
Parents: fc074a3
Author: Aaron Fabbri 
Authored: Mon Apr 30 16:02:57 2018 -0700
Committer: Aaron Fabbri 
Committed: Mon Apr 30 16:02:57 2018 -0700

--
 .../hadoop/fs/s3a/S3ABlockOutputStream.java |  7 ++-
 .../hadoop/fs/s3a/TestS3ABlockOutputStream.java | 66 
 2 files changed, 72 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/919865a3/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
index 96de8e4..bdffed4 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
@@ -238,7 +238,12 @@ class S3ABlockOutputStream extends OutputStream implements
*/
   @Override
   public synchronized void flush() throws IOException {
-checkOpen();
+try {
+  checkOpen();
+} catch (IOException e) {
+  LOG.warn("Stream closed: " + e.getMessage());
+  return;
+}
 S3ADataBlocks.DataBlock dataBlock = getActiveBlock();
 if (dataBlock != null) {
   dataBlock.flush();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/919865a3/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java
new file mode 100644
index 000..ff176f5
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+import org.apache.hadoop.fs.s3a.commit.PutTracker;
+import org.apache.hadoop.util.Progressable;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.concurrent.ExecutorService;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+
+/**
+ * Unit tests for {@link S3ABlockOutputStream}.
+ */
+public class TestS3ABlockOutputStream extends AbstractS3AMockTest {
+
+  private S3ABlockOutputStream stream;
+
+  @Before
+  public void setUp() throws Exception {
+ExecutorService executorService = mock(ExecutorService.class);
+Progressable progressable = mock(Progressable.class);
+S3ADataBlocks.BlockFactory blockFactory =
+mock(S3ADataBlocks.BlockFactory.class);
+long blockSize = Constants.DEFAULT_MULTIPART_SIZE;
+S3AInstrumentation.OutputStreamStatistics statistics = null;
+WriteOperationHelper oHelper = mock(WriteOperationHelper.class);
+PutTracker putTracker = mock(PutTracker.class);
+stream = spy(new S3ABlockOutputStream(fs, "", executorService,
+  progressable, blockSize, blockFactory, statistics, oHelper,
+  putTracker));
+  }
+
+  @Test
+  public void testFlushNoOpWhenStreamClosed() throws Exception {
+doThrow(new IOException()).when(stream).checkOpen();
+
+try {
+  stream.flush();
+} catch (Exception e){
+  fail("Should not have any

hadoop git commit: HDFS-13283. Percentage based Reserved Space Calculation for DataNode. Contributed by Lukas Majercak.

2018-04-30 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 b8a596199 -> 20e4a7989


HDFS-13283. Percentage based Reserved Space Calculation for DataNode. 
Contributed by Lukas Majercak.

(cherry picked from commit cb3414a27944b5878bfd8134a086276e454b3db0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20e4a798
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20e4a798
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20e4a798

Branch: refs/heads/branch-2.9
Commit: 20e4a7989e3b4d49fd2c163fe162363c716e3e31
Parents: b8a5961
Author: Inigo Goiri 
Authored: Mon Apr 30 15:24:21 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 30 15:25:05 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 +
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  36 +--
 .../fsdataset/impl/ReservedSpaceCalculator.java | 227 +++
 .../src/main/resources/hdfs-default.xml |  28 +++
 .../fsdataset/impl/TestFsVolumeList.java|  59 -
 .../impl/TestReservedSpaceCalculator.java   | 171 ++
 6 files changed, 516 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20e4a798/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b774bab..04d0ff0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -539,8 +540,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final String  DFS_DATANODE_DNS_INTERFACE_DEFAULT = "default";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_KEY = 
"dfs.datanode.dns.nameserver";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_DEFAULT = "default";
+  public static final String DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY =
+  "dfs.datanode.du.reserved.calculator";
+  public static final Class<? extends ReservedSpaceCalculator>
+  DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT =
+  ReservedSpaceCalculator.ReservedSpaceCalculatorAbsolute.class;
   public static final String  DFS_DATANODE_DU_RESERVED_KEY = 
"dfs.datanode.du.reserved";
   public static final longDFS_DATANODE_DU_RESERVED_DEFAULT = 0;
+  public static final String  DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY =
+  "dfs.datanode.du.reserved.pct";
+  public static final int DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT = 0;
   public static final String  DFS_DATANODE_HANDLER_COUNT_KEY = 
"dfs.datanode.handler.count";
   public static final int DFS_DATANODE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = 
"dfs.datanode.http.address";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/20e4a798/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 71d93ae..4c8accf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -62,7 +62,6 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.CloseableReferenceCount;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Timer;
 import org.codehaus.jackson.annotate.JsonProperty;
@@ -99,7 +98,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   = new ConcurrentHashMap();
   private final File currentDir;// /c

hadoop git commit: HDFS-13283. Percentage based Reserved Space Calculation for DataNode. Contributed by Lukas Majercak.

2018-04-30 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c0c788aaf -> cb3414a27


HDFS-13283. Percentage based Reserved Space Calculation for DataNode. 
Contributed by Lukas Majercak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb3414a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb3414a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb3414a2

Branch: refs/heads/branch-2
Commit: cb3414a27944b5878bfd8134a086276e454b3db0
Parents: c0c788a
Author: Inigo Goiri 
Authored: Mon Apr 30 15:24:21 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 30 15:24:21 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 +
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  36 +--
 .../fsdataset/impl/ReservedSpaceCalculator.java | 227 +++
 .../src/main/resources/hdfs-default.xml |  28 +++
 .../fsdataset/impl/TestFsVolumeList.java|  59 -
 .../impl/TestReservedSpaceCalculator.java   | 171 ++
 6 files changed, 516 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3414a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index ec50448..c128a8e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -540,8 +541,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final String  DFS_DATANODE_DNS_INTERFACE_DEFAULT = "default";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_KEY = 
"dfs.datanode.dns.nameserver";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_DEFAULT = "default";
+  public static final String DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY =
+  "dfs.datanode.du.reserved.calculator";
+  public static final Class<? extends ReservedSpaceCalculator>
+  DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT =
+  ReservedSpaceCalculator.ReservedSpaceCalculatorAbsolute.class;
   public static final String  DFS_DATANODE_DU_RESERVED_KEY = 
"dfs.datanode.du.reserved";
   public static final longDFS_DATANODE_DU_RESERVED_DEFAULT = 0;
+  public static final String  DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY =
+  "dfs.datanode.du.reserved.pct";
+  public static final int DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT = 0;
   public static final String  DFS_DATANODE_HANDLER_COUNT_KEY = 
"dfs.datanode.handler.count";
   public static final int DFS_DATANODE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = 
"dfs.datanode.http.address";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3414a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 71d93ae..4c8accf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -62,7 +62,6 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.CloseableReferenceCount;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Timer;
 import org.codehaus.jackson.annotate.JsonProperty;
@@ -99,7 +98,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   = new ConcurrentHashMap();
   private final File currentDir;// /current
   private final DF usage;
-  private final long reserved;
+  private fi

hadoop git commit: HDFS-13283. Percentage based Reserved Space Calculation for DataNode. Contributed by Lukas Majercak.

2018-04-30 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 af9bc982c -> 0e31e014f


HDFS-13283. Percentage based Reserved Space Calculation for DataNode. 
Contributed by Lukas Majercak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e31e014
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e31e014
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e31e014

Branch: refs/heads/branch-3.0
Commit: 0e31e014f3382a586b9579febbbc4f4468d0fd6b
Parents: af9bc98
Author: Inigo Goiri 
Authored: Mon Apr 30 15:22:30 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 30 15:22:30 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 +
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  37 +--
 .../fsdataset/impl/FsVolumeImplBuilder.java |  17 +-
 .../fsdataset/impl/ReservedSpaceCalculator.java | 227 +++
 .../src/main/resources/hdfs-default.xml |  28 +++
 .../fsdataset/impl/TestFsVolumeList.java|  90 +++-
 .../impl/TestReservedSpaceCalculator.java   | 171 ++
 7 files changed, 561 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e31e014/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 185fed9..bba83b6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -599,8 +600,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final String  DFS_DATANODE_DNS_INTERFACE_DEFAULT = "default";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_KEY = 
"dfs.datanode.dns.nameserver";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_DEFAULT = "default";
+  public static final String DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY =
+  "dfs.datanode.du.reserved.calculator";
+  public static final Class<? extends ReservedSpaceCalculator>
+  DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT =
+  ReservedSpaceCalculator.ReservedSpaceCalculatorAbsolute.class;
   public static final String  DFS_DATANODE_DU_RESERVED_KEY = 
"dfs.datanode.du.reserved";
   public static final longDFS_DATANODE_DU_RESERVED_DEFAULT = 0;
+  public static final String  DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY =
+  "dfs.datanode.du.reserved.pct";
+  public static final int DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT = 0;
   public static final String  DFS_DATANODE_HANDLER_COUNT_KEY = 
"dfs.datanode.handler.count";
   public static final int DFS_DATANODE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = 
"dfs.datanode.http.address";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e31e014/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 922d3b1..ec4be98 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -78,7 +78,6 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTrack
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.CloseableReferenceCount;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Timer;
 import org.slf4j.Logger;
@@ -121,7 +120,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
   private final File currentDir;// /current
   private f

[2/2] hadoop git commit: HDFS-13283. Percentage based Reserved Space Calculation for DataNode. Contributed by Lukas Majercak.

2018-04-30 Thread inigoiri
HDFS-13283. Percentage based Reserved Space Calculation for DataNode. 
Contributed by Lukas Majercak.

(cherry picked from commit fc074a359c44e84dd9612be2bd772763f943eb04)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c13e7e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c13e7e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c13e7e3

Branch: refs/heads/branch-3.1
Commit: 4c13e7e3a08a995cf6e84d70242e580e5721f6db
Parents: 9d29670
Author: Inigo Goiri 
Authored: Mon Apr 30 13:28:33 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 30 13:31:03 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 +
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  38 ++--
 .../fsdataset/impl/FsVolumeImplBuilder.java |  16 +-
 .../fsdataset/impl/ProvidedVolumeImpl.java  |   2 +-
 .../fsdataset/impl/ReservedSpaceCalculator.java | 227 +++
 .../src/main/resources/hdfs-default.xml |  28 +++
 .../fsdataset/impl/TestFsVolumeList.java|  90 +++-
 .../impl/TestReservedSpaceCalculator.java   | 171 ++
 8 files changed, 561 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c13e7e3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index a7f0a07..bc8e81f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -647,8 +648,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final String  DFS_DATANODE_DNS_INTERFACE_DEFAULT = "default";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_KEY = 
"dfs.datanode.dns.nameserver";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_DEFAULT = "default";
+  public static final String DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY =
+  "dfs.datanode.du.reserved.calculator";
+  public static final Class<? extends ReservedSpaceCalculator>
+  DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT =
+  ReservedSpaceCalculator.ReservedSpaceCalculatorAbsolute.class;
   public static final String  DFS_DATANODE_DU_RESERVED_KEY = 
"dfs.datanode.du.reserved";
   public static final longDFS_DATANODE_DU_RESERVED_DEFAULT = 0;
+  public static final String  DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY =
+  "dfs.datanode.du.reserved.pct";
+  public static final int DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT = 0;
   public static final String  DFS_DATANODE_HANDLER_COUNT_KEY = 
"dfs.datanode.handler.count";
   public static final int DFS_DATANODE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = 
"dfs.datanode.http.address";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c13e7e3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index b8c95a4..9969976 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -78,7 +78,6 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTrack
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.CloseableReferenceCount;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Timer;
 import org.slf4j.Logger;
@@ -121,7 +120,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
   private final F

[1/2] hadoop git commit: HDFS-13283. Percentage based Reserved Space Calculation for DataNode. Contributed by Lukas Majercak.

2018-04-30 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 9d2967098 -> 4c13e7e3a
  refs/heads/trunk 9b0955545 -> fc074a359


HDFS-13283. Percentage based Reserved Space Calculation for DataNode. 
Contributed by Lukas Majercak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc074a35
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc074a35
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc074a35

Branch: refs/heads/trunk
Commit: fc074a359c44e84dd9612be2bd772763f943eb04
Parents: 9b09555
Author: Inigo Goiri 
Authored: Mon Apr 30 13:28:33 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 30 13:28:33 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 +
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  38 ++--
 .../fsdataset/impl/FsVolumeImplBuilder.java |  16 +-
 .../fsdataset/impl/ProvidedVolumeImpl.java  |   2 +-
 .../fsdataset/impl/ReservedSpaceCalculator.java | 227 +++
 .../src/main/resources/hdfs-default.xml |  28 +++
 .../fsdataset/impl/TestFsVolumeList.java|  90 +++-
 .../impl/TestReservedSpaceCalculator.java   | 171 ++
 8 files changed, 561 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc074a35/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index a7f0a07..bc8e81f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -647,8 +648,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final String  DFS_DATANODE_DNS_INTERFACE_DEFAULT = "default";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_KEY = 
"dfs.datanode.dns.nameserver";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_DEFAULT = "default";
+  public static final String DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY =
+  "dfs.datanode.du.reserved.calculator";
+  public static final Class<? extends ReservedSpaceCalculator>
+  DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT =
+  ReservedSpaceCalculator.ReservedSpaceCalculatorAbsolute.class;
   public static final String  DFS_DATANODE_DU_RESERVED_KEY = 
"dfs.datanode.du.reserved";
   public static final longDFS_DATANODE_DU_RESERVED_DEFAULT = 0;
+  public static final String  DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY =
+  "dfs.datanode.du.reserved.pct";
+  public static final int DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT = 0;
   public static final String  DFS_DATANODE_HANDLER_COUNT_KEY = 
"dfs.datanode.handler.count";
   public static final int DFS_DATANODE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = 
"dfs.datanode.http.address";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc074a35/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index b8c95a4..9969976 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -78,7 +78,6 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTrack
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.CloseableReferenceCount;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Timer;
 import org.slf4j.Logger;
@@ -121,7 +120,7 @@ public class FsVol

[2/2] hadoop git commit: YARN-8195. Fix constraint cardinality check in the presence of multiple target allocation tags. Contributed by Weiwei Yang.

2018-04-30 Thread kkaranasos
YARN-8195. Fix constraint cardinality check in the presence of multiple target 
allocation tags. Contributed by Weiwei Yang.

(cherry picked from commit 9b0955545174abe16fd81240db30f175145ee89b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d296709
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d296709
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d296709

Branch: refs/heads/branch-3.1
Commit: 9d2967098d07aa39d422b31b620481a296ac7376
Parents: ce62991
Author: Konstantinos Karanasos 
Authored: Mon Apr 30 11:54:30 2018 -0700
Committer: Konstantinos Karanasos 
Committed: Mon Apr 30 11:55:26 2018 -0700

--
 .../constraint/PlacementConstraintsUtil.java|  8 +-
 .../TestPlacementConstraintsUtil.java   | 88 
 2 files changed, 92 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d296709/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
index efa7b65..f47e1d4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
@@ -91,20 +91,20 @@ public final class PlacementConstraintsUtil {
 if (sc.getScope().equals(PlacementConstraints.NODE)) {
   if (checkMinCardinality) {
 minScopeCardinality = tm.getNodeCardinalityByOp(node.getNodeID(),
-allocationTags, Long::max);
+allocationTags, Long::min);
   }
   if (checkMaxCardinality) {
 maxScopeCardinality = tm.getNodeCardinalityByOp(node.getNodeID(),
-allocationTags, Long::min);
+allocationTags, Long::max);
   }
 } else if (sc.getScope().equals(PlacementConstraints.RACK)) {
   if (checkMinCardinality) {
 minScopeCardinality = tm.getRackCardinalityByOp(node.getRackName(),
-allocationTags, Long::max);
+allocationTags, Long::min);
   }
   if (checkMaxCardinality) {
 maxScopeCardinality = tm.getRackCardinalityByOp(node.getRackName(),
-allocationTags, Long::min);
+allocationTags, Long::max);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d296709/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
index 3248450..dc61981 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
@@ -42,6 +42,7 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import java.util.concurrent.atomic.AtomicLong;
+import com.google.common.collect.ImmutableMap;
 
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -228,6 +229,93 @@ public class TestPlacementConstraintsUtil {
   }
 
   @Test
+  public void testMultiTagsPlacementConstraints()
+  throws InvalidAllocationTagsQueryException {
+PlacementConstraintManagerService pcm =
+   

[1/2] hadoop git commit: YARN-8195. Fix constraint cardinality check in the presence of multiple target allocation tags. Contributed by Weiwei Yang.

2018-04-30 Thread kkaranasos
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 ce6299164 -> 9d2967098
  refs/heads/trunk 3d43474f7 -> 9b0955545


YARN-8195. Fix constraint cardinality check in the presence of multiple target 
allocation tags. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b095554
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b095554
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b095554

Branch: refs/heads/trunk
Commit: 9b0955545174abe16fd81240db30f175145ee89b
Parents: 3d43474
Author: Konstantinos Karanasos 
Authored: Mon Apr 30 11:54:30 2018 -0700
Committer: Konstantinos Karanasos 
Committed: Mon Apr 30 11:54:30 2018 -0700

--
 .../constraint/PlacementConstraintsUtil.java|  8 +-
 .../TestPlacementConstraintsUtil.java   | 88 
 2 files changed, 92 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b095554/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
index efa7b65..f47e1d4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
@@ -91,20 +91,20 @@ public final class PlacementConstraintsUtil {
 if (sc.getScope().equals(PlacementConstraints.NODE)) {
   if (checkMinCardinality) {
 minScopeCardinality = tm.getNodeCardinalityByOp(node.getNodeID(),
-allocationTags, Long::max);
+allocationTags, Long::min);
   }
   if (checkMaxCardinality) {
 maxScopeCardinality = tm.getNodeCardinalityByOp(node.getNodeID(),
-allocationTags, Long::min);
+allocationTags, Long::max);
   }
 } else if (sc.getScope().equals(PlacementConstraints.RACK)) {
   if (checkMinCardinality) {
 minScopeCardinality = tm.getRackCardinalityByOp(node.getRackName(),
-allocationTags, Long::max);
+allocationTags, Long::min);
   }
   if (checkMaxCardinality) {
 maxScopeCardinality = tm.getRackCardinalityByOp(node.getRackName(),
-allocationTags, Long::min);
+allocationTags, Long::max);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b095554/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
index 3248450..dc61981 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
@@ -42,6 +42,7 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import java.util.concurrent.atomic.AtomicLong;
+import com.google.common.collect.ImmutableMap;
 
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -228,6 +229,93 @@ public class TestPlacementConstraintsUtil {
   }
 
   @Test
+  public void testMultiTagsPlacementConstraints()
+  throws InvalidAllocationTagsQueryException {

hadoop git commit: HDFS-12156. TestFSImage fails without -Pnative

2018-04-30 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 112041d3f -> 20c6b448a


HDFS-12156. TestFSImage fails without -Pnative

(cherry picked from commit 319defafc105c0d0b69b83828b578d9c453036f5)
(cherry picked from commit b0c80f1c814827abf95316dc9c23701f8ef09d9a)
(cherry picked from commit 9699bc2e11663469ed1425fb42d09dba3fbeb0d6)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20c6b448
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20c6b448
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20c6b448

Branch: refs/heads/branch-2.7
Commit: 20c6b448ac34841c24cdb2ca5e7d7414148ceedd
Parents: 112041d
Author: Akira Ajisaka 
Authored: Tue Mar 13 11:26:48 2018 +0900
Committer: Wei-Chiu Chuang 
Committed: Mon Apr 30 10:28:09 2018 -0700

--
 .../org/apache/hadoop/hdfs/server/namenode/TestFSImage.java | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20c6b448/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 29116ba..d3d039e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -25,6 +25,7 @@ import java.io.IOException;
 import java.util.EnumSet;
 
 import org.junit.Assert;
+import org.junit.Assume;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -46,6 +47,7 @@ import 
org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
+import org.apache.hadoop.util.NativeCodeLoader;
 import org.junit.Test;
 
 public class TestFSImage {
@@ -65,6 +67,13 @@ public class TestFSImage {
 setCompressCodec(conf, "org.apache.hadoop.io.compress.DefaultCodec");
 setCompressCodec(conf, "org.apache.hadoop.io.compress.GzipCodec");
 setCompressCodec(conf, "org.apache.hadoop.io.compress.BZip2Codec");
+  }
+
+  @Test
+  public void testNativeCompression() throws IOException {
+Assume.assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
+Configuration conf = new Configuration();
+conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
 setCompressCodec(conf, "org.apache.hadoop.io.compress.Lz4Codec");
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HDDS-11. Fix findbugs exclude rules for ozone and hdds projects. Contributed by Elek, Marton.

2018-04-30 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk eb7fe1d58 -> 3d43474f7


HDDS-11. Fix findbugs exclude rules for ozone and hdds projects. Contributed by 
Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d43474f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d43474f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d43474f

Branch: refs/heads/trunk
Commit: 3d43474f7567117e4e11a0d198be6aa1fc023106
Parents: eb7fe1d
Author: Anu Engineer 
Authored: Mon Apr 30 09:20:58 2018 -0700
Committer: Anu Engineer 
Committed: Mon Apr 30 09:20:58 2018 -0700

--
 .../dev-support/findbugsExcludeFile.xml | 21 
 hadoop-hdds/container-service/pom.xml   |  7 +++
 .../tools/dev-support/findbugsExcludeFile.xml   | 19 ++
 hadoop-ozone/tools/pom.xml  | 14 +
 4 files changed, 61 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d43474f/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
--
diff --git a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
new file mode 100644
index 0000000..3571a89
--- /dev/null
+++ b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
@@ -0,0 +1,21 @@
+
+
+  
+
+  
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d43474f/hadoop-hdds/container-service/pom.xml
--
diff --git a/hadoop-hdds/container-service/pom.xml 
b/hadoop-hdds/container-service/pom.xml
index 3dc8470..36c7235 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -98,6 +98,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
   
 
   
+  
+org.codehaus.mojo
+findbugs-maven-plugin
+
+  
${basedir}/dev-support/findbugsExcludeFile.xml
+
+  
 
   
 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d43474f/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml
--
diff --git a/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml 
b/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml
new file mode 100644
index 0000000..e6a345e
--- /dev/null
+++ b/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml
@@ -0,0 +1,19 @@
+
+
+
+ 
+   
+ 
+ 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d43474f/hadoop-ozone/tools/pom.xml
--
diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml
index 918a675..839ca0d 100644
--- a/hadoop-ozone/tools/pom.xml
+++ b/hadoop-ozone/tools/pom.xml
@@ -68,4 +68,18 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
   1.19
 
   
+  
+
+  
+org.codehaus.mojo
+findbugs-maven-plugin
+
+  ${basedir}/dev-support/findbugsExcludeFile.xml
+  
+  true
+  2048
+
+  
+
+  
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org