hadoop git commit: YARN-8449. RM HA for AM web server HTTPS Support. (Contributed by Robert Kanter)

2018-10-18 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 13cc0f50e -> 285d2c075


YARN-8449. RM HA for AM web server HTTPS Support. (Contributed by Robert Kanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/285d2c07
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/285d2c07
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/285d2c07

Branch: refs/heads/trunk
Commit: 285d2c07531a92067368ac4bdd21d309e6e81bc4
Parents: 13cc0f5
Author: Haibo Chen 
Authored: Thu Oct 18 21:23:48 2018 -0700
Committer: Haibo Chen 
Committed: Thu Oct 18 21:24:36 2018 -0700

--
 .../server/resourcemanager/ResourceManager.java |   3 +
 .../recovery/FileSystemRMStateStore.java|  53 +
 .../recovery/LeveldbRMStateStore.java   |  62 +++
 .../recovery/MemoryRMStateStore.java|  18 +++
 .../recovery/NullRMStateStore.java  |   9 ++
 .../resourcemanager/recovery/RMStateStore.java  | 100 -
 .../recovery/RMStateStoreEventType.java |   1 +
 .../recovery/RMStateStoreProxyCAEvent.java  |  49 +
 .../recovery/ZKRMStateStore.java|  60 +-
 .../security/ProxyCAManager.java|  23 +++-
 .../recovery/RMStateStoreTestBase.java  |  33 ++
 .../recovery/TestFSRMStateStore.java|   1 +
 .../recovery/TestLeveldbRMStateStore.java   |   6 +
 .../recovery/TestZKRMStateStore.java|   1 +
 .../security/TestProxyCAManager.java|  54 +
 .../hadoop/yarn/server/webproxy/ProxyCA.java|  33 ++
 .../yarn/server/webproxy/TestProxyCA.java   | 109 +++
 17 files changed, 610 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/285d2c07/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 5f4ae6e..a89069a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -1510,6 +1510,9 @@ public class ResourceManager extends CompositeService
 // recover applications
 rmAppManager.recover(state);
 
+// recover ProxyCA
+rmContext.getProxyCAManager().recover(state);
+
 setSchedulerRecoveryStartAndWaitTime(state, conf);
   }
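
The recovery hook added above only helps across a failover if the web-proxy
CA's certificate and private key survive in the RM state store; the new
java.security imports in the FileSystemRMStateStore hunk below show the types
being persisted. A minimal, self-contained sketch of the byte-level round-trip
such a store has to perform, using only JDK APIs (the actual YARN-8449
store/load plumbing differs):

import java.io.ByteArrayInputStream;
import java.security.KeyFactory;
import java.security.PrivateKey;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.security.spec.PKCS8EncodedKeySpec;

final class ProxyCAStateCodec {
  // DER bytes of the certificate; safe to persist in any state store.
  static byte[] encodeCert(X509Certificate cert) throws Exception {
    return cert.getEncoded();
  }

  // PKCS#8 bytes of the private key.
  static byte[] encodeKey(PrivateKey key) {
    return key.getEncoded();
  }

  static X509Certificate decodeCert(byte[] der) throws Exception {
    CertificateFactory cf = CertificateFactory.getInstance("X.509");
    return (X509Certificate) cf.generateCertificate(
        new ByteArrayInputStream(der));
  }

  static PrivateKey decodeKey(byte[] pkcs8, String algorithm) throws Exception {
    // algorithm is e.g. "RSA"; it must match the key that was encoded.
    return KeyFactory.getInstance(algorithm)
        .generatePrivate(new PKCS8EncodedKeySpec(pkcs8));
  }
}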
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/285d2c07/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
index b797283..ed0486a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
@@ -24,6 +24,8 @@ import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.security.PrivateKey;
+import java.security.cert.X509Certificate;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.HashMap;
@@ -114,6 +116,7 @@ public class FileSystemRMStateStore extends RMStateStore {
 
   Path amrmTokenSecretManagerRoot;
   private Path reservationRoot;
+  private Path proxyCARoot;
 
   @Override
   public synchronized void initInternal(Configuration conf)
@@ -125,6 +128,7 @@ public class FileSystemRMStateStore extends RMStateStore {
 

hadoop git commit: YARN-8907. Fix incorrect logging message in TestCapacityScheduler. Contributed by Zhankun Tang.

2018-10-18 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 6a7bf9f85 -> 37af9580c


YARN-8907. Fix incorrect logging message in TestCapacityScheduler. Contributed by Zhankun Tang.

(cherry picked from commit 13cc0f50ea1c5f8978455e34f49716ddb0e3a143)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37af9580
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37af9580
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37af9580

Branch: refs/heads/branch-3.0
Commit: 37af9580c8d7359fe15b23fa7c8c5fe62311d75c
Parents: 6a7bf9f
Author: Weiwei Yang 
Authored: Fri Oct 19 09:26:39 2018 +0800
Committer: Weiwei Yang 
Committed: Fri Oct 19 10:03:39 2018 +0800

--
 .../scheduler/capacity/TestCapacityScheduler.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37af9580/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index dfc9718..3a69150 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -507,7 +507,7 @@ public class TestCapacityScheduler {
 application1.schedule();
 checkApplicationResourceUsage(3 * GB, application1);
 checkNodeResourceUsage(4 * GB, nm0);
-LOG.info("--- START: testNotAssignMultiple ---");
+LOG.info("--- END: testNotAssignMultiple ---");
   }
 
   @Test
@@ -609,7 +609,7 @@ public class TestCapacityScheduler {
 application1.schedule();
 checkApplicationResourceUsage(7 * GB, application1);
 checkNodeResourceUsage(10 * GB, nm0);
-LOG.info("--- START: testAssignMultiple ---");
+LOG.info("--- END: testAssignMultiple ---");
   }
 
   private void nodeUpdate(ResourceManager rm, NodeManager nm) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8907. Fix incorrect logging message in TestCapacityScheduler. Contributed by Zhankun Tang.

2018-10-18 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 92c6bde6b -> beca90ece


YARN-8907. Fix incorrect logging message in TestCapacityScheduler. Contributed by Zhankun Tang.

(cherry picked from commit 13cc0f50ea1c5f8978455e34f49716ddb0e3a143)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/beca90ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/beca90ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/beca90ec

Branch: refs/heads/branch-3.1
Commit: beca90ece82755dc9c06b239d08d548324cf04cb
Parents: 92c6bde
Author: Weiwei Yang 
Authored: Fri Oct 19 09:26:39 2018 +0800
Committer: Weiwei Yang 
Committed: Fri Oct 19 10:02:46 2018 +0800

--
 .../scheduler/capacity/TestCapacityScheduler.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/beca90ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 8d948b5..b1be1d3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -488,7 +488,7 @@ public class TestCapacityScheduler extends CapacitySchedulerTestBase {
 application1.schedule();
 checkApplicationResourceUsage(3 * GB, application1);
 checkNodeResourceUsage(4 * GB, nm0);
-LOG.info("--- START: testNotAssignMultiple ---");
+LOG.info("--- END: testNotAssignMultiple ---");
   }
 
   @Test
@@ -590,7 +590,7 @@ public class TestCapacityScheduler extends CapacitySchedulerTestBase {
 application1.schedule();
 checkApplicationResourceUsage(7 * GB, application1);
 checkNodeResourceUsage(10 * GB, nm0);
-LOG.info("--- START: testAssignMultiple ---");
+LOG.info("--- END: testAssignMultiple ---");
   }
 
   private void nodeUpdate(ResourceManager rm, NodeManager nm) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8907. Fix incorrect logging message in TestCapacityScheduler. Contributed by Zhankun Tang.

2018-10-18 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 0b6a7b5f1 -> 042f2df19


YARN-8907. Fix incorrect logging message in TestCapacityScheduler. Contributed by Zhankun Tang.

(cherry picked from commit 13cc0f50ea1c5f8978455e34f49716ddb0e3a143)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/042f2df1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/042f2df1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/042f2df1

Branch: refs/heads/branch-3.2
Commit: 042f2df19b699bf38e63e6c1be9ad3f00c24e5b6
Parents: 0b6a7b5
Author: Weiwei Yang 
Authored: Fri Oct 19 09:26:39 2018 +0800
Committer: Weiwei Yang 
Committed: Fri Oct 19 10:01:45 2018 +0800

--
 .../scheduler/capacity/TestCapacityScheduler.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/042f2df1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index f32b467..c090866 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -487,7 +487,7 @@ public class TestCapacityScheduler extends CapacitySchedulerTestBase {
 application1.schedule();
 checkApplicationResourceUsage(3 * GB, application1);
 checkNodeResourceUsage(4 * GB, nm0);
-LOG.info("--- START: testNotAssignMultiple ---");
+LOG.info("--- END: testNotAssignMultiple ---");
   }
 
   @Test
@@ -589,7 +589,7 @@ public class TestCapacityScheduler extends CapacitySchedulerTestBase {
 application1.schedule();
 checkApplicationResourceUsage(7 * GB, application1);
 checkNodeResourceUsage(10 * GB, nm0);
-LOG.info("--- START: testAssignMultiple ---");
+LOG.info("--- END: testAssignMultiple ---");
   }
 
   private void nodeUpdate(ResourceManager rm, NodeManager nm) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8907. Fix incorrect logging message in TestCapacityScheduler. Contributed by Zhankun Tang.

2018-10-18 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/trunk cd2158456 -> 13cc0f50e


YARN-8907. Fix incorrect logging message in TestCapacityScheduler. Contributed by Zhankun Tang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13cc0f50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13cc0f50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13cc0f50

Branch: refs/heads/trunk
Commit: 13cc0f50ea1c5f8978455e34f49716ddb0e3a143
Parents: cd21584
Author: Weiwei Yang 
Authored: Fri Oct 19 09:26:39 2018 +0800
Committer: Weiwei Yang 
Committed: Fri Oct 19 09:55:59 2018 +0800

--
 .../scheduler/capacity/TestCapacityScheduler.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13cc0f50/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index f32b467..c090866 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -487,7 +487,7 @@ public class TestCapacityScheduler extends CapacitySchedulerTestBase {
 application1.schedule();
 checkApplicationResourceUsage(3 * GB, application1);
 checkNodeResourceUsage(4 * GB, nm0);
-LOG.info("--- START: testNotAssignMultiple ---");
+LOG.info("--- END: testNotAssignMultiple ---");
   }
 
   @Test
@@ -589,7 +589,7 @@ public class TestCapacityScheduler extends CapacitySchedulerTestBase {
 application1.schedule();
 checkApplicationResourceUsage(7 * GB, application1);
 checkNodeResourceUsage(10 * GB, nm0);
-LOG.info("--- START: testAssignMultiple ---");
+LOG.info("--- END: testAssignMultiple ---");
   }
 
   private void nodeUpdate(ResourceManager rm, NodeManager nm) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-14005. RBF: Web UI update to bootstrap-3.3.7. Contributed by Inigo Goiri.

2018-10-18 Thread gifuma
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2.0 38895fcef -> aca6b715f


HDFS-14005. RBF: Web UI update to bootstrap-3.3.7. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aca6b715
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aca6b715
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aca6b715

Branch: refs/heads/branch-3.2.0
Commit: aca6b715fffe5e4dd1b846473435d408d67f0e84
Parents: 38895fc
Author: Giovanni Matteo Fumarola 
Authored: Thu Oct 18 14:25:17 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Thu Oct 18 17:37:59 2018 -0700

--
 .../src/main/webapps/router/federationhealth.js  | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aca6b715/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
index 6779b61..6311a80 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
@@ -156,7 +156,7 @@
   $('#tab-namenode').html(out);
   $('#ui-tabs a[href="#tab-namenode"]').tab('show');
 });
-  })).error(ajax_error_handler);
+  })).fail(ajax_error_handler);
   }
 
   function load_router_info() {
@@ -220,7 +220,7 @@
   $('#tab-router').html(out);
   $('#ui-tabs a[href="#tab-router"]').tab('show');
 });
-  })).error(ajax_error_handler);
+  })).fail(ajax_error_handler);
   }
 
  // TODO Copied directly from dfshealth.js; is there a way to import this function?
@@ -306,7 +306,7 @@
 ]});
   $('#ui-tabs a[href="#tab-datanode"]').tab('show');
 });
-  })).error(ajax_error_handler);
+  })).fail(ajax_error_handler);
   }
 
   function load_mount_table() {
@@ -337,7 +337,7 @@
   $('#tab-mounttable').html(out);
   $('#ui-tabs a[href="#tab-mounttable"]').tab('show');
 });
-  })).error(ajax_error_handler);
+  })).fail(ajax_error_handler);
   }
 
   function toTitleCase(str) {
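
Background: jQuery deprecated the jqXHR .error() callback alias in 1.8 and
removed it in jQuery 3.0, so .fail() is the supported way to register failure
handlers; this swap keeps the RBF error handling working once the UI is served
with a jQuery 3.x bundle.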


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-14007. Incompatible layout when generating FSImage. Contributed by Inigo Goiri.

2018-10-18 Thread gifuma
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2.0 a7d813049 -> 38895fcef


HDFS-14007. Incompatible layout when generating FSImage. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38895fce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38895fce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38895fce

Branch: refs/heads/branch-3.2.0
Commit: 38895fcef1ab0848ca737cd88eb0bdf6935af909
Parents: a7d8130
Author: Giovanni Matteo Fumarola 
Authored: Thu Oct 18 17:24:23 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Thu Oct 18 17:32:48 2018 -0700

--
 .../org/apache/hadoop/hdfs/server/namenode/ImageWriter.java | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38895fce/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
--
diff --git 
a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
 
b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
index 14a5f8f..d4a632c 100644
--- 
a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
+++ 
b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
@@ -79,7 +79,8 @@ import static org.apache.hadoop.hdfs.server.namenode.FSImageUtil.MAGIC_HEADER;
 public class ImageWriter implements Closeable {
 
   private static final int ONDISK_VERSION = 1;
-  private static final int LAYOUT_VERSION = -64; // see NameNodeLayoutVersion
+  private static final int LAYOUT_VERSION =
+  NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
 
   private final Path outdir;
   private final FileSystem outfs;
@@ -128,7 +129,7 @@ public class ImageWriter implements Closeable {
 NamespaceInfo info = NNStorage.newNamespaceInfo();
 if (info.getLayoutVersion() != LAYOUT_VERSION) {
   throw new IllegalStateException("Incompatible layout " +
-  info.getLayoutVersion() + " (expected " + LAYOUT_VERSION);
+  info.getLayoutVersion() + " (expected " + LAYOUT_VERSION + ")");
 }
 // set the cluster id, if given
 if (opts.clusterID.length() > 0) {
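
Two fixes land above: the expected version now tracks the authoritative
NameNodeLayoutVersion constant instead of a stale -64 literal that no longer
matched the current layout, and the error message gains its missing closing
parenthesis. A stand-in sketch of the single-source-of-truth pattern (local
constants with illustrative values, not the HDFS classes):

final class LayoutCheckSketch {
  // Single source of truth; bumps here propagate automatically.
  static final int CURRENT_LAYOUT_VERSION = -65;   // illustrative value
  static final int LAYOUT_VERSION = CURRENT_LAYOUT_VERSION;

  static void check(int actual) {
    if (actual != LAYOUT_VERSION) {
      // Before the fix the message ended "(expected -65" -- unbalanced.
      throw new IllegalStateException("Incompatible layout " + actual
          + " (expected " + LAYOUT_VERSION + ")");
    }
  }

  public static void main(String[] args) {
    check(CURRENT_LAYOUT_VERSION);   // passes
    check(-64);                      // throws, with a well-formed message
  }
}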


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15418. Hadoop KMSAuthenticationFilter needs to use getPropsByPrefix instead of iterator to avoid ConcurrentModificationException. Contributed by lqjack and Suma Shivaprasad

2018-10-18 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/trunk cc09b2b0c -> cd2158456


HADOOP-15418. Hadoop KMSAuthenticationFilter needs to use getPropsByPrefix instead of iterator to avoid ConcurrentModificationException. Contributed by lqjack and Suma Shivaprasad


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd215845
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd215845
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd215845

Branch: refs/heads/trunk
Commit: cd2158456db8c89eeea64b72654a736ea8607e23
Parents: cc09b2b
Author: Wei-Chiu Chuang 
Authored: Thu Oct 18 17:25:57 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Oct 18 17:28:28 2018 -0700

--
 .../key/kms/server/KMSAuthenticationFilter.java | 23 ++
 .../kms/server/TestKMSAuthenticationFilter.java | 48 
 2 files changed, 63 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd215845/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
index 3e98a25..da542ff 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.crypto.key.kms.server;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.kms.KMSDelegationToken;
@@ -54,16 +55,22 @@ public class KMSAuthenticationFilter
   @Override
   protected Properties getConfiguration(String configPrefix,
   FilterConfig filterConfig) {
-Properties props = new Properties();
+
 Configuration conf = KMSWebApp.getConfiguration();
-for (Map.Entry<String, String> entry : conf) {
-  String name = entry.getKey();
-  if (name.startsWith(CONFIG_PREFIX)) {
-String value = conf.get(name);
-name = name.substring(CONFIG_PREFIX.length());
-props.setProperty(name, value);
-  }
+return getKMSConfiguration(conf);
+  }
+
+  @VisibleForTesting
+  Properties getKMSConfiguration(Configuration conf) {
+Properties props = new Properties();
+
+Map<String, String> propsWithPrefixMap = conf.getPropsWithPrefix(
+CONFIG_PREFIX);
+
+for (Map.Entry<String, String> entry : propsWithPrefixMap.entrySet()) {
+  props.setProperty(entry.getKey(), entry.getValue());
 }
+
 String authType = props.getProperty(AUTH_TYPE);
 if (authType.equals(PseudoAuthenticationHandler.TYPE)) {
   props.setProperty(AUTH_TYPE,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd215845/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAuthenticationFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAuthenticationFilter.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAuthenticationFilter.java
new file mode 100644
index 000..da3913b
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAuthenticationFilter.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.kms.KMSDelegationToken;
+import org.apache.hadoop.security.token.delegation.web
+
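
The fix is a snapshot-then-iterate pattern: getPropsWithPrefix copies the
matching entries into a fresh Map, so concurrent changes to the live
Configuration can no longer fail the iteration. A self-contained illustration
of the failure mode and the copy-first cure, modeling Configuration with a
plain HashMap (this is not the KMS code):

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public class SnapshotVsLiveIteration {
  public static void main(String[] args) {
    Map<String, String> conf = new HashMap<>();
    conf.put("hadoop.kms.authentication.type", "simple");
    conf.put("hadoop.kms.other", "x");

    Properties props = new Properties();
    try {
      for (Map.Entry<String, String> e : conf.entrySet()) {
        conf.put("injected", "y");   // concurrent mutation mid-iteration
        props.setProperty(e.getKey(), e.getValue());
      }
    } catch (java.util.ConcurrentModificationException ex) {
      System.out.println("live iteration failed: " + ex);
    }

    // Snapshot first, then iterate the copy: immune to later mutations.
    Map<String, String> snapshot = new HashMap<>(conf);
    conf.put("injected2", "z");
    snapshot.forEach(props::setProperty);
    System.out.println("snapshot iteration copied " + snapshot.size() + " entries");
  }
}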

hadoop git commit: HDFS-14007. Incompatible layout when generating FSImage. Contributed by Inigo Goiri.

2018-10-18 Thread gifuma
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 c0f618f3d -> 0b6a7b5f1


HDFS-14007. Incompatible layout when generating FSImage. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b6a7b5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b6a7b5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b6a7b5f

Branch: refs/heads/branch-3.2
Commit: 0b6a7b5f1643f0d69ce7b67b984d7ccdd5e9c75e
Parents: c0f618f
Author: Giovanni Matteo Fumarola 
Authored: Thu Oct 18 17:24:23 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Thu Oct 18 17:26:22 2018 -0700

--
 .../org/apache/hadoop/hdfs/server/namenode/ImageWriter.java | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b6a7b5f/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
--
diff --git 
a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
 
b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
index 14a5f8f..d4a632c 100644
--- 
a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
+++ 
b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
@@ -79,7 +79,8 @@ import static org.apache.hadoop.hdfs.server.namenode.FSImageUtil.MAGIC_HEADER;
 public class ImageWriter implements Closeable {
 
   private static final int ONDISK_VERSION = 1;
-  private static final int LAYOUT_VERSION = -64; // see NameNodeLayoutVersion
+  private static final int LAYOUT_VERSION =
+  NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
 
   private final Path outdir;
   private final FileSystem outfs;
@@ -128,7 +129,7 @@ public class ImageWriter implements Closeable {
 NamespaceInfo info = NNStorage.newNamespaceInfo();
 if (info.getLayoutVersion() != LAYOUT_VERSION) {
   throw new IllegalStateException("Incompatible layout " +
-  info.getLayoutVersion() + " (expected " + LAYOUT_VERSION);
+  info.getLayoutVersion() + " (expected " + LAYOUT_VERSION + ")");
 }
 // set the cluster id, if given
 if (opts.clusterID.length() > 0) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-14007. Incompatible layout when generating FSImage. Contributed by Inigo Goiri.

2018-10-18 Thread gifuma
Repository: hadoop
Updated Branches:
  refs/heads/trunk 66f059ed1 -> cc09b2b0c


HDFS-14007. Incompatible layout when generating FSImage. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc09b2b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc09b2b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc09b2b0

Branch: refs/heads/trunk
Commit: cc09b2b0c7e9efa45c17d93d9ae1fcda8e050965
Parents: 66f059e
Author: Giovanni Matteo Fumarola 
Authored: Thu Oct 18 17:24:23 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Thu Oct 18 17:24:23 2018 -0700

--
 .../org/apache/hadoop/hdfs/server/namenode/ImageWriter.java | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc09b2b0/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
--
diff --git 
a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
 
b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
index 14a5f8f..d4a632c 100644
--- 
a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
+++ 
b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java
@@ -79,7 +79,8 @@ import static org.apache.hadoop.hdfs.server.namenode.FSImageUtil.MAGIC_HEADER;
 public class ImageWriter implements Closeable {
 
   private static final int ONDISK_VERSION = 1;
-  private static final int LAYOUT_VERSION = -64; // see NameNodeLayoutVersion
+  private static final int LAYOUT_VERSION =
+  NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
 
   private final Path outdir;
   private final FileSystem outfs;
@@ -128,7 +129,7 @@ public class ImageWriter implements Closeable {
 NamespaceInfo info = NNStorage.newNamespaceInfo();
 if (info.getLayoutVersion() != LAYOUT_VERSION) {
   throw new IllegalStateException("Incompatible layout " +
-  info.getLayoutVersion() + " (expected " + LAYOUT_VERSION);
+  info.getLayoutVersion() + " (expected " + LAYOUT_VERSION + ")");
 }
 // set the cluster id, if given
 if (opts.clusterID.length() > 0) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-14002. TestLayoutVersion#testNameNodeFeatureMinimumCompatibleLayoutVersions fails. Contributed by Takanobu Asanuma.

2018-10-18 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2.0 30b65ea18 -> a7d813049


HDFS-14002. TestLayoutVersion#testNameNodeFeatureMinimumCompatibleLayoutVersions fails. Contributed by Takanobu Asanuma.

(cherry picked from commit be1cffb0854cb28967beb062e9db7d61e6eeff1d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7d81304
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7d81304
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7d81304

Branch: refs/heads/branch-3.2.0
Commit: a7d813049ca0aed1f9a53e0afcf5f5e23c05a372
Parents: 30b65ea
Author: Inigo Goiri 
Authored: Thu Oct 18 15:25:53 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 18 17:15:28 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7d81304/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
index e944b81..2c9905d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
@@ -127,7 +127,8 @@ public class TestLayoutVersion {
 NameNodeLayoutVersion.Feature.TRUNCATE,
 NameNodeLayoutVersion.Feature.APPEND_NEW_BLOCK,
 NameNodeLayoutVersion.Feature.QUOTA_BY_STORAGE_TYPE,
-NameNodeLayoutVersion.Feature.ERASURE_CODING);
+NameNodeLayoutVersion.Feature.ERASURE_CODING,
+NameNodeLayoutVersion.Feature.EXPANDED_STRING_TABLE);
 for (LayoutFeature f : compatibleFeatures) {
   assertEquals(String.format("Expected minimum compatible layout version " +
       "%d for feature %s.", baseLV, f), baseLV,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-14002. TestLayoutVersion#testNameNodeFeatureMinimumCompatibleLayoutVersions fails. Contributed by Takanobu Asanuma.

2018-10-18 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 91c916e20 -> c0f618f3d


HDFS-14002. TestLayoutVersion#testNameNodeFeatureMinimumCompatibleLayoutVersions fails. Contributed by Takanobu Asanuma.

(cherry picked from commit be1cffb0854cb28967beb062e9db7d61e6eeff1d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0f618f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0f618f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0f618f3

Branch: refs/heads/branch-3.2
Commit: c0f618f3d58eaa296c81115d34838d07c3bbbed5
Parents: 91c916e
Author: Inigo Goiri 
Authored: Thu Oct 18 15:25:53 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 18 17:14:56 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0f618f3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
index e944b81..2c9905d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
@@ -127,7 +127,8 @@ public class TestLayoutVersion {
 NameNodeLayoutVersion.Feature.TRUNCATE,
 NameNodeLayoutVersion.Feature.APPEND_NEW_BLOCK,
 NameNodeLayoutVersion.Feature.QUOTA_BY_STORAGE_TYPE,
-NameNodeLayoutVersion.Feature.ERASURE_CODING);
+NameNodeLayoutVersion.Feature.ERASURE_CODING,
+NameNodeLayoutVersion.Feature.EXPANDED_STRING_TABLE);
 for (LayoutFeature f : compatibleFeatures) {
   assertEquals(String.format("Expected minimum compatible layout version " +
       "%d for feature %s.", baseLV, f), baseLV,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-14009. HttpFS: FileStatus#setSnapShotEnabledFlag throws InvocationTargetException when attribute set is emptySet. Contributed by Siyao Meng.

2018-10-18 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 b34c650a4 -> 6a7bf9f85


HDFS-14009. HttpFS: FileStatus#setSnapShotEnabledFlag throws InvocationTargetException when attribute set is emptySet. Contributed by Siyao Meng.

Signed-off-by: Wei-Chiu Chuang 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a7bf9f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a7bf9f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a7bf9f8

Branch: refs/heads/branch-3.0
Commit: 6a7bf9f850b560eb01b2db78d04446c74b443b8f
Parents: b34c650
Author: Siyao Meng 
Authored: Thu Oct 18 17:02:04 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Oct 18 17:02:38 2018 -0700

--
 .../src/main/java/org/apache/hadoop/fs/FileStatus.java | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a7bf9f8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index bdfbd20..4d953c6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -360,7 +360,11 @@ public class FileStatus implements Writable, Comparable<Object>,
*/
   public void setSnapShotEnabledFlag(boolean isSnapShotEnabled) {
 if (isSnapShotEnabled) {
-  attr.add(AttrFlags.SNAPSHOT_ENABLED);
+  if (attr == NONE) {
+attr = EnumSet.of(AttrFlags.SNAPSHOT_ENABLED);
+  } else {
+attr.add(AttrFlags.SNAPSHOT_ENABLED);
+  }
 } else {
   attr.remove(AttrFlags.SNAPSHOT_ENABLED);
 }
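
The guard exists because FileStatus keeps one shared immutable empty set
(NONE) as its default attribute value, and add() on that shared set throws;
HttpFS then surfaces the failure as an InvocationTargetException via
reflection. A self-contained demo of the copy-on-write guard, with local
stand-ins for AttrFlags and NONE:

import java.util.Collections;
import java.util.EnumSet;
import java.util.Set;

public class SharedDefaultSetDemo {
  enum AttrFlags { SNAPSHOT_ENABLED }

  // Shared immutable default, like FileStatus.NONE.
  static final Set<AttrFlags> NONE = Collections.emptySet();

  private Set<AttrFlags> attr = NONE;

  void setSnapShotEnabledFlag(boolean enabled) {
    if (enabled) {
      if (attr == NONE) {
        // Copy-on-write: never mutate the shared default in place.
        attr = EnumSet.of(AttrFlags.SNAPSHOT_ENABLED);
      } else {
        attr.add(AttrFlags.SNAPSHOT_ENABLED);
      }
    } else {
      // remove() on the shared empty set is a harmless no-op (returns false).
      attr.remove(AttrFlags.SNAPSHOT_ENABLED);
    }
  }

  public static void main(String[] args) {
    SharedDefaultSetDemo s = new SharedDefaultSetDemo();
    s.setSnapShotEnabledFlag(true);   // threw UnsupportedOperationException before the guard
    System.out.println(s.attr);       // [SNAPSHOT_ENABLED]
  }
}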


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8618. Added detection for non-upgradable service. Contributed by Chandni Singh

2018-10-18 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 1810f2392 -> 91c916e20


YARN-8618. Added detection for non-upgradable service.
   Contributed by Chandni Singh

(cherry picked from commit 66f059ed1db93c0a86e86c64636013001169a677)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91c916e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91c916e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91c916e2

Branch: refs/heads/branch-3.2
Commit: 91c916e20915c264cdc26dbdc82db9104a15b60a
Parents: 1810f23
Author: Eric Yang 
Authored: Thu Oct 18 19:59:11 2018 -0400
Committer: Eric Yang 
Committed: Thu Oct 18 20:00:50 2018 -0400

--
 .../yarn/service/client/ServiceClient.java  | 17 ++-
 .../yarn/service/client/TestServiceClient.java  | 23 
 2 files changed, 39 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91c916e2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 23db57e..c71ed03 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -238,7 +238,22 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
   LOG.error(message);
   throw new YarnException(message);
 }
-
+boolean foundNotNeverComp = false;
+for (Component comp : persistedService.getComponents()) {
+  // If restart policy of any component is not NEVER then upgrade is
+  // allowed.
+  if (!comp.getRestartPolicy().equals(Component.RestartPolicyEnum.NEVER)) {
+foundNotNeverComp = true;
+break;
+  }
+}
+if (!foundNotNeverComp) {
+  String message = "All the components of the service " + service.getName()
+  + " have " + Component.RestartPolicyEnum.NEVER + " restart policy, " 
+
+  "so it cannot be upgraded.";
+  LOG.error(message);
+  throw new YarnException(message);
+}
 Service liveService = getStatus(service.getName());
 if (!liveService.getState().equals(ServiceState.STABLE)) {
   String message = service.getName() + " is at " + liveService.getState()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/91c916e2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java
index 700655c..beb3339 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java
@@ -149,6 +149,29 @@ public class TestServiceClient {
 client.stop();
   }
 
+  @Test
+  public void testUpgradeDisabledWhenAllCompsHaveNeverRestartPolicy()
+  throws Exception {
+Service service = createService();
+service.getComponents().forEach(comp ->
+comp.setRestartPolicy(Component.RestartPolicyEnum.NEVER));
+
+ServiceClient client = MockServiceClient.create(rule, service, true);
+
+//upgrade the service
+service.setVersion("v2");
+try {
+  client.initiateUpgrade(service);
+} catch (YarnException ex) {
+  Assert.assertEquals("All the components of the service " +
+  service.getName() + " have " + Component.RestartPolicyEnum.NEVER
+  + " restart 

hadoop git commit: YARN-8618. Added detection for non-upgradable service. Contributed by Chandni Singh

2018-10-18 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk be1cffb08 -> 66f059ed1


YARN-8618. Added detection for non-upgradable service.
   Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66f059ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66f059ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66f059ed

Branch: refs/heads/trunk
Commit: 66f059ed1db93c0a86e86c64636013001169a677
Parents: be1cffb
Author: Eric Yang 
Authored: Thu Oct 18 19:59:11 2018 -0400
Committer: Eric Yang 
Committed: Thu Oct 18 19:59:11 2018 -0400

--
 .../yarn/service/client/ServiceClient.java  | 17 ++-
 .../yarn/service/client/TestServiceClient.java  | 23 
 2 files changed, 39 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66f059ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 23db57e..c71ed03 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -238,7 +238,22 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
   LOG.error(message);
   throw new YarnException(message);
 }
-
+boolean foundNotNeverComp = false;
+for (Component comp : persistedService.getComponents()) {
+  // If restart policy of any component is not NEVER then upgrade is
+  // allowed.
+  if (!comp.getRestartPolicy().equals(Component.RestartPolicyEnum.NEVER)) {
+foundNotNeverComp = true;
+break;
+  }
+}
+if (!foundNotNeverComp) {
+  String message = "All the components of the service " + service.getName()
+  + " have " + Component.RestartPolicyEnum.NEVER + " restart policy, " 
+
+  "so it cannot be upgraded.";
+  LOG.error(message);
+  throw new YarnException(message);
+}
 Service liveService = getStatus(service.getName());
 if (!liveService.getState().equals(ServiceState.STABLE)) {
   String message = service.getName() + " is at " + liveService.getState()
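
For reference, the loop-and-flag guard above reduces to a single anyMatch over
the component list. A standalone sketch with stand-in types (not the actual
YARN service API):

import java.util.Arrays;
import java.util.List;

final class UpgradeGuardSketch {
  enum RestartPolicy { ALWAYS, ON_FAILURE, NEVER }

  static final class Component {
    final RestartPolicy restartPolicy;
    Component(RestartPolicy p) { this.restartPolicy = p; }
  }

  // Upgradable iff at least one component has a non-NEVER restart policy.
  static boolean isUpgradable(List<Component> comps) {
    return comps.stream()
        .anyMatch(c -> c.restartPolicy != RestartPolicy.NEVER);
  }

  public static void main(String[] args) {
    System.out.println(isUpgradable(Arrays.asList(
        new Component(RestartPolicy.NEVER),
        new Component(RestartPolicy.ALWAYS))));   // true
    System.out.println(isUpgradable(Arrays.asList(
        new Component(RestartPolicy.NEVER))));    // false -> reject upgrade
  }
}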

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66f059ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java
index 700655c..beb3339 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java
@@ -149,6 +149,29 @@ public class TestServiceClient {
 client.stop();
   }
 
+  @Test
+  public void testUpgradeDisabledWhenAllCompsHaveNeverRestartPolicy()
+  throws Exception {
+Service service = createService();
+service.getComponents().forEach(comp ->
+comp.setRestartPolicy(Component.RestartPolicyEnum.NEVER));
+
+ServiceClient client = MockServiceClient.create(rule, service, true);
+
+//upgrade the service
+service.setVersion("v2");
+try {
+  client.initiateUpgrade(service);
+} catch (YarnException ex) {
+  Assert.assertEquals("All the components of the service " +
+  service.getName() + " have " + Component.RestartPolicyEnum.NEVER
+  + " restart policy, so it cannot be upgraded.",
+  ex.getMessage());
+  return;

hadoop git commit: HDFS-14002. TestLayoutVersion#testNameNodeFeatureMinimumCompatibleLayoutVersions fails. Contributed by Takanobu Asanuma.

2018-10-18 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/trunk 098d0b86f -> be1cffb08


HDFS-14002. TestLayoutVersion#testNameNodeFeatureMinimumCompatibleLayoutVersions fails. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be1cffb0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be1cffb0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be1cffb0

Branch: refs/heads/trunk
Commit: be1cffb0854cb28967beb062e9db7d61e6eeff1d
Parents: 098d0b8
Author: Inigo Goiri 
Authored: Thu Oct 18 15:25:53 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 18 15:25:53 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be1cffb0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
index e944b81..2c9905d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
@@ -127,7 +127,8 @@ public class TestLayoutVersion {
 NameNodeLayoutVersion.Feature.TRUNCATE,
 NameNodeLayoutVersion.Feature.APPEND_NEW_BLOCK,
 NameNodeLayoutVersion.Feature.QUOTA_BY_STORAGE_TYPE,
-NameNodeLayoutVersion.Feature.ERASURE_CODING);
+NameNodeLayoutVersion.Feature.ERASURE_CODING,
+NameNodeLayoutVersion.Feature.EXPANDED_STRING_TABLE);
 for (LayoutFeature f : compatibleFeatures) {
   assertEquals(String.format("Expected minimum compatible layout version " +
       "%d for feature %s.", baseLV, f), baseLV,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-14005. RBF: Web UI update to bootstrap-3.3.7. Contributed by Inigo Goiri.

2018-10-18 Thread gifuma
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 7252f8e11 -> 1810f2392


HDFS-14005. RBF: Web UI update to bootstrap-3.3.7. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1810f239
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1810f239
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1810f239

Branch: refs/heads/branch-3.2
Commit: 1810f2392f02392a9b333511d454ac11e1f30fc7
Parents: 7252f8e
Author: Giovanni Matteo Fumarola 
Authored: Thu Oct 18 14:25:17 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Thu Oct 18 14:35:20 2018 -0700

--
 .../src/main/webapps/router/federationhealth.js  | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1810f239/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
index 6779b61..6311a80 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
@@ -156,7 +156,7 @@
   $('#tab-namenode').html(out);
   $('#ui-tabs a[href="#tab-namenode"]').tab('show');
 });
-  })).error(ajax_error_handler);
+  })).fail(ajax_error_handler);
   }
 
   function load_router_info() {
@@ -220,7 +220,7 @@
   $('#tab-router').html(out);
   $('#ui-tabs a[href="#tab-router"]').tab('show');
 });
-  })).error(ajax_error_handler);
+  })).fail(ajax_error_handler);
   }
 
  // TODO Copied directly from dfshealth.js; is there a way to import this function?
@@ -306,7 +306,7 @@
 ]});
   $('#ui-tabs a[href="#tab-datanode"]').tab('show');
 });
-  })).error(ajax_error_handler);
+  })).fail(ajax_error_handler);
   }
 
   function load_mount_table() {
@@ -337,7 +337,7 @@
   $('#tab-mounttable').html(out);
   $('#ui-tabs a[href="#tab-mounttable"]').tab('show');
 });
-  })).error(ajax_error_handler);
+  })).fail(ajax_error_handler);
   }
 
   function toTitleCase(str) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-14005. RBF: Web UI update to bootstrap-3.3.7. Contributed by Inigo Goiri.

2018-10-18 Thread gifuma
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 d381f3221 -> 92c6bde6b


HDFS-14005. RBF: Web UI update to bootstrap-3.3.7. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92c6bde6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92c6bde6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92c6bde6

Branch: refs/heads/branch-3.1
Commit: 92c6bde6be10dbb5fd41542111bcab26b879d7e8
Parents: d381f32
Author: Giovanni Matteo Fumarola 
Authored: Thu Oct 18 14:25:17 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Thu Oct 18 14:28:19 2018 -0700

--
 .../src/main/webapps/router/federationhealth.js  | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92c6bde6/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
index 6779b61..6311a80 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
@@ -156,7 +156,7 @@
   $('#tab-namenode').html(out);
   $('#ui-tabs a[href="#tab-namenode"]').tab('show');
 });
-  })).error(ajax_error_handler);
+  })).fail(ajax_error_handler);
   }
 
   function load_router_info() {
@@ -220,7 +220,7 @@
   $('#tab-router').html(out);
   $('#ui-tabs a[href="#tab-router"]').tab('show');
 });
-  })).error(ajax_error_handler);
+  })).fail(ajax_error_handler);
   }
 
  // TODO Copied directly from dfshealth.js; is there a way to import this function?
@@ -306,7 +306,7 @@
 ]});
   $('#ui-tabs a[href="#tab-datanode"]').tab('show');
 });
-  })).error(ajax_error_handler);
+  })).fail(ajax_error_handler);
   }
 
   function load_mount_table() {
@@ -337,7 +337,7 @@
   $('#tab-mounttable').html(out);
   $('#ui-tabs a[href="#tab-mounttable"]').tab('show');
 });
-  })).error(ajax_error_handler);
+  })).fail(ajax_error_handler);
   }
 
   function toTitleCase(str) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-14005. RBF: Web UI update to bootstrap-3.3.7. Contributed by Inigo Goiri.

2018-10-18 Thread gifuma
Repository: hadoop
Updated Branches:
  refs/heads/trunk beb850d8f -> 098d0b86f


HDFS-14005. RBF: Web UI update to bootstrap-3.3.7. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/098d0b86
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/098d0b86
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/098d0b86

Branch: refs/heads/trunk
Commit: 098d0b86fea07c00d87851bdbb942e0314f436af
Parents: beb850d
Author: Giovanni Matteo Fumarola 
Authored: Thu Oct 18 14:25:17 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Thu Oct 18 14:25:17 2018 -0700

--
 .../src/main/webapps/router/federationhealth.js  | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/098d0b86/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
index 6779b61..6311a80 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
@@ -156,7 +156,7 @@
   $('#tab-namenode').html(out);
   $('#ui-tabs a[href="#tab-namenode"]').tab('show');
 });
-  })).error(ajax_error_handler);
+  })).fail(ajax_error_handler);
   }
 
   function load_router_info() {
@@ -220,7 +220,7 @@
   $('#tab-router').html(out);
   $('#ui-tabs a[href="#tab-router"]').tab('show');
 });
-  })).error(ajax_error_handler);
+  })).fail(ajax_error_handler);
   }
 
  // TODO Copied directly from dfshealth.js; is there a way to import this function?
@@ -306,7 +306,7 @@
 ]});
   $('#ui-tabs a[href="#tab-datanode"]').tab('show');
 });
-  })).error(ajax_error_handler);
+  })).fail(ajax_error_handler);
   }
 
   function load_mount_table() {
@@ -337,7 +337,7 @@
   $('#tab-mounttable').html(out);
   $('#ui-tabs a[href="#tab-mounttable"]').tab('show');
 });
-  })).error(ajax_error_handler);
+  })).fail(ajax_error_handler);
   }
 
   function toTitleCase(str) {





hadoop git commit: YARN-8899. Fixed minicluster dependency on yarn-server-web-proxy. Contributed by Robert Kanter

2018-10-18 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk f8d61b9fc -> beb850d8f


YARN-8899.  Fixed minicluster dependency on yarn-server-web-proxy.
Contributed by Robert Kanter


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/beb850d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/beb850d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/beb850d8

Branch: refs/heads/trunk
Commit: beb850d8f7f1fefa7a6d9502df2b4a4eea372523
Parents: f8d61b9
Author: Eric Yang 
Authored: Thu Oct 18 15:46:15 2018 -0400
Committer: Eric Yang 
Committed: Thu Oct 18 15:46:15 2018 -0400

--
 hadoop-minicluster/pom.xml | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/beb850d8/hadoop-minicluster/pom.xml
--
diff --git a/hadoop-minicluster/pom.xml b/hadoop-minicluster/pom.xml
index 34f5b0e..a138f87 100644
--- a/hadoop-minicluster/pom.xml
+++ b/hadoop-minicluster/pom.xml
@@ -104,6 +104,12 @@
       <artifactId>hadoop-mapreduce-client-hs</artifactId>
       <scope>compile</scope>
     </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-web-proxy</artifactId>
+      <scope>compile</scope>
+    </dependency>
   </dependencies>
 
 





[2/2] hadoop git commit: YARN-6098. Add documentation for Delete Queue. (Suma Shivaprasad via wangda)

2018-10-18 Thread wangda
YARN-6098. Add documentation for Delete Queue. (Suma Shivaprasad via wangda)

Change-Id: I23bf7ca0acbe1168a7f805ceccdd62fe41bfeb35
(cherry picked from commit bfb88b10f46a265aa38ab3e1d87b6a0a99d94be8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d381f322
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d381f322
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d381f322

Branch: refs/heads/branch-3.1
Commit: d381f32219dbe27b629ba3bea3424f9648287c97
Parents: 46baafe
Author: Wangda Tan 
Authored: Thu Oct 18 10:55:39 2018 -0700
Committer: Wangda Tan 
Committed: Thu Oct 18 12:29:28 2018 -0700

--
 .../src/site/markdown/CapacityScheduler.md| 14 +-
 1 file changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d381f322/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index 9857010..75d7954 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -54,7 +54,7 @@ The `CapacityScheduler` supports the following features:
 
 * **Operability**
 
-* Runtime Configuration - The queue definitions and properties such as 
capacity, ACLs can be changed, at runtime, by administrators in a secure manner 
to minimize disruption to users. Also, a console is provided for users and 
administrators to view current allocation of resources to various queues in the 
system. Administrators can *add additional queues* at runtime, but queues 
cannot be *deleted* at runtime.
+* Runtime Configuration - The queue definitions and properties such as 
capacity, ACLs can be changed, at runtime, by administrators in a secure manner 
to minimize disruption to users. Also, a console is provided for users and 
administrators to view current allocation of resources to various queues in the 
system. Administrators can *add additional queues* at runtime, but queues 
cannot be *deleted* at runtime unless the queue is STOPPED and has no 
pending/running apps.
 
 * Drain applications - Administrators can *stop* queues at runtime to 
ensure that while existing applications run to completion, no new applications 
can be submitted. If a queue is in `STOPPED` state, new applications cannot be 
submitted to *itself* or *any of its child queues*. Existing applications 
continue to completion, thus the queue can be *drained* gracefully. 
Administrators can also *start* the stopped queues.
 
@@ -430,6 +430,18 @@ Changing queue/scheduler properties and adding/removing 
queues can be done in tw
 $ vi $HADOOP_CONF_DIR/capacity-scheduler.xml
 $ $HADOOP_YARN_HOME/bin/yarn rmadmin -refreshQueues
 
+#### Deleting queue via file
+
+  Step 1: Stop the queue
+
+  Before deleting a leaf queue, the leaf queue should not have any running/pending apps and has to be STOPPED by changing `yarn.scheduler.capacity.<queue-path>.state`. See the
+  [Queue Administration & Permissions](CapacityScheduler.html#Queue_Properties) section.
+  Before deleting a parent queue, all its child queues should not have any running/pending apps and have to be STOPPED. The parent queue also needs to be STOPPED.
+
+  Step 2: Delete the queue
+
+  Remove the queue configurations from the file and run refresh as described above.
+
 ### Changing queue configuration via API
 
   Editing by API uses a backing store for the scheduler configuration. To 
enable this, the following parameters can be configured in yarn-site.xml.
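
A worked example of the stop-then-delete flow above. This sketch is not part
of the patch: the queue path root.alpha is made up, and persisting the edited
property back to capacity-scheduler.xml plus running
`yarn rmadmin -refreshQueues` after each step is assumed.

// Sketch: flip a hypothetical queue root.alpha to STOPPED before deletion.
// Only the property-name pattern comes from the documentation above.
import org.apache.hadoop.conf.Configuration;

public class StopQueueSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.addResource("capacity-scheduler.xml");  // loaded from the classpath
    // Step 1: mark the queue STOPPED, persist the file, refresh the queues,
    // and wait until the queue has drained all running/pending apps.
    conf.set("yarn.scheduler.capacity.root.alpha.state", "STOPPED");
    // Step 2: remove all yarn.scheduler.capacity.root.alpha.* entries from
    // the file and refresh again to delete the queue.
    System.out.println(conf.get("yarn.scheduler.capacity.root.alpha.state"));
  }
}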





[1/2] hadoop git commit: YARN-8896. Limit the maximum number of container assignments per heartbeat. (Zhankun Tang via wangda)

2018-10-18 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 3cb52958b -> d381f3221


YARN-8896. Limit the maximum number of container assignments per heartbeat. 
(Zhankun Tang via wangda)

Change-Id: I6e72f8362bd7f5c2a844cb9e3c4732492314e9f1
(cherry picked from commit 780be14f07df2a3ed6273b96ae857c278fd72718)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46baafed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46baafed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46baafed

Branch: refs/heads/branch-3.1
Commit: 46baafedf13446f5c1b973d667f437bf7158d3c2
Parents: 3cb5295
Author: Wangda Tan 
Authored: Thu Oct 18 10:58:21 2018 -0700
Committer: Wangda Tan 
Committed: Thu Oct 18 12:29:19 2018 -0700

--
 .../scheduler/capacity/CapacitySchedulerConfiguration.java  | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46baafed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 5c9904b..547d131 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -333,8 +333,11 @@ public class CapacitySchedulerConfiguration extends 
ReservationSchedulerConfigur
   public static final String MAX_ASSIGN_PER_HEARTBEAT = PREFIX
   + "per-node-heartbeat.maximum-container-assignments";
 
+  /**
+   * Avoid the potential risk that greedily assigning many containers in a
+   * single heartbeat may introduce.
+   */
   @Private
-  public static final int DEFAULT_MAX_ASSIGN_PER_HEARTBEAT = -1;
+  public static final int DEFAULT_MAX_ASSIGN_PER_HEARTBEAT = 100;
 
   /** Configuring absolute min/max resources in a queue. **/
   @Private
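
For context, this change caps the containers handed out per node heartbeat at
100 by default instead of unlimited (-1). A minimal sketch of the bounding
pattern, not the scheduler's actual allocation loop; the class and the
tryAssignOneContainer() helper are stand-ins:

import org.apache.hadoop.conf.Configuration;

final class HeartbeatCapSketch {
  // Stand-in for the scheduler's per-node attempt; returns false when the
  // node is full or no pending request fits.
  private boolean tryAssignOneContainer() { return false; }

  int assignUpToCap(Configuration conf) {
    // A value <= 0 means "no limit", matching the old -1 default.
    int maxAssign = conf.getInt(
        "yarn.scheduler.capacity"
            + ".per-node-heartbeat.maximum-container-assignments", 100);
    int assigned = 0;
    while (maxAssign <= 0 || assigned < maxAssign) {
      if (!tryAssignOneContainer()) {
        break;  // nothing left to schedule in this heartbeat
      }
      assigned++;
    }
    return assigned;
  }
}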





[3/3] hadoop git commit: YARN-8896. Limit the maximum number of container assignments per heartbeat. (Zhankun Tang via wangda)

2018-10-18 Thread wangda
YARN-8896. Limit the maximum number of container assignments per heartbeat. 
(Zhankun Tang via wangda)

Change-Id: I6e72f8362bd7f5c2a844cb9e3c4732492314e9f1
(cherry picked from commit 780be14f07df2a3ed6273b96ae857c278fd72718)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7252f8e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7252f8e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7252f8e1

Branch: refs/heads/branch-3.2
Commit: 7252f8e117bacdd3b746021d4b5d6b9c944813bd
Parents: 5f7ed04
Author: Wangda Tan 
Authored: Thu Oct 18 10:58:21 2018 -0700
Committer: Wangda Tan 
Committed: Thu Oct 18 12:12:19 2018 -0700

--
 .../scheduler/capacity/CapacitySchedulerConfiguration.java  | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7252f8e1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index b937ae7..08380f5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -336,8 +336,11 @@ public class CapacitySchedulerConfiguration extends 
ReservationSchedulerConfigur
   public static final String MAX_ASSIGN_PER_HEARTBEAT = PREFIX
   + "per-node-heartbeat.maximum-container-assignments";
 
+  /**
+   * Avoid the potential risk that greedily assigning many containers in a
+   * single heartbeat may introduce.
+   */
   @Private
-  public static final int DEFAULT_MAX_ASSIGN_PER_HEARTBEAT = -1;
+  public static final int DEFAULT_MAX_ASSIGN_PER_HEARTBEAT = 100;
 
   /** Configuring absolute min/max resources in a queue. **/
   @Private





[2/3] hadoop git commit: YARN-8456. Fix a configuration handling bug when user leave FPGA discover executable path configuration default but set OpenCL SDK path environment variable. (Zhankun Tang via

2018-10-18 Thread wangda
YARN-8456. Fix a configuration handling bug when user leave FPGA discover 
executable path configuration default but set OpenCL SDK path environment 
variable. (Zhankun Tang via wangda)

Change-Id: Iff150ea98ba0c60d448474fd940eb121afce6965
(cherry picked from commit a457a8951a1b35f06811c40443ca44bb9c698c30)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f7ed043
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f7ed043
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f7ed043

Branch: refs/heads/branch-3.2
Commit: 5f7ed043c8e8f9d964a719d076a9d3d207d4cea7
Parents: a7785d2
Author: Wangda Tan 
Authored: Thu Oct 18 10:57:11 2018 -0700
Committer: Wangda Tan 
Committed: Thu Oct 18 12:12:14 2018 -0700

--
 .../fpga/IntelFpgaOpenclPlugin.java |  4 +-
 .../resourceplugin/fpga/TestFpgaDiscoverer.java | 64 ++--
 2 files changed, 61 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f7ed043/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java
index 2d6cf6f..f8fb6d8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java
@@ -124,7 +124,9 @@ public class IntelFpgaOpenclPlugin implements 
AbstractFpgaVendorPlugin {
   } else {
 binaryPath = new File(pluginDefaultPreferredPath + "/bin", 
pluginDefaultBinaryName);
 if (binaryPath.exists()) {
-  pathToExecutable = pluginDefaultPreferredPath;
+  pathToExecutable = binaryPath.getAbsolutePath();
+  LOG.info("Succeed in finding FPGA discoverer executable: " +
+  pathToExecutable);
 } else {
   pathToExecutable = pluginDefaultBinaryName;
   LOG.warn("Failed to find FPGA discoverer executable in " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f7ed043/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/TestFpgaDiscoverer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/TestFpgaDiscoverer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/TestFpgaDiscoverer.java
index 87fb4e9..d5bcdb3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/TestFpgaDiscoverer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/TestFpgaDiscoverer.java
@@ -32,9 +32,8 @@ import org.junit.Test;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
+import java.lang.reflect.Field;
+import java.util.*;
 
 import static org.mockito.Matchers.anyInt;
 import static org.mockito.Matchers.anyString;
@@ -60,8 +59,41 @@ public class TestFpgaDiscoverer {
 f.mkdirs();
   }
 
+  // A dirty hack to modify the env of the current JVM itself - Dirty, but
+  // should be okay for testing.
+  @SuppressWarnings({ "rawtypes", "unchecked" })
+  private static void setNewEnvironmentHack(Map<String, String> newenv)
+  throws Exception {
+try {
+  Class cl = Class.forName("java.lang.ProcessEnvironment");
+  Field field = cl.getDeclaredField("theEnvironment");
+  field.setAccessible(true);
+  Map env = (Map) 
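
The message is cut off here by the archive. For reference, a self-contained
sketch of the same test-only trick: theEnvironment and
theCaseInsensitiveEnvironment are private JDK 8 internals, so this is
deliberately fragile and breaks under newer, strongly encapsulated JDKs.

import java.lang.reflect.Field;
import java.util.Collections;
import java.util.Map;

final class EnvHackSketch {
  // Test-only: mutate the JVM's own view of the process environment.
  @SuppressWarnings({"rawtypes", "unchecked"})
  static void setEnv(Map<String, String> newenv) throws Exception {
    try {
      // Windows JDK 8 keeps two writable maps inside ProcessEnvironment.
      Class<?> pe = Class.forName("java.lang.ProcessEnvironment");
      Field theEnv = pe.getDeclaredField("theEnvironment");
      theEnv.setAccessible(true);
      ((Map) theEnv.get(null)).putAll(newenv);
      Field ciEnv = pe.getDeclaredField("theCaseInsensitiveEnvironment");
      ciEnv.setAccessible(true);
      ((Map) ciEnv.get(null)).putAll(newenv);
    } catch (NoSuchFieldException e) {
      // Unix JDK 8: unwrap the UnmodifiableMap returned by System.getenv().
      Map<String, String> env = System.getenv();
      for (Class cl : Collections.class.getDeclaredClasses()) {
        if ("java.util.Collections$UnmodifiableMap".equals(cl.getName())) {
          Field m = cl.getDeclaredField("m");
          m.setAccessible(true);
          ((Map) m.get(env)).putAll(newenv);
        }
      }
    }
  }
}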

[1/3] hadoop git commit: YARN-6098. Add documentation for Delete Queue. (Suma Shivaprasad via wangda)

2018-10-18 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 4681e17a3 -> 7252f8e11


YARN-6098. Add documentation for Delete Queue. (Suma Shivaprasad via wangda)

Change-Id: I23bf7ca0acbe1168a7f805ceccdd62fe41bfeb35
(cherry picked from commit bfb88b10f46a265aa38ab3e1d87b6a0a99d94be8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7785d27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7785d27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7785d27

Branch: refs/heads/branch-3.2
Commit: a7785d2764767ffed7acb0cef7d2894c9a43618a
Parents: 4681e17
Author: Wangda Tan 
Authored: Thu Oct 18 10:55:39 2018 -0700
Committer: Wangda Tan 
Committed: Thu Oct 18 12:12:09 2018 -0700

--
 .../src/site/markdown/CapacityScheduler.md| 14 +-
 1 file changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7785d27/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index 5ac1d0a..c2d4ce4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -54,7 +54,7 @@ The `CapacityScheduler` supports the following features:
 
 * **Operability**
 
-* Runtime Configuration - The queue definitions and properties such as 
capacity, ACLs can be changed, at runtime, by administrators in a secure manner 
to minimize disruption to users. Also, a console is provided for users and 
administrators to view current allocation of resources to various queues in the 
system. Administrators can *add additional queues* at runtime, but queues 
cannot be *deleted* at runtime.
+* Runtime Configuration - The queue definitions and properties such as 
capacity, ACLs can be changed, at runtime, by administrators in a secure manner 
to minimize disruption to users. Also, a console is provided for users and 
administrators to view current allocation of resources to various queues in the 
system. Administrators can *add additional queues* at runtime, but queues 
cannot be *deleted* at runtime unless the queue is STOPPED and has no 
pending/running apps.
 
 * Drain applications - Administrators can *stop* queues at runtime to 
ensure that while existing applications run to completion, no new applications 
can be submitted. If a queue is in `STOPPED` state, new applications cannot be 
submitted to *itself* or *any of its child queues*. Existing applications 
continue to completion, thus the queue can be *drained* gracefully. 
Administrators can also *start* the stopped queues.
 
@@ -441,6 +441,18 @@ Changing queue/scheduler properties and adding/removing 
queues can be done in tw
 $ vi $HADOOP_CONF_DIR/capacity-scheduler.xml
 $ $HADOOP_YARN_HOME/bin/yarn rmadmin -refreshQueues
 
+#### Deleting queue via file
+
+  Step 1: Stop the queue
+
+  Before deleting a leaf queue, the leaf queue should not have any running/pending apps and has to be STOPPED by changing `yarn.scheduler.capacity.<queue-path>.state`. See the
+  [Queue Administration & Permissions](CapacityScheduler.html#Queue_Properties) section.
+  Before deleting a parent queue, all its child queues should not have any running/pending apps and have to be STOPPED. The parent queue also needs to be STOPPED.
+
+  Step 2: Delete the queue
+
+  Remove the queue configurations from the file and run refresh as described above.
+
 ### Changing queue configuration via API
 
   Editing by API uses a backing store for the scheduler configuration. To 
enable this, the following parameters can be configured in yarn-site.xml.





hadoop git commit: HDDS-690. Javadoc build fails in hadoop-ozone. Contributed by Takanobu Asanuma.

2018-10-18 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 ca1136fb4 -> fac987b9a


HDDS-690. Javadoc build fails in hadoop-ozone.
Contributed by Takanobu Asanuma.

(cherry picked from commit f8d61b9fc2259e7bfd503924a0c2e41a0955aaed)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fac987b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fac987b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fac987b9

Branch: refs/heads/ozone-0.3
Commit: fac987b9a1a04acdd7ce9394fb892a9c6ca1f50a
Parents: ca1136f
Author: Anu Engineer 
Authored: Thu Oct 18 12:14:54 2018 -0700
Committer: Anu Engineer 
Committed: Thu Oct 18 12:16:30 2018 -0700

--
 .../java/org/apache/hadoop/ozone/client/BucketArgs.java |  2 +-
 .../apache/hadoop/ozone/om/helpers/OmBucketArgs.java|  4 ++--
 .../apache/hadoop/ozone/om/helpers/OmBucketInfo.java|  2 +-
 .../org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java   |  1 -
 .../apache/hadoop/ozone/web/handlers/VolumeHandler.java |  6 --
 .../apache/hadoop/ozone/web/interfaces/Accounting.java  |  2 +-
 .../org/apache/hadoop/ozone/web/interfaces/Volume.java  | 12 +++-
 .../java/org/apache/hadoop/ozone/om/KeyManager.java |  2 +-
 .../org/apache/hadoop/ozone/om/OzoneManagerLock.java|  7 ---
 .../apache/hadoop/ozone/om/ServiceListJSONServlet.java  |  4 ++--
 .../genconf/GenerateOzoneRequiredConfigurations.java|  2 +-
 11 files changed, 24 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac987b9/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
index d86062b..e68e9f1 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
@@ -74,7 +74,7 @@ public final class BucketArgs {
 
   /**
* Returns the ACL's associated with this bucket.
-   * @return List<OzoneAcl>
+   * @return {@literal List<OzoneAcl>}
*/
   public List<OzoneAcl> getAcls() {
 return acls;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac987b9/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
--
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
index 3819131..a3ae7e5 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
@@ -100,7 +100,7 @@ public final class OmBucketArgs implements Auditable {
 
   /**
* Returns the ACL's that are to be added.
-   * @return List<OzoneAcl>
+   * @return {@literal List<OzoneAcl>}
*/
   public List<OzoneAcl> getAddAcls() {
 return addAcls;
@@ -108,7 +108,7 @@ public final class OmBucketArgs implements Auditable {
 
   /**
* Returns the ACL's that are to be removed.
-   * @return List<OzoneAcl>
+   * @return {@literal List<OzoneAcl>}
*/
   public List<OzoneAcl> getRemoveAcls() {
 return removeAcls;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac987b9/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
--
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
index 4418738..0bff1f7 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
@@ -101,7 +101,7 @@ public final class OmBucketInfo implements Auditable {
 
   /**
* Returns the ACL's associated with this bucket.
-   * @return List<OzoneAcl>
+   * @return {@literal List<OzoneAcl>}
*/
   public List<OzoneAcl> getAcls() {
 return acls;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac987b9/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
--
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index 50f4b17..c9a7e48 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ 

hadoop git commit: HDDS-690. Javadoc build fails in hadoop-ozone. Contributed by Takanobu Asanuma.

2018-10-18 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk c64018026 -> f8d61b9fc


HDDS-690. Javadoc build fails in hadoop-ozone.
Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8d61b9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8d61b9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8d61b9f

Branch: refs/heads/trunk
Commit: f8d61b9fc2259e7bfd503924a0c2e41a0955aaed
Parents: c640180
Author: Anu Engineer 
Authored: Thu Oct 18 12:14:54 2018 -0700
Committer: Anu Engineer 
Committed: Thu Oct 18 12:14:54 2018 -0700

--
 .../java/org/apache/hadoop/ozone/client/BucketArgs.java |  2 +-
 .../apache/hadoop/ozone/om/helpers/OmBucketArgs.java|  4 ++--
 .../apache/hadoop/ozone/om/helpers/OmBucketInfo.java|  2 +-
 .../org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java   |  1 -
 .../apache/hadoop/ozone/web/handlers/VolumeHandler.java |  6 --
 .../apache/hadoop/ozone/web/interfaces/Accounting.java  |  2 +-
 .../org/apache/hadoop/ozone/web/interfaces/Volume.java  | 12 +++-
 .../java/org/apache/hadoop/ozone/om/KeyManager.java |  2 +-
 .../org/apache/hadoop/ozone/om/OzoneManagerLock.java|  7 ---
 .../apache/hadoop/ozone/om/ServiceListJSONServlet.java  |  4 ++--
 .../genconf/GenerateOzoneRequiredConfigurations.java|  2 +-
 11 files changed, 24 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8d61b9f/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
index d86062b..e68e9f1 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
@@ -74,7 +74,7 @@ public final class BucketArgs {
 
   /**
* Returns the ACL's associated with this bucket.
-   * @return List<OzoneAcl>
+   * @return {@literal List<OzoneAcl>}
*/
   public List<OzoneAcl> getAcls() {
 return acls;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8d61b9f/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
--
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
index 3819131..a3ae7e5 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
@@ -100,7 +100,7 @@ public final class OmBucketArgs implements Auditable {
 
   /**
* Returns the ACL's that are to be added.
-   * @return List<OzoneAcl>
+   * @return {@literal List<OzoneAcl>}
*/
   public List<OzoneAcl> getAddAcls() {
 return addAcls;
@@ -108,7 +108,7 @@ public final class OmBucketArgs implements Auditable {
 
   /**
* Returns the ACL's that are to be removed.
-   * @return List<OzoneAcl>
+   * @return {@literal List<OzoneAcl>}
*/
   public List<OzoneAcl> getRemoveAcls() {
 return removeAcls;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8d61b9f/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
--
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
index 4418738..0bff1f7 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
@@ -101,7 +101,7 @@ public final class OmBucketInfo implements Auditable {
 
   /**
* Returns the ACL's associated with this bucket.
-   * @return List<OzoneAcl>
+   * @return {@literal List<OzoneAcl>}
*/
   public List<OzoneAcl> getAcls() {
 return acls;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8d61b9f/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
--
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index 50f4b17..c9a7e48 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -119,7 

[2/2] hadoop git commit: HDDS-673. Suppress "Key not found" exception log with stack trace in OM. Contributed by Arpit Agarwal.

2018-10-18 Thread arp
HDDS-673. Suppress "Key not found" exception log with stack trace in OM. 
Contributed by Arpit Agarwal.

(cherry picked from commit c64018026e66b437533d1fdea4b0d92a209e9054)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca1136fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca1136fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca1136fb

Branch: refs/heads/ozone-0.3
Commit: ca1136fb4be586e69723f3523f3a4119e6f2549a
Parents: eeb1d43
Author: Arpit Agarwal 
Authored: Thu Oct 18 11:51:16 2018 -0700
Committer: Arpit Agarwal 
Committed: Thu Oct 18 11:51:24 2018 -0700

--
 .../src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca1136fb/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index e035eb2..c7fc56d 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -370,7 +370,7 @@ public class KeyManagerImpl implements KeyManager {
   }
   return OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(value));
 } catch (IOException ex) {
-  LOG.error("Get key failed for volume:{} bucket:{} key:{}",
+  LOG.debug("Get key failed for volume:{} bucket:{} key:{}",
   volumeName, bucketName, keyName, ex);
   throw new OMException(ex.getMessage(),
   OMException.ResultCodes.FAILED_KEY_NOT_FOUND);
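
The point of the one-word change: a missing key is an expected,
client-triggerable condition, so it is demoted from ERROR to DEBUG while the
stack trace stays available through SLF4J's trailing-throwable convention. A
hedged sketch of the pattern; the class and doRead() helper are stand-ins:

import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class LookupLogSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(LookupLogSketch.class);

  String readKey(String volume, String bucket, String key) throws IOException {
    try {
      return doRead(volume, bucket, key);  // stand-in for the real read path
    } catch (IOException ex) {
      // Expected whenever a client asks for a missing key: DEBUG keeps the
      // trace out of the server log unless someone is actually debugging.
      // SLF4J prints the trace because the throwable is the extra last arg.
      LOG.debug("Get key failed for volume:{} bucket:{} key:{}",
          volume, bucket, key, ex);
      throw ex;
    }
  }

  private String doRead(String v, String b, String k) throws IOException {
    throw new IOException("KEY_NOT_FOUND");  // placeholder
  }
}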





[3/3] hadoop git commit: YARN-8896. Limit the maximum number of container assignments per heartbeat. (Zhankun Tang via wangda)

2018-10-18 Thread wangda
YARN-8896. Limit the maximum number of container assignments per heartbeat. 
(Zhankun Tang via wangda)

Change-Id: I6e72f8362bd7f5c2a844cb9e3c4732492314e9f1


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/780be14f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/780be14f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/780be14f

Branch: refs/heads/trunk
Commit: 780be14f07df2a3ed6273b96ae857c278fd72718
Parents: a457a89
Author: Wangda Tan 
Authored: Thu Oct 18 10:58:21 2018 -0700
Committer: Wangda Tan 
Committed: Thu Oct 18 10:58:21 2018 -0700

--
 .../scheduler/capacity/CapacitySchedulerConfiguration.java  | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/780be14f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index b937ae7..08380f5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -336,8 +336,11 @@ public class CapacitySchedulerConfiguration extends 
ReservationSchedulerConfigur
   public static final String MAX_ASSIGN_PER_HEARTBEAT = PREFIX
   + "per-node-heartbeat.maximum-container-assignments";
 
+  /**
+   * Avoid the potential risk that greedily assigning many containers in a
+   * single heartbeat may introduce.
+   */
   @Private
-  public static final int DEFAULT_MAX_ASSIGN_PER_HEARTBEAT = -1;
+  public static final int DEFAULT_MAX_ASSIGN_PER_HEARTBEAT = 100;
 
   /** Configuring absolute min/max resources in a queue. **/
   @Private





[1/3] hadoop git commit: YARN-6098. Add documentation for Delete Queue. (Suma Shivaprasad via wangda)

2018-10-18 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0e56c883c -> 780be14f0


YARN-6098. Add documentation for Delete Queue. (Suma Shivaprasad via wangda)

Change-Id: I23bf7ca0acbe1168a7f805ceccdd62fe41bfeb35


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bfb88b10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bfb88b10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bfb88b10

Branch: refs/heads/trunk
Commit: bfb88b10f46a265aa38ab3e1d87b6a0a99d94be8
Parents: 0e56c88
Author: Wangda Tan 
Authored: Thu Oct 18 10:55:39 2018 -0700
Committer: Wangda Tan 
Committed: Thu Oct 18 10:55:39 2018 -0700

--
 .../src/site/markdown/CapacityScheduler.md| 14 +-
 1 file changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfb88b10/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index 5ac1d0a..c2d4ce4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -54,7 +54,7 @@ The `CapacityScheduler` supports the following features:
 
 * **Operability**
 
-* Runtime Configuration - The queue definitions and properties such as 
capacity, ACLs can be changed, at runtime, by administrators in a secure manner 
to minimize disruption to users. Also, a console is provided for users and 
administrators to view current allocation of resources to various queues in the 
system. Administrators can *add additional queues* at runtime, but queues 
cannot be *deleted* at runtime.
+* Runtime Configuration - The queue definitions and properties such as 
capacity, ACLs can be changed, at runtime, by administrators in a secure manner 
to minimize disruption to users. Also, a console is provided for users and 
administrators to view current allocation of resources to various queues in the 
system. Administrators can *add additional queues* at runtime, but queues 
cannot be *deleted* at runtime unless the queue is STOPPED and has no 
pending/running apps.
 
 * Drain applications - Administrators can *stop* queues at runtime to 
ensure that while existing applications run to completion, no new applications 
can be submitted. If a queue is in `STOPPED` state, new applications cannot be 
submitted to *itself* or *any of its child queues*. Existing applications 
continue to completion, thus the queue can be *drained* gracefully. 
Administrators can also *start* the stopped queues.
 
@@ -441,6 +441,18 @@ Changing queue/scheduler properties and adding/removing 
queues can be done in tw
 $ vi $HADOOP_CONF_DIR/capacity-scheduler.xml
 $ $HADOOP_YARN_HOME/bin/yarn rmadmin -refreshQueues
 
+#### Deleting queue via file
+
+  Step 1: Stop the queue
+
+  Before deleting a leaf queue, the leaf queue should not have any running/pending apps and has to be STOPPED by changing `yarn.scheduler.capacity.<queue-path>.state`. See the
+  [Queue Administration & Permissions](CapacityScheduler.html#Queue_Properties) section.
+  Before deleting a parent queue, all its child queues should not have any running/pending apps and have to be STOPPED. The parent queue also needs to be STOPPED.
+
+  Step 2: Delete the queue
+
+  Remove the queue configurations from the file and run refresh as described above.
+
 ### Changing queue configuration via API
 
   Editing by API uses a backing store for the scheduler configuration. To 
enable this, the following parameters can be configured in yarn-site.xml.





hadoop git commit: YARN-8862. [GPG] Add Yarn Registry cleanup in ApplicationCleaner. Contributed by Botong Huang.

2018-10-18 Thread botong
Repository: hadoop
Updated Branches:
  refs/heads/YARN-7402 3671dc3ef -> 1ca57be32


YARN-8862. [GPG] Add Yarn Registry cleanup in ApplicationCleaner. Contributed 
by Botong Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ca57be3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ca57be3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ca57be3

Branch: refs/heads/YARN-7402
Commit: 1ca57be3208bc351d428ed2ab13ca34b249f2101
Parents: 3671dc3
Author: Botong Huang 
Authored: Thu Oct 18 10:26:16 2018 -0700
Committer: Botong Huang 
Committed: Thu Oct 18 10:26:16 2018 -0700

--
 .../utils/FederationRegistryClient.java | 18 +++
 .../utils/TestFederationRegistryClient.java | 31 ++-
 .../globalpolicygenerator/GPGContext.java   |  5 +++
 .../globalpolicygenerator/GPGContextImpl.java   | 12 
 .../GlobalPolicyGenerator.java  | 21 +
 .../applicationcleaner/ApplicationCleaner.java  | 19 +++-
 .../DefaultApplicationCleaner.java  |  2 ++
 .../TestDefaultApplicationCleaner.java  | 32 
 .../amrmproxy/FederationInterceptor.java|  6 ++--
 9 files changed, 136 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ca57be3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java
index 6624318..7eb9049 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java
@@ -202,21 +202,27 @@ public class FederationRegistryClient {
* Remove an application from registry.
*
* @param appId application id
+   * @param ignoreMemoryState whether to ignore the memory data in terms of
+   *  known application
*/
-  public void removeAppFromRegistry(ApplicationId appId) {
+  public void removeAppFromRegistry(ApplicationId appId,
+  boolean ignoreMemoryState) {
Map<String, Token<AMRMTokenIdentifier>> subClusterTokenMap =
 this.appSubClusterTokenMap.get(appId);
-LOG.info("Removing all registry entries for {}", appId);
-
-if (subClusterTokenMap == null || subClusterTokenMap.size() == 0) {
-  return;
+if (!ignoreMemoryState) {
+  if (subClusterTokenMap == null || subClusterTokenMap.size() == 0) {
+return;
+  }
 }
+LOG.info("Removing all registry entries for {}", appId);
 
 // Lastly remove the application directory
 String key = getRegistryKey(appId, null);
 try {
   removeKeyRegistry(this.registry, this.user, key, true, true);
-  subClusterTokenMap.clear();
+  if (subClusterTokenMap != null) {
+subClusterTokenMap.clear();
+  }
 } catch (YarnException e) {
   LOG.error("Failed removing registry directory key " + key, e);
 }
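
The new ignoreMemoryState flag exists so the GPG-side ApplicationCleaner can
purge registry entries for applications this process never tracked in memory.
A sketch of the intended call pattern; the two sets are stand-ins for data a
real cleaner would fetch from the registry and the router:

import java.util.Set;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.federation.utils.FederationRegistryClient;

final class RegistryCleanupSketch {
  private Set<ApplicationId> appsInRegistry;  // stand-in: from the registry
  private Set<ApplicationId> runningApps;     // stand-in: from the router
  private FederationRegistryClient registryClient;

  void cleanFinishedApps() {
    for (ApplicationId appId : appsInRegistry) {
      if (!runningApps.contains(appId)) {
        // true == ignoreMemoryState: the GPG holds no in-memory token map
        // for apps owned by individual RMs, so remove unconditionally.
        registryClient.removeAppFromRegistry(appId, true);
      }
    }
  }
}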

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ca57be3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationRegistryClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationRegistryClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationRegistryClient.java
index 42be851..5b799a7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationRegistryClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationRegistryClient.java
@@ -80,11 +80,40 @@ public class TestFederationRegistryClient {
 Assert.assertEquals(2,
 this.registryClient.loadStateFromRegistry(appId).size());
 
-

hadoop git commit: HDFS-14003. Fix findbugs warning in trunk for FSImageFormatPBINode. Contributed by Yiqun Lin.

2018-10-18 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1e78dfca4 -> 0e56c883c


HDFS-14003. Fix findbugs warning in trunk for FSImageFormatPBINode. Contributed 
by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e56c883
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e56c883
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e56c883

Branch: refs/heads/trunk
Commit: 0e56c883cd2310f3ff9d62afb306b1ab27419c36
Parents: 1e78dfc
Author: Inigo Goiri 
Authored: Thu Oct 18 10:53:30 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 18 10:53:30 2018 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java| 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e56c883/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 7c4f21e..bc455e0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -660,7 +660,6 @@ public final class FSImageFormatPBINode {
 }
 
 private void save(OutputStream out, INodeSymlink n) throws IOException {
-  SaverContext state = parent.getSaverContext();
   INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink
   .newBuilder()
   .setPermission(buildPermissionStatus(n))





[35/50] [abbrv] hadoop git commit: HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.

2018-10-18 Thread xyao
HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/020cf744
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/020cf744
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/020cf744

Branch: refs/heads/HDDS-4
Commit: 020cf744e4d8e66306fc0c59866b4b61401ffdb2
Parents: 9abda83
Author: Xiaoyu Yao 
Authored: Wed May 9 15:56:03 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed Oct 17 13:48:55 2018 -0700

--
 .../authentication/util/KerberosUtil.java   |   2 +-
 .../conf/TestConfigurationFieldsBase.java   |   2 +
 .../java/org/apache/hadoop/hdds/HddsUtils.java  |  13 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   9 +-
 .../scm/protocol/ScmBlockLocationProtocol.java  |   3 +
 .../StorageContainerLocationProtocol.java   |   4 +
 .../protocolPB/ScmBlockLocationProtocolPB.java  |   6 +
 .../StorageContainerLocationProtocolPB.java |   4 +
 .../apache/hadoop/ozone/OzoneConfigKeys.java|   4 +
 .../common/src/main/resources/ozone-default.xml |  26 ++-
 .../StorageContainerDatanodeProtocol.java   |   4 +
 .../StorageContainerDatanodeProtocolPB.java |   6 +
 .../scm/server/StorageContainerManager.java |  51 -
 .../StorageContainerManagerHttpServer.java  |   5 +-
 .../hadoop/hdds/scm/block/TestBlockManager.java |   3 +-
 .../ozone/client/protocol/ClientProtocol.java   |   3 +
 hadoop-ozone/common/src/main/bin/start-ozone.sh |  16 +-
 hadoop-ozone/common/src/main/bin/stop-ozone.sh  |  13 +-
 hadoop-ozone/integration-test/pom.xml   |   6 +
 .../TestContainerStateManagerIntegration.java   |   5 +-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |   4 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |  21 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java| 205 +++
 .../ozone/TestStorageContainerManager.java  |   8 +-
 24 files changed, 375 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/020cf744/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
index c011045..4459928 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
@@ -167,7 +167,7 @@ public class KerberosUtil {
   }
 
   /* Return fqdn of the current host */
-  static String getLocalHostName() throws UnknownHostException {
+  public static String getLocalHostName() throws UnknownHostException {
 return InetAddress.getLocalHost().getCanonicalHostName();
   }
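
Widening getLocalHostName() to public lets HDDS code resolve the _HOST
placeholder in Kerberos principals. A minimal login sketch; the two property
names follow the pattern this patch adds but should be treated as
illustrative here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.util.KerberosUtil;

final class ScmLoginSketch {
  static void login(Configuration conf) throws Exception {
    // Resolve a scm/_HOST@REALM style principal against the local FQDN.
    String host = KerberosUtil.getLocalHostName();
    String principal =
        conf.get("hdds.scm.kerberos.principal", "").replace("_HOST", host);
    String keytab = conf.get("hdds.scm.kerberos.keytab.file", "");
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab(principal, keytab);
  }
}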
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/020cf744/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
index 152159b..bce1cd5 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
@@ -436,6 +436,8 @@ public abstract class TestConfigurationFieldsBase {
 // Create XML key/value map
 LOG_XML.debug("Reading XML property files\n");
 xmlKeyValueMap = extractPropertiesFromXml(xmlFilename);
+// Remove hadoop property set in ozone-default.xml
+xmlKeyValueMap.remove("hadoop.custom.tags");
 LOG_XML.debug("\n=\n");
 
 // Create default configuration variable key/value map

http://git-wip-us.apache.org/repos/asf/hadoop/blob/020cf744/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index db9d374..efafb5c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -242,18 +242,7 @@ public final class HddsUtils {
   }
 
   public static boolean 

[21/50] [abbrv] hadoop git commit: HDFS-13662. TestBlockReaderLocal#testStatisticsForErasureCodingRead is flaky

2018-10-18 Thread xyao
HDFS-13662. TestBlockReaderLocal#testStatisticsForErasureCodingRead is flaky


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53313871
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53313871
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53313871

Branch: refs/heads/HDDS-4
Commit: 533138718cc05b78e0afe583d7a9bd30e8a48fdc
Parents: e3342a1
Author: Xiao Chen 
Authored: Tue Oct 16 19:32:12 2018 -0700
Committer: Xiao Chen 
Committed: Tue Oct 16 19:33:55 2018 -0700

--
 .../hadoop/hdfs/client/impl/TestBlockReaderLocal.java | 10 +++---
 1 file changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53313871/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
index ace21c0..95fb67a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
@@ -28,6 +28,7 @@ import java.nio.ByteBuffer;
 import java.util.UUID;
 import java.util.concurrent.TimeoutException;
 
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -827,9 +828,12 @@ public class TestBlockReaderLocal {
 
   Path ecFile = new Path(ecDir, "file2");
   DFSTestUtil.createFile(fs, ecFile, length, repl, randomSeed);
-  // Shutdown one DataNode so that erasure coding decoding process can kick
-  // in.
-  cluster.shutdownDataNode(0);
+
+  // Shutdown a DataNode that holds a data block, to trigger EC decoding.
+  final BlockLocation[] locs = fs.getFileBlockLocations(ecFile, 0, length);
+  final String[] nodes = locs[0].getNames();
+  cluster.stopDataNode(nodes[0]);
+
   try (HdfsDataInputStream in = (HdfsDataInputStream) fs.open(ecFile)) {
 IOUtils.readFully(in, buf, 0, length);
 





[48/50] [abbrv] hadoop git commit: HDDS-547. Fix secure docker and configs. Contributed by Xiaoyu Yao.

2018-10-18 Thread xyao
HDDS-547. Fix secure docker and configs. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28aacb02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28aacb02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28aacb02

Branch: refs/heads/HDDS-4
Commit: 28aacb02140833eda45009025c85317287eabc47
Parents: 5c810fa
Author: Ajay Kumar 
Authored: Mon Oct 1 11:03:27 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed Oct 17 14:04:04 2018 -0700

--
 hadoop-dist/src/main/compose/ozonesecure/.env   |  18 +++
 .../compose/ozonesecure/docker-compose.yaml |  57 +++
 .../src/main/compose/ozonesecure/docker-config  | 103 +
 .../ozonesecure/docker-image/runner/Dockerfile  |  39 +
 .../ozonesecure/docker-image/runner/build.sh|  26 
 .../docker-image/runner/scripts/envtoconf.py| 115 ++
 .../docker-image/runner/scripts/krb5.conf   |  38 +
 .../docker-image/runner/scripts/starter.sh  | 100 +
 .../runner/scripts/transformation.py| 150 +++
 hadoop-hdds/common/pom.xml  |   6 +
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   8 +-
 .../apache/hadoop/ozone/OzoneConfigKeys.java|   3 -
 .../common/src/main/resources/ozone-default.xml |   6 +-
 .../hadoop/ozone/HddsDatanodeService.java   |  30 
 .../StorageContainerManagerHttpServer.java  |   4 +-
 .../src/test/compose/compose-secure/.env|  17 ---
 .../compose/compose-secure/docker-compose.yaml  |  66 
 .../test/compose/compose-secure/docker-config   |  99 
 .../apache/hadoop/ozone/om/OMConfigKeys.java|   4 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java|  11 +-
 .../hadoop/ozone/om/OzoneManagerHttpServer.java |   4 +-
 21 files changed, 701 insertions(+), 203 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28aacb02/hadoop-dist/src/main/compose/ozonesecure/.env
--
diff --git a/hadoop-dist/src/main/compose/ozonesecure/.env 
b/hadoop-dist/src/main/compose/ozonesecure/.env
new file mode 100644
index 000..a494004
--- /dev/null
+++ b/hadoop-dist/src/main/compose/ozonesecure/.env
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+HDDS_VERSION=${hdds.version}
+SRC_VOLUME=../../

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28aacb02/hadoop-dist/src/main/compose/ozonesecure/docker-compose.yaml
--
diff --git a/hadoop-dist/src/main/compose/ozonesecure/docker-compose.yaml 
b/hadoop-dist/src/main/compose/ozonesecure/docker-compose.yaml
new file mode 100644
index 000..42ab05e
--- /dev/null
+++ b/hadoop-dist/src/main/compose/ozonesecure/docker-compose.yaml
@@ -0,0 +1,57 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: "3"
+services:
+   kdc:
+  image: ahadoop/kdc:v1
+  hostname: kdc
+  volumes:
+  - $SRC_VOLUME:/opt/hadoop
+   datanode:
+  image: ahadoop/runner:latest
+  volumes:
+- $SRC_VOLUME:/opt/hadoop
+  hostname: datanode
+  ports:
+- 9864
+  command: ["/opt/hadoop/bin/ozone","datanode"]
+  env_file:
+- ./docker-config
+   ozoneManager:
+   

[26/50] [abbrv] hadoop git commit: HDDS-563. Support hybrid VirtualHost style URL. Contributed by Bharat Viswanadham.

2018-10-18 Thread xyao
HDDS-563. Support hybrid VirtualHost style URL. Contributed by Bharat 
Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9df1c84b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9df1c84b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9df1c84b

Branch: refs/heads/HDDS-4
Commit: 9df1c84be0f86f6c26030ba1b98e9a2b93dc743c
Parents: 5085e5f
Author: Márton Elek 
Authored: Wed Oct 17 11:34:03 2018 +0200
Committer: Márton Elek 
Committed: Wed Oct 17 12:43:44 2018 +0200

--
 .../hadoop/ozone/s3/VirtualHostStyleFilter.java | 19 ++-
 .../ozone/s3/TestVirtualHostStyleFilter.java| 25 +++-
 2 files changed, 21 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9df1c84b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
index 3bd690b..4cf78b6 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
@@ -49,8 +49,6 @@ public class VirtualHostStyleFilter implements 
ContainerRequestFilter {
 
   private static final Logger LOG = LoggerFactory.getLogger(
   VirtualHostStyleFilter.class);
-  private static final Pattern URL_SCHEME_PATTERN = Pattern.compile("" +
-  "(?<bucket>(.+))\\.(?<volume>(.+))\\.");
 
   @Inject
   private OzoneConfiguration conf;
@@ -83,24 +81,21 @@ public class VirtualHostStyleFilter implements 
ContainerRequestFilter {
 // address length means it is virtual host style, we need to convert to
 // path style.
 if (host.length() > domain.length()) {
-  String bothNames = host.substring(0, host.length() - domain.length());
-  LOG.debug("Both volume name and bucket name is {}", bothNames);
-  Matcher matcher = URL_SCHEME_PATTERN.matcher(bothNames);
+  String bucketName = host.substring(0, host.length() - domain.length());
 
-  if (!matcher.matches()) {
+  if(!bucketName.endsWith(".")) {
+//Checking this as the virtual host style pattern is 
http://bucket.host/
 throw getException("Invalid S3 Gateway request {" + requestContext
 .getUriInfo().getRequestUri().toString() +"}:" +" Host: {" + host
 + " is in invalid format");
+  } else {
+bucketName = bucketName.substring(0, bucketName.length() - 1);
   }
-
-  String bucketStr = matcher.group("bucket");
-  String volumeStr = matcher.group("volume");
-
-  LOG.debug("bucket {}, volumeStr {}", bucketStr, volumeStr);
+  LOG.debug("Bucket name is {}", bucketName);
 
   URI baseURI = requestContext.getUriInfo().getBaseUri();
   String currentPath = requestContext.getUriInfo().getPath();
-  String newPath = String.format("%s/%s", volumeStr, bucketStr);
+  String newPath = bucketName;
   if (currentPath != null) {
 newPath += String.format("%s", currentPath);
   }
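
In isolation, the conversion above amounts to stripping the configured domain
suffix from the Host header and prepending the remaining bucket name to the
request path. A minimal self-contained sketch of that logic, with illustrative
names rather than the filter's actual fields:

// Sketch of the virtual-host to path-style conversion shown in the hunk
// above. "domain" stands for the configured S3 gateway domain suffix.
static String toPathStyle(String host, String domain, String currentPath) {
  if (host.length() <= domain.length()) {
    return currentPath;  // already path style, nothing to rewrite
  }
  String bucketName = host.substring(0, host.length() - domain.length());
  if (!bucketName.endsWith(".")) {
    // Virtual-host style must look like http://bucket.host/
    throw new IllegalArgumentException(
        "Host: " + host + " is in invalid format");
  }
  bucketName = bucketName.substring(0, bucketName.length() - 1);
  return "/" + bucketName + (currentPath == null ? "" : currentPath);
}

For example, toPathStyle("mybucket.localhost", "localhost", "/myfile") yields
"/mybucket/myfile", matching the path portion of the updated test expectation
below.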

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9df1c84b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
index ac8fa87..5548c77 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
@@ -31,6 +31,8 @@ import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.SecurityContext;
 import java.net.URI;
 
+import static org.junit.Assert.fail;
+
 /**
  * This class test virtual host style mapping conversion to path style.
  */
@@ -87,10 +89,10 @@ public class TestVirtualHostStyleFilter {
 virtualHostStyleFilter.setConfiguration(conf);
 
 ContainerRequest containerRequest = createContainerRequest("mybucket" +
-".myvolume.localhost:9878", "/myfile", true);
+".localhost:9878", "/myfile", true);
 virtualHostStyleFilter.filter(containerRequest);
 URI expected = new URI("http://" + s3HttpAddr +
-"/myvolume/mybucket/myfile");
+"/mybucket/myfile");
 Assert.assertEquals(expected, containerRequest.getRequestUri());
   }
 
@@ -102,10 +104,10 @@ public class TestVirtualHostStyleFilter {
 

[04/50] [abbrv] hadoop git commit: HDFS-13993. TestDataNodeVolumeFailure#testTolerateVolumeFailuresAfterAddingMoreVolumes is flaky. Contributed by Ayush Saxena.

2018-10-18 Thread xyao
HDFS-13993. 
TestDataNodeVolumeFailure#testTolerateVolumeFailuresAfterAddingMoreVolumes is 
flaky. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f90c64e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f90c64e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f90c64e6

Branch: refs/heads/HDDS-4
Commit: f90c64e6242facf38c2baedeeda42e4a8293e642
Parents: 5f0b43f
Author: Inigo Goiri 
Authored: Tue Oct 16 11:22:57 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Oct 16 11:22:57 2018 -0700

--
 .../hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java  | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f90c64e6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 7d04942..b70a356 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -389,6 +389,7 @@ public class TestDataNodeVolumeFailure {
 DataNodeTestUtils.injectDataDirFailure(dn0Vol2);
 DataNodeTestUtils.waitForDiskError(dn0,
 DataNodeTestUtils.getVolume(dn0, dn0Vol2));
+dn0.checkDiskError();
 assertFalse(dn0.shouldRun());
   }
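
The extra checkDiskError() call removes the race: the assertion no longer
depends on the asynchronous disk checker thread having already re-evaluated
the datanode state. Annotated, the fixed sequence from the hunk above follows
a deterministic inject / await / force-recheck / assert shape:

// Sketch: the deterministic shape of the fixed test; the names are the
// ones already used in the hunk above.
DataNodeTestUtils.injectDataDirFailure(dn0Vol2);        // 1. inject the fault
DataNodeTestUtils.waitForDiskError(dn0,
    DataNodeTestUtils.getVolume(dn0, dn0Vol2));         // 2. await async detection
dn0.checkDiskError();                                   // 3. force a synchronous re-check
assertFalse(dn0.shouldRun());                           // 4. assert on the settled state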
 





[20/50] [abbrv] hadoop git commit: HADOOP-15802. start-build-env.sh creates an invalid /etc/sudoers.d/hadoop-build-${USER_ID} file entry. Contributed by Jon Boone.

2018-10-18 Thread xyao
HADOOP-15802. start-build-env.sh creates an invalid 
/etc/sudoers.d/hadoop-build-${USER_ID} file entry. Contributed by Jon Boone.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3342a1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3342a1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3342a1a

Branch: refs/heads/HDDS-4
Commit: e3342a1abaff71823ebd952baf24a6143e711b99
Parents: 22f85f2
Author: Akira Ajisaka 
Authored: Wed Oct 17 10:11:44 2018 +0900
Committer: Akira Ajisaka 
Committed: Wed Oct 17 10:12:57 2018 +0900

--
 start-build-env.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3342a1a/start-build-env.sh
--
diff --git a/start-build-env.sh b/start-build-env.sh
index c5581ca..bf6b411 100755
--- a/start-build-env.sh
+++ b/start-build-env.sh
@@ -61,7 +61,7 @@ docker build -t "hadoop-build-${USER_ID}" - <<UserSpecificDocker
-RUN echo "${USER_NAME}\tALL=NOPASSWD: ALL" > "/etc/sudoers.d/hadoop-build-${USER_ID}"
+RUN echo -e "${USER_NAME}\tALL=NOPASSWD: ALL" > "/etc/sudoers.d/hadoop-build-${USER_ID}"
 ENV HOME /home/${USER_NAME}
 
 UserSpecificDocker





[44/50] [abbrv] hadoop git commit: HDDS-548. Create a Self-Signed Certificate. Contributed by Anu Engineer.

2018-10-18 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c810faa/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyPEMWriter.java
--
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyPEMWriter.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyPEMWriter.java
new file mode 100644
index 000..db5d430
--- /dev/null
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyPEMWriter.java
@@ -0,0 +1,216 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.security.x509.keys;
+
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.attribute.PosixFilePermission;
+import java.security.KeyFactory;
+import java.security.KeyPair;
+import java.security.NoSuchAlgorithmException;
+import java.security.NoSuchProviderException;
+import java.security.PrivateKey;
+import java.security.PublicKey;
+import java.security.spec.InvalidKeySpecException;
+import java.security.spec.PKCS8EncodedKeySpec;
+import java.security.spec.X509EncodedKeySpec;
+import java.util.Set;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.security.x509.SecurityConfig;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+/**
+ * Test class for HDDS pem writer.
+ */
+public class TestHDDSKeyPEMWriter {
+
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+  private OzoneConfiguration configuration;
+  private HDDSKeyGenerator keyGenerator;
+  private String prefix;
+
+  @Before
+  public void init() throws IOException {
+configuration = new OzoneConfiguration();
+prefix = temporaryFolder.newFolder().toString();
+configuration.set(HDDS_METADATA_DIR_NAME, prefix);
+keyGenerator = new HDDSKeyGenerator(configuration);
+  }
+
+  /**
+   * Assert basic things like we are able to create a file, and the names are
+   * in expected format etc.
+   *
+   * @throws NoSuchProviderException - On Error, due to missing Java
+   * dependencies.
+   * @throws NoSuchAlgorithmException - On Error,  due to missing Java
+   * dependencies.
+   * @throws IOException - On I/O failure.
+   */
+  @Test
+  public void testWriteKey()
+  throws NoSuchProviderException, NoSuchAlgorithmException,
+  IOException, InvalidKeySpecException {
+KeyPair keys = keyGenerator.generateKey();
+HDDSKeyPEMWriter pemWriter = new HDDSKeyPEMWriter(configuration);
+pemWriter.writeKey(keys);
+
+// Assert that locations have been created.
+Path keyLocation = pemWriter.getSecurityConfig().getKeyLocation();
+Assert.assertTrue(keyLocation.toFile().exists());
+
+// Assert that locations are created in the locations that we specified
+// using the Config.
+Assert.assertTrue(keyLocation.toString().startsWith(prefix));
+Path privateKeyPath = Paths.get(keyLocation.toString(),
+pemWriter.getSecurityConfig().getPrivateKeyFileName());
+Assert.assertTrue(privateKeyPath.toFile().exists());
+Path publicKeyPath = Paths.get(keyLocation.toString(),
+pemWriter.getSecurityConfig().getPublicKeyFileName());
+Assert.assertTrue(publicKeyPath.toFile().exists());
+
+// Read the private key and test if the expected String in the PEM file
+// format exists.
+byte[] privateKey = Files.readAllBytes(privateKeyPath);
+String privateKeydata = new String(privateKey, StandardCharsets.UTF_8);
+Assert.assertTrue(privateKeydata.contains("PRIVATE KEY"));
+
+// Read the public key and test if the expected String in the PEM file
+// format exists.
+byte[] publicKey = Files.readAllBytes(publicKeyPath);
+
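
A minimal sketch of how such PEM text decodes back into java.security key
objects, using only the JCA classes the test already imports; this is
illustrative, not the HDDSKeyPEMWriter API itself:

// Sketch: decode PEM file contents back into java.security key objects.
// "pem" is the full text of the written key file.
static PrivateKey toPrivateKey(String pem) throws Exception {
  String base64 = pem.replace("-----BEGIN PRIVATE KEY-----", "")
      .replace("-----END PRIVATE KEY-----", "")
      .replaceAll("\\s", "");
  byte[] der = java.util.Base64.getDecoder().decode(base64);
  return KeyFactory.getInstance("RSA")
      .generatePrivate(new PKCS8EncodedKeySpec(der));
}

static PublicKey toPublicKey(String pem) throws Exception {
  String base64 = pem.replace("-----BEGIN PUBLIC KEY-----", "")
      .replace("-----END PUBLIC KEY-----", "")
      .replaceAll("\\s", "");
  byte[] der = java.util.Base64.getDecoder().decode(base64);
  return KeyFactory.getInstance("RSA")
      .generatePublic(new X509EncodedKeySpec(der));
}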

[47/50] [abbrv] hadoop git commit: HDDS-566. Move OzoneSecure docker-compose after HDDS-447. Contributed by Xiaoyu Yao.

2018-10-18 Thread xyao
HDDS-566. Move OzoneSecure docker-compose after HDDS-447. Contributed by Xiaoyu 
Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/67d4b031
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/67d4b031
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/67d4b031

Branch: refs/heads/HDDS-4
Commit: 67d4b0318530b2eb3ca09632f6974e9d40754f6a
Parents: 28aacb0
Author: Ajay Kumar 
Authored: Tue Oct 2 10:07:35 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed Oct 17 14:04:04 2018 -0700

--
 hadoop-dist/src/main/compose/ozonesecure/.env   |  18 ---
 .../compose/ozonesecure/docker-compose.yaml |  57 ---
 .../src/main/compose/ozonesecure/docker-config  | 103 -
 .../ozonesecure/docker-image/runner/Dockerfile  |  39 -
 .../ozonesecure/docker-image/runner/build.sh|  26 
 .../docker-image/runner/scripts/envtoconf.py| 115 --
 .../docker-image/runner/scripts/krb5.conf   |  38 -
 .../docker-image/runner/scripts/starter.sh  | 100 -
 .../runner/scripts/transformation.py| 150 ---
 .../dist/src/main/compose/ozonesecure/.env  |  18 +++
 .../compose/ozonesecure/docker-compose.yaml |  57 +++
 .../src/main/compose/ozonesecure/docker-config  | 103 +
 .../ozonesecure/docker-image/runner/Dockerfile  |  39 +
 .../ozonesecure/docker-image/runner/build.sh|  26 
 .../docker-image/runner/scripts/envtoconf.py| 115 ++
 .../docker-image/runner/scripts/krb5.conf   |  38 +
 .../docker-image/runner/scripts/starter.sh  | 100 +
 .../runner/scripts/transformation.py| 150 +++
 18 files changed, 646 insertions(+), 646 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/67d4b031/hadoop-dist/src/main/compose/ozonesecure/.env
--
diff --git a/hadoop-dist/src/main/compose/ozonesecure/.env 
b/hadoop-dist/src/main/compose/ozonesecure/.env
deleted file mode 100644
index a494004..000
--- a/hadoop-dist/src/main/compose/ozonesecure/.env
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDDS_VERSION=${hdds.version}
-SRC_VOLUME=../../

http://git-wip-us.apache.org/repos/asf/hadoop/blob/67d4b031/hadoop-dist/src/main/compose/ozonesecure/docker-compose.yaml
--
diff --git a/hadoop-dist/src/main/compose/ozonesecure/docker-compose.yaml 
b/hadoop-dist/src/main/compose/ozonesecure/docker-compose.yaml
deleted file mode 100644
index 42ab05e..000
--- a/hadoop-dist/src/main/compose/ozonesecure/docker-compose.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "3"
-services:
-   kdc:
-  image: ahadoop/kdc:v1
-  hostname: kdc
-  volumes:
-  - $SRC_VOLUME:/opt/hadoop
-   datanode:
-  image: ahadoop/runner:latest
-  volumes:
-- $SRC_VOLUME:/opt/hadoop
-  hostname: datanode
-  ports:
-- 9864
-  command: ["/opt/hadoop/bin/ozone","datanode"]
-  env_file:
-- ./docker-config
-   ozoneManager:
-  image: ahadoop/runner:latest
-  hostname: om
-  volumes:
- - 

[38/50] [abbrv] hadoop git commit: Revert "Bad merge with 996a627b289947af3894bf83e7b63ec702a665cd"

2018-10-18 Thread xyao
Revert "Bad merge with 996a627b289947af3894bf83e7b63ec702a665cd"

This reverts commit 996a627b289947af3894bf83e7b63ec702a665cd.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eee307b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eee307b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eee307b7

Branch: refs/heads/HDDS-4
Commit: eee307b7e334daa2ef7007adde76572810286b01
Parents: af0c8f6
Author: Xiaoyu Yao 
Authored: Tue May 15 16:56:24 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed Oct 17 13:48:55 2018 -0700

--
 hadoop-hdds/common/src/main/resources/ozone-default.xml | 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee307b7/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 3ed4f09..287d913 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -160,7 +160,6 @@
 
   
   
-<<<<<<< HEAD
 dfs.ratis.client.request.timeout.duration
 3s
 OZONE, RATIS, MANAGEMENT
@@ -218,9 +217,6 @@
   
   
 hdds.container.report.interval
-=======
-ozone.container.report.interval
->>>>>>> HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.
 6ms
 OZONE, CONTAINER, MANAGEMENT
 Time interval of the datanode to send container report. Each





[14/50] [abbrv] hadoop git commit: YARN-8870. [Submarine] Add submarine installation scripts. (Xun Liu via wangda)

2018-10-18 Thread xyao
YARN-8870. [Submarine] Add submarine installation scripts. (Xun Liu via wangda)

Change-Id: I46e8d9fd32c7745c313030da62da41486a77b3ea


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46d6e001
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46d6e001
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46d6e001

Branch: refs/heads/HDDS-4
Commit: 46d6e0016610ced51a76189daeb3ad0e3dbbf94c
Parents: ed08dd3
Author: Wangda Tan 
Authored: Tue Oct 16 13:36:59 2018 -0700
Committer: Wangda Tan 
Committed: Tue Oct 16 13:51:02 2018 -0700

--
 .../resources/assemblies/hadoop-yarn-dist.xml   |   8 +
 .../installation/install.conf   |  74 
 .../installation/install.sh | 116 +
 .../package/calico/calico-node.service  |  50 +++
 .../installation/package/calico/calicoctl.cfg   |  22 +
 .../installation/package/docker/daemon.json |  23 +
 .../installation/package/docker/docker.service  |  35 ++
 .../installation/package/etcd/etcd.service  |  40 ++
 .../package/hadoop/container-executor.cfg   |  41 ++
 .../installation/package/submarine/submarine.sh |  25 ++
 .../installation/scripts/calico.sh  | 224 ++
 .../installation/scripts/docker.sh  | 166 +++
 .../installation/scripts/download-server.sh |  42 ++
 .../installation/scripts/environment.sh | 213 +
 .../installation/scripts/etcd.sh| 152 +++
 .../installation/scripts/hadoop.sh  | 117 +
 .../installation/scripts/menu.sh| 444 +++
 .../installation/scripts/nvidia-docker.sh   |  99 +
 .../installation/scripts/nvidia.sh  | 120 +
 .../installation/scripts/submarine.sh   |  38 ++
 .../installation/scripts/utils.sh   | 123 +
 21 files changed, 2172 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46d6e001/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
index a2ea08c..9c401e8 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
@@ -66,6 +66,14 @@
   0755
 
 
+  
hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation
+  /share/hadoop/yarn/submarine-installer
+  
+**/*
+  
+  0755
+
+
   hadoop-yarn/conf
   etc/hadoop
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46d6e001/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/install.conf
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/install.conf
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/install.conf
new file mode 100644
index 000..82dcf61
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/install.conf
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# install config #
+
+# DNS
+LOCAL_DNS_HOST="172.17.0.9"   # /etc/resolv.conf
+YARN_DNS_HOST="10.196.69.173" # yarn dns server ip address
+
+# etcd hosts list
+ETCD_HOSTS=(10.196.69.173 10.196.69.174 10.196.69.175)
+
+# docker registry ip:port
+DOCKER_REGISTRY="10.120.196.232:5000"
+
+# Start the HTTP download service on the specified server;
+# it serves all of the dependencies over HTTP while the
+# install script runs on the other servers.
+# The other servers then fetch dependencies from this HTTP server,
+# which avoids every server downloading them slowly from the internet.
+# At the same time, you can also manually download the dependencies 

[29/50] [abbrv] hadoop git commit: HDDS-662. Introduce ContainerReplicaState in StorageContainerManager. Contributed by Nanda kumar.

2018-10-18 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 3523499..badcec7 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -33,8 +33,10 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.ScmUtils;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
@@ -169,7 +171,7 @@ public class SCMClientProtocolServer implements
 String remoteUser = getRpcRemoteUsername();
 getScm().checkAdminAccess(remoteUser);
 return scm.getContainerManager()
-.getContainer(containerID);
+.getContainer(ContainerID.valueof(containerID));
   }
 
   @Override
@@ -177,8 +179,8 @@ public class SCMClientProtocolServer implements
   throws IOException {
 if (chillModePrecheck.isInChillMode()) {
   ContainerInfo contInfo = scm.getContainerManager()
-  .getContainer(containerID);
-  if (contInfo.isContainerOpen()) {
+  .getContainer(ContainerID.valueof(containerID));
+  if (contInfo.isOpen()) {
 if (!hasRequiredReplicas(contInfo)) {
   throw new SCMException("Open container " + containerID + " doesn't"
   + " have enough replicas to service this operation in "
@@ -189,7 +191,7 @@ public class SCMClientProtocolServer implements
 String remoteUser = getRpcRemoteUsername();
 getScm().checkAdminAccess(null);
 return scm.getContainerManager()
-.getContainerWithPipeline(containerID);
+.getContainerWithPipeline(ContainerID.valueof(containerID));
   }
 
   /**
@@ -198,10 +200,10 @@ public class SCMClientProtocolServer implements
*/
   private boolean hasRequiredReplicas(ContainerInfo contInfo) {
 try{
-  return getScm().getContainerManager().getStateManager()
+  return getScm().getContainerManager()
   .getContainerReplicas(contInfo.containerID())
   .size() >= contInfo.getReplicationFactor().getNumber();
-} catch (SCMException ex) {
+} catch (ContainerNotFoundException ex) {
   // getContainerReplicas throws exception if no replica's exist for given
   // container.
   return false;
@@ -212,14 +214,14 @@ public class SCMClientProtocolServer implements
   public List listContainer(long startContainerID,
   int count) throws IOException {
 return scm.getContainerManager().
-listContainer(startContainerID, count);
+listContainer(ContainerID.valueof(startContainerID), count);
   }
 
   @Override
   public void deleteContainer(long containerID) throws IOException {
 String remoteUser = getRpcRemoteUsername();
 getScm().checkAdminAccess(remoteUser);
-scm.getContainerManager().deleteContainer(containerID);
+
scm.getContainerManager().deleteContainer(ContainerID.valueof(containerID));
 
   }
 
@@ -257,10 +259,12 @@ public class SCMClientProtocolServer implements
   .ObjectStageChangeRequestProto.Op.create) {
 if (stage == StorageContainerLocationProtocolProtos
 .ObjectStageChangeRequestProto.Stage.begin) {
-  scm.getContainerManager().updateContainerState(id, HddsProtos
+  scm.getContainerManager().updateContainerState(
+  ContainerID.valueof(id), HddsProtos
   .LifeCycleEvent.CREATE);
 } else {
-  scm.getContainerManager().updateContainerState(id, HddsProtos
+  scm.getContainerManager().updateContainerState(
+  ContainerID.valueof(id), HddsProtos
   .LifeCycleEvent.CREATED);
 }
   } else {
@@ -268,10 +272,12 @@ public class SCMClientProtocolServer implements
 .ObjectStageChangeRequestProto.Op.close) {
   if (stage == StorageContainerLocationProtocolProtos
   .ObjectStageChangeRequestProto.Stage.begin) {
-scm.getContainerManager().updateContainerState(id, HddsProtos
+
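
Most of this hunk is mechanical: call sites that used to pass a raw long now
wrap it with ContainerID.valueof(...), so the container APIs become type-safe.
A simplified sketch of such a value type; the real class carries more than
this:

// Sketch: typed wrapper so APIs take ContainerID rather than a bare long.
public final class ContainerID implements Comparable<ContainerID> {
  private final long id;

  private ContainerID(long id) {
    this.id = id;
  }

  // Factory matching the call sites above: ContainerID.valueof(containerID).
  public static ContainerID valueof(long id) {
    return new ContainerID(id);
  }

  public long getId() {
    return id;
  }

  @Override
  public int compareTo(ContainerID other) {
    return Long.compare(id, other.id);
  }

  @Override
  public boolean equals(Object o) {
    return o instanceof ContainerID && ((ContainerID) o).id == id;
  }

  @Override
  public int hashCode() {
    return Long.hashCode(id);
  }
}

The payoff of the wrapper is that a container ID can no longer be confused
with any other long-valued parameter at a call site; the compiler enforces it.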

[31/50] [abbrv] hadoop git commit: HDDS-662. Introduce ContainerReplicaState in StorageContainerManager. Contributed by Nanda kumar.

2018-10-18 Thread xyao
HDDS-662. Introduce ContainerReplicaState in StorageContainerManager. 
Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50715c06
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50715c06
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50715c06

Branch: refs/heads/HDDS-4
Commit: 50715c0699b260363c40ef0729c83ac26cf0
Parents: a9a63ae
Author: Nandakumar 
Authored: Wed Oct 17 17:45:35 2018 +0530
Committer: Nandakumar 
Committed: Wed Oct 17 17:45:35 2018 +0530

--
 .../scm/client/ContainerOperationClient.java|   2 +-
 .../hadoop/hdds/scm/client/ScmClient.java   |   2 +-
 .../hdds/scm/container/ContainerException.java  |  46 ++
 .../hadoop/hdds/scm/container/ContainerID.java  |  28 +-
 .../hdds/scm/container/ContainerInfo.java   | 449 +++
 .../container/ContainerNotFoundException.java   |  44 ++
 .../ContainerReplicaNotFoundException.java  |  45 ++
 .../container/common/helpers/ContainerInfo.java | 482 
 .../common/helpers/ContainerWithPipeline.java   |   1 +
 .../StorageContainerLocationProtocol.java   |   2 +-
 ...rLocationProtocolClientSideTranslatorPB.java |   2 +-
 ...rLocationProtocolServerSideTranslatorPB.java |   2 +-
 hadoop-hdds/common/src/main/proto/hdds.proto|  17 +-
 .../report/CommandStatusReportPublisher.java|   2 +-
 .../common/report/TestReportPublisher.java  |  13 +-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |  14 +-
 .../block/DatanodeDeletedBlockTransactions.java |   6 +-
 .../hdds/scm/block/DeletedBlockLogImpl.java |   5 +-
 .../container/CloseContainerEventHandler.java   |  31 +-
 .../scm/container/CloseContainerWatcher.java|   3 +-
 .../hdds/scm/container/ContainerManager.java|  70 ++-
 .../hdds/scm/container/ContainerReplica.java| 197 +++
 .../scm/container/ContainerReportHandler.java   |  60 +-
 .../scm/container/ContainerStateManager.java| 242 
 .../hdds/scm/container/SCMContainerManager.java | 566 ---
 .../replication/ReplicationManager.java |  38 +-
 .../scm/container/states/ContainerStateMap.java | 267 +
 .../hadoop/hdds/scm/node/DeadNodeHandler.java   |  74 ++-
 .../hdds/scm/server/SCMChillModeManager.java|   2 +-
 .../scm/server/SCMClientProtocolServer.java |  32 +-
 .../scm/server/SCMDatanodeProtocolServer.java   |   2 +-
 .../scm/server/StorageContainerManager.java |  22 +-
 .../apache/hadoop/hdds/scm/HddsTestUtils.java   |   2 +-
 .../org/apache/hadoop/hdds/scm/TestUtils.java   |  48 +-
 .../hadoop/hdds/scm/block/TestBlockManager.java |   2 +-
 .../hdds/scm/block/TestDeletedBlockLog.java |  14 +-
 .../TestCloseContainerEventHandler.java |  22 +-
 .../container/TestContainerReportHandler.java   |  66 +--
 .../container/TestContainerStateManager.java|  60 +-
 .../scm/container/TestSCMContainerManager.java  | 117 ++--
 .../replication/TestReplicationManager.java |  38 +-
 .../hdds/scm/node/TestContainerPlacement.java   |   2 +-
 .../hdds/scm/node/TestDeadNodeHandler.java  | 195 ---
 .../scm/server/TestSCMChillModeManager.java |   2 +-
 .../container/TestCloseContainerWatcher.java|  12 +-
 .../org/apache/hadoop/hdds/scm/cli/SCMCLI.java  |   2 +-
 .../hdds/scm/cli/container/ListSubcommand.java  |   4 +-
 .../ozone/client/io/ChunkGroupOutputStream.java |   2 +-
 .../TestContainerStateManagerIntegration.java   | 219 ---
 .../hdds/scm/pipeline/TestNode2PipelineMap.java |   7 +-
 .../hdds/scm/pipeline/TestPipelineClose.java|  10 +-
 .../org/apache/hadoop/ozone/OzoneTestUtils.java |  10 +-
 .../ozone/TestStorageContainerManager.java  |   4 +-
 .../ozone/client/rest/TestOzoneRestClient.java  |   4 +-
 .../rpc/TestCloseContainerHandlingByClient.java |  10 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java|   6 +-
 .../commandhandler/TestBlockDeletion.java   |   3 +-
 .../TestCloseContainerByPipeline.java   |  10 +-
 .../TestCloseContainerHandler.java  |   4 +-
 .../ozone/om/TestContainerReportWithKeys.java   |   2 +-
 .../hadoop/ozone/om/TestScmChillMode.java   |  14 +-
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  |   4 -
 .../genesis/BenchMarkContainerStateMap.java |  14 +-
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java |   8 +-
 .../hadoop/ozone/scm/TestContainerSQLCli.java   |   3 +-
 65 files changed, 1949 insertions(+), 1739 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
 

[08/50] [abbrv] hadoop git commit: YARN-8448. AM HTTPS Support for AM communication with RMWeb proxy. (Contributed by Robert Kanter)

2018-10-18 Thread xyao
YARN-8448. AM HTTPS Support for AM communication with RMWeb proxy. (Contributed 
by Robert Kanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c2288ac4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c2288ac4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c2288ac4

Branch: refs/heads/HDDS-4
Commit: c2288ac45b748b4119442c46147ccc324926c340
Parents: d59ca43
Author: Haibo Chen 
Authored: Tue Oct 16 13:36:26 2018 -0700
Committer: Haibo Chen 
Committed: Tue Oct 16 13:36:26 2018 -0700

--
 .../org/apache/hadoop/security/Credentials.java |   1 +
 .../hadoop/security/ssl/KeyStoreTestUtil.java   |  21 +-
 .../hadoop/yarn/api/ApplicationConstants.java   |  22 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  20 +
 .../src/main/resources/yarn-default.xml |  18 +
 .../yarn/server/security/AMSecretKeys.java  |  45 ++
 .../nodemanager/DefaultContainerExecutor.java   |  14 +
 .../nodemanager/LinuxContainerExecutor.java |   4 +
 .../launcher/ContainerLaunch.java   |  50 ++
 .../launcher/ContainerRelaunch.java |  23 +
 .../runtime/DefaultLinuxContainerRuntime.java   |  14 +-
 .../runtime/DockerLinuxContainerRuntime.java|  36 +-
 .../runtime/LinuxContainerRuntimeConstants.java |   4 +
 .../executor/ContainerStartContext.java |  24 +
 .../impl/container-executor.c   | 121 -
 .../impl/container-executor.h   |  17 +-
 .../main/native/container-executor/impl/main.c  |  35 +-
 .../main/native/container-executor/impl/util.h  |   6 +-
 .../test/test-container-executor.c  | 228 +---
 .../TestDefaultContainerExecutor.java   | 157 ++
 .../TestLinuxContainerExecutorWithMocks.java| 115 ++--
 .../launcher/TestContainerLaunch.java   | 149 ++
 .../launcher/TestContainerRelaunch.java |  32 +-
 .../runtime/TestDockerContainerRuntime.java | 225 
 .../resourcemanager/RMActiveServiceContext.java |  15 +
 .../yarn/server/resourcemanager/RMContext.java  |   5 +
 .../server/resourcemanager/RMContextImpl.java   |  11 +
 .../server/resourcemanager/ResourceManager.java |   9 +
 .../resourcemanager/amlauncher/AMLauncher.java  |  29 ++
 .../security/ProxyCAManager.java|  68 +++
 .../TestApplicationMasterLauncher.java  |  86 ++-
 .../security/TestProxyCAManager.java|  51 ++
 .../hadoop/yarn/server/webproxy/ProxyCA.java| 408 +++
 .../yarn/server/webproxy/WebAppProxy.java   |   1 +
 .../server/webproxy/WebAppProxyServlet.java |  89 +++-
 .../yarn/server/webproxy/TestProxyCA.java   | 518 +++
 .../server/webproxy/TestWebAppProxyServlet.java |  58 ++-
 37 files changed, 2406 insertions(+), 323 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2288ac4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
index 6a9527a..4fafa4a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
@@ -59,6 +59,7 @@ import org.slf4j.LoggerFactory;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class Credentials implements Writable {
+
   public enum SerializedFormat {
 WRITABLE((byte) 0x00),
 PROTOBUF((byte) 0x01);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2288ac4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
index 898c94e..1870b22 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
@@ -25,6 +25,7 @@ import 
org.apache.hadoop.security.alias.CredentialProviderFactory;
 import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
 import org.apache.hadoop.test.GenericTestUtils;
 
+import java.io.ByteArrayInputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.FileWriter;
@@ -50,6 +51,7 @@ import java.security.NoSuchProviderException;
 import 
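
At a high level, the feature has the RM web proxy act as a private certificate
authority whose certificates the AM and its clients must trust. As a generic
illustration of the trust side using only standard JSSE classes (this is not
the ProxyCA API itself, and the method name is hypothetical):

import java.security.KeyStore;
import java.security.cert.X509Certificate;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManagerFactory;

// Sketch: build an SSLContext that trusts exactly one custom CA certificate,
// the way a client of the proxy would trust the proxy's private CA.
static SSLContext trustOnly(X509Certificate caCert) throws Exception {
  KeyStore trustStore = KeyStore.getInstance(KeyStore.getDefaultType());
  trustStore.load(null, null);  // fresh, empty in-memory store
  trustStore.setCertificateEntry("proxy-ca", caCert);

  TrustManagerFactory tmf = TrustManagerFactory.getInstance(
      TrustManagerFactory.getDefaultAlgorithm());
  tmf.init(trustStore);

  SSLContext ctx = SSLContext.getInstance("TLS");
  ctx.init(null, tmf.getTrustManagers(), null);
  return ctx;
}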

[45/50] [abbrv] hadoop git commit: HDDS-548. Create a Self-Signed Certificate. Contributed by Anu Engineer.

2018-10-18 Thread xyao
HDDS-548. Create a Self-Signed Certificate. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c810faa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c810faa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c810faa

Branch: refs/heads/HDDS-4
Commit: 5c810faa42b63dc1dd1e5d8a666e6f83f17f5acf
Parents: 84f370c
Author: Ajay Kumar 
Authored: Fri Sep 28 06:52:56 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed Oct 17 14:04:03 2018 -0700

--
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |  16 ++
 .../hdds/security/x509/HDDSKeyGenerator.java|  99 ---
 .../hdds/security/x509/HDDSKeyPEMWriter.java| 254 --
 .../hdds/security/x509/SecurityConfig.java  | 105 +---
 .../certificates/SelfSignedCertificate.java | 212 +++
 .../x509/certificates/package-info.java |  22 ++
 .../x509/exceptions/CertificateException.java   |  63 +
 .../x509/exceptions/SCMSecurityException.java   |  64 +
 .../security/x509/exceptions/package-info.java  |  23 ++
 .../security/x509/keys/HDDSKeyGenerator.java| 106 
 .../security/x509/keys/HDDSKeyPEMWriter.java| 255 ++
 .../hdds/security/x509/keys/package-info.java   |  23 ++
 .../security/x509/TestHDDSKeyGenerator.java |  81 --
 .../security/x509/TestHDDSKeyPEMWriter.java | 213 ---
 .../x509/certificates/TestRootCertificate.java  | 258 +++
 .../x509/certificates/package-info.java |  22 ++
 .../x509/keys/TestHDDSKeyGenerator.java |  87 +++
 .../x509/keys/TestHDDSKeyPEMWriter.java | 216 
 .../hdds/security/x509/keys/package-info.java   |  22 ++
 .../hadoop/hdds/security/x509/package-info.java |  22 ++
 20 files changed, 1484 insertions(+), 679 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c810faa/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 8088a9c..9fd5c63 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -115,4 +115,20 @@ public final class HddsConfigKeys {
   public static final String HDDS_PUBLIC_KEY_FILE_NAME = "hdds.public.key.file"
   + ".name";
   public static final String HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT = "public.pem";
+
+  /**
+   * Maximum duration of certificates issued by SCM including Self-Signed 
Roots.
+   * The formats accepted are based on the ISO-8601 duration format 
PnDTnHnMn.nS
+   * Default value is 5 years and written as P1865D.
+   */
+  public static final String HDDS_X509_MAX_DURATION = "hdds.x509.max.duration";
+  // Limit Certificate duration to a max value of 5 years.
+  public static final String HDDS_X509_MAX_DURATION_DEFAULT= "P1865D";
+
+  public static final String HDDS_X509_SIGNATURE_ALGO =
+  "hdds.x509.signature.algorithm";
+  public static final String HDDS_X509_SIGNATURE_ALGO_DEFAULT = 
"SHA256withRSA";
+
+
+
 }
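
The duration key is an ISO-8601 string, so java.time can parse it directly. A
small illustrative sketch of turning the default into a certificate validity
bound (not the actual SCM code path):

import java.time.Duration;
import java.time.LocalDateTime;

// Sketch: "P1865D" parses as 1865 days, a little over five years.
static LocalDateTime maxCertExpiry(String isoDuration) {
  // Requested certificate lifetimes should be clamped to this bound.
  Duration maxDuration = Duration.parse(isoDuration);  // e.g. "P1865D"
  return LocalDateTime.now().plus(maxDuration);
}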

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c810faa/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyGenerator.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyGenerator.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyGenerator.java
deleted file mode 100644
index cb411b2..000
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyGenerator.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-package org.apache.hadoop.hdds.security.x509;
-
-import 

[32/50] [abbrv] hadoop git commit: HDDS-661. When a volume fails in datanode, VersionEndpointTask#call ends up in dead lock. Contributed by Hanisha Koneru.

2018-10-18 Thread xyao
HDDS-661. When a volume fails in datanode, VersionEndpointTask#call ends up in 
dead lock. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d54f5598
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d54f5598
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d54f5598

Branch: refs/heads/HDDS-4
Commit: d54f5598f4ccd1031e8295a215a3183f3647031a
Parents: 50715c0
Author: Nandakumar 
Authored: Wed Oct 17 18:44:05 2018 +0530
Committer: Nandakumar 
Committed: Wed Oct 17 18:44:05 2018 +0530

--
 .../container/common/states/endpoint/VersionEndpointTask.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d54f5598/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index 2d04677..79fa174 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -75,7 +75,7 @@ public class VersionEndpointTask implements
 
   // Check volumes
   VolumeSet volumeSet = ozoneContainer.getVolumeSet();
-  volumeSet.readLock();
+  volumeSet.writeLock();
   try {
 Map volumeMap = volumeSet.getVolumeMap();
 
@@ -94,12 +94,12 @@ public class VersionEndpointTask implements
   }
 }
 if (volumeSet.getVolumesList().size() == 0) {
-  // All volumes are inconsistent state
+  // All volumes are in inconsistent state
   throw new DiskOutOfSpaceException("All configured Volumes are in " +
   "Inconsistent State");
 }
   } finally {
-volumeSet.readUnlock();
+volumeSet.writeUnlock();
   }
 
   ozoneContainer.getDispatcher().setScmId(scmId);
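
The switch from readLock to writeLock is the whole fix: the guarded block can
remove failed volumes from the shared volume map, which is a mutation, and a
ReentrantReadWriteLock held for reading cannot be upgraded, so a code path
that then tries to take the write lock blocks forever. A minimal sketch of the
corrected pattern (class and field names are illustrative):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Sketch: mutations go under the write lock. Acquiring writeLock() while
// the same thread already holds readLock() deadlocks, because
// ReentrantReadWriteLock does not support read-to-write lock upgrades.
class VolumeSetSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final Map<String, Boolean> volumeFailed = new HashMap<>();

  void removeFailedVolumes() {
    lock.writeLock().lock();
    try {
      volumeFailed.values().removeIf(failed -> failed);
    } finally {
      lock.writeLock().unlock();
    }
  }
}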





[18/50] [abbrv] hadoop git commit: YARN-8810. Fixed a YARN service bug in comparing ConfigFile object. Contributed by Chandni Singh

2018-10-18 Thread xyao
YARN-8810.  Fixed a YARN service bug in comparing ConfigFile object.
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3bfd214a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3bfd214a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3bfd214a

Branch: refs/heads/HDDS-4
Commit: 3bfd214a59a60263aff67850c4d646c64fd76a01
Parents: 657032f
Author: Eric Yang 
Authored: Tue Oct 16 18:54:40 2018 -0400
Committer: Eric Yang 
Committed: Tue Oct 16 18:54:40 2018 -0400

--
 .../yarn/service/UpgradeComponentsFinder.java   |  2 +-
 .../yarn/service/api/records/ConfigFile.java|  3 +-
 .../TestDefaultUpgradeComponentsFinder.java | 40 ++--
 3 files changed, 40 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bfd214a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/UpgradeComponentsFinder.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/UpgradeComponentsFinder.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/UpgradeComponentsFinder.java
index 19ff6db..96a34f4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/UpgradeComponentsFinder.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/UpgradeComponentsFinder.java
@@ -88,7 +88,7 @@ public interface UpgradeComponentsFinder {
   }
 
   if (!Objects.equals(currentDef.getConfiguration(),
-  currentDef.getConfiguration())) {
+  targetDef.getConfiguration())) {
 return targetDef.getComponents();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bfd214a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
index cd9dc84..1cdae86 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java
@@ -199,7 +199,8 @@ public class ConfigFile implements Serializable {
 ConfigFile configFile = (ConfigFile) o;
 return Objects.equals(this.type, configFile.type)
 && Objects.equals(this.destFile, configFile.destFile)
-&& Objects.equals(this.srcFile, configFile.srcFile);
+&& Objects.equals(this.srcFile, configFile.srcFile)
+&& Objects.equals(this.properties, configFile.properties);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bfd214a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java
index b0a01b3..304e740 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java
@@ 
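
Both fixes in this commit are the same class of bug: an equals() comparison of
a value with itself (currentDef.getConfiguration() on both sides) and an
equals() that omitted the properties field. A compact sketch of the contract
the fixed ConfigFile.equals now satisfies, with simplified fields:

import java.util.Map;
import java.util.Objects;

// Sketch: equals() must cover every significant field, and hashCode() must
// use the same fields, or hash-based collections misbehave.
final class ConfigFileSketch {
  String type;
  String destFile;
  String srcFile;
  Map<String, String> properties;

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof ConfigFileSketch)) {
      return false;
    }
    ConfigFileSketch that = (ConfigFileSketch) o;
    return Objects.equals(type, that.type)
        && Objects.equals(destFile, that.destFile)
        && Objects.equals(srcFile, that.srcFile)
        && Objects.equals(properties, that.properties);  // the added field
  }

  @Override
  public int hashCode() {
    return Objects.hash(type, destFile, srcFile, properties);
  }
}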

[17/50] [abbrv] hadoop git commit: HDDS-665. Add hdds.datanode.dir to docker-config. Contributed by Bharat Viswanadham.

2018-10-18 Thread xyao
HDDS-665. Add hdds.datanode.dir to docker-config. Contributed by Bharat 
Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/657032f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/657032f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/657032f5

Branch: refs/heads/HDDS-4
Commit: 657032f5ddd2cdf111fc89c96672fe08b483cbfc
Parents: 84e22a6
Author: Bharat Viswanadham 
Authored: Tue Oct 16 15:29:53 2018 -0700
Committer: Bharat Viswanadham 
Committed: Tue Oct 16 15:29:53 2018 -0700

--
 hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config   | 1 +
 hadoop-ozone/dist/src/main/compose/ozone/docker-config| 1 +
 hadoop-ozone/dist/src/main/compose/ozonefs/docker-config  | 1 +
 hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config| 2 ++
 hadoop-ozone/dist/src/main/compose/ozones3/docker-config  | 2 ++
 hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config | 1 +
 6 files changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/657032f5/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config
index 3b2819f..9729aef 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config
@@ -24,6 +24,7 @@ OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.scm.client.address=scm
 OZONE-SITE.XML_ozone.replication=1
 
OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
+OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
 
 HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
 HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000

http://git-wip-us.apache.org/repos/asf/hadoop/blob/657032f5/hadoop-ozone/dist/src/main/compose/ozone/docker-config
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone/docker-config
index f2c8db1..86257ff 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-config
@@ -24,6 +24,7 @@ OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.handler.type=distributed
 OZONE-SITE.XML_ozone.scm.client.address=scm
 OZONE-SITE.XML_ozone.replication=1
+OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
 LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout

http://git-wip-us.apache.org/repos/asf/hadoop/blob/657032f5/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
index 4ff7f56..675dcba 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonefs/docker-config
@@ -25,6 +25,7 @@ OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.handler.type=distributed
 OZONE-SITE.XML_ozone.scm.client.address=scm
 OZONE-SITE.XML_ozone.scm.heartbeat.interval=3s
+OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 OZONE-SITE.XML_ozone.replication=1
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300

http://git-wip-us.apache.org/repos/asf/hadoop/blob/657032f5/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config
index f1c0147..a814c39 100644
--- a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config
@@ -23,6 +23,8 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
 OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.handler.type=distributed
 OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
+
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
 OZONE-SITE.XML_ozone.replication=1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/657032f5/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config 

[46/50] [abbrv] hadoop git commit: HDDS-591. Adding ASF license header to kadm5.acl. Contributed by Ajay Kumar.

2018-10-18 Thread xyao
HDDS-591. Adding ASF license header to kadm5.acl. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e82f1548
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e82f1548
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e82f1548

Branch: refs/heads/HDDS-4
Commit: e82f15486e632237fcc1394829ee9cecffe5f644
Parents: cbc8d39
Author: Xiaoyu Yao 
Authored: Wed Oct 10 10:01:01 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed Oct 17 14:04:04 2018 -0700

--
 .../docker-image/docker-krb5/Dockerfile-krb5 |  1 +
 .../docker-image/docker-krb5/kadm5.acl   | 19 +++
 2 files changed, 20 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e82f1548/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/Dockerfile-krb5
--
diff --git 
a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/Dockerfile-krb5
 
b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/Dockerfile-krb5
index b5b931d..14532d4 100644
--- 
a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/Dockerfile-krb5
+++ 
b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/Dockerfile-krb5
@@ -28,6 +28,7 @@ RUN kdb5_util create -s -P Welcome1
 RUN kadmin.local -q "addprinc -randkey admin/ad...@example.com"
 RUN kadmin.local -q "ktadd -k /tmp/admin.keytab admin/ad...@example.com"
 ADD launcher.sh .
+RUN chmod +x /opt/launcher.sh
 RUN mkdir -p /data
 ENTRYPOINT ["/usr/local/bin/dumb-init", "--", "/opt/launcher.sh"]
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e82f1548/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/kadm5.acl
--
diff --git 
a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/kadm5.acl
 
b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/kadm5.acl
index 8fe9f69..f0cd660 100644
--- 
a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/kadm5.acl
+++ 
b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/kadm5.acl
@@ -1 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
 */ad...@example.com x





[11/50] [abbrv] hadoop git commit: YARN-8892. YARN UI2 doc changes to update security status (verified under security environment). (Sunil G via wangda)

2018-10-18 Thread xyao
YARN-8892. YARN UI2 doc changes to update security status (verified under 
security environment). (Sunil G via wangda)

Change-Id: I8bc8622936861b8d6de3e42a0b75af86ad8a3961


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/538250db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/538250db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/538250db

Branch: refs/heads/HDDS-4
Commit: 538250db26ce0b261bb74053348cddfc2d65cf52
Parents: 143d747
Author: Wangda Tan 
Authored: Tue Oct 16 13:41:17 2018 -0700
Committer: Wangda Tan 
Committed: Tue Oct 16 13:51:02 2018 -0700

--
 .../hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/538250db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
index 609ebe1..4c9daed 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
@@ -52,4 +52,4 @@ Open your browser, go to `rm-address:8088/ui2` and try it!
 Notes
 -----
 
-- This UI framework is not verified under security environment, please use with caution under security environment.
+This UI framework is verified in secure environments as well.





[10/50] [abbrv] hadoop git commit: YARN-8875. [Submarine] Add documentation for submarine installation script details. (Xun Liu via wangda)

2018-10-18 Thread xyao
YARN-8875. [Submarine] Add documentation for submarine installation script 
details. (Xun Liu via wangda)

Change-Id: I1c8d39c394e5a30f967ea514919835b951f2c124


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed08dd3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed08dd3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed08dd3b

Branch: refs/heads/HDDS-4
Commit: ed08dd3b0c9cec20373e8ca4e34d6526bd759943
Parents: babd144
Author: Wangda Tan 
Authored: Tue Oct 16 13:36:09 2018 -0700
Committer: Wangda Tan 
Committed: Tue Oct 16 13:51:01 2018 -0700

--
 .../src/site/markdown/HowToInstall.md   |  36 +++
 .../src/site/markdown/Index.md  |   4 +-
 .../src/site/markdown/InstallationGuide.md  | 205 +++
 .../src/site/markdown/InstallationScriptCN.md   | 242 ++
 .../src/site/markdown/InstallationScriptEN.md   | 250 +++
 .../src/site/markdown/TestAndTroubleshooting.md | 165 
 .../resources/images/submarine-installer.gif| Bin 0 -> 546547 bytes
 7 files changed, 724 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed08dd3b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/HowToInstall.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/HowToInstall.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/HowToInstall.md
new file mode 100644
index 000..05d87c1
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/HowToInstall.md
@@ -0,0 +1,36 @@
+
+
+# How to Install Dependencies
+
+The Submarine project uses YARN Service, Docker containers, and GPUs (when GPU hardware is available and properly configured).
+
+That means that, as an admin, you have to properly set up the YARN Service related dependencies, including:
+- YARN Registry DNS
+
+Docker related dependencies, including:
+- A Docker binary with the expected version.
+- A Docker network that allows Docker containers to talk to each other across different nodes.
+
+And, when GPUs are to be used:
+- The GPU driver.
+- Nvidia-docker.
+
+For your convenience, we provide installation documents to help you set up your environment. You can always choose to install the dependencies in your own way.
+
+Use the Submarine installer to install the dependencies: [EN](InstallationScriptEN.html) [CN](InstallationScriptCN.html)
+
+Alternatively, you can install the dependencies manually: [EN](InstallationGuide.html) [CN](InstallationGuideChineseVersion.html)
+
+Once you have installed the dependencies, please follow the [TestAndTroubleshooting](TestAndTroubleshooting.html) guide.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed08dd3b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
index 0006f6c..baeaa15 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
@@ -41,6 +41,4 @@ Click below contents if you want to understand more.
 
 - [Developer guide](DeveloperGuide.html)
 
-- [Installation guide](InstallationGuide.html)
-
-- [Installation guide Chinese version](InstallationGuideChineseVersion.html)
+- [Installation guides](HowToInstall.html)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed08dd3b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md
index d4f4269..4ef2bda 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md
@@ -16,9 +16,11 @@
 
 ## Prerequisites
 
+(Please note that all the following prerequisites are just 

[34/50] [abbrv] hadoop git commit: HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. Contributed by Jason Lowe

2018-10-18 Thread xyao
HADOOP-15859. ZStandardDecompressor.c mistakes a class for an instance. 
Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9abda839
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9abda839
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9abda839

Branch: refs/heads/HDDS-4
Commit: 9abda83947a5babfe5a650b3409ad952f6782105
Parents: 24dc068
Author: Jason Lowe 
Authored: Wed Oct 17 14:38:42 2018 -0500
Committer: Jason Lowe 
Committed: Wed Oct 17 14:38:42 2018 -0500

--
 .../hadoop/io/compress/zstd/ZStandardCompressor.java |  2 +-
 .../apache/hadoop/io/compress/zstd/ZStandardCompressor.c | 10 +-
 .../hadoop/io/compress/zstd/ZStandardDecompressor.c  | 11 +--
 3 files changed, 11 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9abda839/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
index eb2121a..7445502 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java
@@ -298,7 +298,7 @@ public class ZStandardCompressor implements Compressor {
   private native static void init(int level, long stream);
   private native int deflateBytesDirect(ByteBuffer src, int srcOffset,
   int srcLen, ByteBuffer dst, int dstLen);
-  private static native int getStreamSize();
+  private native static int getStreamSize();
   private native static void end(long strm);
   private native static void initIDs();
   public native static String getLibraryName();
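
The bug being fixed: these functions back static native Java methods, so the second JNI argument is the defining class (jclass), not an instance (jobject); declaring it "jobject this" misleads readers and invites misuse. A minimal sketch of the two declaration styles and their C-side mappings (class name hypothetical, illustrative only):

    // NativeMappingSketch.java -- assumes a native library actually
    // provides these symbols at runtime.
    public class NativeMappingSketch {
      // static native  -> C: JNIEXPORT jlong JNICALL ..._create(JNIEnv *env, jclass clazz)
      private static native long create();

      // instance native -> C: JNIEXPORT void JNICALL ..._reset(JNIEnv *env, jobject this, jlong stream)
      private native void reset(long stream);
    }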

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9abda839/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
index 04f2a3e..055683a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c
@@ -139,7 +139,7 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // Create the compression stream
-JNIEXPORT jlong JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv *env, jobject this) {
+JNIEXPORT jlong JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv *env, jclass clazz) {
     ZSTD_CStream* const stream =  dlsym_ZSTD_createCStream();
     if (stream == NULL) {
         THROW(env, "java/lang/InternalError", "Error creating the stream");
@@ -149,7 +149,7 @@ JNIEXPORT jlong JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompres
 }
 
 // Initialize the compression stream
-JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, jobject this, jint level, jlong stream) {
+JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, jclass clazz, jint level, jlong stream) {
     size_t result = dlsym_ZSTD_initCStream((ZSTD_CStream *) stream, level);
     if (dlsym_ZSTD_isError(result)) {
         THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -158,7 +158,7 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompress
 }
 
 // free the compression stream
-JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, jobject this, jlong stream) {
+JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, jclass clazz, jlong stream) {
     size_t result = dlsym_ZSTD_freeCStream((ZSTD_CStream *) stream);
     if (dlsym_ZSTD_isError(result)) {
         THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
@@ -227,7 +227,7 @@ JNIEXPORT jint Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_defla
 }
 
 JNIEXPORT jstring JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_getLibraryName
-(JNIEnv *env, 

[25/50] [abbrv] hadoop git commit: YARN-8759. Copy of resource-types.xml is not deleted if test fails, causes other test failures. Contributed by Antal Bálint Steinbach.

2018-10-18 Thread xyao
YARN-8759. Copy of resource-types.xml is not deleted if test fails, causes 
other test failures. Contributed by Antal Bálint Steinbach.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5085e5fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5085e5fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5085e5fa

Branch: refs/heads/HDDS-4
Commit: 5085e5fa9e8c7e489a8518e8541c12b14f3651df
Parents: 41b3603
Author: Sunil G 
Authored: Wed Oct 17 16:05:08 2018 +0530
Committer: Sunil G 
Committed: Wed Oct 17 16:05:08 2018 +0530

--
 .../yarn/util/resource/TestResourceUtils.java   | 51 
 .../resourcemanager/TestClientRMService.java| 17 ---
 2 files changed, 32 insertions(+), 36 deletions(-)
--
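
The fix replaces method-local "dest" variables with fields, so a single @After hook can delete the copied resource-types.xml / node-resources.xml even when an assertion fails mid-test. The same pattern as a standalone JUnit 4 sketch (class and file names hypothetical):

    import java.io.File;
    import org.junit.After;
    import org.junit.Test;

    public class TempCopyCleanupSketch {
      private File copiedFile;  // remembered so teardown can always clean up

      @Test
      public void testWithTempCopy() throws Exception {
        copiedFile = new File(System.getProperty("java.io.tmpdir"), "resource-types.xml");
        copiedFile.createNewFile();  // stand-in for FileUtils.copyFile(...)
        // ... assertions may fail here; the copy is still removed below.
      }

      @After
      public void teardown() {
        if (copiedFile != null && copiedFile.exists()) {
          copiedFile.delete();
        }
      }
    }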


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5085e5fa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
index 9b48017..c96982d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
@@ -39,6 +39,9 @@ import java.util.Map;
  */
 public class TestResourceUtils {
 
+  private File nodeResourcesFile;
+  private File resourceTypesFile;
+
   static class ResourceFileInformation {
 String filename;
 int resourceCount;
@@ -75,12 +78,11 @@ public class TestResourceUtils {
 
   @After
   public void teardown() {
-Configuration conf = new YarnConfiguration();
-File source = new File(
-conf.getClassLoader().getResource("resource-types-1.xml").getFile());
-File dest = new File(source.getParent(), "resource-types.xml");
-if (dest.exists()) {
-  dest.delete();
+if(nodeResourcesFile != null && nodeResourcesFile.exists()) {
+  nodeResourcesFile.delete();
+}
+if(resourceTypesFile != null && resourceTypesFile.exists()) {
+  resourceTypesFile.delete();
 }
   }
 
@@ -136,8 +138,8 @@ public class TestResourceUtils {
   File source = new File(
   conf.getClassLoader().getResource(testInformation.filename)
   .getFile());
-  File dest = new File(source.getParent(), "resource-types.xml");
-  FileUtils.copyFile(source, dest);
+  resourceTypesFile = new File(source.getParent(), "resource-types.xml");
+  FileUtils.copyFile(source, resourceTypesFile);
   res = ResourceUtils.getResourceTypes();
   testMemoryAndVcores(res);
   Assert.assertEquals(testInformation.resourceCount, res.size());
@@ -148,7 +150,6 @@ public class TestResourceUtils {
 res.containsKey(resourceName));
 Assert.assertEquals(entry.getValue(), 
res.get(resourceName).getUnits());
   }
-  dest.delete();
 }
   }
 
@@ -161,20 +162,17 @@ public class TestResourceUtils {
 "resource-types-error-4.xml"};
 for (String resourceFile : resourceFiles) {
   ResourceUtils.resetResourceTypes();
-  File dest = null;
   try {
 File source =
 new 
File(conf.getClassLoader().getResource(resourceFile).getFile());
-dest = new File(source.getParent(), "resource-types.xml");
-FileUtils.copyFile(source, dest);
+resourceTypesFile = new File(source.getParent(), "resource-types.xml");
+FileUtils.copyFile(source, resourceTypesFile);
 ResourceUtils.getResourceTypes();
 Assert.fail("Expected error with file " + resourceFile);
   } catch (NullPointerException ne) {
 throw ne;
   } catch (Exception e) {
-if (dest != null) {
-  dest.delete();
-}
+//Test passed
   }
 }
   }
@@ -275,7 +273,7 @@ public class TestResourceUtils {
 ResourceUtils.initializeResourcesMap(conf);
 Assert.fail("resource map initialization should fail");
   } catch (Exception e) {
-// do nothing
+//Test passed
   }
 }
   }
@@ -299,11 +297,10 @@ public class TestResourceUtils {
 for (Map.Entry entry : testRun.entrySet()) {
   String resourceFile = entry.getKey();
   ResourceUtils.resetNodeResources();
-  File dest;
   File source = new File(
   conf.getClassLoader().getResource(resourceFile).getFile());
-  dest = new File(source.getParent(), "node-resources.xml");
-  FileUtils.copyFile(source, dest);
+  

[40/50] [abbrv] hadoop git commit: HDDS-546. Resolve bouncy castle dependency for hadoop-hdds-common. Contributed by Ajay Kumar.

2018-10-18 Thread xyao
HDDS-546. Resolve bouncy castle dependency for hadoop-hdds-common. Contributed 
by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84f370cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84f370cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84f370cf

Branch: refs/heads/HDDS-4
Commit: 84f370cf451694d8269c7f86cca3a040081519c0
Parents: 6c64849
Author: Ajay Kumar 
Authored: Tue Sep 25 14:19:14 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed Oct 17 14:04:03 2018 -0700

--
 hadoop-hdds/common/pom.xml | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84f370cf/hadoop-hdds/common/pom.xml
--
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index 94455c1..1f16d12 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -53,6 +53,10 @@
       <groupId>io.dropwizard.metrics</groupId>
       <artifactId>metrics-core</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.bouncycastle</groupId>
+      <artifactId>bcprov-jdk15on</artifactId>
+    </dependency>
   </dependencies>
 
 
@@ -106,7 +110,7 @@
     <dependency>
       <groupId>org.bouncycastle</groupId>
      <artifactId>bcprov-jdk15on</artifactId>
-      <version>1.49</version>
+      <version>1.54</version>
     </dependency>
   </dependencies>
 





[02/50] [abbrv] hadoop git commit: HDDS-667. Fix TestOzoneFileInterfaces. Contributed by Mukul Kumar Singh.

2018-10-18 Thread xyao
HDDS-667. Fix TestOzoneFileInterfaces. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53e5173b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53e5173b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53e5173b

Branch: refs/heads/HDDS-4
Commit: 53e5173bd1d970ec1c714568cbdb1c0dfd0fc6fb
Parents: 25f8fcb
Author: Jitendra Pandey 
Authored: Tue Oct 16 10:34:16 2018 -0700
Committer: Jitendra Pandey 
Committed: Tue Oct 16 10:34:51 2018 -0700

--
 .../hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java   | 4 ++--
 .../test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java  | 2 ++
 2 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e5173b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 7fa0cfb..67cda9f 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -53,6 +53,8 @@ import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Res
 public class BlockManagerImpl implements BlockManager {
 
   static final Logger LOG = LoggerFactory.getLogger(BlockManagerImpl.class);
+  private static byte[] blockCommitSequenceIdKey =
+  DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
 
   private Configuration config;
 
@@ -89,8 +91,6 @@ public class BlockManagerImpl implements BlockManager {
 Preconditions.checkNotNull(db, "DB cannot be null here");
 
 long blockCommitSequenceId = data.getBlockCommitSequenceId();
-byte[] blockCommitSequenceIdKey =
-DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
 byte[] blockCommitSequenceIdValue = db.get(blockCommitSequenceIdKey);
 
 // default blockCommitSequenceId for any block is 0. It the putBlock
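
The BlockManagerImpl hunk is a small hoisting fix: the key bytes never change, so deriving them once at class load avoids re-encoding the same string on every call (strictly, the new field could also be declared final, since it is never reassigned). The pattern in isolation (class name and prefix string are stand-ins, not the real OzoneConsts value):

    import java.nio.charset.StandardCharsets;

    public class KeyCacheSketch {
      // Derived once at class-load time rather than per call.
      private static final byte[] SEQUENCE_ID_KEY =
          "blockCommitSequenceId".getBytes(StandardCharsets.UTF_8);  // placeholder prefix

      public byte[] key() {
        return SEQUENCE_ID_KEY;  // no per-call String-to-bytes conversion
      }
    }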

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e5173b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 71a4bef..ae52451 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.common.Storage.StorageState;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.hdds.scm.server.SCMStorage;
@@ -283,6 +284,7 @@ public final class MiniOzoneClusterImpl implements 
MiniOzoneCluster {
   scm.getClientProtocolServer().getScmInfo().getClusterId()));
   stop();
   FileUtils.deleteDirectory(baseDir);
+  ContainerCache.getInstance(conf).shutdownCache();
 } catch (IOException e) {
   LOG.error("Exception while shutting down the cluster.", e);
 }





[22/50] [abbrv] hadoop git commit: HDDS-656. Add logic for pipeline report and action processing in new pipeline code. Contributed by Lokesh Jain.

2018-10-18 Thread xyao
HDDS-656. Add logic for pipeline report and action processing in new pipeline 
code. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64a43c92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64a43c92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64a43c92

Branch: refs/heads/HDDS-4
Commit: 64a43c92c2133f3b9a317dcc4f0391ad6b604194
Parents: 5331387
Author: Nandakumar 
Authored: Wed Oct 17 13:56:54 2018 +0530
Committer: Nandakumar 
Committed: Wed Oct 17 13:57:38 2018 +0530

--
 .../hadoop/hdds/scm/XceiverClientRatis.java |  19 ++
 .../hadoop/hdds/scm/pipeline/Pipeline.java  | 103 ++---
 hadoop-hdds/common/src/main/proto/hdds.proto|   1 +
 .../scm/pipeline/PipelineActionHandler.java |  66 ++
 .../hdds/scm/pipeline/PipelineFactory.java  |   7 +-
 .../hdds/scm/pipeline/PipelineManager.java  |  13 +-
 .../scm/pipeline/PipelineReportHandler.java | 104 +
 .../hdds/scm/pipeline/PipelineStateManager.java | 135 +++-
 .../hdds/scm/pipeline/PipelineStateMap.java |  76 ---
 .../scm/pipeline/RatisPipelineProvider.java |  26 ++-
 .../hdds/scm/pipeline/SCMPipelineManager.java   |  81 ---
 .../scm/pipeline/SimplePipelineProvider.java|   6 +-
 .../org/apache/hadoop/hdds/scm/TestUtils.java   |  17 ++
 .../scm/pipeline/TestPipelineStateManager.java  | 209 ++-
 .../scm/pipeline/TestRatisPipelineProvider.java |  18 +-
 .../scm/pipeline/TestSCMPipelineManager.java| 187 +
 .../pipeline/TestSimplePipelineProvider.java|  16 +-
 17 files changed, 804 insertions(+), 280 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64a43c92/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 4efe7ba..45e9d6e 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.scm;
 
 import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.ratis.retry.RetryPolicy;
 import org.apache.ratis.thirdparty.com.google.protobuf
@@ -73,6 +74,24 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 retryPolicy);
   }
 
+  public static XceiverClientRatis newXceiverClientRatis(
+  org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline,
+  Configuration ozoneConf) {
+final String rpcType = ozoneConf
+.get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
+ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
+final int maxOutstandingRequests =
+HddsClientUtils.getMaxOutstandingRequests(ozoneConf);
+final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf);
+Pipeline pipeline1 =
+    new Pipeline(pipeline.getNodes().get(0).getUuidString(),
+        HddsProtos.LifeCycleState.OPEN, pipeline.getType(),
+        pipeline.getFactor(), PipelineID.valueOf(pipeline.getID().getId()));
+return new XceiverClientRatis(pipeline1,
+SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests,
+retryPolicy);
+  }
+
   private final Pipeline pipeline;
   private final RpcType rpcType;
   private final AtomicReference<RaftClient> client = new AtomicReference<>();
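
The new factory overload repeats a standard Hadoop idiom: read a typed setting with a compile-time default and build the client from it. The idiom in isolation (the key and default below are stand-ins, not the real ScmConfigKeys values):

    import org.apache.hadoop.conf.Configuration;

    public class ConfWithDefaultSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // get(key, default) falls back to the default when the key is unset.
        String rpcType = conf.get("dfs.container.ratis.rpc.type", "GRPC");
        System.out.println("rpc type = " + rpcType);
      }
    }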

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64a43c92/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index b58a001..b22a0c6 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -23,12 +23,14 @@ import org.apache.commons.lang3.builder.EqualsBuilder;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import 

[37/50] [abbrv] hadoop git commit: HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.

2018-10-18 Thread xyao
HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af0c8f6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af0c8f6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af0c8f6d

Branch: refs/heads/HDDS-4
Commit: af0c8f6d0999a6bbd96da8d9555be2341d18
Parents: be2497e
Author: Xiaoyu Yao 
Authored: Wed May 9 15:56:03 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed Oct 17 13:48:55 2018 -0700

--
 hadoop-hdds/common/src/main/resources/ozone-default.xml | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af0c8f6d/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 287d913..3ed4f09 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -160,6 +160,7 @@
 
   </property>
   <property>
+<<<<<<< HEAD
     <name>dfs.ratis.client.request.timeout.duration</name>
     <value>3s</value>
     <tag>OZONE, RATIS, MANAGEMENT</tag>
@@ -217,6 +218,9 @@
   </property>
   <property>
     <name>hdds.container.report.interval</name>
+=======
+    <name>ozone.container.report.interval</name>
+>>>>>>> HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.
     <value>60000ms</value>
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>
     <description>Time interval of the datanode to send container report. Each





[23/50] [abbrv] hadoop git commit: HADOOP-15854. AuthToken Use StringBuilder instead of StringBuffer. Contributed by Beluga Behr.

2018-10-18 Thread xyao
HADOOP-15854. AuthToken Use StringBuilder instead of StringBuffer.
Contributed by Beluga Behr.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b738cb14
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b738cb14
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b738cb14

Branch: refs/heads/HDDS-4
Commit: b738cb148cb2a15e72a5c27200eca2d4b383bf9c
Parents: 64a43c9
Author: Steve Loughran 
Authored: Wed Oct 17 10:29:09 2018 +0100
Committer: Steve Loughran 
Committed: Wed Oct 17 10:29:09 2018 +0100

--
 .../org/apache/hadoop/security/authentication/util/AuthToken.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b738cb14/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/AuthToken.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/AuthToken.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/AuthToken.java
index e959f65..844501c 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/AuthToken.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/AuthToken.java
@@ -128,7 +128,7 @@ public class AuthToken implements Principal {
* Generates the token.
*/
   private void generateToken() {
-StringBuffer sb = new StringBuffer();
+StringBuilder sb = new StringBuilder();
 
sb.append(USER_NAME).append("=").append(getUserName()).append(ATTR_SEPARATOR);
 sb.append(PRINCIPAL).append("=").append(getName()).append(ATTR_SEPARATOR);
 sb.append(TYPE).append("=").append(getType()).append(ATTR_SEPARATOR);
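
The rationale: StringBuffer synchronizes every append, while StringBuilder is the unsynchronized drop-in replacement for strings assembled within a single method call, as generateToken does. A minimal sketch of the same assembly style (class and field names invented):

    public class TokenStringSketch {
      static String generate(String user, String type, long expires) {
        // Confined to this call, so no synchronization is needed.
        StringBuilder sb = new StringBuilder();
        sb.append("u=").append(user).append('&');
        sb.append("t=").append(type).append('&');
        sb.append("e=").append(expires);
        return sb.toString();
      }
    }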





[33/50] [abbrv] hadoop git commit: HADOOP-11100. Support to configure ftpClient.setControlKeepAliveTimeout. Contributed by Adam Antal.

2018-10-18 Thread xyao
HADOOP-11100. Support to configure ftpClient.setControlKeepAliveTimeout.
Contributed by Adam Antal.

Signed-off-by: Xiao Chen 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24dc068a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24dc068a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24dc068a

Branch: refs/heads/HDDS-4
Commit: 24dc068a361648b4e59e1807b07ff2239f41c740
Parents: d54f559
Author: Adam Antal 
Authored: Wed Oct 17 11:32:17 2018 -0700
Committer: Xiao Chen 
Committed: Wed Oct 17 11:34:50 2018 -0700

--
 .../java/org/apache/hadoop/fs/ftp/FTPFileSystem.java | 13 +
 .../src/main/resources/core-default.xml  |  8 
 .../hadoop/conf/TestCommonConfigurationFields.java   |  1 +
 .../org/apache/hadoop/fs/ftp/TestFTPFileSystem.java  | 15 +++
 4 files changed, 37 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24dc068a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
index 676c207..4b144bf 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
@@ -62,6 +62,7 @@ public class FTPFileSystem extends FileSystem {
   public static final int DEFAULT_BUFFER_SIZE = 1024 * 1024;
 
   public static final int DEFAULT_BLOCK_SIZE = 4 * 1024;
+  public static final long DEFAULT_TIMEOUT = 0;
   public static final String FS_FTP_USER_PREFIX = "fs.ftp.user.";
   public static final String FS_FTP_HOST = "fs.ftp.host";
   public static final String FS_FTP_HOST_PORT = "fs.ftp.host.port";
@@ -71,6 +72,7 @@ public class FTPFileSystem extends FileSystem {
   public static final String FS_FTP_TRANSFER_MODE = "fs.ftp.transfer.mode";
   public static final String E_SAME_DIRECTORY_ONLY =
   "only same directory renames are supported";
+  public static final String FS_FTP_TIMEOUT = "fs.ftp.timeout";
 
   private URI uri;
 
@@ -150,6 +152,7 @@ public class FTPFileSystem extends FileSystem {
   client.setFileTransferMode(getTransferMode(conf));
   client.setFileType(FTP.BINARY_FILE_TYPE);
   client.setBufferSize(DEFAULT_BUFFER_SIZE);
+  setTimeout(client, conf);
   setDataConnectionMode(client, conf);
 } else {
   throw new IOException("Login failed on server - " + host + ", port - "
@@ -160,6 +163,16 @@ public class FTPFileSystem extends FileSystem {
   }
 
   /**
+   * Set the FTPClient's timeout based on configuration.
+   * The FS_FTP_TIMEOUT value is used as the timeout (defaults to DEFAULT_TIMEOUT).
+   */
+  @VisibleForTesting
+  void setTimeout(FTPClient client, Configuration conf) {
+long timeout = conf.getLong(FS_FTP_TIMEOUT, DEFAULT_TIMEOUT);
+client.setControlKeepAliveTimeout(timeout);
+  }
+
+  /**
* Set FTP's transfer mode based on configuration. Valid values are
* STREAM_TRANSFER_MODE, BLOCK_TRANSFER_MODE and COMPRESSED_TRANSFER_MODE.
* 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24dc068a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 32dd622..599396f 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -926,6 +926,14 @@
 </property>
 
 <property>
+  <name>fs.ftp.timeout</name>
+  <value>0</value>
+  <description>
+    FTP filesystem's timeout in seconds.
+  </description>
+</property>
+
+<property>
   <name>fs.df.interval</name>
   <value>60000</value>
   <description>Disk usage statistics refresh interval in msec.</description>
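
How a client would pick the new key up, as a minimal sketch (the host, user, and password are made up; the user/password keys follow FTPFileSystem's per-host prefix convention, which you should verify against your Hadoop version):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class FtpTimeoutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setLong("fs.ftp.timeout", 30);  // control-channel keep-alive; 0 (default) disables it
        conf.set("fs.ftp.user.ftp.example.com", "user");        // hypothetical host
        conf.set("fs.ftp.password.ftp.example.com", "secret");  // hypothetical credentials
        try (FileSystem fs = FileSystem.get(URI.create("ftp://ftp.example.com/"), conf)) {
          System.out.println(fs.getUri());
        }
      }
    }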

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24dc068a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index e10617d..2766b56 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -93,6 +93,7 @@ public class 

[28/50] [abbrv] hadoop git commit: HDDS-662. Introduce ContainerReplicaState in StorageContainerManager. Contributed by Nanda kumar.

2018-10-18 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
index ed8b1e3..03c99ef 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
@@ -16,7 +16,6 @@
  */
 package org.apache.hadoop.hdds.scm.container;
 
-import com.google.common.primitives.Longs;
 import java.util.Set;
 import java.util.UUID;
 import org.apache.commons.lang3.RandomUtils;
@@ -24,26 +23,22 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.LambdaTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.List;
 import java.util.NavigableSet;
-import java.util.Random;
+import java.util.concurrent.TimeoutException;
+
 import org.slf4j.event.Level;
 
 /**
@@ -57,7 +52,6 @@ public class TestContainerStateManagerIntegration {
   private StorageContainerManager scm;
   private ContainerManager containerManager;
   private ContainerStateManager containerStateManager;
-  private PipelineSelector selector;
   private String containerOwner = "OZONE";
 
 
@@ -70,8 +64,8 @@ public class TestContainerStateManagerIntegration {
 xceiverClientManager = new XceiverClientManager(conf);
 scm = cluster.getStorageContainerManager();
 containerManager = scm.getContainerManager();
-containerStateManager = containerManager.getStateManager();
-selector = containerManager.getPipelineSelector();
+containerStateManager = ((SCMContainerManager)containerManager)
+.getContainerStateManager();
   }
 
   @After
@@ -88,13 +82,13 @@ public class TestContainerStateManagerIntegration {
 .allocateContainer(
 xceiverClientManager.getType(),
 xceiverClientManager.getFactor(), containerOwner);
+ContainerStateManager stateManager = new ContainerStateManager(conf);
 ContainerInfo info = containerStateManager
 .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
 xceiverClientManager.getType(), xceiverClientManager.getFactor(),
 HddsProtos.LifeCycleState.ALLOCATED);
 Assert.assertEquals(container1.getContainerInfo().getContainerID(),
 info.getContainerID());
-Assert.assertEquals(OzoneConsts.GB * 3, info.getAllocatedBytes());
 Assert.assertEquals(containerOwner, info.getOwner());
 Assert.assertEquals(xceiverClientManager.getType(),
 info.getReplicationType());
@@ -117,35 +111,49 @@ public class TestContainerStateManagerIntegration {
   }
 
   @Test
-  public void testContainerStateManagerRestart() throws IOException {
+  public void testContainerStateManagerRestart()
+  throws IOException, TimeoutException, InterruptedException {
 // Allocate 5 containers in ALLOCATED state and 5 in CREATING state
 
-List containers = new ArrayList<>();
 for (int i = 0; i < 10; i++) {
+
   ContainerWithPipeline container = scm.getClientProtocolServer()
   .allocateContainer(
   xceiverClientManager.getType(),
   xceiverClientManager.getFactor(), containerOwner);
-  containers.add(container.getContainerInfo());
   if (i >= 5) {
 scm.getContainerManager().updateContainerState(container
-.getContainerInfo().getContainerID(),
+.getContainerInfo().containerID(),
 HddsProtos.LifeCycleEvent.CREATE);
   }
 }
 
-// New instance of ContainerStateManager should load all the containers in
-// container store.
-ContainerStateManager stateManager =
-new ContainerStateManager(conf, 

[12/50] [abbrv] hadoop git commit: YARN-8798. [Submarine] Job should not be submitted if --input_path option is missing. (Zhankun Tang via wangda)

2018-10-18 Thread xyao
YARN-8798. [Submarine] Job should not be submitted if --input_path option is 
missing. (Zhankun Tang via wangda)

Change-Id: I7ae0e44eb5179b04a6ac861ec1c65f3b18c38f0f


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/143d7477
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/143d7477
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/143d7477

Branch: refs/heads/HDDS-4
Commit: 143d74775b2b62884090fdd88874134b9eab2888
Parents: 46d6e00
Author: Wangda Tan 
Authored: Tue Oct 16 13:39:34 2018 -0700
Committer: Wangda Tan 
Committed: Tue Oct 16 13:51:02 2018 -0700

--
 .../client/cli/param/RunJobParameters.java  |  6 +++
 .../client/cli/TestRunJobCliParsing.java| 39 
 2 files changed, 45 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/143d7477/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java
index 92a1883..d923e0f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java
@@ -62,6 +62,12 @@ public class RunJobParameters extends RunParameters {
 if (parsedCommandLine.getOptionValue(CliConstants.N_WORKERS) != null) {
   nWorkers = Integer.parseInt(
   parsedCommandLine.getOptionValue(CliConstants.N_WORKERS));
+  // Only check for a null value.
+  // A training job shouldn't ignore the INPUT_PATH option,
+  // but if nWorkers is 0, INPUT_PATH can be ignored because the
+  // user is only running TensorBoard.
+  if (null == input && 0 != nWorkers) {
+    throw new ParseException("\"--" + CliConstants.INPUT_PATH
+        + "\" is absent");
+  }
 }
 
 int nPS = 0;
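
The same guard as a self-contained Apache Commons CLI sketch (option names shortened, class hypothetical):

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.DefaultParser;
    import org.apache.commons.cli.Options;
    import org.apache.commons.cli.ParseException;

    public class InputPathGuardSketch {
      public static void main(String[] args) throws ParseException {
        Options options = new Options();
        options.addOption(null, "input_path", true, "training input");
        options.addOption(null, "num_workers", true, "worker count");
        CommandLine cli = new DefaultParser().parse(options, args);

        int nWorkers = Integer.parseInt(cli.getOptionValue("num_workers", "1"));
        String input = cli.getOptionValue("input_path");
        // Reject a missing --input_path unless this is a TensorBoard-only
        // run (num_workers == 0), mirroring the patch's rule.
        if (input == null && nWorkers != 0) {
          throw new ParseException("\"--input_path\" is absent");
        }
        System.out.println("ok: workers=" + nWorkers + ", input=" + input);
      }
    }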

http://git-wip-us.apache.org/repos/asf/hadoop/blob/143d7477/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/TestRunJobCliParsing.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/TestRunJobCliParsing.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/TestRunJobCliParsing.java
index 295d6a8..240de06 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/TestRunJobCliParsing.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/TestRunJobCliParsing.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.yarn.submarine.client.cli;
 
+import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
@@ -136,6 +137,44 @@ public class TestRunJobCliParsing {
   }
 
   @Test
+  public void testNoInputPathOptionSpecified() throws Exception {
+RunJobCli runJobCli = new RunJobCli(getMockClientContext());
+String expectedErrorMessage =
+    "\"--" + CliConstants.INPUT_PATH + "\" is absent";
+String actualMessage = "";
+try {
+  runJobCli.run(
+  new String[]{"--name", "my-job", "--docker_image", "tf-docker:1.1.0",
+  "--checkpoint_path", "hdfs://output",
+  "--num_workers", "1", "--worker_launch_cmd", "python run-job.py",
+  "--worker_resources", "memory=4g,vcores=2", "--tensorboard",
+  "true", "--verbose", "--wait_job_finish"});
+} catch (ParseException e) {
+  actualMessage = e.getMessage();
+  e.printStackTrace();
+}
+Assert.assertEquals(expectedErrorMessage, actualMessage);
+  }
+
+  /**
+   * When only TensorBoard is run, input_path is not needed.
+   */
+  @Test
+  public void testNoInputPathOptionButOnlyRunTensorboard() throws Exception {
+RunJobCli runJobCli = new 

[49/50] [abbrv] hadoop git commit: HDDS-10. Add kdc docker image for secure ozone cluster. Contributed by Ajay Kumar.

2018-10-18 Thread xyao
HDDS-10. Add kdc docker image for secure ozone cluster. Contributed by Ajay 
Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9200bc1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9200bc1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9200bc1f

Branch: refs/heads/HDDS-4
Commit: 9200bc1fc98288cb30a8f8f7ca34bbabe4aeddee
Parents: 67d4b03
Author: Xiaoyu Yao 
Authored: Thu Oct 4 13:20:09 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed Oct 17 14:04:04 2018 -0700

--
 .../dist/src/main/compose/ozonesecure/README.md | 22 +
 .../compose/ozonesecure/docker-compose.yaml | 94 
 .../docker-image/docker-krb5/Dockerfile-krb5| 33 +++
 .../docker-image/docker-krb5/README.md  | 34 +++
 .../docker-image/docker-krb5/kadm5.acl  |  1 +
 .../docker-image/docker-krb5/krb5.conf  | 40 +
 .../docker-image/docker-krb5/launcher.sh| 25 ++
 7 files changed, 210 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9200bc1f/hadoop-ozone/dist/src/main/compose/ozonesecure/README.md
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/README.md 
b/hadoop-ozone/dist/src/main/compose/ozonesecure/README.md
new file mode 100644
index 000..0ce9a0a
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/README.md
@@ -0,0 +1,22 @@
+
+# Experimental UNSECURE krb5 Kerberos container.
+
+Only for development. Not for production.
+
+ Dockerfile for KDC:
+* ./docker-image/docker-krb5/Dockerfile-krb5
+
+ Dockerfile for SCM,OM and DataNode:
+* ./docker-image/runner/Dockerfile
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9200bc1f/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
--
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml 
b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
index 42ab05e..fab5ba9 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
@@ -16,42 +16,58 @@
 
 version: "3"
 services:
-   kdc:
-  image: ahadoop/kdc:v1
-  hostname: kdc
-  volumes:
-  - $SRC_VOLUME:/opt/hadoop
-   datanode:
-  image: ahadoop/runner:latest
-  volumes:
-- $SRC_VOLUME:/opt/hadoop
-  hostname: datanode
-  ports:
-- 9864
-  command: ["/opt/hadoop/bin/ozone","datanode"]
-  env_file:
-- ./docker-config
-   ozoneManager:
-  image: ahadoop/runner:latest
-  hostname: om
-  volumes:
- - $SRC_VOLUME:/opt/hadoop
-  ports:
- - 9874:9874
-  environment:
- ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION
-  env_file:
-  - ./docker-config
-  command: ["/opt/hadoop/bin/ozone","om"]
-   scm:
-  image: ahadoop/runner:latest
-  hostname: scm
-  volumes:
- - $SRC_VOLUME:/opt/hadoop
-  ports:
- - 9876:9876
-  env_file:
-  - ./docker-config
-  environment:
-  ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-  command: ["/opt/hadoop/bin/ozone","scm"]
+  kdc:
+build:
+  context: docker-image/docker-krb5
+  dockerfile: Dockerfile-krb5
+  args:
+buildno: 1
+hostname: kdc
+volumes:
+- $SRC_VOLUME:/opt/hadoop
+  datanode:
+build:
+  context: docker-image/runner
+  dockerfile: Dockerfile
+  args:
+buildno: 1
+volumes:
+- $SRC_VOLUME:/opt/hadoop
+hostname: datanode
+ports:
+- 9864
+command: ["/opt/hadoop/bin/ozone","datanode"]
+env_file:
+- docker-config
+  om:
+build:
+  context: docker-image/runner
+  dockerfile: Dockerfile
+  args:
+buildno: 1
+hostname: om
+volumes:
+- $SRC_VOLUME:/opt/hadoop
+ports:
+- 9874:9874
+environment:
+  ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
+env_file:
+- docker-config
+command: ["/opt/hadoop/bin/ozone","om"]
+  scm:
+build:
+  context: docker-image/runner
+  dockerfile: Dockerfile
+  args:
+buildno: 1
+hostname: scm
+volumes:
+- $SRC_VOLUME:/opt/hadoop
+ports:
+- 9876:9876
+env_file:
+- docker-config
+environment:
+  ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
+command: ["/opt/hadoop/bin/ozone","scm"]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9200bc1f/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/Dockerfile-krb5

[24/50] [abbrv] hadoop git commit: HADOOP-15861. Move DelegationTokenIssuer to the right path. Contributed by Wei-Chiu Chuang.

2018-10-18 Thread xyao
HADOOP-15861. Move DelegationTokenIssuer to the right path.
Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41b3603b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41b3603b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41b3603b

Branch: refs/heads/HDDS-4
Commit: 41b3603b5bcb74a7d78a314a4a5c177d941af27f
Parents: b738cb1
Author: Steve Loughran 
Authored: Wed Oct 17 11:01:53 2018 +0100
Committer: Steve Loughran 
Committed: Wed Oct 17 11:01:53 2018 +0100

--
 .../security/token/DelegationTokenIssuer.java   | 111 +++
 .../security/token/DelegationTokenIssuer.java   | 111 ---
 2 files changed, 111 insertions(+), 111 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b3603b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
new file mode 100644
index 000..70a53b7
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Class for issuing delegation tokens.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "Yarn"})
+@InterfaceStability.Unstable
+public interface DelegationTokenIssuer {
+
+  /**
+   * The service name used as the alias for the token in the credential
+   * token map.  addDelegationTokens will use this to determine if
+   * a token exists, and if not, add a new token with this alias.
+   */
+  String getCanonicalServiceName();
+
+  /**
+   * Unconditionally get a new token with the optional renewer.  Returning
+   * null indicates the service does not issue tokens.
+   */
+  Token<?> getDelegationToken(String renewer) throws IOException;
+
+  /**
+   * Issuers may need tokens from additional services.
+   */
+  default DelegationTokenIssuer[] getAdditionalTokenIssuers()
+  throws IOException {
+return null;
+  }
+
+  /**
+   * Given a renewer, add delegation tokens for the issuer and its child issuers
+   * to the Credentials object if it is not already present.
+   *
+   * Note: This method is not intended to be overridden.  Issuers should
+   * implement getCanonicalService and getDelegationToken to ensure
+   * consistent token acquisition behavior.
+   *
+   * @param renewer the user allowed to renew the delegation tokens
+   * @param credentials cache in which to add new delegation tokens
+   * @return list of new delegation tokens
+   * @throws IOException thrown if an IO error occurs.
+   */
+  default Token<?>[] addDelegationTokens(
+      final String renewer, Credentials credentials) throws IOException {
+    if (credentials == null) {
+      credentials = new Credentials();
+    }
+    final List<Token<?>> tokens = new ArrayList<>();
+    collectDelegationTokens(this, renewer, credentials, tokens);
+    return tokens.toArray(new Token<?>[tokens.size()]);
+  }
+
+  /**
+   * NEVER call this method directly.
+   */
+  @InterfaceAudience.Private
+  static void collectDelegationTokens(
+      final DelegationTokenIssuer issuer,
+      final String renewer,
+      final Credentials credentials,
+      final List<Token<?>> tokens) throws IOException {
+    final String serviceName = issuer.getCanonicalServiceName();
+    // Collect the token of this issuer and then of its 

[16/50] [abbrv] hadoop git commit: YARN-8842. Expose metrics for custom resource types in QueueMetrics. (Contributed by Szilard Nemeth)

2018-10-18 Thread xyao
YARN-8842. Expose metrics for custom resource types in QueueMetrics. 
(Contributed by Szilard Nemeth)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84e22a6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84e22a6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84e22a6a

Branch: refs/heads/HDDS-4
Commit: 84e22a6af46db2859d7d2caf192861cae9b6a1a8
Parents: 538250d
Author: Haibo Chen 
Authored: Tue Oct 16 14:12:02 2018 -0700
Committer: Haibo Chen 
Committed: Tue Oct 16 14:14:30 2018 -0700

--
 .../resourcetypes/ResourceTypesTestHelper.java  |  22 +
 .../resourcemanager/scheduler/QueueMetrics.java | 130 +++-
 .../QueueMetricsForCustomResources.java | 158 +
 .../scheduler/capacity/CapacityScheduler.java   |   5 +-
 .../resourcemanager/scheduler/QueueInfo.java|  90 +++
 .../scheduler/QueueMetricsTestData.java | 105 +++
 .../scheduler/ResourceMetricsChecker.java   |  88 ++-
 .../scheduler/TestQueueMetrics.java | 250 +++
 .../TestQueueMetricsForCustomResources.java | 645 +++
 9 files changed, 1325 insertions(+), 168 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84e22a6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java
index 98a8a00..3c3c2cc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java
@@ -16,6 +16,7 @@
 
 package org.apache.hadoop.yarn.resourcetypes;
 
+import com.google.common.collect.Maps;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -24,6 +25,7 @@ import 
org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+import java.util.stream.Collectors;
 
 /**
  * Contains helper methods to create Resource and ResourceInformation objects.
@@ -90,4 +92,24 @@ public final class ResourceTypesTestHelper {
 return new ResourceValueAndUnit(value, matcher.group(2));
   }
 
+  public static Map extractCustomResources(Resource res) {
+Map customResources = Maps.newHashMap();
+for (int i = 0; i < res.getResources().length; i++) {
+  ResourceInformation ri = res.getResourceInformation(i);
+  if (!ri.getName().equals(ResourceInformation.MEMORY_URI)
+  && !ri.getName().equals(ResourceInformation.VCORES_URI)) {
+customResources.put(ri.getName(), ri.getValue());
+  }
+}
+return customResources;
+  }
+
+  public static Map<String, String> extractCustomResourcesAsStrings(
+      Resource res) {
+    Map<String, Long> resValues = extractCustomResources(res);
+    return resValues.entrySet().stream()
+        .collect(Collectors.toMap(
+            Map.Entry::getKey, e -> String.valueOf(e.getValue())));
+  }
+
 }
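
A usage sketch of the helper above (illustrative only; it assumes "gpu" is
registered as a custom resource type and uses the newResource(long, int, Map)
factory already present in this class):

    import java.util.Map;
    import com.google.common.collect.ImmutableMap;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.resourcetypes.ResourceTypesTestHelper;

    public class CustomResourceExtractionSketch {
      public static void main(String[] args) {
        Resource res = ResourceTypesTestHelper.newResource(4096L, 4,
            ImmutableMap.of("gpu", "2"));
        // Memory and vcores are filtered out; only custom types remain.
        Map<String, String> custom =
            ResourceTypesTestHelper.extractCustomResourcesAsStrings(res);
        System.out.println(custom); // expected: {gpu=2}
      }
    }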

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84e22a6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index 20a5a1f..1315c2e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import 

[42/50] [abbrv] hadoop git commit: Fix merge conflicts

2018-10-18 Thread xyao
Fix merge conflicts


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c648497
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c648497
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c648497

Branch: refs/heads/HDDS-4
Commit: 6c648497f893f76319f2c78b48ec41476b29b6a5
Parents: 8805c41
Author: Xiaoyu Yao 
Authored: Tue Jul 31 18:17:29 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed Oct 17 14:04:03 2018 -0700

--
 .../conf/TestConfigurationFieldsBase.java   |  2 -
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |  1 -
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   | 10 ++-
 .../apache/hadoop/ozone/OzoneConfigKeys.java|  1 +
 .../common/src/main/resources/ozone-default.xml | 22 ++---
 .../scm/server/StorageContainerManager.java |  9 ++-
 .../StorageContainerManagerHttpServer.java  |  4 +-
 .../src/test/compose/compose-secure/.env|  2 +-
 .../test/compose/compose-secure/docker-config   | 55 ++---
 .../apache/hadoop/ozone/ksm/KSMConfigKeys.java  | 84 
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  3 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |  5 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java| 22 ++---
 13 files changed, 88 insertions(+), 132 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c648497/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
index bce1cd5..152159b 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
@@ -436,8 +436,6 @@ public abstract class TestConfigurationFieldsBase {
 // Create XML key/value map
 LOG_XML.debug("Reading XML property files\n");
 xmlKeyValueMap = extractPropertiesFromXml(xmlFilename);
-// Remove hadoop property set in ozone-default.xml
-xmlKeyValueMap.remove("hadoop.custom.tags");
 LOG_XML.debug("\n=\n");
 
 // Create default configuration variable key/value map

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c648497/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 0b5e4e0..8088a9c 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -115,5 +115,4 @@ public final class HddsConfigKeys {
   public static final String HDDS_PUBLIC_KEY_FILE_NAME = "hdds.public.key.file"
   + ".name";
   public static final String HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT = "public.pem";
->>> HDDS-100. SCM CA: generate public/private key pair for SCM/OM/DNs. 
Contributed by Ajay Kumar.
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c648497/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 7bf1148..86338a9 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -283,10 +283,12 @@ public final class ScmConfigKeys {
   public static final String HDDS_SCM_WATCHER_TIMEOUT_DEFAULT =
   "10m";
 
-  public static final String SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY =
-  "ozone.scm.web.authentication.kerberos.principal";
-  public static final String SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY =
-  "ozone.scm.web.authentication.kerberos.keytab";
+  public static final String
+  HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY =
+  "hdds.scm.web.authentication.kerberos.principal";
+  public static final String
+  HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY =
+  "hdds.scm.web.authentication.kerberos.keytab";
   /**
* Never constructed.
*/


[03/50] [abbrv] hadoop git commit: HDDS-378. Remove dependencies between hdds/ozone and hdfs proto files. Contributed by Elek, Marton.

2018-10-18 Thread xyao
HDDS-378. Remove dependencies between hdds/ozone and hdfs proto files. 
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f0b43fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f0b43fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f0b43fa

Branch: refs/heads/HDDS-4
Commit: 5f0b43fa93d02c90956d48eb5c6c0b60deeac91c
Parents: 53e5173
Author: Arpit Agarwal 
Authored: Tue Oct 16 10:40:00 2018 -0700
Committer: Arpit Agarwal 
Committed: Tue Oct 16 10:40:00 2018 -0700

--
 hadoop-hdds/common/pom.xml  |  6 --
 .../main/proto/ScmBlockLocationProtocol.proto   |  1 -
 .../StorageContainerLocationProtocol.proto  |  1 -
 hadoop-hdds/container-service/pom.xml   | 11 +---
 .../apache/hadoop/ozone/client/BucketArgs.java  |  2 +-
 .../apache/hadoop/ozone/client/OzoneBucket.java |  2 +-
 .../ozone/client/protocol/ClientProtocol.java   |  2 +-
 .../hadoop/ozone/client/rest/RestClient.java|  2 +-
 .../hadoop/ozone/client/rpc/RpcClient.java  |  2 +-
 hadoop-ozone/common/pom.xml |  6 --
 .../hadoop/hdds/protocol/StorageType.java   | 64 
 .../ozone/client/rest/response/BucketInfo.java  |  2 +-
 .../hadoop/ozone/om/helpers/OmBucketArgs.java   |  7 +--
 .../hadoop/ozone/om/helpers/OmBucketInfo.java   |  9 ++-
 .../hadoop/ozone/web/handlers/BucketArgs.java   |  2 +-
 .../hadoop/ozone/web/response/BucketInfo.java   |  2 +-
 .../src/main/proto/OzoneManagerProtocol.proto   | 12 +++-
 .../apache/hadoop/ozone/web/TestBucketInfo.java |  2 +-
 .../TestStorageContainerManagerHelper.java  |  2 +-
 .../ozone/client/rest/TestOzoneRestClient.java  |  2 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java|  2 +-
 .../om/TestMultipleContainerReadWrite.java  |  2 +-
 .../hadoop/ozone/om/TestOmBlockVersioning.java  |  2 +-
 .../hadoop/ozone/om/TestOzoneManager.java   |  2 +-
 .../web/handlers/BucketProcessTemplate.java |  2 +-
 .../hadoop/ozone/om/BucketManagerImpl.java  |  2 +-
 .../hadoop/ozone/om/S3BucketManagerImpl.java|  2 +-
 .../hadoop/ozone/om/TestBucketManagerImpl.java  |  2 +-
 .../hadoop/ozone/client/OzoneBucketStub.java|  2 +-
 .../hadoop/ozone/client/OzoneVolumeStub.java|  2 +-
 30 files changed, 102 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f0b43fa/hadoop-hdds/common/pom.xml
--
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index bf2a6b9..65cd1d1 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -218,12 +218,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                 <param>
                   ${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto
                 </param>
-                <param>
-                  ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/
-                </param>
-                <param>
-                  ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto/
-                </param>
                 <param>${basedir}/src/main/proto</param>
               </imports>
               <source>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f0b43fa/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto 
b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
index 9b4e0ac..01a0dde 100644
--- a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
@@ -28,7 +28,6 @@ option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 package hadoop.hdds;
 
-import "hdfs.proto";
 import "hdds.proto";
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f0b43fa/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
--
diff --git 
a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto 
b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
index fb01d6a..49d1975 100644
--- a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
@@ -28,7 +28,6 @@ option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 package hadoop.hdds;
 
-import "hdfs.proto";
 import "hdds.proto";
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f0b43fa/hadoop-hdds/container-service/pom.xml
--
diff --git 

[30/50] [abbrv] hadoop git commit: HDDS-662. Introduce ContainerReplicaState in StorageContainerManager. Contributed by Nanda kumar.

2018-10-18 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index 7078b8f..42b39f9 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -17,17 +17,12 @@
 
 package org.apache.hadoop.hdds.scm.container;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest;
 import org.apache.hadoop.hdds.scm.container.states.ContainerState;
 import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
@@ -45,11 +40,8 @@ import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.Closeable;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
 import java.util.Set;
@@ -116,7 +108,7 @@ import static 
org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
  * TimeOut Delete Container State Machine - if the container creating times 
out,
  * then Container State manager decides to delete the container.
  */
-public class ContainerStateManager implements Closeable {
+public class ContainerStateManager {
   private static final Logger LOG =
   LoggerFactory.getLogger(ContainerStateManager.class);
 
@@ -135,11 +127,10 @@ public class ContainerStateManager implements Closeable {
* TODO : Add Container Tags so we know which containers are owned by SCM.
*/
   @SuppressWarnings("unchecked")
-  public ContainerStateManager(Configuration configuration,
-  ContainerManager containerManager, PipelineSelector pipelineSelector) {
+  public ContainerStateManager(final Configuration configuration) {
 
 // Initialize the container state machine.
-    Set<LifeCycleState> finalStates = new HashSet();
+    final Set<LifeCycleState> finalStates = new HashSet();
 
 // These are the steady states of a container.
 finalStates.add(LifeCycleState.OPEN);
@@ -155,22 +146,9 @@ public class ContainerStateManager implements Closeable {
 ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
 StorageUnit.BYTES);
 
-lastUsedMap = new ConcurrentHashMap<>();
-containerCount = new AtomicLong(0);
-containers = new ContainerStateMap();
-  }
-
-  /**
-   * Return the info of all the containers kept by the in-memory mapping.
-   *
-   * @return the list of all container info.
-   */
-  public List<ContainerInfo> getAllContainers() {
-    List<ContainerInfo> list = new ArrayList<>();
-
-//No Locking needed since the return value is an immutable map.
-containers.getContainerMap().forEach((key, value) -> list.add(value));
-return list;
+this.lastUsedMap = new ConcurrentHashMap<>();
+this.containerCount = new AtomicLong(0);
+this.containers = new ContainerStateMap();
   }
 
   /*
@@ -244,17 +222,15 @@ public class ContainerStateManager implements Closeable {
 LifeCycleEvent.CLEANUP);
   }
 
-  public void addExistingContainer(ContainerInfo containerInfo)
+  void loadContainer(final ContainerInfo containerInfo)
   throws SCMException {
 containers.addContainer(containerInfo);
-long containerID = containerInfo.getContainerID();
-if (containerCount.get() < containerID) {
-  containerCount.set(containerID);
-}
+containerCount.set(Long.max(
+containerInfo.getContainerID(), containerCount.get()));
   }
 
   /**
-   * allocates a new container based on the type, replication etc.
+   * Allocates a new container based on the type, replication etc.
*
* @param selector -- Pipeline selector class.
* @param type -- Replication type.
@@ -262,25 +238,22 @@ public class ContainerStateManager implements Closeable {
* @return ContainerWithPipeline
* @throws IOException  on Failure.
*/
-  public ContainerWithPipeline allocateContainer(PipelineSelector selector,
-  HddsProtos.ReplicationType type,
-  HddsProtos.ReplicationFactor replicationFactor, String owner)
+  

[50/50] [abbrv] hadoop git commit: HDDS-588. SelfSignedCertificate#generateCertificate should sign the certificate with the configured security provider. Contributed by Xiaoyu Yao.

2018-10-18 Thread xyao
HDDS-588. SelfSignedCertificate#generateCertificate should sign the certificate
with the configured security provider. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbc8d390
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbc8d390
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbc8d390

Branch: refs/heads/HDDS-4
Commit: cbc8d3900a07f76cd2870bbf037bd3eb6c96deff
Parents: 9200bc1
Author: Ajay Kumar 
Authored: Tue Oct 9 00:28:01 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed Oct 17 14:04:04 2018 -0700

--
 .../hdds/security/x509/certificates/SelfSignedCertificate.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbc8d390/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/SelfSignedCertificate.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/SelfSignedCertificate.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/SelfSignedCertificate.java
index fef7ac3..f221246 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/SelfSignedCertificate.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/SelfSignedCertificate.java
@@ -103,8 +103,8 @@ public final class SelfSignedCertificate {
 
 
 ContentSigner contentSigner =
-new JcaContentSignerBuilder(
-config.getSignatureAlgo()).build(key.getPrivate());
+new JcaContentSignerBuilder(config.getSignatureAlgo())
+.setProvider(config.getProvider()).build(key.getPrivate());
 
 // Please note: Since this is a root certificate we use "ONE" as the
 // serial number. Also note that skip enforcing locale or UTC. We are
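
A standalone sketch of what the pinned provider means in practice, using plain
BouncyCastle APIs. The algorithm, key size, and provider name mirror the HDDS
defaults; this is an illustration, not code from the patch:

    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.security.Security;
    import org.bouncycastle.jce.provider.BouncyCastleProvider;
    import org.bouncycastle.operator.ContentSigner;
    import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;

    public class ProviderPinnedSignerSketch {
      public static void main(String[] args) throws Exception {
        Security.addProvider(new BouncyCastleProvider());
        KeyPairGenerator gen = KeyPairGenerator.getInstance("RSA", "BC");
        gen.initialize(2048);
        KeyPair key = gen.generateKeyPair();
        // Without setProvider(), the signer comes from the JVM's default
        // provider search; the fix pins it to the configured provider.
        ContentSigner signer = new JcaContentSignerBuilder("SHA256withRSA")
            .setProvider("BC")
            .build(key.getPrivate());
        System.out.println(signer.getAlgorithmIdentifier().getAlgorithm());
      }
    }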





[19/50] [abbrv] hadoop git commit: YARN-8582. Document YARN support for HTTPS in AM Web server. (Contributed by Robert Kanter)

2018-10-18 Thread xyao
YARN-8582. Document YARN support for HTTPS in AM Web server. (Contributed by 
Robert Kanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22f85f29
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22f85f29
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22f85f29

Branch: refs/heads/HDDS-4
Commit: 22f85f2927712d9159c6ecb88e45287afa26dc73
Parents: 3bfd214
Author: Haibo Chen 
Authored: Tue Oct 16 17:21:15 2018 -0700
Committer: Haibo Chen 
Committed: Tue Oct 16 17:23:33 2018 -0700

--
 .../site/markdown/YarnApplicationSecurity.md| 84 +++-
 1 file changed, 63 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/22f85f29/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnApplicationSecurity.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnApplicationSecurity.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnApplicationSecurity.md
index bab46b9..5f1f325 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnApplicationSecurity.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnApplicationSecurity.md
@@ -402,27 +402,69 @@ connection with the AM and pass up the current user's 
credentials).
 
 ## Securing YARN Application Web UIs and REST APIs
 
-YARN provides a straightforward way of giving every YARN application SPNEGO 
authenticated
-web pages: it implements SPNEGO authentication in the Resource Manager Proxy.
-
-YARN web UI are expected to load the AM proxy filter when setting up its web 
UI; this filter
-will redirect all HTTP Requests coming from any host other than the RM Proxy 
hosts to an
-RM proxy, to which the client app/browser must re-issue the request. The 
client will authenticate
-against the principal of the RM Proxy (usually `yarn`), and, once 
authenticated, have its
-request forwared.
-
-As a result, all client interactions are SPNEGO-authenticated, without the 
YARN application
-itself needing any kerberos principal for the clients to authenticate against.
-
-Known weaknesses in this approach are:
-
-1. As calls coming from the proxy hosts are not redirected, any application 
running
-on those hosts has unrestricted access to the YARN applications. This is why 
in a secure cluster
-the proxy hosts *must* run on cluster nodes which do not run end user code 
(i.e. not run YARN
-NodeManagers and hence schedule YARN containers, nor support logins by end 
users).
-
-1. The HTTP requests between proxy and YARN RM Server are not currently 
encrypted.
-That is: HTTPS is not supported.
+YARN provides a straightforward way of giving every YARN Application SPNEGO
+authenticated web pages: the RM implements SPNEGO authentication in the 
Resource
+Manager Proxy and restricts access to the YARN Application's Web UI to only the
+RM Proxy.  There are two ways to do this:
+
+#### Option 1: AM IP Proxy Filter
+
+A YARN Application's Web Server should load the AM proxy filter (see the
+`AmFilterInitializer` class) when setting up its web UI; this filter will
+redirect all HTTP Requests coming from any host other than the RM Proxy hosts 
to
+an RM proxy, to which the client app/browser must re-issue the request. The
+client will authenticate against the principal of the RM Proxy (usually 
`yarn`),
+and, once authenticated, have its request forwarded.
+
+Known weaknesses in this option are:
+
+1. The AM proxy filter only checks for the IP/hosts of the RM Proxy so any
+Application running on those hosts has unrestricted access to the YARN
+Application's Web UI. This is why in a secure cluster the proxy hosts *must* 
run
+on cluster nodes which do not run end user code (i.e. not running YARN
+NodeManagers, and hence not schedule YARN containers; nor support logins by end
+users).
+
+1. The HTTP requests between RM proxy and the YARN Application are not 
currently
+encrypted. That is: HTTPS is not supported.
+
+#### Option 2: HTTPS Mutual Authentication
+
+By default, YARN Application Web UIs are not encrypted (i.e. HTTPS). It is up 
to
+the Application to provide support for HTTPS. This can either be done entirely
+independently with a valid HTTPS Certificate from a public CA or source that 
the
+RM or JVM is configured to trust.  Or, alternatively, the RM can act as a
+limited CA and provide the Application with a Certificate it can use, which is
+only accepted by the RM proxy, and no other clients (e.g. web browsers). This 
is
+important because the Application cannot necessarily be trusted to not steal 
any
+issued Certificates or perform other malicious behavior. The Certificates the 
RM
+issues will be (a) expired, (b) 
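
As a concrete companion to Option 1 above, a minimal sketch of an AM wiring the
proxy filter into its web UI. This is an assumption for illustration, not code
from the patch: it presumes it runs inside an AM container where the RM proxy
environment variables are set, and "myapp" is a placeholder name.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.webapp.WebApp;
    import org.apache.hadoop.yarn.webapp.WebApps;

    public class AmWebUiSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Install the AM proxy filter; it redirects requests that do not
        // originate from the RM proxy hosts back to the RM proxy.
        conf.set("hadoop.http.filter.initializers",
            "org.apache.hadoop.yarn.server.webproxy.amfilter.AmFilterInitializer");
        WebApp ui = WebApps.$for("myapp").with(conf).start();
        ui.joinThread();
      }
    }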

[27/50] [abbrv] hadoop git commit: HDDS-527. Show SCM chill mode status in SCM UI. Contributed by Yiqun Lin.

2018-10-18 Thread xyao
HDDS-527. Show SCM chill mode status in SCM UI. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9a63ae4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9a63ae4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9a63ae4

Branch: refs/heads/HDDS-4
Commit: a9a63ae4a8367e66d5ec86b0097326b8491e4b1e
Parents: 9df1c84
Author: Márton Elek 
Authored: Wed Oct 17 12:44:53 2018 +0200
Committer: Márton Elek 
Committed: Wed Oct 17 12:54:01 2018 +0200

--
 .../server-scm/src/main/resources/webapps/scm/scm-overview.html  | 2 +-
 hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js | 4 
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9a63ae4/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
--
diff --git 
a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html 
b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
index fca23ba..de4894a 100644
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
+++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
@@ -50,7 +50,7 @@
         </tr>
         <tr>
             <td>Node Manager: Chill mode status</td>
-            <td>{{$ctrl.nodemanagermetrics.ChillModeStatus}}</td>
+            <td>{{$ctrl.scmmetrics.InChillMode}}</td>
         </tr>
         <tr>
             <td>Node Manager: Manual chill mode</td>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9a63ae4/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
--
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js 
b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
index bcfa8b7..7c6dc91 100644
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
+++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
@@ -34,6 +34,10 @@
         .then(function (result) {
             ctrl.nodemanagermetrics = result.data.beans[0];
         });
+        $http.get("jmx?qry=Hadoop:service=StorageContainerManager,name=StorageContainerManagerInfo,component=ServerRuntime")
+            .then(function (result) {
+                ctrl.scmmetrics = result.data.beans[0];
+            });
 
 var statusSortOrder = {
 "HEALTHY": "a",





[36/50] [abbrv] hadoop git commit: HDDS-5. Enable OzoneManager kerberos auth. Contributed by Ajay Kumar.

2018-10-18 Thread xyao
HDDS-5. Enable OzoneManager kerberos auth. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be2497e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be2497e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be2497e9

Branch: refs/heads/HDDS-4
Commit: be2497e9bdf51405342649f9f273662528ce48a0
Parents: 020cf74
Author: Xiaoyu Yao 
Authored: Mon May 14 09:36:57 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed Oct 17 13:48:55 2018 -0700

--
 .../common/src/main/resources/ozone-default.xml |  32 +++-
 .../apache/hadoop/ozone/om/OMConfigKeys.java|   9 +
 .../ozone/om/protocol/OzoneManagerProtocol.java |   6 +
 .../om/protocolPB/OzoneManagerProtocolPB.java   |   4 +
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |   3 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java| 168 +++
 .../apache/hadoop/ozone/om/OzoneManager.java|  69 +++-
 .../hadoop/ozone/om/OzoneManagerHttpServer.java |   5 +-
 8 files changed, 246 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be2497e9/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 02c6965..287d913 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1302,7 +1302,23 @@
     <name>ozone.scm.kerberos.principal</name>
     <value></value>
     <tag> OZONE, SECURITY</tag>
-    <description>The SCM service principal. Ex scm/_HOST@REALM.TLD.</description>
+    <description>The SCM service principal. Ex scm/_HOST@REALM.COM</description>
+  </property>
+
+  <property>
+    <name>ozone.om.kerberos.keytab.file</name>
+    <value></value>
+    <tag> OZONE, SECURITY</tag>
+    <description> The keytab file used by KSM daemon to login as its
+      service principal. The principal name is configured with
+      hdds.ksm.kerberos.principal.
+    </description>
+  </property>
+  <property>
+    <name>ozone.om.kerberos.principal</name>
+    <value></value>
+    <tag> OZONE, SECURITY</tag>
+    <description>The KSM service principal. Ex ksm/_HOST@REALM.COM</description>
   </property>
 
   <property>
@@ -1314,4 +1330,18 @@
     <value>/etc/security/keytabs/HTTP.keytab</value>
   </property>
 
+  <property>
+    <name>ozone.om.http.kerberos.principal</name>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
+    <description>
+      KSM http server kerberos principal.
+    </description>
+  </property>
+  <property>
+    <name>ozone.om.http.kerberos.keytab.file</name>
+    <value>/etc/security/keytabs/HTTP.keytab</value>
+    <description>
+      KSM http server kerberos keytab.
+    </description>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be2497e9/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
--
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index b9ca296..6a828ca 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -78,4 +78,13 @@ public final class OMConfigKeys {
   public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK =
   "ozone.key.deleting.limit.per.task";
   public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000;
+
+  public static final String OZONE_OM_KERBEROS_KEYTAB_FILE_KEY = "ozone.om."
+  + "kerberos.keytab.file";
+  public static final String OZONE_OM_KERBEROS_PRINCIPAL_KEY = "ozone.om"
+  + ".kerberos.principal";
+  public static final String OZONE_OM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE =
+  "ozone.om.http.kerberos.keytab.file";
+  public static final String OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY
+  = "ozone.om.http.kerberos.principal";
 }
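
A usage sketch of the new keys (an assumption for illustration, not code from
this patch; the principal and keytab values below are placeholders):

    import java.io.IOException;
    import java.net.InetAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.SecurityUtil;
    import org.apache.hadoop.security.UserGroupInformation;

    public class OmKerberosLoginSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);
        // In a real deployment these come from ozone-site.xml.
        conf.set("ozone.om.kerberos.principal", "om/_HOST@EXAMPLE.COM");
        conf.set("ozone.om.kerberos.keytab.file",
            "/etc/security/keytabs/om.keytab");
        // SecurityUtil.login resolves _HOST to the local hostname and logs
        // the daemon in from the keytab.
        SecurityUtil.login(conf, "ozone.om.kerberos.keytab.file",
            "ozone.om.kerberos.principal",
            InetAddress.getLocalHost().getCanonicalHostName());
      }
    }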

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be2497e9/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
--
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
index c021e64..805eec2 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.om.protocol;
 
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
@@ -25,14 +26,19 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import 

[07/50] [abbrv] hadoop git commit: YARN-8448. AM HTTPS Support for AM communication with RMWeb proxy. (Contributed by Robert Kanter)

2018-10-18 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2288ac4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
index a110f10..2304501c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
@@ -37,6 +37,8 @@ import java.io.FileReader;
 import java.io.IOException;
 import java.io.LineNumberReader;
 import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.util.ArrayList;
@@ -88,12 +90,11 @@ public class TestLinuxContainerExecutorWithMocks {
   private static final Logger LOG =
LoggerFactory.getLogger(TestLinuxContainerExecutorWithMocks.class);
 
-  private static final String MOCK_EXECUTOR =
-  "./src/test/resources/mock-container-executor";
+  private static final String MOCK_EXECUTOR = "mock-container-executor";
   private static final String MOCK_EXECUTOR_WITH_ERROR =
-  "./src/test/resources/mock-container-executer-with-error";
+  "mock-container-executer-with-error";
   private static final String MOCK_EXECUTOR_WITH_CONFIG_ERROR =
-  "./src/test/resources/mock-container-executer-with-configuration-error";
+  "mock-container-executer-with-configuration-error";
 
   private String tmpMockExecutor;
   private LinuxContainerExecutor mockExec = null;
@@ -121,11 +122,13 @@ public class TestLinuxContainerExecutorWithMocks {
 return ret;
   }
 
-  private void setupMockExecutor(String executorPath, Configuration conf)
-  throws IOException {
+  private void setupMockExecutor(String executorName, Configuration conf)
+  throws IOException, URISyntaxException {
 //we'll always use the tmpMockExecutor - since
 // PrivilegedOperationExecutor can only be initialized once.
 
+URI executorPath = getClass().getClassLoader().getResource(executorName)
+.toURI();
 Files.copy(Paths.get(executorPath), Paths.get(tmpMockExecutor),
 REPLACE_EXISTING);
 
@@ -140,7 +143,8 @@ public class TestLinuxContainerExecutorWithMocks {
   }
 
   @Before
-  public void setup() throws IOException, ContainerExecutionException {
+  public void setup() throws IOException, ContainerExecutionException,
+  URISyntaxException {
 assumeNotWindows();
 
 tmpMockExecutor = System.getProperty("test.build.data") +
@@ -172,7 +176,18 @@ public class TestLinuxContainerExecutorWithMocks {
   }
 
   @Test
-  public void testContainerLaunch()
+  public void testContainerLaunchWithoutHTTPS()
+  throws IOException, ConfigurationException {
+testContainerLaunch(false);
+  }
+
+  @Test
+  public void testContainerLaunchWithHTTPS()
+  throws IOException, ConfigurationException {
+testContainerLaunch(true);
+  }
+
+  private void testContainerLaunch(boolean https)
   throws IOException, ConfigurationException {
 String appSubmitter = "nobody";
 String cmd = String.valueOf(
@@ -193,41 +208,64 @@ public class TestLinuxContainerExecutorWithMocks {
 
 Path scriptPath = new Path("file:///bin/echo");
 Path tokensPath = new Path("file:///dev/null");
+Path keystorePath = new Path("file:///dev/null");
+Path truststorePath = new Path("file:///dev/null");
 Path workDir = new Path("/tmp");
 Path pidFile = new Path(workDir, "pid.txt");
 
 mockExec.activateContainer(cId, pidFile);
-int ret = mockExec.launchContainer(new ContainerStartContext.Builder()
-.setContainer(container)
-.setNmPrivateContainerScriptPath(scriptPath)
-.setNmPrivateTokensPath(tokensPath)
-.setUser(appSubmitter)
-.setAppId(appId)
-.setContainerWorkDir(workDir)
-.setLocalDirs(dirsHandler.getLocalDirs())
-.setLogDirs(dirsHandler.getLogDirs())
-.setFilecacheDirs(new ArrayList<>())
-.setUserLocalDirs(new ArrayList<>())
-.setContainerLocalDirs(new ArrayList<>())
-.setContainerLogDirs(new ArrayList<>())
-.setUserFilecacheDirs(new ArrayList<>())
-.setApplicationLocalDirs(new ArrayList<>())
-.build());
+ContainerStartContext.Builder ctxBuilder =
+new 
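
The builder-based call this test migrates to can be sketched as follows. This
is a hypothetical reconstruction: the two keystore/truststore setter names are
assumptions inferred from the keystorePath/truststorePath variables above and
are not confirmed by the visible hunk.

    // Fragment in the context of testContainerLaunch(boolean https) above;
    // container, scriptPath, tokensPath, etc. are the test's local variables.
    ContainerStartContext.Builder ctxBuilder =
        new ContainerStartContext.Builder()
            .setContainer(container)
            .setNmPrivateContainerScriptPath(scriptPath)
            .setNmPrivateTokensPath(tokensPath)
            .setUser(appSubmitter)
            .setAppId(appId)
            .setContainerWorkDir(workDir)
            .setLocalDirs(dirsHandler.getLocalDirs())
            .setLogDirs(dirsHandler.getLogDirs())
            .setFilecacheDirs(new ArrayList<>())
            .setUserLocalDirs(new ArrayList<>())
            .setContainerLocalDirs(new ArrayList<>())
            .setContainerLogDirs(new ArrayList<>())
            .setUserFilecacheDirs(new ArrayList<>())
            .setApplicationLocalDirs(new ArrayList<>());
    if (https) {
      // Assumed setter names for the HTTPS material:
      ctxBuilder.setNmPrivateKeystorePath(keystorePath);
      ctxBuilder.setNmPrivateTruststorePath(truststorePath);
    }
    int ret = mockExec.launchContainer(ctxBuilder.build());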

[15/50] [abbrv] hadoop git commit: YARN-8842. Expose metrics for custom resource types in QueueMetrics. (Contributed by Szilard Nemeth)

2018-10-18 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/84e22a6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetricsForCustomResources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetricsForCustomResources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetricsForCustomResources.java
new file mode 100644
index 000..76a9849
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetricsForCustomResources.java
@@ -0,0 +1,645 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.resourcetypes.ResourceTypesTestHelper;
+import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.QueueMetricsForCustomResources.QueueMetricsCustomResource;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+
+import static 
org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB;
+import static 
org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES;
+import static org.apache.hadoop.yarn.resourcetypes.ResourceTypesTestHelper
+.extractCustomResourcesAsStrings;
+import static 
org.apache.hadoop.yarn.resourcetypes.ResourceTypesTestHelper.newResource;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.ResourceMetricsChecker.ResourceMetricsKey.AGGREGATE_CONTAINERS_ALLOCATED;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.ResourceMetricsChecker.ResourceMetricsKey.AGGREGATE_CONTAINERS_RELEASED;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+
.ResourceMetricsChecker.ResourceMetricsKey.AGGREGATE_MEMORY_MB_SECONDS_PREEMPTED;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+
.ResourceMetricsChecker.ResourceMetricsKey.AGGREGATE_VCORE_SECONDS_PREEMPTED;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.ResourceMetricsChecker.ResourceMetricsKey.ALLOCATED_CONTAINERS;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.ResourceMetricsChecker.ResourceMetricsKey.ALLOCATED_MB;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.ResourceMetricsChecker.ResourceMetricsKey.ALLOCATED_V_CORES;
+import static 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceMetricsChecker.ResourceMetricsKey.AVAILABLE_MB;
+import static 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceMetricsChecker.ResourceMetricsKey.AVAILABLE_V_CORES;
+
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.ResourceMetricsChecker.ResourceMetricsKey.PENDING_CONTAINERS;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.ResourceMetricsChecker.ResourceMetricsKey.PENDING_MB;
+import static 

[13/50] [abbrv] hadoop git commit: YARN-8870. [Submarine] Add submarine installation scripts. (Xun Liu via wangda)

2018-10-18 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46d6e001/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/scripts/utils.sh
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/scripts/utils.sh
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/scripts/utils.sh
new file mode 100644
index 000..7b3c2a9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/scripts/utils.sh
@@ -0,0 +1,123 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+## @description  check install user
+## @audience public
+## @stabilitystable
+function check_install_user()
+{
+  if [[ $(id -u) -ne 0 ]];then
+echo "This script must be run with a ROOT user!"
+exit # don't call exit_install()
+  fi
+}
+
+## @description  exit install
+## @audience public
+## @stabilitystable
+function exit_install()
+{
+  echo "Exit the installation!" | tee -a $LOG
+  exit $1
+}
+
+## @description  Check if the IP address format is correct
+## @audience public
+## @stabilitystable
+function valid_ip()
+{
+  local ip=$1
+  local stat=1
+
+  if [[ $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
+OIFS=$IFS
+IFS='.'
+ip=($ip)
+IFS=$OIFS
+
+if [[ ${ip[0]} -le 255 && ${ip[1]} -le 255 && ${ip[2]} -le 255 && ${ip[3]} 
-le 255 ]]; then
+  stat=$?
+fi
+  fi
+
+  return $stat
+}
+
+## @description  Check if the configuration file configuration is correct
+## @audience public
+## @stabilitystable
+function check_install_conf()
+{
+  echo "Check if the configuration file configuration is correct ..." | tee -a 
$LOG
+
+  # check etcd conf
+  hostCount=${#ETCD_HOSTS[@]}
+  if [[ $hostCount -lt 3 && hostCount -ne 0 ]]; then # <>2
+echo "Number of nodes = [$hostCount], must be configured to be greater 
than or equal to 3 servers! " | tee -a $LOG
+exit_install
+  fi
+  for ip in ${ETCD_HOSTS[@]}
+  do
+if ! valid_ip $ip; then
+  echo "]ETCD_HOSTS=[$ip], IP address format is incorrect! " | tee -a $LOG
+  exit_install
+fi
+  done
+  echo "Check if the configuration file configuration is correct [ Done ]" | 
tee -a $LOG
+}
+
+## @description  index by EtcdHosts list
+## @audience public
+## @stabilitystable
+function indexByEtcdHosts() {
+  index=0
+  while [ "$index" -lt "${#ETCD_HOSTS[@]}" ]; do
+if [ "${ETCD_HOSTS[$index]}" = "$1" ]; then
+  echo $index
+  return
+fi
+let "index++"
+  done
+  echo ""
+}
+
+## @description  get local IP
+## @audience public
+## @stabilitystable
+function getLocalIP()
+{
+  local _ip _myip _line _nl=$'\n'
+  while IFS=$': \t' read -a _line ;do
+  [ -z "${_line%inet}" ] &&
+ _ip=${_line[${#_line[1]}>4?1:2]} &&
+ [ "${_ip#127.0.0.1}" ] && _myip=$_ip
+done< <(LANG=C /sbin/ifconfig)
+  printf ${1+-v} $1 "%s${_nl:0:$[${#1}>0?0:1]}" $_myip
+}
+
+## @description  get ip list
+## @audience public
+## @stabilitystable
+function get_ip_list()
+{
+  array=$(ifconfig | grep inet | grep -v inet6 | grep -v 127 | sed 's/^[ 
\t]*//g' | cut -d ' ' -f2)
+
+  for ip in ${array[@]}
+  do
+LOCAL_HOST_IP_LIST+=(${ip})
+  done
+}





[05/50] [abbrv] hadoop git commit: HADOOP-15826. @Retries annotation of putObject() call & uses wrong. Contributed by Steve Loughran and Ewan Higgs.

2018-10-18 Thread xyao
HADOOP-15826. @Retries annotation of putObject() call & uses wrong.
Contributed by Steve Loughran and Ewan Higgs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d59ca43b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d59ca43b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d59ca43b

Branch: refs/heads/HDDS-4
Commit: d59ca43bff8a457ce7ab62a61acd89aacbe71b93
Parents: f90c64e
Author: Steve Loughran 
Authored: Tue Oct 16 20:02:54 2018 +0100
Committer: Steve Loughran 
Committed: Tue Oct 16 20:02:54 2018 +0100

--
 .../src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java   | 5 -
 .../java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java | 4 ++--
 2 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d59ca43b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index df0ec5d..3c432fc 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -2449,11 +2449,14 @@ public class S3AFileSystem extends FileSystem 
implements StreamCapabilities {
* Wait for an upload to complete.
* If the waiting for completion is interrupted, the upload will be
* aborted before an {@code InterruptedIOException} is thrown.
-   * @param upload upload to wait for
+   * If the upload (or its result collection) failed, this is where
+   * the failure is raised as an AWS exception
* @param key destination key
+   * @param uploadInfo upload to wait for
* @return the upload result
* @throws InterruptedIOException if the blocking was interrupted.
*/
+  @Retries.OnceRaw
   UploadResult waitForUploadCompletion(String key, UploadInfo uploadInfo)
   throws InterruptedIOException {
 Upload upload = uploadInfo.getUpload();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d59ca43b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java
index a85a87f..a5f6817 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java
@@ -436,7 +436,7 @@ public class WriteOperationHelper {
* @return the result of the operation
* @throws IOException on problems
*/
-  @Retries.OnceTranslated
+  @Retries.RetryTranslated
   public UploadResult uploadObject(PutObjectRequest putObjectRequest)
   throws IOException {
 // no retry; rely on xfer manager logic
@@ -451,7 +451,7 @@ public class WriteOperationHelper {
* @throws IOException on problems
* @param destKey destination key
*/
-  @Retries.RetryTranslated
+  @Retries.OnceTranslated
   public void revertCommit(String destKey) throws IOException {
 once("revert commit", destKey,
 () -> {
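
For readers unfamiliar with these annotations, a small sketch of the idiom they
document; Invoker.once() is the S3A helper behind the once() call above, and
the action and path strings here are placeholders:

    import java.io.IOException;
    import org.apache.hadoop.fs.s3a.Invoker;

    public class OnceTranslatedSketch {
      public static void main(String[] args) throws IOException {
        // @Retries.OnceTranslated semantics: a single attempt, with AWS SDK
        // exceptions translated into IOExceptions rather than retried.
        Invoker.once("example action", "s3a://bucket/key",
            () -> System.out.println("one non-retried attempt"));
      }
    }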





[43/50] [abbrv] hadoop git commit: HDDS-100. SCM CA: generate public/private key pair for SCM/OM/DNs. Contributed by Ajay Kumar.

2018-10-18 Thread xyao
HDDS-100. SCM CA: generate public/private key pair for SCM/OM/DNs. Contributed 
by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8805c414
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8805c414
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8805c414

Branch: refs/heads/HDDS-4
Commit: 8805c414a6375593769f522a21308af8868297d4
Parents: 6d906d3
Author: Xiaoyu Yao 
Authored: Fri Jun 8 08:33:58 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed Oct 17 14:04:03 2018 -0700

--
 hadoop-hdds/common/pom.xml  |   6 +-
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |  19 ++
 .../hdds/security/x509/HDDSKeyGenerator.java|  99 
 .../hdds/security/x509/HDDSKeyPEMWriter.java| 254 +++
 .../hdds/security/x509/SecurityConfig.java  | 190 ++
 .../hadoop/hdds/security/x509/package-info.java |  25 ++
 .../common/src/main/resources/ozone-default.xml |  42 ++-
 .../security/x509/TestHDDSKeyGenerator.java |  81 ++
 .../security/x509/TestHDDSKeyPEMWriter.java | 213 
 .../ozone/TestOzoneConfigurationFields.java |   6 +
 10 files changed, 933 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8805c414/hadoop-hdds/common/pom.xml
--
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index 65cd1d1..94455c1 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -75,7 +75,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>rocksdbjni</artifactId>
       <version>5.14.2</version>
     </dependency>
-
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
@@ -104,6 +103,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <version>2.6.0</version>
     </dependency>
 
+    <dependency>
+      <groupId>org.bouncycastle</groupId>
+      <artifactId>bcprov-jdk15on</artifactId>
+      <version>1.49</version>
+    </dependency>
   </dependencies>
 
   <build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8805c414/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 210b075..0b5e4e0 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -97,4 +97,23 @@ public final class HddsConfigKeys {
   "hdds.lock.max.concurrency";
   public static final int HDDS_LOCK_MAX_CONCURRENCY_DEFAULT = 100;
 
+  public static final String HDDS_KEY_LEN = "hdds.key.len";
+  public static final int HDDS_DEFAULT_KEY_LEN = 2048;
+  public static final String HDDS_KEY_ALGORITHM = "hdds.key.algo";
+  public static final String HDDS_DEFAULT_KEY_ALGORITHM = "RSA";
+  public static final String HDDS_SECURITY_PROVIDER = "hdds.security.provider";
+  public static final String HDDS_DEFAULT_SECURITY_PROVIDER = "BC";
+  public static final String HDDS_KEY_DIR_NAME = "hdds.key.dir.name";
+  public static final String HDDS_KEY_DIR_NAME_DEFAULT = "keys";
+
+  // TODO : Talk to StorageIO classes and see if they can return a secure
+  // storage location for each node.
+  public static final String HDDS_METADATA_DIR_NAME = "hdds.metadata.dir";
+  public static final String HDDS_PRIVATE_KEY_FILE_NAME =
+  "hdds.priv.key.file.name";
+  public static final String HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT = 
"private.pem";
+  public static final String HDDS_PUBLIC_KEY_FILE_NAME = "hdds.public.key.file"
+  + ".name";
+  public static final String HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT = "public.pem";
+>>> HDDS-100. SCM CA: generate public/private key pair for SCM/OM/DNs. 
Contributed by Ajay Kumar.
 }
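
A hedged sketch of the key generation these defaults drive, reduced to plain
JCA calls. The HDDSKeyGenerator class added by this patch wraps the same
primitives; this is not its API, just the underlying mechanics:

    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.security.Security;
    import org.bouncycastle.jce.provider.BouncyCastleProvider;

    public class HddsKeyPairSketch {
      public static void main(String[] args) throws Exception {
        // hdds.security.provider=BC, hdds.key.algo=RSA, hdds.key.len=2048
        Security.addProvider(new BouncyCastleProvider());
        KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA", "BC");
        generator.initialize(2048);
        KeyPair pair = generator.generateKeyPair();
        System.out.println("generated " + pair.getPublic().getAlgorithm()
            + " key pair");
      }
    }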

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8805c414/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyGenerator.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyGenerator.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyGenerator.java
new file mode 100644
index 000..cb411b2
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyGenerator.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of 

[41/50] [abbrv] hadoop git commit: HDDS-70. Fix config names for secure ksm and scm. Contributed by Ajay Kumar.

2018-10-18 Thread xyao
HDDS-70. Fix config names for secure ksm and scm. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d906d35
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d906d35
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d906d35

Branch: refs/heads/HDDS-4
Commit: 6d906d357abe051b63411cf69768ec9baac71ab6
Parents: 7cae8bd
Author: Xiaoyu Yao 
Authored: Tue May 22 13:32:28 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed Oct 17 14:04:03 2018 -0700

--
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  6 +-
 .../scm/protocol/ScmBlockLocationProtocol.java  |  2 +-
 .../StorageContainerLocationProtocol.java   |  3 +-
 .../protocolPB/ScmBlockLocationProtocolPB.java  |  4 +-
 .../StorageContainerLocationProtocolPB.java |  2 +-
 .../apache/hadoop/ozone/OzoneConfigKeys.java|  1 -
 .../common/src/main/resources/ozone-default.xml | 31 +---
 .../StorageContainerDatanodeProtocol.java   |  2 +-
 .../StorageContainerDatanodeProtocolPB.java |  2 +-
 .../scm/server/StorageContainerManager.java | 18 ++---
 .../compose/compose-secure/docker-compose.yaml  |  6 +-
 .../test/compose/compose-secure/docker-config   | 12 +--
 .../acceptance/ozone-secure.robot   | 12 +--
 .../ozone/client/protocol/ClientProtocol.java   |  2 +-
 .../apache/hadoop/ozone/ksm/KSMConfigKeys.java  | 84 
 .../ozone/om/protocol/OzoneManagerProtocol.java |  4 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java| 21 +++--
 .../apache/hadoop/ozone/om/OzoneManager.java|  4 +-
 18 files changed, 151 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d906d35/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index ad5f4da..7bf1148 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -162,9 +162,9 @@ public final class ScmConfigKeys {
   "ozone.scm.http-address";
   public static final String OZONE_SCM_HTTPS_ADDRESS_KEY =
   "ozone.scm.https-address";
-  public static final String OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY =
-  "ozone.scm.kerberos.keytab.file";
-  public static final String OZONE_SCM_KERBEROS_PRINCIPAL_KEY = 
"ozone.scm.kerberos.principal";
+  public static final String HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY =
+  "hdds.scm.kerberos.keytab.file";
+  public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY = 
"hdds.scm.kerberos.principal";
   public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
   public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
   public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d906d35/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
index e17f1c2..2d46ae0 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
@@ -33,7 +33,7 @@ import java.util.List;
  * ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes
  * to read/write a block.
  */
-@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
 public interface ScmBlockLocationProtocol {
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d906d35/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index 6f20c72..908244b 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ 

[39/50] [abbrv] hadoop git commit: HDDS-7. Enable kerberos auth for Ozone client in hadoop rpc. Contributed by Ajay Kumar.

2018-10-18 Thread xyao
HDDS-7. Enable kerberos auth for Ozone client in hadoop rpc. Contributed by 
Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7cae8bd3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7cae8bd3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7cae8bd3

Branch: refs/heads/HDDS-4
Commit: 7cae8bd34c1ee978fce58ded030c89a9e09af100
Parents: eee307b
Author: Xiaoyu Yao 
Authored: Fri May 18 13:09:17 2018 -0700
Committer: Xiaoyu Yao 
Committed: Wed Oct 17 13:48:55 2018 -0700

--
 .../src/test/compose/compose-secure/.env| 17 
 .../compose/compose-secure/docker-compose.yaml  | 66 ++
 .../test/compose/compose-secure/docker-config   | 66 ++
 .../acceptance/ozone-secure.robot   | 95 
 .../hadoop/ozone/client/rest/RestClient.java|  4 +-
 .../hadoop/ozone/client/rpc/RpcClient.java  |  6 +-
 6 files changed, 248 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cae8bd3/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
--
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env 
b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
new file mode 100644
index 000..3254735
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+OZONEDIR=../../../hadoop-dist/target/ozone
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cae8bd3/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
--
diff --git 
a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
 
b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
new file mode 100644
index 000..2661163
--- /dev/null
+++ 
b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
@@ -0,0 +1,66 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: "3"
+services:
+   ozone.kdc:
+      image: ahadoop/kdc:v1
+   namenode:
+      image: ahadoop/ozone:v1
+      hostname: namenode
+      volumes:
+         - ${OZONEDIR}:/opt/hadoop
+      ports:
+         - 9000:9000
+      environment:
+         ENSURE_NAMENODE_DIR: /data/namenode
+      env_file:
+         - ./docker-config
+      command: ["/opt/hadoop/bin/hdfs","namenode"]
+   datanode:
+      image: ahadoop/ozone:v1
+      hostname: datanode
+      volumes:
+         - ${OZONEDIR}:/opt/hadoop
+      ports:
+         - 9874
+      env_file:
+         - ./docker-config
+      command: ["/opt/hadoop/bin/ozone","datanode"]
+   ksm:
+      image: ahadoop/ozone:v1
+      hostname: ksm
+      volumes:
+         - ${OZONEDIR}:/opt/hadoop
+      ports:
+         - 9874:9874
+      environment:
+         ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+      env_file:
+         - ./docker-config
+      command: ["/opt/hadoop/bin/ozone","ksm"]
+   scm:
+      image: ahadoop/ozone:v1
+      hostname: scm
+      volumes:
+         - ${OZONEDIR}:/opt/hadoop
+      ports:
+         - 9876:9876

[09/50] [abbrv] hadoop git commit: MAPREDUCE-7150. Optimize collections used by MR JHS to reduce its memory. (Contributed by Misha Dmitriev)

2018-10-18 Thread xyao
MAPREDUCE-7150. Optimize collections used by MR JHS to reduce its memory. 
(Contributed by Misha Dmitriev)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/babd1449
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/babd1449
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/babd1449

Branch: refs/heads/HDDS-4
Commit: babd1449bf8898f44c434c852e67240721c0eb00
Parents: c2288ac
Author: Haibo Chen 
Authored: Tue Oct 16 13:44:41 2018 -0700
Committer: Haibo Chen 
Committed: Tue Oct 16 13:44:41 2018 -0700

--
 .../counters/FileSystemCounterGroup.java| 56 
 .../mapreduce/jobhistory/JobHistoryParser.java  |  2 +-
 .../hadoop/mapreduce/v2/hs/CompletedTask.java   |  5 +-
 .../mapreduce/v2/hs/CompletedTaskAttempt.java   |  2 +-
 4 files changed, 38 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/babd1449/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
index 046368e..ed7f271 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
@@ -61,8 +61,9 @@ public abstract class FileSystemCounterGroup
 
   // C[] would need Array.newInstance which requires a Class reference.
   // Just a few local casts probably worth not having to carry it around.
-  private final Map<String, Object[]> map =
-new ConcurrentSkipListMap<String, Object[]>();
+  // Initialized lazily, since in some situations millions of empty maps can
+  // waste a substantial (e.g. 4% as we observed) portion of the heap
+  private Map<String, Object[]> map;
   private String displayName;
 
   private static final Joiner NAME_JOINER = Joiner.on('_');
@@ -214,6 +215,9 @@ public abstract class FileSystemCounterGroup
   @SuppressWarnings("unchecked")
   public synchronized C findCounter(String scheme, FileSystemCounter key) {
 final String canonicalScheme = checkScheme(scheme);
+if (map == null) {
+  map = new ConcurrentSkipListMap<>();
+}
 Object[] counters = map.get(canonicalScheme);
 int ord = key.ordinal();
 if (counters == null) {
@@ -247,10 +251,12 @@ public abstract class FileSystemCounterGroup
   protected abstract C newCounter(String scheme, FileSystemCounter key);
 
   @Override
-  public int size() {
+  public synchronized int size() {
 int n = 0;
-for (Object[] counters : map.values()) {
-  n += numSetCounters(counters);
+if (map != null) {
+  for (Object[] counters : map.values()) {
+n += numSetCounters(counters);
+  }
 }
 return n;
   }
@@ -271,19 +277,23 @@ public abstract class FileSystemCounterGroup
* FileSystemGroup ::= #scheme (scheme #counter (key value)*)*
*/
   @Override
-  public void write(DataOutput out) throws IOException {
-WritableUtils.writeVInt(out, map.size()); // #scheme
-for (Map.Entry<String, Object[]> entry : map.entrySet()) {
-  WritableUtils.writeString(out, entry.getKey()); // scheme
-  // #counter for the above scheme
-  WritableUtils.writeVInt(out, numSetCounters(entry.getValue()));
-  for (Object counter : entry.getValue()) {
-if (counter == null) continue;
-@SuppressWarnings("unchecked")
-FSCounter c = (FSCounter) ((Counter)counter).getUnderlyingCounter();
-WritableUtils.writeVInt(out, c.key.ordinal());  // key
-WritableUtils.writeVLong(out, c.getValue());// value
+  public synchronized void write(DataOutput out) throws IOException {
+if (map != null) {
+  WritableUtils.writeVInt(out, map.size()); // #scheme
+  for (Map.Entry<String, Object[]> entry : map.entrySet()) {
+WritableUtils.writeString(out, entry.getKey()); // scheme
+// #counter for the above scheme
+WritableUtils.writeVInt(out, numSetCounters(entry.getValue()));
+for (Object counter : entry.getValue()) {
+  if (counter == null) continue;
+  @SuppressWarnings("unchecked")
+  FSCounter c = (FSCounter) ((Counter) counter).getUnderlyingCounter();
+  WritableUtils.writeVInt(out, c.key.ordinal());  // key
+  WritableUtils.writeVLong(out, c.getValue());// value
+  
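The lazily initialized map above trades a null check inside synchronized accessors for the heap cost of millions of empty ConcurrentSkipListMaps. A self-contained sketch of the same pattern; the class and the four-slot counter array are hypothetical, not the MR code:

import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;

final class LazyCounterGroupSketch {
  // Allocated on first use, so millions of empty groups no longer
  // each carry an empty ConcurrentSkipListMap on the heap.
  private Map<String, Object[]> map;

  synchronized Object[] counters(String scheme) {
    if (map == null) {
      map = new ConcurrentSkipListMap<>();
    }
    return map.computeIfAbsent(scheme, k -> new Object[4]);
  }

  // Every reader must also synchronize, exactly as the patch does for
  // size() and write(), or it may observe a half-published map.
  synchronized int size() {
    return map == null ? 0 : map.size();
  }
}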

hadoop git commit: HDFS-13990. Synchronization Issue With HashResolver. Contributed by BELUGA BEHR.

2018-10-18 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/trunk ba7e81667 -> 1e78dfca4


HDFS-13990. Synchronization Issue With HashResolver. Contributed by BELUGA BEHR.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e78dfca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e78dfca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e78dfca

Branch: refs/heads/trunk
Commit: 1e78dfca40ed2242bf7785e6f23672c40931bf79
Parents: ba7e816
Author: Inigo Goiri 
Authored: Thu Oct 18 10:05:27 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Oct 18 10:05:27 2018 -0700

--
 .../server/federation/resolver/order/HashResolver.java| 10 +++---
 1 file changed, 3 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e78dfca/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/HashResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/HashResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/HashResolver.java
index 4034a46..455a3ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/HashResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/HashResolver.java
@@ -98,13 +98,9 @@ public class HashResolver implements OrderedResolver {
* namespaces using the provided set of namespace identifiers.
*/
  private ConsistentHashRing getHashResolver(final Set<String> namespaces) {
-int hash = namespaces.hashCode();
-ConsistentHashRing resolver = this.hashResolverMap.get(hash);
-if (resolver == null) {
-  resolver = new ConsistentHashRing(namespaces);
-  this.hashResolverMap.put(hash, resolver);
-}
-return resolver;
+final int hash = namespaces.hashCode();
+return this.hashResolverMap.computeIfAbsent(hash,
+k -> new ConsistentHashRing(namespaces));
   }
 
   /**
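The bug was a check-then-act race on hashResolverMap: two threads could both observe a missing ring and both insert one, with the second put() silently replacing the first. A sketch, with generic names rather than the RBF classes, of why computeIfAbsent on a concurrent map closes that window:

import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

final class ResolverCacheSketch<K, V> {
  private final ConcurrentHashMap<K, V> cache = new ConcurrentHashMap<>();

  // Racy: both threads may pass the null check and build duplicate values,
  // and the second put() silently replaces the first.
  V getRacy(K key, Function<K, V> factory) {
    V v = cache.get(key);
    if (v == null) {
      v = factory.apply(key);
      cache.put(key, v);
    }
    return v;
  }

  // Atomic: ConcurrentHashMap invokes the factory at most once per key.
  V getAtomic(K key, Function<K, V> factory) {
    return cache.computeIfAbsent(key, factory);
  }
}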





hadoop git commit: YARN-8687. Update YARN service file type in documentation. Contributed by Suma Shivaprasad

2018-10-18 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 16e1ab4c9 -> 3cb52958b


YARN-8687. Update YARN service file type in documentation.
   Contributed by Suma Shivaprasad


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3cb52958
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3cb52958
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3cb52958

Branch: refs/heads/branch-3.1
Commit: 3cb52958b233a5dedf30bacdbdd1977427655d5f
Parents: 16e1ab4
Author: Eric Yang 
Authored: Thu Oct 18 12:02:10 2018 -0400
Committer: Eric Yang 
Committed: Thu Oct 18 12:04:27 2018 -0400

--
 .../hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md| 2 +-
 .../src/site/markdown/yarn-service/YarnServiceAPI.md   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cb52958/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
index 73e00b3..da7a9c4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
@@ -48,7 +48,7 @@ Note this example requires registry DNS.
   "configuration": {
 "files": [
   {
-"type": "ENV",
+"type": "TEMPLATE",
 "dest_file": "/var/www/html/index.html",
 "properties": {
   "content": 
"TitleHello from 
${COMPONENT_INSTANCE_NAME}!"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cb52958/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
index 4bfa742..cc4f358 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
@@ -252,7 +252,7 @@ A config file that needs to be created and made available 
as a volume in a servi
 
 |Name|Description|Required|Schema|Default|
 ||||||
-|type|Config file in the standard format like xml, properties, json, yaml, 
template or static/archive resource files. When static/archive types are 
specified, file must be uploaded to remote file system before launching the 
job, and YARN service framework will localize files prior to launching 
containers. Archive files are unwrapped during localization |false|enum (XML, 
PROPERTIES, JSON, YAML, TEMPLATE, ENV, HADOOP_XML, STATIC, ARCHIVE)||
+|type|Config file in the standard format like xml, properties, json, yaml, 
template or static/archive resource files. When static/archive types are 
specified, file must be uploaded to remote file system before launching the 
job, and YARN service framework will localize files prior to launching 
containers. Archive files are unwrapped during localization |false|enum (XML, 
PROPERTIES, JSON, YAML, TEMPLATE, HADOOP_XML, STATIC, ARCHIVE)||
 |dest_file|The path that this configuration file should be created as. If it 
is an absolute path, it will be mounted into the DOCKER container. Absolute 
paths are only allowed for DOCKER containers.  If it is a relative path, only 
the file name should be provided, and the file will be created in the container 
local working directory under a folder named conf for all types other than 
static/archive. For static/archive resource types, the files are available 
under resources directory.|false|string||
 |src_file|This provides the source location of the configuration file, the 
content of which is dumped to dest_file post property substitutions, in the 
format as specified in type. Typically the src_file would point to a source 
controlled network accessible file maintained by tools like puppet, chef, or 
hdfs etc. Currently, only hdfs is supported.|false|string||
 |properties|A blob of key value pairs that will be dumped in the dest_file in 
the format as specified in type. If src_file is specified, src_file content are 
dumped in the dest_file and these properties will overwrite, if any, existing 
properties in src_file or be added as new properties in src_file.|false|object||



hadoop git commit: YARN-8687. Update YARN service file type in documentation. Contributed by Suma Shivaprasad

2018-10-18 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 bde4fd5ed -> 4681e17a3


YARN-8687. Update YARN service file type in documentation.
   Contributed by Suma Shivaprasad


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4681e17a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4681e17a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4681e17a

Branch: refs/heads/branch-3.2
Commit: 4681e17a364a0d7f2ffa0cbeb2fe7d537b4becd7
Parents: bde4fd5
Author: Eric Yang 
Authored: Thu Oct 18 12:02:10 2018 -0400
Committer: Eric Yang 
Committed: Thu Oct 18 12:04:04 2018 -0400

--
 .../hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md| 2 +-
 .../src/site/markdown/yarn-service/YarnServiceAPI.md   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4681e17a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
index 73e00b3..da7a9c4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
@@ -48,7 +48,7 @@ Note this example requires registry DNS.
   "configuration": {
 "files": [
   {
-"type": "ENV",
+"type": "TEMPLATE",
 "dest_file": "/var/www/html/index.html",
 "properties": {
   "content": 
"TitleHello from 
${COMPONENT_INSTANCE_NAME}!"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4681e17a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
index 7b1e74a..fe49158 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
@@ -252,7 +252,7 @@ A config file that needs to be created and made available 
as a volume in a servi
 
 |Name|Description|Required|Schema|Default|
 ||||||
-|type|Config file in the standard format like xml, properties, json, yaml, 
template or static/archive resource files. When static/archive types are 
specified, file must be uploaded to remote file system before launching the 
job, and YARN service framework will localize files prior to launching 
containers. Archive files are unwrapped during localization |false|enum (XML, 
PROPERTIES, JSON, YAML, TEMPLATE, ENV, HADOOP_XML, STATIC, ARCHIVE)||
+|type|Config file in the standard format like xml, properties, json, yaml, 
template or static/archive resource files. When static/archive types are 
specified, file must be uploaded to remote file system before launching the 
job, and YARN service framework will localize files prior to launching 
containers. Archive files are unwrapped during localization |false|enum (XML, 
PROPERTIES, JSON, YAML, TEMPLATE, HADOOP_XML, STATIC, ARCHIVE)||
 |dest_file|The path that this configuration file should be created as. If it 
is an absolute path, it will be mounted into the DOCKER container. Absolute 
paths are only allowed for DOCKER containers.  If it is a relative path, only 
the file name should be provided, and the file will be created in the container 
local working directory under a folder named conf for all types other than 
static/archive. For static/archive resource types, the files are available 
under resources directory.|false|string||
 |src_file|This provides the source location of the configuration file, the 
content of which is dumped to dest_file post property substitutions, in the 
format as specified in type. Typically the src_file would point to a source 
controlled network accessible file maintained by tools like puppet, chef, or 
hdfs etc. Currently, only hdfs is supported.|false|string||
 |properties|A blob of key value pairs that will be dumped in the dest_file in 
the format as specified in type. If src_file is specified, src_file content are 
dumped in the dest_file and these properties will overwrite, if any, existing 
properties in src_file or be added as new properties in src_file.|false|object||



hadoop git commit: YARN-8687. Update YARN service file type in documentation. Contributed by Suma Shivaprasad

2018-10-18 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 32fe351bb -> ba7e81667


YARN-8687. Update YARN service file type in documentation.
   Contributed by Suma Shivaprasad


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba7e8166
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba7e8166
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba7e8166

Branch: refs/heads/trunk
Commit: ba7e81667ce12d5cf9d87ee18a8627323759cee0
Parents: 32fe351
Author: Eric Yang 
Authored: Thu Oct 18 12:02:10 2018 -0400
Committer: Eric Yang 
Committed: Thu Oct 18 12:02:10 2018 -0400

--
 .../hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md| 2 +-
 .../src/site/markdown/yarn-service/YarnServiceAPI.md   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba7e8166/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
index 73e00b3..da7a9c4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md
@@ -48,7 +48,7 @@ Note this example requires registry DNS.
   "configuration": {
 "files": [
   {
-"type": "ENV",
+"type": "TEMPLATE",
 "dest_file": "/var/www/html/index.html",
 "properties": {
   "content": 
"TitleHello from 
${COMPONENT_INSTANCE_NAME}!"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba7e8166/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
index 7b1e74a..fe49158 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
@@ -252,7 +252,7 @@ A config file that needs to be created and made available 
as a volume in a servi
 
 |Name|Description|Required|Schema|Default|
 ||||||
-|type|Config file in the standard format like xml, properties, json, yaml, 
template or static/archive resource files. When static/archive types are 
specified, file must be uploaded to remote file system before launching the 
job, and YARN service framework will localize files prior to launching 
containers. Archive files are unwrapped during localization |false|enum (XML, 
PROPERTIES, JSON, YAML, TEMPLATE, ENV, HADOOP_XML, STATIC, ARCHIVE)||
+|type|Config file in the standard format like xml, properties, json, yaml, 
template or static/archive resource files. When static/archive types are 
specified, file must be uploaded to remote file system before launching the 
job, and YARN service framework will localize files prior to launching 
containers. Archive files are unwrapped during localization |false|enum (XML, 
PROPERTIES, JSON, YAML, TEMPLATE, HADOOP_XML, STATIC, ARCHIVE)||
 |dest_file|The path that this configuration file should be created as. If it 
is an absolute path, it will be mounted into the DOCKER container. Absolute 
paths are only allowed for DOCKER containers.  If it is a relative path, only 
the file name should be provided, and the file will be created in the container 
local working directory under a folder named conf for all types other than 
static/archive. For static/archive resource types, the files are available 
under resources directory.|false|string||
 |src_file|This provides the source location of the configuration file, the 
content of which is dumped to dest_file post property substitutions, in the 
format as specified in type. Typically the src_file would point to a source 
controlled network accessible file maintained by tools like puppet, chef, or 
hdfs etc. Currently, only hdfs is supported.|false|string||
 |properties|A blob of key value pairs that will be dumped in the dest_file in 
the format as specified in type. If src_file is specified, src_file content are 
dumped in the dest_file and these properties will overwrite, if any, existing 
properties in src_file or be added as new properties in src_file.|false|object||



hadoop git commit: YARN-8864. NM incorrectly logs container user as the user who sent a start/stop container request in its audit log. (Contributed by Wilfred Spiegelenburg)

2018-10-18 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2202e00ba -> 32fe351bb


YARN-8864. NM incorrectly logs container user as the user who sent a start/stop 
container request in its audit log. (Contributed by Wilfred Spiegelenburg)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32fe351b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32fe351b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32fe351b

Branch: refs/heads/trunk
Commit: 32fe351bb654e684f127f47ab808c497e0d3f258
Parents: 2202e00
Author: Haibo Chen 
Authored: Thu Oct 18 08:27:13 2018 -0700
Committer: Haibo Chen 
Committed: Thu Oct 18 08:28:07 2018 -0700

--
 .../containermanager/ContainerManagerImpl.java  | 55 
 .../nodemanager/DummyContainerManager.java  |  3 +-
 .../nodemanager/TestNodeManagerResync.java  |  3 +-
 .../BaseContainerManagerTest.java   | 10 ++--
 .../containermanager/TestContainerManager.java  |  3 +-
 .../TestContainerManagerRecovery.java   |  3 +-
 6 files changed, 47 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32fe351b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 01d70af..8a12c3c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -920,6 +920,7 @@ public class ContainerManagerImpl extends CompositeService 
implements
   public StartContainersResponse startContainers(
   StartContainersRequest requests) throws YarnException, IOException {
 UserGroupInformation remoteUgi = getRemoteUgi();
+String remoteUser = remoteUgi.getUserName();
 NMTokenIdentifier nmTokenIdentifier = selectNMTokenIdentifier(remoteUgi);
 authorizeUser(remoteUgi, nmTokenIdentifier);
 List<ContainerId> succeededContainers = new ArrayList<ContainerId>();
@@ -953,7 +954,8 @@ public class ContainerManagerImpl extends CompositeService 
implements
   }
   performContainerPreStartChecks(nmTokenIdentifier, request,
   containerTokenIdentifier);
-  startContainerInternal(containerTokenIdentifier, request);
+  startContainerInternal(containerTokenIdentifier, request,
+  remoteUser);
   succeededContainers.add(containerId);
 } catch (YarnException e) {
   failedContainers.put(containerId, 
SerializedException.newInstance(e));
@@ -1061,13 +1063,14 @@ public class ContainerManagerImpl extends 
CompositeService implements
   @SuppressWarnings("unchecked")
   protected void startContainerInternal(
   ContainerTokenIdentifier containerTokenIdentifier,
-  StartContainerRequest request) throws YarnException, IOException {
+  StartContainerRequest request, String remoteUser)
+  throws YarnException, IOException {
 
 ContainerId containerId = containerTokenIdentifier.getContainerID();
 String containerIdStr = containerId.toString();
 String user = containerTokenIdentifier.getApplicationSubmitter();
 
-LOG.info("Start request for " + containerIdStr + " by user " + user);
+LOG.info("Start request for " + containerIdStr + " by user " + remoteUser);
 
 ContainerLaunchContext launchContext = request.getContainerLaunchContext();
 
@@ -1075,14 +1078,14 @@ public class ContainerManagerImpl extends 
CompositeService implements
 for (Map.Entry<String, LocalResource> rsrc : launchContext
 .getLocalResources().entrySet()) {
   if (rsrc.getValue() == null || rsrc.getValue().getResource() == null) {
-throw new YarnException(
-"Null resource URL for local resource " + rsrc.getKey() + " : " + 
rsrc.getValue());
+throw new YarnException("Null resource URL for local resource "
++ rsrc.getKey() + " : " + rsrc.getValue());
   } else if (rsrc.getValue().getType() == null) {
-throw new YarnException(
-"Null resource type for local resource " + rsrc.getKey() + " : " + 
rsrc.getValue());
+throw 
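The substance of the fix is small: the audit line should name the RPC caller, taken from remoteUgi.getUserName(), rather than the application submitter baked into the container token via getApplicationSubmitter(). A simplified sketch with hypothetical signatures:

final class NmAuditSketch {
  static void logStartRequest(String containerId,
      String submitter,    // from the container token: owns the container
      String remoteUser) { // from the RPC's UserGroupInformation: sent the request
    // Before the fix the submitter was logged even when another user issued
    // the start/stop call; after it, the actual caller is recorded.
    System.out.println("Start request for " + containerId
        + " by user " + remoteUser + " (container owner: " + submitter + ")");
  }
}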

hadoop git commit: HDFS-13668. FSPermissionChecker may throw AIOOE when checking inode permission. Contributed by He Xiaoqiao.

2018-10-18 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 ea19a3613 -> b34c650a4


HDFS-13668. FSPermissionChecker may throw AIOOE when checking inode permission.
Contributed by He Xiaoqiao.

(cherry picked from commit 975d60685eaf9961bdbd3547600b3e38bb088835)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b34c650a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b34c650a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b34c650a

Branch: refs/heads/branch-3.0
Commit: b34c650a417e902060679f1241cf96d621d3c2cf
Parents: ea19a36
Author: drankye 
Authored: Mon Aug 13 17:32:56 2018 +0800
Committer: Wei-Chiu Chuang 
Committed: Thu Oct 18 08:31:12 2018 -0700

--
 .../server/namenode/FSPermissionChecker.java|  2 +-
 .../namenode/TestINodeAttributeProvider.java| 43 ++--
 2 files changed, 41 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b34c650a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index 63700bf..9a6d790 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -409,7 +409,7 @@ public class FSPermissionChecker implements 
AccessControlEnforcer {
 }
 final FsPermission mode = inode.getFsPermission();
 final AclFeature aclFeature = inode.getAclFeature();
-if (aclFeature != null) {
+if (aclFeature != null && aclFeature.getEntriesSize() > 0) {
   // It's possible that the inode has a default ACL but no access ACL.
   int firstEntry = aclFeature.getEntryAt(0);
   if (AclEntryStatusFormat.getScope(firstEntry) == AclEntryScope.ACCESS) {
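The failure mode behind the new getEntriesSize() > 0 guard: an inode may hold an AclFeature containing only a default ACL, so its access-entry array is empty and getEntryAt(0) indexes past the end. A minimal, hypothetical reproduction of the guard:

final class EmptyAclGuardSketch {
  public static void main(String[] args) {
    int[] entries = new int[0];   // an AclFeature with no access entries
    // Unguarded, entries[0] throws ArrayIndexOutOfBoundsException (AIOOE);
    // the patch's size check falls through to plain permission bits instead.
    if (entries.length > 0) {
      System.out.println("first access entry = " + entries[0]);
    } else {
      System.out.println("no access entries; use the inode's mode bits");
    }
  }
}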

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b34c650a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
index 9c7dcd3..b3bab06 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
@@ -57,6 +57,11 @@ public class TestINodeAttributeProvider {
   public static class MyAuthorizationProvider extends INodeAttributeProvider {
 
 public static class MyAccessControlEnforcer implements 
AccessControlEnforcer {
+  AccessControlEnforcer ace;
+
+  public MyAccessControlEnforcer(AccessControlEnforcer defaultEnforcer) {
+this.ace = defaultEnforcer;
+  }
 
   @Override
   public void checkPermission(String fsOwner, String supergroup,
@@ -65,6 +70,13 @@ public class TestINodeAttributeProvider {
   int ancestorIndex, boolean doCheckOwner, FsAction ancestorAccess,
   FsAction parentAccess, FsAction access, FsAction subAccess,
   boolean ignoreEmptyDir) throws AccessControlException {
+if (ancestorIndex > 1
+&& inodes[1].getLocalName().equals("user")
+&& inodes[2].getLocalName().equals("acl")) {
+  this.ace.checkPermission(fsOwner, supergroup, ugi, inodeAttrs, 
inodes,
+  pathByNameArr, snapshotId, path, ancestorIndex, doCheckOwner,
+  ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
+}
 CALLED.add("checkPermission|" + ancestorAccess + "|" + parentAccess + 
"|" + access);
   }
 }
@@ -84,6 +96,7 @@ public class TestINodeAttributeProvider {
 final INodeAttributes inode) {
   CALLED.add("getAttributes");
   final boolean useDefault = useDefault(pathElements);
+  final boolean useNullAcl = useNullAclFeature(pathElements);
   return new INodeAttributes() {
 @Override
 public boolean isDirectory() {
@@ -126,7 +139,10 @@ public class TestINodeAttributeProvider {
 @Override
 public AclFeature getAclFeature() {
   AclFeature f;
-  if (useDefault) {
+  if (useNullAcl) {
+int[] entries = new int[0];
+  

hadoop git commit: HDDS-687. Exception while loading containers during SCM startup. Contributed by Lokesh Jain.

2018-10-18 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 ba7d46d21 -> eeb1d432a


HDDS-687. Exception while loading containers during SCM startup. Contributed by 
Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eeb1d432
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eeb1d432
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eeb1d432

Branch: refs/heads/ozone-0.3
Commit: eeb1d432a8c5e822d1737bab79448d06092a736d
Parents: ba7d46d
Author: Nandakumar 
Authored: Thu Oct 18 18:03:33 2018 +0530
Committer: Nandakumar 
Committed: Thu Oct 18 18:03:33 2018 +0530

--
 .../apache/hadoop/hdds/scm/container/SCMContainerManager.java  | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eeb1d432/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index 9d6cadb..ec2a8d0 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -170,8 +170,10 @@ public class SCMContainerManager implements 
ContainerManager {
 try {
   for (ContainerInfo container : containerList) {
 containerStateManager.addExistingContainer(container);
-pipelineSelector.addContainerToPipeline(
-container.getPipelineID(), container.getContainerID());
+if (container.isContainerOpen()) {
+  pipelineSelector.addContainerToPipeline(container.getPipelineID(),
+  container.getContainerID());
+}
   }
 } catch (SCMException ex) {
   LOG.error("Unable to create a container information. ", ex);





[2/2] hadoop git commit: Preparing for 3.2.0 release

2018-10-18 Thread sunilg
Preparing for 3.2.0 release


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bde4fd5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bde4fd5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bde4fd5e

Branch: refs/heads/branch-3.2
Commit: bde4fd5ed9eed8edc4376a8ec5fd1c59cb3dc0b3
Parents: 30b65ea
Author: Sunil G 
Authored: Thu Oct 18 17:07:45 2018 +0530
Committer: Sunil G 
Committed: Thu Oct 18 17:07:45 2018 +0530

--
 hadoop-assemblies/pom.xml |  4 ++--
 hadoop-build-tools/pom.xml|  2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml   |  4 ++--
 .../hadoop-client-check-invariants/pom.xml|  4 ++--
 .../hadoop-client-check-test-invariants/pom.xml   |  4 ++--
 .../hadoop-client-integration-tests/pom.xml   |  4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml   |  4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml   |  4 ++--
 hadoop-client-modules/hadoop-client/pom.xml   |  4 ++--
 hadoop-client-modules/pom.xml |  2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml |  4 ++--
 hadoop-cloud-storage-project/pom.xml  |  4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml  |  4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml|  4 ++--
 hadoop-common-project/hadoop-auth/pom.xml |  4 ++--
 hadoop-common-project/hadoop-common/pom.xml   |  4 ++--
 hadoop-common-project/hadoop-kms/pom.xml  |  4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml  |  4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml  |  4 ++--
 hadoop-common-project/pom.xml |  4 ++--
 hadoop-dist/pom.xml   |  4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml|  4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml|  4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml |  4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml   |  4 ++--
 hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml   |  4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml   |  4 ++--
 hadoop-hdfs-project/pom.xml   |  4 ++--
 .../hadoop-mapreduce-client-app/pom.xml   |  4 ++--
 .../hadoop-mapreduce-client-common/pom.xml|  4 ++--
 .../hadoop-mapreduce-client-core/pom.xml  |  4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml|  4 ++--
 .../hadoop-mapreduce-client-hs/pom.xml|  4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml |  4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml|  4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml   |  4 ++--
 .../hadoop-mapreduce-client-uploader/pom.xml  |  4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml  |  4 ++--
 .../hadoop-mapreduce-examples/pom.xml |  4 ++--
 hadoop-mapreduce-project/pom.xml  |  4 ++--
 hadoop-maven-plugins/pom.xml  |  2 +-
 hadoop-minicluster/pom.xml|  4 ++--
 hadoop-project-dist/pom.xml   |  4 ++--
 hadoop-project/pom.xml|  6 +++---
 hadoop-tools/hadoop-aliyun/pom.xml|  2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml  |  4 ++--
 hadoop-tools/hadoop-archives/pom.xml  |  4 ++--
 hadoop-tools/hadoop-aws/pom.xml   |  4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml|  2 +-
 hadoop-tools/hadoop-azure/pom.xml |  2 +-
 hadoop-tools/hadoop-datajoin/pom.xml  |  4 ++--
 hadoop-tools/hadoop-distcp/pom.xml|  4 ++--
 hadoop-tools/hadoop-extras/pom.xml|  4 ++--
 hadoop-tools/hadoop-fs2img/pom.xml|  4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml   |  4 ++--
 hadoop-tools/hadoop-kafka/pom.xml |  4 ++--
 hadoop-tools/hadoop-openstack/pom.xml |  4 ++--
 hadoop-tools/hadoop-pipes/pom.xml |  4 ++--
 hadoop-tools/hadoop-resourceestimator/pom.xml |  2 +-
 hadoop-tools/hadoop-rumen/pom.xml |  4 ++--
 hadoop-tools/hadoop-sls/pom.xml   |  4 ++--
 hadoop-tools/hadoop-streaming/pom.xml |  4 ++--
 hadoop-tools/hadoop-tools-dist/pom.xml|  4 ++--
 hadoop-tools/pom.xml  |  4 ++--
 

[1/2] hadoop git commit: Preparing for 3.2.0 release

2018-10-18 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 30b65ea18 -> bde4fd5ed


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bde4fd5e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml
index cb0d6e2..024927e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml
@@ -22,13 +22,13 @@
   <parent>
     <artifactId>hadoop-yarn-server-timelineservice-hbase</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.2.0-SNAPSHOT</version>
+    <version>3.2.1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
 
   <artifactId>hadoop-yarn-server-timelineservice-hbase-common</artifactId>
   <name>Apache Hadoop YARN TimelineService HBase Common</name>
-  <version>3.2.0-SNAPSHOT</version>
+  <version>3.2.1-SNAPSHOT</version>
 
   <properties>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bde4fd5e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml
index 79fc861..403461f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml
@@ -22,13 +22,13 @@
   <parent>
     <artifactId>hadoop-yarn-server-timelineservice-hbase-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.2.0-SNAPSHOT</version>
+    <version>3.2.1-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-timelineservice-hbase-server-1</artifactId>
   <name>Apache Hadoop YARN TimelineService HBase Server 1.2</name>
-  <version>3.2.0-SNAPSHOT</version>
+  <version>3.2.1-SNAPSHOT</version>
 
   <properties>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bde4fd5e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
index c9fe5d0..564e79c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
@@ -22,13 +22,13 @@
   <parent>
     <artifactId>hadoop-yarn-server-timelineservice-hbase-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.2.0-SNAPSHOT</version>
+    <version>3.2.1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
 
   <artifactId>hadoop-yarn-server-timelineservice-hbase-server-2</artifactId>
   <name>Apache Hadoop YARN TimelineService HBase Server 2.0</name>
-  <version>3.2.0-SNAPSHOT</version>
+  <version>3.2.1-SNAPSHOT</version>
 
   <properties>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bde4fd5e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml
index dd2928c..df006f3 100644
--- 

[hadoop] Git Push Summary

2018-10-18 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2.0 [created] 30b65ea18




hadoop git commit: YARN-8868. Set HTTPOnly attribute to Cookie. Contributed by Chandni Singh.

2018-10-18 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3ed716330 -> 2202e00ba


YARN-8868. Set HTTPOnly attribute to Cookie. Contributed by Chandni Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2202e00b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2202e00b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2202e00b

Branch: refs/heads/trunk
Commit: 2202e00ba8a44ad70f0a90e6c519257e3ae56a36
Parents: 3ed7163
Author: Sunil G 
Authored: Thu Oct 18 15:22:50 2018 +0530
Committer: Sunil G 
Committed: Thu Oct 18 15:23:20 2018 +0530

--
 .../java/org/apache/hadoop/yarn/webapp/Dispatcher.java  | 12 +---
 .../hadoop/yarn/server/webproxy/WebAppProxyServlet.java |  1 +
 2 files changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2202e00b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
index d519dbb..4d54b6a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
@@ -179,10 +179,10 @@ public class Dispatcher extends HttpServlet {
 String st = devMode ? ErrorPage.toStackTrace(e, 1024 * 3) // spec: min 4KB
 : "See logs for stack trace";
 res.setStatus(res.SC_FOUND);
-Cookie cookie = new Cookie(STATUS_COOKIE, String.valueOf(500));
+Cookie cookie = createCookie(STATUS_COOKIE, String.valueOf(500));
 cookie.setPath(path);
 res.addCookie(cookie);
-cookie = new Cookie(ERROR_COOKIE, st);
+cookie = createCookie(ERROR_COOKIE, st);
 cookie.setPath(path);
 res.addCookie(cookie);
 res.setHeader("Location", path);
@@ -196,7 +196,7 @@ public class Dispatcher extends HttpServlet {
   public static void removeCookie(HttpServletResponse res, String name,
   String path) {
 LOG.debug("removing cookie {} on {}", name, path);
-Cookie c = new Cookie(name, "");
+Cookie c = createCookie(name, "");
 c.setMaxAge(0);
 c.setPath(path);
 res.addCookie(c);
@@ -249,4 +249,10 @@ public class Dispatcher extends HttpServlet {
   }
 }, 18); // enough time for the last local request to complete
   }
+
+  private static Cookie createCookie(String name, String val) {
+Cookie cookie = new Cookie(name, val);
+cookie.setHttpOnly(true);
+return cookie;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2202e00b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
index 2dc3a46..c804f72 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
@@ -322,6 +322,7 @@ public class WebAppProxyServlet extends HttpServlet {
   
   private static Cookie makeCheckCookie(ApplicationId id, boolean isSet) {
 Cookie c = new Cookie(getCheckCookieName(id),String.valueOf(isSet));
+c.setHttpOnly(true);
 c.setPath(ProxyUriUtils.getPath(id));
 c.setMaxAge(60 * 60 * 2); //2 hours in seconds
 return c;
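Both changes funnel cookie creation through one place, the new createCookie helper and the patched makeCheckCookie, that sets HttpOnly so page scripts cannot read the cookie via document.cookie (a standard XSS mitigation). A self-contained sketch of the same helper pattern, servlet API only, with an illustrative class name:

import javax.servlet.http.Cookie;

final class HttpOnlyCookieSketch {
  // One factory method so every cookie gets the security attribute.
  static Cookie create(String name, String value, String path) {
    Cookie c = new Cookie(name, value);
    c.setHttpOnly(true);  // hidden from client-side JavaScript
    c.setPath(path);
    return c;
  }
}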

