hadoop git commit: YARN-4805. Don't go through all schedulers in ParameterizedTestBase. (kasha)

2016-03-26 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ae9831498 -> 4212f2e2b


YARN-4805. Don't go through all schedulers in ParameterizedTestBase. (kasha)

(cherry picked from commit 49ff54c8609431f5d1a6bee84df3b52fec445379)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4212f2e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4212f2e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4212f2e2

Branch: refs/heads/branch-2
Commit: 4212f2e2bfc177ef8afbfb42eb09a09d23cdb0dc
Parents: ae98314
Author: Karthik Kambatla 
Authored: Sat Mar 26 21:45:13 2016 -0700
Committer: Karthik Kambatla 
Committed: Sat Mar 26 21:45:29 2016 -0700

--
 .../ParameterizedSchedulerTestBase.java | 36 ++--
 .../yarn/server/resourcemanager/TestRM.java |  4 ---
 .../server/resourcemanager/TestRMRestart.java   |  4 ---
 .../TestWorkPreservingRMRestart.java|  9 -
 .../reservation/TestReservationSystem.java  |  4 ---
 .../scheduler/TestAbstractYarnScheduler.java|  6 
 .../security/TestClientToAMTokens.java  |  4 ---
 7 files changed, 11 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4212f2e2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
index b099836..3f2bfc9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
@@ -23,19 +23,13 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration;
 
-
 import org.junit.Before;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.io.PrintWriter;
-import java.util.Arrays;
-import java.util.Collection;
 
-@RunWith(Parameterized.class)
 public abstract class ParameterizedSchedulerTestBase {
   protected final static String TEST_DIR =
       new File(System.getProperty("test.build.data", "/tmp")).getAbsolutePath();
@@ -49,31 +43,23 @@ public abstract class ParameterizedSchedulerTestBase {
     CAPACITY, FAIR
   }
 
-  public ParameterizedSchedulerTestBase(SchedulerType type) {
-    schedulerType = type;
-  }
-
   public YarnConfiguration getConf() {
     return conf;
   }
 
-  @Parameterized.Parameters
-  public static Collection getParameters() {
-    return Arrays.asList(new SchedulerType[][]{
-        {SchedulerType.CAPACITY}, {SchedulerType.FAIR}});
-  }
-
   @Before
-  public void configureScheduler() throws IOException {
+  public void configureScheduler() throws IOException, ClassNotFoundException {
     conf = new YarnConfiguration();
-    switch (schedulerType) {
-      case CAPACITY:
-        conf.set(YarnConfiguration.RM_SCHEDULER,
-            CapacityScheduler.class.getName());
-        break;
-      case FAIR:
-        configureFairScheduler(conf);
-        break;
+
+    Class schedulerClass =
+        conf.getClass(YarnConfiguration.RM_SCHEDULER,
+            Class.forName(YarnConfiguration.DEFAULT_RM_SCHEDULER));
+
+    if (schedulerClass == FairScheduler.class) {
+      schedulerType = SchedulerType.FAIR;
+      configureFairScheduler(conf);
+    } else if (schedulerClass == CapacityScheduler.class) {
+      schedulerType = SchedulerType.CAPACITY;
     }
   }
 

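The change above removes the JUnit Parameterized runner: instead of running every subclass against both schedulers, the base class now reads yarn.resourcemanager.scheduler.class from the test classpath configuration and derives schedulerType from it, so a subclass needs no constructor argument and no @Parameterized.Parameters. A minimal subclass under the new contract might look like the sketch below (the class and test names are hypothetical, not part of this patch):

import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Assert;
import org.junit.Test;

public class TestMySchedulerFeature extends ParameterizedSchedulerTestBase {
  @Test
  public void testSchedulerIsConfigured() {
    // The inherited @Before method (configureScheduler) has already built the
    // YarnConfiguration and resolved schedulerType from RM_SCHEDULER.
    Assert.assertNotNull(getConf().get(YarnConfiguration.RM_SCHEDULER,
        YarnConfiguration.DEFAULT_RM_SCHEDULER));
  }
}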
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4212f2e2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
--
diff --git 

hadoop git commit: YARN-4805. Don't go through all schedulers in ParameterizedTestBase. (kasha)

2016-03-26 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3a4ff7776 -> 49ff54c86


YARN-4805. Don't go through all schedulers in ParameterizedTestBase. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49ff54c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49ff54c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49ff54c8

Branch: refs/heads/trunk
Commit: 49ff54c8609431f5d1a6bee84df3b52fec445379
Parents: 3a4ff77
Author: Karthik Kambatla 
Authored: Sat Mar 26 21:45:13 2016 -0700
Committer: Karthik Kambatla 
Committed: Sat Mar 26 21:45:13 2016 -0700

--
 .../ParameterizedSchedulerTestBase.java | 36 ++--
 .../yarn/server/resourcemanager/TestRM.java |  4 ---
 .../server/resourcemanager/TestRMRestart.java   |  4 ---
 .../TestWorkPreservingRMRestart.java|  9 -
 .../reservation/TestReservationSystem.java  |  4 ---
 .../scheduler/TestAbstractYarnScheduler.java|  6 
 .../security/TestClientToAMTokens.java  |  4 ---
 7 files changed, 11 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49ff54c8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
index b099836..3f2bfc9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
@@ -23,19 +23,13 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration;
 
-
 import org.junit.Before;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.io.PrintWriter;
-import java.util.Arrays;
-import java.util.Collection;
 
-@RunWith(Parameterized.class)
 public abstract class ParameterizedSchedulerTestBase {
   protected final static String TEST_DIR =
       new File(System.getProperty("test.build.data", "/tmp")).getAbsolutePath();
@@ -49,31 +43,23 @@ public abstract class ParameterizedSchedulerTestBase {
     CAPACITY, FAIR
   }
 
-  public ParameterizedSchedulerTestBase(SchedulerType type) {
-    schedulerType = type;
-  }
-
   public YarnConfiguration getConf() {
     return conf;
   }
 
-  @Parameterized.Parameters
-  public static Collection getParameters() {
-    return Arrays.asList(new SchedulerType[][]{
-        {SchedulerType.CAPACITY}, {SchedulerType.FAIR}});
-  }
-
   @Before
-  public void configureScheduler() throws IOException {
+  public void configureScheduler() throws IOException, ClassNotFoundException {
     conf = new YarnConfiguration();
-    switch (schedulerType) {
-      case CAPACITY:
-        conf.set(YarnConfiguration.RM_SCHEDULER,
-            CapacityScheduler.class.getName());
-        break;
-      case FAIR:
-        configureFairScheduler(conf);
-        break;
+
+    Class schedulerClass =
+        conf.getClass(YarnConfiguration.RM_SCHEDULER,
+            Class.forName(YarnConfiguration.DEFAULT_RM_SCHEDULER));
+
+    if (schedulerClass == FairScheduler.class) {
+      schedulerType = SchedulerType.FAIR;
+      configureFairScheduler(conf);
+    } else if (schedulerClass == CapacityScheduler.class) {
+      schedulerType = SchedulerType.CAPACITY;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49ff54c8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
 

hadoop git commit: HDFS-9694. Make existing DFSClient#getFileChecksum() work for striped blocks. Contributed by Kai Zheng

2016-03-26 Thread umamahesh
Repository: hadoop
Updated Branches:
  refs/heads/trunk a337ceb74 -> 3a4ff7776


HDFS-9694. Make existing DFSClient#getFileChecksum() work for striped blocks. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a4ff777
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a4ff777
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a4ff777

Branch: refs/heads/trunk
Commit: 3a4ff7776e8fab6cc87932b9aa8fb48f7b69c720
Parents: a337ceb
Author: Uma Maheswara Rao G 
Authored: Sat Mar 26 19:58:09 2016 -0700
Committer: Uma Maheswara Rao G 
Committed: Sat Mar 26 19:58:09 2016 -0700

--
 .../dev-support/findbugsExcludeFile.xml |   1 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  11 +-
 .../apache/hadoop/hdfs/FileChecksumHelper.java  | 187 ++--
 .../hadoop/hdfs/protocol/StripedBlockInfo.java  |  61 
 .../datatransfer/DataTransferProtocol.java  |  16 +-
 .../hadoop/hdfs/protocol/datatransfer/Op.java   |   1 +
 .../hdfs/protocol/datatransfer/Sender.java  |  19 ++
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  42 ++-
 .../hadoop/hdfs/util/StripedBlockUtil.java  |  12 +
 .../src/main/proto/datatransfer.proto   |   9 +-
 .../hdfs/protocol/datatransfer/Receiver.java|  28 ++
 .../server/datanode/BlockChecksumHelper.java| 284 +++
 .../hdfs/server/datanode/DataXceiver.java   |  43 +++
 .../apache/hadoop/hdfs/TestFileChecksum.java| 247 
 14 files changed, 878 insertions(+), 83 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a4ff777/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 2c3329e..9d6ab9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -8,6 +8,7 @@
   
   
   
+  
   
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a4ff777/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 3506d3a..88bd219 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1704,7 +1704,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 
   /**
    * Get the checksum of the whole file or a range of the file. Note that the
-   * range always starts from the beginning of the file.
+   * range always starts from the beginning of the file. The file can be
+   * in replicated form, or striped mode. It can be used to checksum and compare
+   * two replicated files, or two striped files, but not applicable for two
+   * files of different block layout forms.
    * @param src The file path
    * @param length the length of the range, i.e., the range is [0, length]
    * @return The checksum
@@ -1717,7 +1720,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 
     LocatedBlocks blockLocations = getBlockLocations(src, length);
 
-    FileChecksumHelper.FileChecksumComputer maker =
+    FileChecksumHelper.FileChecksumComputer maker;
+    ErasureCodingPolicy ecPolicy = blockLocations.getErasureCodingPolicy();
+    maker = ecPolicy != null ?
+        new FileChecksumHelper.StripedFileNonStripedChecksumComputer(src,
+            length, blockLocations, namenode, this, ecPolicy) :
         new FileChecksumHelper.ReplicatedFileChecksumComputer(src, length,
             blockLocations, namenode, this);
 

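With this change the checksum computer is chosen per file from its erasure coding policy, so callers keep using the same client API for both layouts. A minimal usage sketch (hypothetical paths; assumes a reachable HDFS and a default client configuration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ChecksumCompare {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Works for a replicated file and, after this patch, a striped one too.
    FileChecksum c1 = fs.getFileChecksum(new Path("/data/file-a"));
    FileChecksum c2 = fs.getFileChecksum(new Path("/data/file-b"));
    // Per the javadoc above, comparisons are only meaningful between files
    // of the same block layout (replicated vs. striped).
    System.out.println("checksums match: " + c1.equals(c2));
  }
}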
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a4ff777/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
index d15db9f..dfd9393 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java

hadoop git commit: HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS classes at runtime. Contributed by Sean Busbey. (cherry picked from commit 3fe61e0bb0d025a6acbb754027f73f3084b2f4d1)

2016-03-26 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 02e81caa2 -> 17dbf82f5


HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS classes at runtime. Contributed by Sean Busbey.
(cherry picked from commit 3fe61e0bb0d025a6acbb754027f73f3084b2f4d1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17dbf82f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17dbf82f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17dbf82f

Branch: refs/heads/branch-2.7
Commit: 17dbf82f54a14926b7d0dc5d8eb1b35f69b70d3c
Parents: 02e81ca
Author: Aaron T. Myers 
Authored: Thu Apr 9 09:40:08 2015 -0700
Committer: Haohui Mai 
Committed: Sat Mar 26 17:54:09 2016 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  8 +++--
 .../apache/hadoop/hdfs/TestDFSConfigKeys.java   | 37 
 3 files changed, 46 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17dbf82f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f01e697..838dd5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -167,6 +167,9 @@ Release 2.7.2 - 2016-01-25
 
     HDFS-9574. Reduce client failures during datanode restart (kihwal)
 
+    HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS
+    classes at runtime. (Sean Busbey via atm)
+
   OPTIMIZATIONS
 
     HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17dbf82f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 57d9d59..e73b9da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
-import org.apache.hadoop.hdfs.web.AuthFilter;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -234,7 +233,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY = "dfs.namenode.replication.max-streams-hard-limit";
   public static final int     DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT = 4;
   public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY = "dfs.web.authentication.filter";
-  public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT = AuthFilter.class.getName();
+  /* Phrased as below to avoid javac inlining as a constant, to match the behavior when
+     this was AuthFilter.class.getName(). Note that if you change the import for AuthFilter, you
+     need to update the literal here as well as TestDFSConfigKeys.
+   */
+  public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT =
+      "org.apache.hadoop.hdfs.web.AuthFilter".toString();
   public static final String  DFS_WEBHDFS_ENABLED_KEY = "dfs.webhdfs.enabled";
   public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = true;
   public static final String  DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";

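The comment above relies on Java's constant-expression rules: a static final String initialized from a literal is a compile-time constant, and javac copies its value into every class file that references it, so a stale value survives until the referrers are recompiled. Appending .toString() makes the initializer a non-constant expression, so referencing classes read the field at run time instead. A minimal sketch of the difference (class and value names are hypothetical, not from the patch):

class Defaults {
  // Compile-time constant: javac inlines this value into referencing classes.
  static final String INLINED = "org.example.AuthFilter";
  // Non-constant expression: referencing classes emit a field read and see
  // the value current at run time, which is what the patch wants.
  static final String NOT_INLINED = "org.example.AuthFilter".toString();
}

class Referrer {
  public static void main(String[] args) {
    System.out.println(Defaults.INLINED);     // baked in at compile time
    System.out.println(Defaults.NOT_INLINED); // resolved at run time
  }
}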
http://git-wip-us.apache.org/repos/asf/hadoop/blob/17dbf82f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
new file mode 100644
index 0000000..c7df891
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this 

[Hadoop Wiki] Update of "ZooKeeper/HowToContribute" by PatrickHunt

2016-03-26 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on "Hadoop Wiki" for change 
notification.

The "ZooKeeper/HowToContribute" page has been changed by PatrickHunt:
https://wiki.apache.org/hadoop/ZooKeeper/HowToContribute?action=diff&rev1=10&rev2=11

+ = This page is deprecated - please see our new home at https://cwiki.apache.org/confluence/display/ZOOKEEPER =
+ 
  = How to Contribute to  ZooKeeper =
  
  This page describes the mechanics of ''how'' to contribute software to  
ZooKeeper.  For ideas about ''what'' you might contribute, please see the 
[[ZooKeeper/ProjectSuggestions| ProjectSuggestions page]].


[Hadoop Wiki] Update of "ZooKeeper" by PatrickHunt

2016-03-26 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on "Hadoop Wiki" for change 
notification.

The "ZooKeeper" page has been changed by PatrickHunt:
https://wiki.apache.org/hadoop/ZooKeeper?action=diff&rev1=29&rev2=30

+ = This page is deprecated - please see our new home at https://cwiki.apache.org/confluence/display/ZOOKEEPER =
+ 
+ 
  == General Information ==
  ZooKeeper: Because coordinating distributed systems is a Zoo
  


[1/2] hadoop git commit: HDFS-10195. Ozone: Add container persistence. Contributed by Anu Engineer.

2016-03-26 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 2dc48b7f1 -> 643c5e5bd


http://git-wip-us.apache.org/repos/asf/hadoop/blob/643c5e5b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
new file mode 100644
index 0000000..3b498e2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -0,0 +1,256 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.impl;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.common.utils.LevelDBStore;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URL;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.container.ContainerTestHelper
+    .createSingleNodePipeline;
+import static org.junit.Assert.fail;
+
+/**
+ * Simple tests to verify that container persistence works as expected.
+ */
+public class TestContainerPersistence {
+
+  static String path;
+  static ContainerManagerImpl containerManager;
+  static OzoneConfiguration conf;
+  static FsDatasetSpi fsDataSet;
+  static MiniDFSCluster cluster;
+  static List<Path> pathLists = new LinkedList<>();
+
+  @BeforeClass
+  public static void init() throws IOException {
+    conf = new OzoneConfiguration();
+    URL p = conf.getClass().getResource("");
+    path = p.getPath().concat(
+        TestContainerPersistence.class.getSimpleName());
+    path += conf.getTrimmed(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT,
+        OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT_DEFAULT);
+    conf.set(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT, path);
+    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, true);
+    conf.set(OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY, "local");
+
+    File containerDir = new File(path);
+    if (containerDir.exists()) {
+      FileUtils.deleteDirectory(new File(path));
+    }
+
+    Assert.assertTrue(containerDir.mkdirs());
+
+    cluster = new MiniDFSCluster.Builder(conf).build();
+    cluster.waitActive();
+    fsDataSet = cluster.getDataNodes().get(0).getFSDataset();
+    containerManager = new ContainerManagerImpl();
+  }
+
+  @AfterClass
+  public static void shutdown() throws IOException {
+    cluster.shutdown();
+    FileUtils.deleteDirectory(new File(path));
+  }
+
+  @Before
+  public void setupPaths() throws IOException {
+    if (!new File(path).exists()) {
+      new File(path).mkdirs();
+    }
+    pathLists.clear();
+    containerManager.getContainerMap().clear();
+    pathLists.add(Paths.get(path));
+    containerManager.init(conf, pathLists, fsDataSet);
+  }
+
+  @After
+  public void cleanupDir() throws IOException {
+    FileUtils.deleteDirectory(new File(path));
+  }
+
+  @Test
+  public void testCreateContainer() throws Exception {
+
+    String containerName = OzoneUtils.getRequestID();
+    ContainerData data = new ContainerData(containerName);
+    data.addMetadata("VOLUME", "shire");
+    data.addMetadata("owner)", "bilbo");
+

[2/2] hadoop git commit: HDFS-10195. Ozone: Add container persistence. Contributed by Anu Engineer.

2016-03-26 Thread cnauroth
HDFS-10195. Ozone: Add container persistence. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/643c5e5b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/643c5e5b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/643c5e5b

Branch: refs/heads/HDFS-7240
Commit: 643c5e5bdc2c7c33b796a006e9643fc196c5d573
Parents: 2dc48b7
Author: Chris Nauroth 
Authored: Sat Mar 26 11:40:00 2016 -0700
Committer: Chris Nauroth 
Committed: Sat Mar 26 11:40:00 2016 -0700

--
 .../hadoop/hdfs/server/datanode/DataNode.java   |  28 +
 .../apache/hadoop/ozone/OzoneConfigKeys.java|   3 +
 .../org/apache/hadoop/ozone/OzoneConsts.java|  79 +++
 .../container/common/helpers/ContainerData.java |  47 +-
 .../common/helpers/ContainerUtils.java  | 144 ++
 .../impl/ContainerLocationManagerImpl.java  | 113 
 .../common/impl/ContainerManagerImpl.java   | 514 +++
 .../interfaces/ContainerLocationManager.java|  44 ++
 .../common/interfaces/ContainerManager.java |  33 +-
 .../common/transport/client/XceiverClient.java  |   6 +-
 .../common/transport/server/XceiverServer.java  |   4 +-
 .../container/common/utils/LevelDBStore.java| 124 +
 .../container/ozoneimpl/OzoneContainer.java | 121 +
 .../ozone/container/ozoneimpl/package-info.java |  21 +
 .../hadoop/ozone/web/client/OzoneBucket.java|   2 +-
 .../hadoop/ozone/web/client/OzoneVolume.java|   2 +-
 .../hadoop/ozone/web/handlers/BucketArgs.java   |   2 +-
 .../ozone/web/handlers/BucketHandler.java   |   4 +-
 .../web/handlers/BucketProcessTemplate.java |  11 +-
 .../ozone/web/handlers/KeyProcessTemplate.java  |   9 +-
 .../ozone/web/handlers/VolumeHandler.java   |   2 +-
 .../web/handlers/VolumeProcessTemplate.java |   8 +-
 .../web/localstorage/OzoneLevelDBStore.java |  90 
 .../web/localstorage/OzoneMetadataManager.java  |  11 +-
 .../hadoop/ozone/web/request/OzoneAcl.java  |   2 +-
 .../hadoop/ozone/web/response/BucketInfo.java   |   2 +-
 .../hadoop/ozone/web/userauth/Simple.java   |   2 +-
 .../hadoop/ozone/web/utils/OzoneConsts.java |  65 ---
 .../hadoop/ozone/web/utils/OzoneUtils.java  |   1 +
 .../main/proto/DatanodeContainerProtocol.proto  |   8 +-
 .../common/impl/TestContainerPersistence.java   | 256 +
 .../container/ozoneimpl/TestOzoneContainer.java | 118 +
 .../apache/hadoop/ozone/web/TestBucketInfo.java |   2 +-
 .../hadoop/ozone/web/TestOzoneVolumes.java  |   2 +-
 .../hadoop/ozone/web/TestOzoneWebAccess.java|   3 +-
 .../hadoop/ozone/web/client/TestVolume.java |   3 +-
 36 files changed, 1679 insertions(+), 207 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/643c5e5b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index cbbb9a6..a5d5015 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -188,6 +188,8 @@ import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SaslPropertiesResolver;
 import org.apache.hadoop.security.SecurityUtil;
@@ -365,6 +367,7 @@ public class DataNode extends ReconfigurableBase
   private final String confVersion;
   private final long maxNumberOfBlocksToLog;
   private final boolean pipelineSupportECN;
+  private final boolean ozoneEnabled;
 
   private final List<String> usersWithLocalPathAccess;
   private final boolean connectToDnViaHostname;
@@ -387,6 +390,7 @@ public class DataNode extends ReconfigurableBase
   private static final int NUM_CORES = Runtime.getRuntime()
       .availableProcessors();
   private static final double CONGESTION_RATIO = 1.5;
+  private OzoneContainer ozoneServer;
 
   private static Tracer createTracer(Configuration conf) {
     return new Tracer.Builder("DataNode").
@@ -417,6 +421,7 @@
     this.connectToDnViaHostname = false;
     this.blockScanner = new BlockScanner(this, conf);
     this.pipelineSupportECN = 

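Only the new DataNode fields are visible in the hunk above, but the configuration keys that drive them appear in TestContainerPersistence from part 1 of this commit. A hedged sketch of enabling the object store on an embedded test cluster, using only those visible keys (how DataNode itself wires up OzoneContainer is not shown in the hunk and is not assumed here):

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConfiguration;

public class OzoneEnabledCluster {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Keys taken verbatim from TestContainerPersistence in part 1.
    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, true);
    conf.set(OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY, "local");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    cluster.shutdown();
  }
}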
hadoop git commit: Revert "HDFS-9694. Make existing DFSClient#getFileChecksum() work for striped blocks. Contributed by Kai Zheng"

2016-03-26 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/trunk e5ff0ea7b -> a337ceb74


Revert "HDFS-9694. Make existing DFSClient#getFileChecksum() work for striped 
blocks. Contributed by Kai Zheng"

This reverts commit e5ff0ea7ba087984262f1f27200ae5bb40d9b838.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a337ceb7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a337ceb7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a337ceb7

Branch: refs/heads/trunk
Commit: a337ceb74e984991dbf976236d2e785cf5921b16
Parents: e5ff0ea
Author: Arpit Agarwal 
Authored: Sat Mar 26 09:20:01 2016 -0700
Committer: Arpit Agarwal 
Committed: Sat Mar 26 09:20:01 2016 -0700

--
 .../dev-support/findbugsExcludeFile.xml |   1 -
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  11 +-
 .../apache/hadoop/hdfs/FileChecksumHelper.java  | 187 ++--
 .../datatransfer/DataTransferProtocol.java  |  16 +-
 .../hadoop/hdfs/protocol/datatransfer/Op.java   |   1 -
 .../hdfs/protocol/datatransfer/Sender.java  |  19 --
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  42 +--
 .../hadoop/hdfs/util/StripedBlockUtil.java  |  12 -
 .../src/main/proto/datatransfer.proto   |   9 +-
 .../hdfs/protocol/datatransfer/Receiver.java|  28 --
 .../server/datanode/BlockChecksumHelper.java| 284 ---
 .../hdfs/server/datanode/DataXceiver.java   |  43 ---
 12 files changed, 83 insertions(+), 570 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a337ceb7/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 9d6ab9a..2c3329e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -8,7 +8,6 @@
   
   
   
-  
   
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a337ceb7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 88bd219..3506d3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1704,10 +1704,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 
   /**
    * Get the checksum of the whole file or a range of the file. Note that the
-   * range always starts from the beginning of the file. The file can be
-   * in replicated form, or striped mode. It can be used to checksum and compare
-   * two replicated files, or two striped files, but not applicable for two
-   * files of different block layout forms.
+   * range always starts from the beginning of the file.
    * @param src The file path
    * @param length the length of the range, i.e., the range is [0, length]
    * @return The checksum
@@ -1720,11 +1717,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 
     LocatedBlocks blockLocations = getBlockLocations(src, length);
 
-    FileChecksumHelper.FileChecksumComputer maker;
-    ErasureCodingPolicy ecPolicy = blockLocations.getErasureCodingPolicy();
-    maker = ecPolicy != null ?
-        new FileChecksumHelper.StripedFileNonStripedChecksumComputer(src,
-            length, blockLocations, namenode, this, ecPolicy) :
+    FileChecksumHelper.FileChecksumComputer maker =
         new FileChecksumHelper.ReplicatedFileChecksumComputer(src, length,
             blockLocations, namenode, this);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a337ceb7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
index dfd9393..d15db9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
@@ -22,13 +22,10 

hadoop git commit: HDFS-9694. Make existing DFSClient#getFileChecksum() work for striped blocks. Contributed by Kai Zheng

2016-03-26 Thread umamahesh
Repository: hadoop
Updated Branches:
  refs/heads/trunk fde8ac5d8 -> e5ff0ea7b


HDFS-9694. Make existing DFSClient#getFileChecksum() work for striped blocks. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5ff0ea7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5ff0ea7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5ff0ea7

Branch: refs/heads/trunk
Commit: e5ff0ea7ba087984262f1f27200ae5bb40d9b838
Parents: fde8ac5
Author: Uma Maheswara Rao G 
Authored: Sat Mar 26 00:52:50 2016 -0700
Committer: Uma Maheswara Rao G 
Committed: Sat Mar 26 00:52:50 2016 -0700

--
 .../dev-support/findbugsExcludeFile.xml |   1 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  11 +-
 .../apache/hadoop/hdfs/FileChecksumHelper.java  | 187 ++--
 .../datatransfer/DataTransferProtocol.java  |  16 +-
 .../hadoop/hdfs/protocol/datatransfer/Op.java   |   1 +
 .../hdfs/protocol/datatransfer/Sender.java  |  19 ++
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  42 ++-
 .../hadoop/hdfs/util/StripedBlockUtil.java  |  12 +
 .../src/main/proto/datatransfer.proto   |   9 +-
 .../hdfs/protocol/datatransfer/Receiver.java|  28 ++
 .../server/datanode/BlockChecksumHelper.java| 284 +++
 .../hdfs/server/datanode/DataXceiver.java   |  43 +++
 12 files changed, 570 insertions(+), 83 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5ff0ea7/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 2c3329e..9d6ab9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -8,6 +8,7 @@
   
   
   
+  
   
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5ff0ea7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 3506d3a..88bd219 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1704,7 +1704,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 
   /**
    * Get the checksum of the whole file or a range of the file. Note that the
-   * range always starts from the beginning of the file.
+   * range always starts from the beginning of the file. The file can be
+   * in replicated form, or striped mode. It can be used to checksum and compare
+   * two replicated files, or two striped files, but not applicable for two
+   * files of different block layout forms.
    * @param src The file path
    * @param length the length of the range, i.e., the range is [0, length]
    * @return The checksum
@@ -1717,7 +1720,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 
     LocatedBlocks blockLocations = getBlockLocations(src, length);
 
-    FileChecksumHelper.FileChecksumComputer maker =
+    FileChecksumHelper.FileChecksumComputer maker;
+    ErasureCodingPolicy ecPolicy = blockLocations.getErasureCodingPolicy();
+    maker = ecPolicy != null ?
+        new FileChecksumHelper.StripedFileNonStripedChecksumComputer(src,
+            length, blockLocations, namenode, this, ecPolicy) :
         new FileChecksumHelper.ReplicatedFileChecksumComputer(src, length,
             blockLocations, namenode, this);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5ff0ea7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
index d15db9f..dfd9393 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
@@ -22,10 +22,13 @@ import