hadoop git commit: HADOOP-14271. Correct spelling of 'occurred' and variants. Contributed by Yeliang Cang

2017-04-03 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5faa949b7 -> 6eba79232


HADOOP-14271. Correct spelling of 'occurred' and variants. Contributed by 
Yeliang Cang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6eba7923
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6eba7923
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6eba7923

Branch: refs/heads/trunk
Commit: 6eba79232f36b36e0196163adc8fe4219a6b6bf9
Parents: 5faa949
Author: Chris Douglas 
Authored: Mon Apr 3 20:13:14 2017 -0700
Committer: Chris Douglas 
Committed: Mon Apr 3 20:13:14 2017 -0700

--
 .../org/apache/hadoop/util/Progressable.java|  2 +-
 .../apache/hadoop/util/UTF8ByteArrayUtils.java  |  4 +--
 .../src/main/native/gtest/gtest-all.cc  |  2 +-
 .../src/main/native/gtest/include/gtest/gtest.h |  2 +-
 .../hadoop/test/MultithreadedTestUtil.java  |  2 +-
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  2 +-
 .../hadoop/hdfs/DFSStripedInputStream.java  |  2 +-
 .../hadoop/fs/http/server/FSOperations.java | 34 ++--
 .../datanode/fsdataset/impl/BlockPoolSlice.java |  2 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  2 +-
 .../mapreduce/lib/jobcontrol/JobControl.java|  2 +-
 .../hadoop/fs/azure/BlockBlobAppendStream.java  |  2 +-
 .../hadoop/streaming/StreamKeyValUtil.java  |  8 ++---
 .../distributedshell/TestDistributedShell.java  | 30 -
 .../launcher/TestContainerLaunch.java   | 12 +++
 .../scheduler/capacity/TestParentQueue.java | 18 +--
 16 files changed, 63 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progressable.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progressable.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progressable.java
index 495ca82..201ee5c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progressable.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progressable.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  * to explicitly report progress to the Hadoop framework. This is especially
  * important for operations which take significant amount of time since,
  * in-lieu of the reported progress, the framework has to assume that an error
- * has occured and time-out the operation.
+ * has occurred and time-out the operation.
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eba7923/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/UTF8ByteArrayUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/UTF8ByteArrayUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/UTF8ByteArrayUtils.java
index 2a804c6..069494f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/UTF8ByteArrayUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/UTF8ByteArrayUtils.java
@@ -30,7 +30,7 @@ public class UTF8ByteArrayUtils {
* @param start starting offset
* @param end ending position
* @param b the byte to find
-   * @return position that first byte occures otherwise -1
+   * @return position that first byte occurs, otherwise -1
*/
   public static int findByte(byte [] utf, int start, int end, byte b) {
 for(int i=start; i

hadoop git commit: HDFS-11566. Ozone: Document missing metrics for container operations. Contributed by Yiqun Lin.

2017-04-03 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 68eab679e -> ec4d25fc5


HDFS-11566. Ozone: Document missing metrics for container operations. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec4d25fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec4d25fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec4d25fc

Branch: refs/heads/HDFS-7240
Commit: ec4d25fc537bceae7c9021c313681d5db2f1b753
Parents: 68eab67
Author: Anu Engineer 
Authored: Mon Apr 3 15:01:23 2017 -0700
Committer: Anu Engineer 
Committed: Mon Apr 3 15:01:23 2017 -0700

--
 .../src/site/markdown/Ozonemetrics.md   | 89 
 hadoop-project/src/site/site.xml|  1 +
 2 files changed, 90 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec4d25fc/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/Ozonemetrics.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/Ozonemetrics.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/Ozonemetrics.md
new file mode 100644
index 000..4822607
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/Ozonemetrics.md
@@ -0,0 +1,89 @@
+
+
+
+
+HDFS Ozone Metrics
+===
+
+
+
+Overview
+
+
+The container metrics that are used in HDFS Ozone.
+
+### Storage Container Metrics
+
+The metrics for various storage container operations in HDFS Ozone.
+
+Storage container is an optional service that can be enabled by setting
+'ozone.enabled' to true.
+These metrics are only available when ozone is enabled.
+
+Storage Container Metrics maintains a set of generic metrics for all
+container RPC calls that can be made to a datanode/container.
+
+Along with the total number of RPC calls, containers maintain a set of metrics
+for each RPC call. Following is the set of counters maintained for each RPC
+operation.
+
+*Total number of operations* - We maintain an array which counts how
+many times a specific operation has been performed.
+Eg.`NumCreateContainer` tells us how many times create container has been
+invoked on this datanode.
+
+*Number of bytes involved in a specific command* - This is an array that is
+maintained for all operations, but makes sense only for read and write
+operations.
+
+While it is possible to read the bytes in update container, it really makes
+no sense, since no data stream is involved. Users are advised to use this
+metric only when it makes sense. Eg. `BytesReadChunk` -- Tells us how
+many bytes have been read from this data using Read Chunk operation.
+
+*Average Latency of each operation* - The average latency of the operation.
+Eg. `LatencyCreateContainerAvgTime` - This tells us the average latency of
+Create Container.
+
+*Quantiles for each of these operations* - The 50/75/90/95/99th percentile
+of these operations. Eg. `CreateContainerNanos60s50thPercentileLatency` --
+gives latency of the create container operations at the 50th percentile latency
+(1 minute granularity). We report 50th, 75th, 90th, 95th and 99th percentile
+for all RPCs.
+
+So this leads to the containers reporting these counters for each of these
+RPC operations.
+
+| Name | Description |
+|: |: |
+| `NumOps` | Total number of container operations |
+| `CreateContainer` | Create container operation |
+| `ReadContainer` | Read container operation |
+| `UpdateContainer` | Update container operations |
+| `DeleteContainer` | Delete container operations |
+| `ListContainer` | List container operations |
+| `PutKey` | Put key operations |
+| `GetKey` | Get key operations |
+| `DeleteKey` | Delete key operations |
+| `ListKey` | List key operations |
+| `ReadChunk` | Read chunk operations |
+| `DeleteChunk` | Delete chunk operations |
+| `WriteChunk` | Write chunk operations|
+| `ListChunk` | List chunk operations |
+| `CompactChunk` | Compact chunk operations |
+| `PutSmallFile` | Put small file operations |
+| `GetSmallFile` | Get small file operations |
+| `CloseContainer` | Close container operations |
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec4d25fc/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index ae3aef5..bf1fd40 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -180,6 +180,7 @@
   
   
   
+  
 
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/50] [abbrv] hadoop git commit: YARN-6354. LeveldbRMStateStore can parse invalid keys when recovering reservations. Contributed by Jason Lowe

2017-04-03 Thread subru
YARN-6354. LeveldbRMStateStore can parse invalid keys when recovering 
reservations. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/318bfb01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/318bfb01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/318bfb01

Branch: refs/heads/YARN-2915
Commit: 318bfb01bc6793da09e32e9cc292eb63224b6ca2
Parents: 4d1fac5
Author: Eric Payne 
Authored: Fri Mar 31 12:30:35 2017 -0500
Committer: Eric Payne 
Committed: Fri Mar 31 12:30:35 2017 -0500

--
 .../recovery/LeveldbRMStateStore.java|  8 
 .../recovery/TestLeveldbRMStateStore.java| 19 +++
 2 files changed, 23 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/318bfb01/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java
index 02f90dd..2ca53db 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java
@@ -214,6 +214,11 @@ public class LeveldbRMStateStore extends RMStateStore {
 return db == null;
   }
 
+  @VisibleForTesting
+  DB getDatabase() {
+return db;
+  }
+
   @Override
   protected Version loadVersion() throws Exception {
 Version version = null;
@@ -284,6 +289,9 @@ public class LeveldbRMStateStore extends RMStateStore {
   while (iter.hasNext()) {
 Entry entry = iter.next();
 String key = asString(entry.getKey());
+if (!key.startsWith(RM_RESERVATION_KEY_PREFIX)) {
+  break;
+}
 
 String planReservationString =
 key.substring(RM_RESERVATION_KEY_PREFIX.length());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/318bfb01/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java
index 4297e73..51adbe1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.records.Version;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.fusesource.leveldbjni.JniDBFactory;
 import org.iq80.leveldb.DB;
 import org.junit.After;
 import org.junit.Before;
@@ -125,17 +126,27 @@ public class TestLeveldbRMStateStore extends 
RMStateStoreTestBase {
   public void testCompactionCycle() throws Exception {
 final DB mockdb = mock(DB.class);
 conf.setLong(YarnConfiguration.RM_LEVELDB_COMPACTION_INTERVAL_SECS, 1);
-LeveldbRMStateStore store = new LeveldbRMStateStore() {
+stateStore = new LeveldbRMStateStore() {
   @Override
   protected DB openDatabase() throws Exception {
 return mockdb;
   }
 };
-store.init(conf);
-store.start();
+stateStore.init(conf);
+stateStore.start();
 verify(mockdb, timeout(1)).compactRange(
 (byte[]) isNull(), (byte[]) isNull());
-store.close();
+  }
+
+  @Test
+  public void 

[46/50] [abbrv] hadoop git commit: YARN-3671. Integrate Federation services with ResourceManager. Contributed by Subru Krishnan

2017-04-03 Thread subru
YARN-3671. Integrate Federation services with ResourceManager. Contributed by 
Subru Krishnan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81c94489
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81c94489
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81c94489

Branch: refs/heads/YARN-2915
Commit: 81c94489a6b5061c3afc13d2d52d424689d0432f
Parents: 683b08e
Author: Jian He 
Authored: Tue Aug 30 12:20:52 2016 +0800
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  11 +-
 .../yarn/conf/TestYarnConfigurationFields.java  |   4 +-
 .../failover/FederationProxyProviderUtil.java   |   2 +-
 .../FederationRMFailoverProxyProvider.java  |   4 +-
 ...ationMembershipStateStoreInputValidator.java |   7 +-
 .../TestFederationStateStoreInputValidator.java |  10 +-
 .../server/resourcemanager/ResourceManager.java |  26 ++
 .../FederationStateStoreHeartbeat.java  | 108 +++
 .../federation/FederationStateStoreService.java | 304 +++
 .../federation/package-info.java|  17 ++
 .../webapp/dao/ClusterMetricsInfo.java  |   5 +-
 .../TestFederationRMStateStoreService.java  | 170 +++
 12 files changed, 648 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81c94489/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 570ec4a..4e614f7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2541,9 +2541,6 @@ public class YarnConfiguration extends Configuration {
   FEDERATION_PREFIX + "failover.enabled";
   public static final boolean DEFAULT_FEDERATION_FAILOVER_ENABLED = true;
 
-  public static final String FEDERATION_SUBCLUSTER_ID =
-  FEDERATION_PREFIX + "sub-cluster.id";
-
   public static final String FEDERATION_STATESTORE_CLIENT_CLASS =
   FEDERATION_PREFIX + "state-store.class";
 
@@ -2556,6 +2553,14 @@ public class YarnConfiguration extends Configuration {
   // 5 minutes
   public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
 
+  public static final String FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
+  FEDERATION_PREFIX + "state-store.heartbeat-interval-secs";
+
+  // 5 minutes
+  public static final int
+  DEFAULT_FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
+  5 * 60;
+
   public static final String FEDERATION_MACHINE_LIST =
   FEDERATION_PREFIX + "machine-list";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81c94489/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index c4d8f38..5e0876f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -72,9 +72,9 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 configurationPropsToSkipCompare
 .add(YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS);
 configurationPropsToSkipCompare
-.add(YarnConfiguration.FEDERATION_SUBCLUSTER_ID);
-configurationPropsToSkipCompare
 .add(YarnConfiguration.FEDERATION_FAILOVER_ENABLED);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS);
 
 // Ignore blacklisting nodes for AM failures feature since it is still a
 // "work in progress"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81c94489/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationProxyProviderUtil.java

[09/50] [abbrv] hadoop git commit: HADOOP-14267. Make DistCpOptions immutable. Contributed by Mingliang Liu

2017-04-03 Thread subru
HADOOP-14267. Make DistCpOptions immutable. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/26172a94
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/26172a94
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/26172a94

Branch: refs/heads/YARN-2915
Commit: 26172a94d6431e70d7fe15d66be9a7e195f79f60
Parents: 73835c7
Author: Mingliang Liu 
Authored: Thu Jun 23 00:21:49 2016 -0700
Committer: Mingliang Liu 
Committed: Fri Mar 31 20:04:26 2017 -0700

--
 .../org/apache/hadoop/tools/CopyListing.java|  38 +-
 .../java/org/apache/hadoop/tools/DistCp.java| 106 +--
 .../org/apache/hadoop/tools/DistCpContext.java  | 198 
 .../apache/hadoop/tools/DistCpOptionSwitch.java |   2 +-
 .../org/apache/hadoop/tools/DistCpOptions.java  | 925 +--
 .../org/apache/hadoop/tools/DistCpSync.java |  42 +-
 .../hadoop/tools/FileBasedCopyListing.java  |  12 +-
 .../apache/hadoop/tools/GlobbedCopyListing.java |  17 +-
 .../org/apache/hadoop/tools/OptionsParser.java  | 299 ++
 .../apache/hadoop/tools/SimpleCopyListing.java  | 115 +--
 .../hadoop/tools/mapred/CopyCommitter.java  |  15 +-
 .../apache/hadoop/tools/util/DistCpUtils.java   |   8 +-
 .../apache/hadoop/tools/TestCopyListing.java|  51 +-
 .../apache/hadoop/tools/TestDistCpOptions.java  | 500 ++
 .../org/apache/hadoop/tools/TestDistCpSync.java |  68 +-
 .../hadoop/tools/TestDistCpSyncReverseBase.java |  44 +-
 .../apache/hadoop/tools/TestDistCpViewFs.java   |  10 +-
 .../hadoop/tools/TestFileBasedCopyListing.java  |   9 +-
 .../hadoop/tools/TestGlobbedCopyListing.java|  11 +-
 .../apache/hadoop/tools/TestIntegration.java|  20 +-
 .../apache/hadoop/tools/TestOptionsParser.java  |  81 +-
 .../contract/AbstractContractDistCpTest.java|   6 +-
 .../hadoop/tools/mapred/TestCopyCommitter.java  |  34 +-
 .../mapred/TestUniformSizeInputFormat.java  |  15 +-
 .../mapred/lib/TestDynamicInputFormat.java  |  17 +-
 25 files changed, 1574 insertions(+), 1069 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/26172a94/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
index 9ebf9d2..908b558 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
@@ -77,41 +77,41 @@ public abstract class CopyListing extends Configured {
* TARGET IS DIR: Key-"/file1", Value-FileStatus(/tmp/file1)  
*
* @param pathToListFile - Output file where the listing would be stored
-   * @param options - Input options to distcp
+   * @param distCpContext - distcp context associated with input options
* @throws IOException - Exception if any
*/
   public final void buildListing(Path pathToListFile,
- DistCpOptions options) throws IOException {
-validatePaths(options);
-doBuildListing(pathToListFile, options);
+  DistCpContext distCpContext) throws IOException {
+validatePaths(distCpContext);
+doBuildListing(pathToListFile, distCpContext);
 Configuration config = getConf();
 
 config.set(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, 
pathToListFile.toString());
 config.setLong(DistCpConstants.CONF_LABEL_TOTAL_BYTES_TO_BE_COPIED, 
getBytesToCopy());
 config.setLong(DistCpConstants.CONF_LABEL_TOTAL_NUMBER_OF_RECORDS, 
getNumberOfPaths());
 
-validateFinalListing(pathToListFile, options);
+validateFinalListing(pathToListFile, distCpContext);
 LOG.info("Number of paths in the copy list: " + this.getNumberOfPaths());
   }
 
   /**
* Validate input and output paths
*
-   * @param options - Input options
+   * @param distCpContext - Distcp context
* @throws InvalidInputException If inputs are invalid
* @throws IOException any Exception with FS
*/
-  protected abstract void validatePaths(DistCpOptions options)
+  protected abstract void validatePaths(DistCpContext distCpContext)
   throws IOException, InvalidInputException;
 
   /**
* The interface to be implemented by sub-classes, to create the 
source/target file listing.
* @param pathToListFile Path on HDFS where the listing file is written.
-   * @param options Input Options for DistCp (indicating source/target paths.)
+   * @param distCpContext - Distcp context
* @throws IOException Thrown on failure to create the listing file.
*/
   protected 

[43/50] [abbrv] hadoop git commit: YARN-5676. Add a HashBasedRouterPolicy, and small policies and test refactoring. (Carlo Curino via Subru).

2017-04-03 Thread subru
YARN-5676. Add a HashBasedRouterPolicy, and small policies and test 
refactoring. (Carlo Curino via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7a86d91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7a86d91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7a86d91

Branch: refs/heads/YARN-2915
Commit: c7a86d91375ceeb4028e13d1aa88c652481e6bd1
Parents: 8b081b1
Author: Subru Krishnan 
Authored: Tue Nov 22 15:02:22 2016 -0800
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   3 +-
 .../policies/AbstractPolicyManager.java | 175 -
 .../policies/FederationPolicyManager.java   | 117 
 .../PriorityBroadcastPolicyManager.java |  66 ---
 .../federation/policies/RouterPolicyFacade.java |   1 +
 .../policies/UniformBroadcastPolicyManager.java |  56 --
 .../policies/WeightedLocalityPolicyManager.java |  67 ---
 .../policies/manager/AbstractPolicyManager.java | 190 +++
 .../manager/FederationPolicyManager.java| 118 
 .../manager/HashBroadcastPolicyManager.java |  38 
 .../manager/PriorityBroadcastPolicyManager.java |  66 +++
 .../manager/UniformBroadcastPolicyManager.java  |  44 +
 .../manager/WeightedLocalityPolicyManager.java  |  67 +++
 .../policies/manager/package-info.java  |  19 ++
 .../policies/router/AbstractRouterPolicy.java   |  19 ++
 .../policies/router/HashBasedRouterPolicy.java  |  81 
 .../policies/router/LoadBasedRouterPolicy.java  |   3 +
 .../policies/router/PriorityRouterPolicy.java   |   3 +
 .../router/UniformRandomRouterPolicy.java   |  10 +-
 .../router/WeightedRandomRouterPolicy.java  |   3 +
 .../policies/BaseFederationPoliciesTest.java|  17 +-
 .../policies/BasePolicyManagerTest.java | 108 ---
 ...ionPolicyInitializationContextValidator.java |   1 +
 .../TestPriorityBroadcastPolicyManager.java |  72 ---
 .../policies/TestRouterPolicyFacade.java|   2 +
 .../TestUniformBroadcastPolicyManager.java  |  40 
 .../TestWeightedLocalityPolicyManager.java  |  79 
 .../policies/manager/BasePolicyManagerTest.java | 104 ++
 .../TestHashBasedBroadcastPolicyManager.java|  40 
 .../TestPriorityBroadcastPolicyManager.java |  72 +++
 .../TestUniformBroadcastPolicyManager.java  |  40 
 .../TestWeightedLocalityPolicyManager.java  |  79 
 .../policies/router/BaseRouterPoliciesTest.java |  51 +
 .../router/TestHashBasedRouterPolicy.java   |  83 
 .../router/TestLoadBasedRouterPolicy.java   |   3 +-
 .../router/TestPriorityRouterPolicy.java|   3 +-
 .../router/TestUniformRandomRouterPolicy.java   |   3 +-
 .../router/TestWeightedRandomRouterPolicy.java  |  15 +-
 38 files changed, 1160 insertions(+), 798 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7a86d91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 67a6f3c..ff19554 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2573,7 +2573,8 @@ public class YarnConfiguration extends Configuration {
   + "policy-manager";
 
   public static final String DEFAULT_FEDERATION_POLICY_MANAGER = "org.apache"
-  + 
".hadoop.yarn.server.federation.policies.UniformBroadcastPolicyManager";
+  + ".hadoop.yarn.server.federation.policies"
+  + ".manager.UniformBroadcastPolicyManager";
 
   public static final String FEDERATION_POLICY_MANAGER_PARAMS =
   FEDERATION_PREFIX + "policy-manager-params";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7a86d91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractPolicyManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractPolicyManager.java
 

[38/50] [abbrv] hadoop git commit: YARN-5307. Federation Application State Store internal APIs

2017-04-03 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9bdcc8b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterResponsePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterResponsePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterResponsePBImpl.java
new file mode 100644
index 000..8b72a1e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterResponsePBImpl.java
@@ -0,0 +1,190 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store.records.impl.pb;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.ApplicationHomeSubClusterProto;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetApplicationsHomeSubClusterResponseProto;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetApplicationsHomeSubClusterResponseProtoOrBuilder;
+import 
org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterResponse;
+
+import com.google.protobuf.TextFormat;
+
+/**
+ * Protocol buffer based implementation of
+ * {@link GetApplicationsHomeSubClusterResponse}.
+ */
+@Private
+@Unstable
+public class GetApplicationsHomeSubClusterResponsePBImpl
+extends GetApplicationsHomeSubClusterResponse {
+
+  private GetApplicationsHomeSubClusterResponseProto proto =
+  GetApplicationsHomeSubClusterResponseProto.getDefaultInstance();
+  private GetApplicationsHomeSubClusterResponseProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  private List appsHomeSubCluster;
+
+  public GetApplicationsHomeSubClusterResponsePBImpl() {
+builder = GetApplicationsHomeSubClusterResponseProto.newBuilder();
+  }
+
+  public GetApplicationsHomeSubClusterResponsePBImpl(
+  GetApplicationsHomeSubClusterResponseProto proto) {
+this.proto = proto;
+viaProto = true;
+  }
+
+  public GetApplicationsHomeSubClusterResponseProto getProto() {
+mergeLocalToProto();
+proto = viaProto ? proto : builder.build();
+viaProto = true;
+return proto;
+  }
+
+  private void mergeLocalToProto() {
+if (viaProto) {
+  maybeInitBuilder();
+}
+mergeLocalToBuilder();
+proto = builder.build();
+viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+if (viaProto || builder == null) {
+  builder = GetApplicationsHomeSubClusterResponseProto.newBuilder(proto);
+}
+viaProto = false;
+  }
+
+  private void mergeLocalToBuilder() {
+if (this.appsHomeSubCluster != null) {
+  addSubClustersInfoToProto();
+}
+  }
+
+  @Override
+  public int hashCode() {
+return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+if (other == null) {
+  return false;
+}
+if (other.getClass().isAssignableFrom(this.getClass())) {
+  return this.getProto().equals(this.getClass().cast(other).getProto());
+}
+return false;
+  }
+
+  @Override
+  public String toString() {
+return TextFormat.shortDebugString(getProto());
+  }
+
+  @Override
+  public List getAppsHomeSubClusters() {
+initSubClustersInfoList();
+return appsHomeSubCluster;
+  }
+
+  @Override
+  public void setAppsHomeSubClusters(
+  List appsHomeSubClusters) {
+maybeInitBuilder();
+if 

[08/50] [abbrv] hadoop git commit: HADOOP-14267. Make DistCpOptions immutable. Contributed by Mingliang Liu

2017-04-03 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/26172a94/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
index af91347..8111b04 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
@@ -123,10 +123,10 @@ public class SimpleCopyListing extends CopyListing {
   }
 
   @Override
-  protected void validatePaths(DistCpOptions options)
+  protected void validatePaths(DistCpContext context)
   throws IOException, InvalidInputException {
 
-Path targetPath = options.getTargetPath();
+Path targetPath = context.getTargetPath();
 FileSystem targetFS = targetPath.getFileSystem(getConf());
 boolean targetExists = false;
 boolean targetIsFile = false;
@@ -142,12 +142,12 @@ public class SimpleCopyListing extends CopyListing {
 
 //If target is a file, then source has to be single file
 if (targetIsFile) {
-  if (options.getSourcePaths().size() > 1) {
+  if (context.getSourcePaths().size() > 1) {
 throw new InvalidInputException("Multiple source being copied to a 
file: " +
 targetPath);
   }
 
-  Path srcPath = options.getSourcePaths().get(0);
+  Path srcPath = context.getSourcePaths().get(0);
   FileSystem sourceFS = srcPath.getFileSystem(getConf());
   if (!sourceFS.isFile(srcPath)) {
 throw new InvalidInputException("Cannot copy " + srcPath +
@@ -155,12 +155,12 @@ public class SimpleCopyListing extends CopyListing {
   }
 }
 
-if (options.shouldAtomicCommit() && targetExists) {
+if (context.shouldAtomicCommit() && targetExists) {
   throw new InvalidInputException("Target path for atomic-commit already 
exists: " +
 targetPath + ". Cannot atomic-commit to pre-existing target-path.");
 }
 
-for (Path path: options.getSourcePaths()) {
+for (Path path: context.getSourcePaths()) {
   FileSystem fs = path.getFileSystem(getConf());
   if (!fs.exists(path)) {
 throw new InvalidInputException(path + " doesn't exist");
@@ -184,7 +184,7 @@ public class SimpleCopyListing extends CopyListing {
 }
 
 if (targetIsReservedRaw) {
-  options.preserveRawXattrs();
+  context.setPreserveRawXattrs(true);
   getConf().setBoolean(DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, 
true);
 }
 
@@ -194,18 +194,19 @@ public class SimpleCopyListing extends CopyListing {
  */
 Credentials credentials = getCredentials();
 if (credentials != null) {
-  Path[] inputPaths = options.getSourcePaths().toArray(new Path[1]);
+  Path[] inputPaths = context.getSourcePaths()
+  .toArray(new Path[1]);
   TokenCache.obtainTokensForNamenodes(credentials, inputPaths, getConf());
 }
   }
 
   @Override
   protected void doBuildListing(Path pathToListingFile,
-DistCpOptions options) throws IOException {
-if(options.shouldUseSnapshotDiff()) {
-  doBuildListingWithSnapshotDiff(getWriter(pathToListingFile), options);
-}else {
-  doBuildListing(getWriter(pathToListingFile), options);
+DistCpContext context) throws IOException {
+if (context.shouldUseSnapshotDiff()) {
+  doBuildListingWithSnapshotDiff(getWriter(pathToListingFile), context);
+} else {
+  doBuildListing(getWriter(pathToListingFile), context);
 }
   }
 
@@ -232,22 +233,22 @@ public class SimpleCopyListing extends CopyListing {
* @throws IOException
*/
   private void addToFileListing(SequenceFile.Writer fileListWriter,
-  Path sourceRoot, Path path, DistCpOptions options) throws IOException {
+  Path sourceRoot, Path path, DistCpContext context) throws IOException {
 sourceRoot = getPathWithSchemeAndAuthority(sourceRoot);
 path = getPathWithSchemeAndAuthority(path);
 path = makeQualified(path);
 
 FileSystem sourceFS = sourceRoot.getFileSystem(getConf());
 FileStatus fileStatus = sourceFS.getFileStatus(path);
-final boolean preserveAcls = options.shouldPreserve(FileAttribute.ACL);
-final boolean preserveXAttrs = options.shouldPreserve(FileAttribute.XATTR);
-final boolean preserveRawXAttrs = options.shouldPreserveRawXattrs();
+final boolean preserveAcls = context.shouldPreserve(FileAttribute.ACL);
+final boolean preserveXAttrs = context.shouldPreserve(FileAttribute.XATTR);
+final boolean preserveRawXAttrs = context.shouldPreserveRawXattrs();
 LinkedList fileCopyListingStatus =
 DistCpUtils.toCopyListingFileStatus(sourceFS, fileStatus,
 preserveAcls, preserveXAttrs, preserveRawXAttrs,
- 

[50/50] [abbrv] hadoop git commit: YARN-5467. InputValidator for the FederationStateStore internal APIs. (Giovanni Matteo Fumarola via Subru)

2017-04-03 Thread subru
YARN-5467. InputValidator for the FederationStateStore internal APIs. (Giovanni 
Matteo Fumarola via Subru)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c62eb30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c62eb30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c62eb30

Branch: refs/heads/YARN-2915
Commit: 2c62eb30e6a0db4c01be3ff2975e0e2c4d11b372
Parents: 97e5bf1
Author: Subru Krishnan 
Authored: Wed Aug 17 12:07:06 2016 -0700
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../store/impl/MemoryFederationStateStore.java  |   30 +
 ...cationHomeSubClusterStoreInputValidator.java |  183 +++
 ...ationMembershipStateStoreInputValidator.java |  317 +
 .../FederationPolicyStoreInputValidator.java|  144 ++
 ...derationStateStoreInvalidInputException.java |   48 +
 .../federation/store/utils/package-info.java|   17 +
 .../impl/FederationStateStoreBaseTest.java  |6 +-
 .../TestFederationStateStoreInputValidator.java | 1265 ++
 8 files changed, 2007 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c62eb30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
index 8144435..6e564dc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
@@ -57,6 +57,9 @@ import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegister
 import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterResponse;
 import 
org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest;
 import 
org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.utils.FederationApplicationHomeSubClusterStoreInputValidator;
+import 
org.apache.hadoop.yarn.server.federation.store.utils.FederationMembershipStateStoreInputValidator;
+import 
org.apache.hadoop.yarn.server.federation.store.utils.FederationPolicyStoreInputValidator;
 import org.apache.hadoop.yarn.server.records.Version;
 import org.apache.hadoop.yarn.util.MonotonicClock;
 
@@ -88,6 +91,8 @@ public class MemoryFederationStateStore implements 
FederationStateStore {
   @Override
   public SubClusterRegisterResponse registerSubCluster(
   SubClusterRegisterRequest request) throws YarnException {
+FederationMembershipStateStoreInputValidator
+.validateSubClusterRegisterRequest(request);
 SubClusterInfo subClusterInfo = request.getSubClusterInfo();
 membership.put(subClusterInfo.getSubClusterId(), subClusterInfo);
 return SubClusterRegisterResponse.newInstance();
@@ -96,6 +101,8 @@ public class MemoryFederationStateStore implements 
FederationStateStore {
   @Override
   public SubClusterDeregisterResponse deregisterSubCluster(
   SubClusterDeregisterRequest request) throws YarnException {
+FederationMembershipStateStoreInputValidator
+.validateSubClusterDeregisterRequest(request);
 SubClusterInfo subClusterInfo = membership.get(request.getSubClusterId());
 if (subClusterInfo == null) {
   throw new YarnException(
@@ -111,6 +118,8 @@ public class MemoryFederationStateStore implements 
FederationStateStore {
   public SubClusterHeartbeatResponse subClusterHeartbeat(
   SubClusterHeartbeatRequest request) throws YarnException {
 
+FederationMembershipStateStoreInputValidator
+.validateSubClusterHeartbeatRequest(request);
 SubClusterId subClusterId = request.getSubClusterId();
 SubClusterInfo subClusterInfo = membership.get(subClusterId);
 
@@ -129,6 +138,9 @@ public class MemoryFederationStateStore implements 
FederationStateStore {
   @Override
   public GetSubClusterInfoResponse getSubCluster(
   GetSubClusterInfoRequest request) throws YarnException {
+
+FederationMembershipStateStoreInputValidator
+

[35/50] [abbrv] hadoop git commit: YARN-5323. Policies APIs for Federation Router and AMRMProxy policies. (Carlo Curino via Subru).

2017-04-03 Thread subru
YARN-5323. Policies APIs for Federation Router and AMRMProxy policies. (Carlo 
Curino via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3e85276
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3e85276
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3e85276

Branch: refs/heads/YARN-2915
Commit: b3e85276e9011d07c95ec3a8be9a464100c8f68b
Parents: b7646b6
Author: Subru Krishnan 
Authored: Wed Sep 7 17:33:34 2016 -0700
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../policies/ConfigurableFederationPolicy.java  |  44 +++
 .../policies/FederationPolicyConfigurator.java  |  91 +
 .../FederationPolicyInitializationContext.java  | 109 
 ...ionPolicyInitializationContextValidator.java |  82 
 .../policies/FederationPolicyWriter.java|  45 +++
 .../amrmproxy/FederationAMRMProxyPolicy.java|  66 ++
 .../policies/amrmproxy/package-info.java|  20 +++
 .../exceptions/FederationPolicyException.java   |  33 +
 ...FederationPolicyInitializationException.java |  33 +
 .../NoActiveSubclustersException.java   |  27 
 .../exceptions/UnknownSubclusterException.java  |  28 
 .../policies/exceptions/package-info.java   |  20 +++
 .../federation/policies/package-info.java   |  20 +++
 .../policies/router/FederationRouterPolicy.java |  45 +++
 .../policies/router/package-info.java   |  20 +++
 ...ionPolicyInitializationContextValidator.java | 128 +++
 .../utils/FederationPoliciesTestUtil.java   |  83 
 17 files changed, 894 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3e85276/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java
new file mode 100644
index 000..fd6ceea
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies;
+
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+
+/**
+ * This interface provides a general method to reinitialize a policy. The
+ * semantics are try-n-swap, so in case of an exception is thrown the
+ * implementation must ensure the previous state and configuration is preserved.
+ */
+public interface ConfigurableFederationPolicy {
+
+  /**
+   * This method is invoked to initialize or update the configuration of
+   * policies. The implementor should provide try-n-swap semantics, and retain
+   * state if possible.
+   *
+   * @param federationPolicyInitializationContext the new context to provide to
+   *  implementor.
+   *
+   * @throws FederationPolicyInitializationException in case the initialization
+   * fails.
+   */
+  void reinitialize(
+  FederationPolicyInitializationContext
+  federationPolicyInitializationContext)
+  throws FederationPolicyInitializationException;
+}


[40/50] [abbrv] hadoop git commit: YARN-3672. Create Facade for Federation State and Policy Store. Contributed by Subru Krishnan

2017-04-03 Thread subru
YARN-3672. Create Facade for Federation State and Policy Store. Contributed by 
Subru Krishnan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97e5bf1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97e5bf1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97e5bf1f

Branch: refs/heads/YARN-2915
Commit: 97e5bf1fd4044bf48181c8456665cce578f0c63e
Parents: 3954485
Author: Jian He 
Authored: Wed Aug 17 11:13:19 2016 +0800
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 hadoop-project/pom.xml  |  13 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 +
 .../yarn/conf/TestYarnConfigurationFields.java  |   4 +
 .../src/main/resources/yarn-default.xml |  20 +-
 .../hadoop-yarn-server-common/pom.xml   |  10 +
 .../utils/FederationStateStoreFacade.java   | 532 +++
 .../server/federation/utils/package-info.java   |  17 +
 .../utils/FederationStateStoreTestUtil.java | 149 ++
 .../utils/TestFederationStateStoreFacade.java   | 148 ++
 9 files changed, 905 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97e5bf1f/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index f327933..8214caf 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -94,6 +94,9 @@
 2.0.0-M21
 1.0.0-M33
 
+1.0.0
+3.0.3
+
 
 1.8
 
@@ -1256,6 +1259,16 @@
   kerb-simplekdc
   1.0.0-RC2
 
+
+  javax.cache
+  cache-api
+  ${jcache.version}
+
+
+  org.ehcache
+  ehcache
+  ${ehcache.version}
+
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97e5bf1f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 504c9e1..4db637e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2533,6 +2533,19 @@ public class YarnConfiguration extends Configuration {
   
 
   public static final String FEDERATION_PREFIX = YARN_PREFIX + "federation.";
+
+  public static final String FEDERATION_STATESTORE_CLIENT_CLASS =
+  FEDERATION_PREFIX + "state-store.class";
+
+  public static final String DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS =
+  
"org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore";
+
+  public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
+  FEDERATION_PREFIX + "cache-ttl.secs";
+
+  // 5 minutes
+  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
+
   public static final String FEDERATION_MACHINE_LIST =
   FEDERATION_PREFIX + "machine-list";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97e5bf1f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 3da4bab..bfc2534 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -68,6 +68,10 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER_PROTOCOL);
 
configurationPropsToSkipCompare.add(YarnConfiguration.CURATOR_LEADER_ELECTOR);
 
+// Federation default configs to be ignored
+configurationPropsToSkipCompare
+.add(YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS);
+
 // Ignore blacklisting nodes for AM failures feature since it is still a
 // "work in progress"
 configurationPropsToSkipCompare.add(YarnConfiguration.


[18/50] [abbrv] hadoop git commit: YARN-5601. Make the RM epoch base value configurable. Contributed by Subru Krishnan

2017-04-03 Thread subru
YARN-5601. Make the RM epoch base value configurable. Contributed by Subru 
Krishnan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b7646b6f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b7646b6f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b7646b6f

Branch: refs/heads/YARN-2915
Commit: b7646b6f3811b39a8e648540a9c9818f3d0d2d78
Parents: 72294e3
Author: Jian He 
Authored: Fri Sep 2 12:23:57 2016 +0800
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml | 5 -
 .../java/org/apache/hadoop/yarn/conf/YarnConfiguration.java  | 3 +++
 .../apache/hadoop/yarn/conf/TestYarnConfigurationFields.java | 2 ++
 .../hadoop/yarn/server/resourcemanager/ResourceManager.java  | 7 +++
 .../resourcemanager/recovery/FileSystemRMStateStore.java | 2 +-
 .../server/resourcemanager/recovery/LeveldbRMStateStore.java | 2 +-
 .../server/resourcemanager/recovery/MemoryRMStateStore.java  | 1 +
 .../yarn/server/resourcemanager/recovery/RMStateStore.java   | 4 
 .../yarn/server/resourcemanager/recovery/ZKRMStateStore.java | 2 +-
 .../resourcemanager/recovery/RMStateStoreTestBase.java   | 8 +---
 .../server/resourcemanager/recovery/TestFSRMStateStore.java  | 1 +
 .../resourcemanager/recovery/TestLeveldbRMStateStore.java| 1 +
 .../server/resourcemanager/recovery/TestZKRMStateStore.java  | 1 +
 13 files changed, 32 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7646b6f/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 2f5451d..bbd03a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -293,7 +293,10 @@
   
   
 
-
+
+  
+  
+
 
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7646b6f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 4e614f7..1cab595 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -142,6 +142,9 @@ public class YarnConfiguration extends Configuration {
 
   public static final String RM_HOSTNAME = RM_PREFIX + "hostname";
 
+  public static final String RM_EPOCH = RM_PREFIX + "epoch";
+  public static final long DEFAULT_RM_EPOCH = 0L;
+
   /** The address of the applications manager interface in the RM.*/
   public static final String RM_ADDRESS = 
 RM_PREFIX + "address";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7646b6f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 5e0876f..3f3a06c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -75,6 +75,8 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .add(YarnConfiguration.FEDERATION_FAILOVER_ENABLED);
 configurationPropsToSkipCompare
 .add(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.RM_EPOCH);
 
 // Ignore blacklisting nodes for AM failures feature since it is still a
 // "work in progress"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7646b6f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java

[10/50] [abbrv] hadoop git commit: YARN-6414. ATSv2 HBase related tests fail due to guava version upgrade (Haibo Chen via Varun Saxena)

2017-04-03 Thread subru
YARN-6414. ATSv2 HBase related tests fail due to guava version upgrade (Haibo 
Chen via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8838578
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8838578
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8838578

Branch: refs/heads/YARN-2915
Commit: b8838578ce1f1ae1f93cc6b40cc98b58321ad9ee
Parents: 26172a9
Author: Varun Saxena 
Authored: Sun Apr 2 04:37:34 2017 +0530
Committer: Varun Saxena 
Committed: Sun Apr 2 04:37:34 2017 +0530

--
 hadoop-project/pom.xml   |  1 +
 .../pom.xml  | 19 +++
 2 files changed, 20 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8838578/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index e55308f..f327933 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -51,6 +51,7 @@
 0.8.2.1
 1.2.4
 2.5.1
+11.0.2
 
 ${project.version}
 1.0.13

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8838578/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
index d44aa22..afe440f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
@@ -141,6 +141,7 @@
 
   com.google.guava
   guava
+  ${hbase-compatible-guava.version}
   test
 
 
@@ -369,6 +370,24 @@
   
 
   
+
+  
+org.apache.maven.plugins
+maven-enforcer-plugin
+
+  
+depcheck
+
+  
+  true
+
+
+  enforce
+
+
+  
+
+  
 
   
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[03/50] [abbrv] hadoop git commit: YARN-6411. Clean up the overwrite of createDispatcher() in subclass of MockRM. Contributed by Yufei Gu

2017-04-03 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d1fac5d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
index b4adf48..75ef5c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
@@ -33,8 +33,6 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.event.Dispatcher;
-import org.apache.hadoop.yarn.event.DrainDispatcher;
 import 
org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.TestAMRestart;
 import 
org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -65,8 +63,7 @@ public class TestNodeBlacklistingOnAMFailures {
 conf.setBoolean(YarnConfiguration.AM_SCHEDULING_NODE_BLACKLISTING_ENABLED,
 true);
 
-DrainDispatcher dispatcher = new DrainDispatcher();
-MockRM rm = startRM(conf, dispatcher);
+MockRM rm = startRM(conf);
 CapacityScheduler scheduler = (CapacityScheduler) 
rm.getResourceScheduler();
 
// Register 5 nodes, so that we can blacklist at least one if AM container
@@ -122,7 +119,7 @@ public class TestNodeBlacklistingOnAMFailures {
 // Try the current node a few times
 for (int i = 0; i <= 2; i++) {
   currentNode.nodeHeartbeat(true);
-  dispatcher.await();
+  rm.drainEvents();
 
   Assert.assertEquals(
   "AppAttemptState should still be SCHEDULED if currentNode is "
@@ -132,7 +129,7 @@ public class TestNodeBlacklistingOnAMFailures {
 
 // Now try the other node
 otherNode.nodeHeartbeat(true);
-dispatcher.await();
+rm.drainEvents();
 
 // Now the AM container should be allocated
 MockRM.waitForState(attempt, RMAppAttemptState.ALLOCATED, 2);
@@ -169,8 +166,7 @@ public class TestNodeBlacklistingOnAMFailures {
 conf.setBoolean(YarnConfiguration.AM_SCHEDULING_NODE_BLACKLISTING_ENABLED,
 true);
 
-DrainDispatcher dispatcher = new DrainDispatcher();
-MockRM rm = startRM(conf, dispatcher);
+MockRM rm = startRM(conf);
 CapacityScheduler scheduler = (CapacityScheduler) 
rm.getResourceScheduler();
 
// Register 5 nodes, so that we can blacklist at least one if AM container
@@ -227,7 +223,7 @@ public class TestNodeBlacklistingOnAMFailures {
 System.out.println("New AppAttempt launched " + attempt.getAppAttemptId());
 
 nm2.nodeHeartbeat(true);
-dispatcher.await();
+rm.drainEvents();
 
 // Now the AM container should be allocated
 MockRM.waitForState(attempt, RMAppAttemptState.ALLOCATED, 2);
@@ -257,8 +253,7 @@ public class TestNodeBlacklistingOnAMFailures {
 conf.setBoolean(YarnConfiguration.AM_SCHEDULING_NODE_BLACKLISTING_ENABLED,
 true);
 
-DrainDispatcher dispatcher = new DrainDispatcher();
-MockRM rm = startRM(conf, dispatcher);
+MockRM rm = startRM(conf);
 CapacityScheduler scheduler = (CapacityScheduler) 
rm.getResourceScheduler();
 
// Register 5 nodes, so that we can blacklist at least one if AM container
@@ -319,7 +314,7 @@ public class TestNodeBlacklistingOnAMFailures {
 nm3.nodeHeartbeat(true);
 nm4.nodeHeartbeat(true);
 nm5.nodeHeartbeat(true);
-dispatcher.await();
+rm.drainEvents();
 
 // Now the AM container should be allocated
 MockRM.waitForState(attempt, RMAppAttemptState.ALLOCATED, 2);
@@ -352,8 +347,7 @@ public class TestNodeBlacklistingOnAMFailures {
 1.5f);
 conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 100);
 
-DrainDispatcher dispatcher = new DrainDispatcher();
-MockRM rm = startRM(conf, dispatcher);
+MockRM rm = startRM(conf);
 
 MockNM node =
 new MockNM("127.0.0.1:1234", 8000, rm.getResourceTrackerService());
@@ -367,7 +361,7 @@ public class TestNodeBlacklistingOnAMFailures {
 // Now the AM container should be allocated
 RMAppAttempt attempt = MockRM.waitForAttemptScheduled(app, rm);
 

[29/50] [abbrv] hadoop git commit: YARN-5407. In-memory based implementation of the FederationApplicationStateStore/FederationPolicyStateStore. (Ellen Hui via Subru)

2017-04-03 Thread subru
YARN-5407. In-memory based implementation of the 
FederationApplicationStateStore/FederationPolicyStateStore. (Ellen Hui via 
Subru)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49a6c583
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49a6c583
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49a6c583

Branch: refs/heads/YARN-2915
Commit: 49a6c5837edcae1a9cd79ae613eb18c88af5d9a0
Parents: d046d9e
Author: Subru Krishnan 
Authored: Tue Aug 9 16:07:55 2016 -0700
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../store/impl/MemoryFederationStateStore.java  | 158 +++-
 ...SubClusterPoliciesConfigurationsRequest.java |   2 +-
 ...ubClusterPoliciesConfigurationsResponse.java |   2 +-
 ...GetSubClusterPolicyConfigurationRequest.java |   3 +-
 ...etSubClusterPolicyConfigurationResponse.java |   2 +-
 ...SetSubClusterPolicyConfigurationRequest.java |  20 +-
 ...etSubClusterPolicyConfigurationResponse.java |   2 +-
 .../records/SubClusterPolicyConfiguration.java  |  27 +-
 ...tApplicationHomeSubClusterRequestPBImpl.java |   4 +
 ...ClusterPolicyConfigurationRequestPBImpl.java |  17 -
 .../pb/SubClusterPolicyConfigurationPBImpl.java |  17 +
 .../proto/yarn_server_federation_protos.proto   |   8 +-
 .../impl/FederationStateStoreBaseTest.java  | 367 ++-
 .../impl/TestMemoryFederationStateStore.java|   4 +-
 14 files changed, 558 insertions(+), 75 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49a6c583/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
index cea4ac2..a540dff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
@@ -20,35 +20,72 @@ package org.apache.hadoop.yarn.server.federation.store.impl;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
-import 
org.apache.hadoop.yarn.server.federation.store.FederationMembershipStateStore;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
 import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
 import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
+import 
org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterResponse;
 import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest;
 import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsRequest;
+import 

[49/50] [abbrv] hadoop git commit: YARN-5467. InputValidator for the FederationStateStore internal APIs. (Giovanni Matteo Fumarola via Subru)

2017-04-03 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c62eb30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/utils/TestFederationStateStoreInputValidator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/utils/TestFederationStateStoreInputValidator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/utils/TestFederationStateStoreInputValidator.java
new file mode 100644
index 000..13175ae
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/utils/TestFederationStateStoreInputValidator.java
@@ -0,0 +1,1265 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store.utils;
+
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
+import 
org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
+import 
org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Unit tests for FederationApplicationInputValidator,
+ * FederationMembershipInputValidator, and FederationPolicyInputValidator.
+ */
+public class TestFederationStateStoreInputValidator {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestFederationStateStoreInputValidator.class);
+
+  private static SubClusterId subClusterId;
+  private static String amRMServiceAddress;
+  private static String clientRMServiceAddress;
+  private static String rmAdminServiceAddress;
+  private static String rmWebServiceAddress;
+  private static int lastHeartBeat;
+  private static SubClusterState stateNew;
+  private static SubClusterState stateLost;
+  private static ApplicationId appId;
+  private static int lastStartTime;
+  private static String capability;
+  private static String queue;
+  private static String type;
+  private static ByteBuffer params;
+
+  private static SubClusterId subClusterIdInvalid;
+  private static SubClusterId subClusterIdNull;
+
+  private static int lastHeartBeatNegative;
+  private static int lastStartTimeNegative;
+
+  private static SubClusterState stateNull;
+  private static ApplicationId appIdNull;
+
+  private static String capabilityNull;
+  private static String capabilityEmpty;
+
+  private static String addressNull;
+  private static String addressEmpty;
+  private static String addressWrong;
+  private static String addressWrongPort;
+
+  private static String queueEmpty;
+  private static String 

[22/50] [abbrv] hadoop git commit: YARN-3662. Federation Membership State Store internal APIs.

2017-04-03 Thread subru
YARN-3662. Federation Membership State Store internal APIs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3587929d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3587929d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3587929d

Branch: refs/heads/YARN-2915
Commit: 3587929d9ea7179cd2b6322a69a2c54a3a652437
Parents: 704b36e
Author: Subru Krishnan 
Authored: Fri Jul 29 16:53:40 2016 -0700
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../hadoop-yarn-server-common/pom.xml   |   8 +
 .../store/FederationMembershipStateStore.java   | 126 +
 .../server/federation/store/package-info.java   |  17 ++
 .../store/records/GetSubClusterInfoRequest.java |  62 +
 .../records/GetSubClusterInfoResponse.java  |  62 +
 .../records/GetSubClustersInfoRequest.java  |  66 +
 .../records/GetSubClustersInfoResponse.java |  66 +
 .../records/SubClusterDeregisterRequest.java|  89 +++
 .../records/SubClusterDeregisterResponse.java   |  42 +++
 .../records/SubClusterHeartbeatRequest.java | 149 +++
 .../records/SubClusterHeartbeatResponse.java|  45 
 .../federation/store/records/SubClusterId.java  | 100 +++
 .../store/records/SubClusterInfo.java   | 263 ++
 .../records/SubClusterRegisterRequest.java  |  74 +
 .../records/SubClusterRegisterResponse.java |  44 +++
 .../store/records/SubClusterState.java  |  60 +
 .../impl/pb/GetSubClusterInfoRequestPBImpl.java | 125 +
 .../pb/GetSubClusterInfoResponsePBImpl.java | 134 ++
 .../pb/GetSubClustersInfoRequestPBImpl.java | 108 
 .../pb/GetSubClustersInfoResponsePBImpl.java| 184 +
 .../pb/SubClusterDeregisterRequestPBImpl.java   | 156 +++
 .../pb/SubClusterDeregisterResponsePBImpl.java  |  77 ++
 .../pb/SubClusterHeartbeatRequestPBImpl.java| 192 +
 .../pb/SubClusterHeartbeatResponsePBImpl.java   |  77 ++
 .../records/impl/pb/SubClusterIdPBImpl.java |  75 ++
 .../records/impl/pb/SubClusterInfoPBImpl.java   | 267 +++
 .../pb/SubClusterRegisterRequestPBImpl.java | 134 ++
 .../pb/SubClusterRegisterResponsePBImpl.java|  77 ++
 .../store/records/impl/pb/package-info.java |  17 ++
 .../federation/store/records/package-info.java  |  17 ++
 .../proto/yarn_server_federation_protos.proto   |  93 +++
 .../records/TestFederationProtocolRecords.java  | 133 +
 32 files changed, 3139 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3587929d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index fc23af8..9cc3cae 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -58,6 +58,13 @@
   org.apache.hadoop
   hadoop-yarn-common
 
+
+
+  org.apache.hadoop
+  hadoop-yarn-common
+  test-jar
+  test
+
 
 
   com.google.guava
@@ -146,6 +153,7 @@
   yarn_server_common_protos.proto
   yarn_server_common_service_protos.proto
   yarn_server_common_service_protos.proto
+  yarn_server_federation_protos.proto
   ResourceTracker.proto
   SCMUploader.proto
   collectornodemanager_protocol.proto

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3587929d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java
new file mode 100644
index 000..378eadc
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or 

[45/50] [abbrv] hadoop git commit: YARN-5324. Stateless Federation router policies implementation. (Carlo Curino via Subru).

2017-04-03 Thread subru
YARN-5324. Stateless Federation router policies implementation. (Carlo Curino 
via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d8a5eac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d8a5eac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d8a5eac

Branch: refs/heads/YARN-2915
Commit: 4d8a5eacee9ce7b09a52aaf7a77fbe2963c5081d
Parents: b3e8527
Author: Subru Krishnan 
Authored: Thu Sep 22 17:06:57 2016 -0700
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../policies/FederationPolicyConfigurator.java  |  91 ---
 .../FederationPolicyInitializationContext.java  |  11 +-
 .../policies/FederationPolicyManager.java   | 126 +
 .../policies/FederationPolicyWriter.java|  45 
 .../policies/dao/WeightedPolicyInfo.java| 253 +++
 .../federation/policies/dao/package-info.java   |  20 ++
 .../router/BaseWeightedRouterPolicy.java| 150 +++
 .../policies/router/LoadBasedRouterPolicy.java  | 109 
 .../policies/router/PriorityRouterPolicy.java   |  66 +
 .../router/UniformRandomRouterPolicy.java   |  85 +++
 .../router/WeightedRandomRouterPolicy.java  |  79 ++
 .../store/records/SubClusterIdInfo.java |  75 ++
 .../policies/BaseFederationPoliciesTest.java| 155 
 ...ionPolicyInitializationContextValidator.java |  17 +-
 .../router/TestLoadBasedRouterPolicy.java   | 109 
 .../router/TestPriorityRouterPolicy.java|  87 +++
 .../router/TestUniformRandomRouterPolicy.java   |  65 +
 .../router/TestWeightedRandomRouterPolicy.java  | 127 ++
 .../utils/FederationPoliciesTestUtil.java   |  82 +-
 19 files changed, 1604 insertions(+), 148 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d8a5eac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyConfigurator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyConfigurator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyConfigurator.java
deleted file mode 100644
index fdc3857..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyConfigurator.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.hadoop.yarn.server.federation.policies;
-
-import 
org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy;
-
-
-import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
-
-import org.apache.hadoop.yarn.server.federation.policies.router
-.FederationRouterPolicy;
-
-/**
- * Implementors of this interface are capable of instantiating and (re)initializing
- * {@link FederationAMRMProxyPolicy} and {@link FederationRouterPolicy} based 
on
- * a {@link FederationPolicyInitializationContext}. The reason to bind these 
two
- * policies together is to make sure we remain consistent across the router and
- * amrmproxy policy decisions.
- */
-public interface FederationPolicyConfigurator {
-
-  /**
-   * If the current instance is compatible, this method returns the same
-   * instance of {@link FederationAMRMProxyPolicy} reinitialized with the
-   * current context, otherwise a new instance initialized with the current
-   * context is provided. If the instance is compatible with the current class
-   * the implementors should attempt to reinitialize (retaining state). To 
effect
-   * a complete policy reset 

[01/50] [abbrv] hadoop git commit: HADOOP-11794. Enable distcp to copy blocks in parallel. Contributed by Yongjun Zhang, Wei-Chiu Chuang, Xiao Chen, Rosie Li. [Forced Update!]

2017-04-03 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2915 39d352caf -> 6e34518b9 (forced update)


HADOOP-11794. Enable distcp to copy blocks in parallel. Contributed by Yongjun 
Zhang, Wei-Chiu Chuang, Xiao Chen, Rosie Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf3fb585
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf3fb585
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf3fb585

Branch: refs/heads/YARN-2915
Commit: bf3fb585aaf2b179836e139c041fc87920a3c886
Parents: 144f1cf
Author: Yongjun Zhang 
Authored: Thu Mar 30 17:01:15 2017 -0700
Committer: Yongjun Zhang 
Committed: Thu Mar 30 17:38:56 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  22 +-
 .../org/apache/hadoop/tools/CopyListing.java|  37 +-
 .../hadoop/tools/CopyListingFileStatus.java |  87 -
 .../java/org/apache/hadoop/tools/DistCp.java|  52 +++
 .../apache/hadoop/tools/DistCpOptionSwitch.java |  10 +
 .../org/apache/hadoop/tools/DistCpOptions.java  |  22 +-
 .../org/apache/hadoop/tools/OptionsParser.java  |  36 +-
 .../apache/hadoop/tools/SimpleCopyListing.java  |  83 +++--
 .../hadoop/tools/mapred/CopyCommitter.java  | 174 -
 .../apache/hadoop/tools/mapred/CopyMapper.java  |  40 +-
 .../tools/mapred/RetriableFileCopyCommand.java  |  26 +-
 .../tools/mapred/UniformSizeInputFormat.java|   5 +-
 .../apache/hadoop/tools/util/DistCpUtils.java   | 111 +-
 .../src/site/markdown/DistCp.md.vm  |   1 +
 .../apache/hadoop/tools/TestDistCpSystem.java   | 368 +--
 .../apache/hadoop/tools/TestOptionsParser.java  |   2 +-
 .../hadoop/tools/mapred/TestCopyCommitter.java  |   5 +-
 17 files changed, 971 insertions(+), 110 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf3fb585/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 1329195..9b782f3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -862,7 +862,27 @@ public class DFSTestUtil {
   out.write(toAppend);
 }
   }
-  
+
+  /**
+   * Append specified length of bytes to a given file, starting with new block.
+   * @param fs The file system
+   * @param p Path of the file to append
+   * @param length Length of bytes to append to the file
+   * @throws IOException
+   */
+  public static void appendFileNewBlock(DistributedFileSystem fs,
+  Path p, int length) throws IOException {
+assert fs.exists(p);
+assert length >= 0;
+byte[] toAppend = new byte[length];
+Random random = new Random();
+random.nextBytes(toAppend);
+try (FSDataOutputStream out = fs.append(p,
+EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null)) {
+  out.write(toAppend);
+}
+  }
+
   /**
* @return url content as string (UTF-8 encoding assumed)
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf3fb585/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
index 481aa61..9ebf9d2 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
@@ -145,12 +145,22 @@ public abstract class CopyListing extends Configured {
 Configuration config = getConf();
 FileSystem fs = pathToListFile.getFileSystem(config);
 
-Path sortedList = DistCpUtils.sortListing(fs, config, pathToListFile);
+final boolean splitLargeFile = options.splitLargeFile();
+
+// When splitLargeFile is enabled, we don't randomize the copylist
+// earlier, so we don't do the sorting here. For a file that has
+// multiple entries due to split, we check here that their
+//  is continuous.
+//
+Path checkPath = splitLargeFile?
+pathToListFile : DistCpUtils.sortListing(fs, config, pathToListFile);
 
 SequenceFile.Reader reader = new SequenceFile.Reader(
-  config, SequenceFile.Reader.file(sortedList));
+  config, SequenceFile.Reader.file(checkPath));
 try {
   Text 

[21/50] [abbrv] hadoop git commit: YARN-3662. Federation Membership State Store internal APIs.

2017-04-03 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3587929d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterRequestPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterRequestPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterRequestPBImpl.java
new file mode 100644
index 000..d4c5451
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterRequestPBImpl.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store.records.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterDeregisterRequestProto;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterDeregisterRequestProtoOrBuilder;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterIdProto;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterStateProto;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
+
+import com.google.protobuf.TextFormat;
+
+/**
+ * Protocol buffer based implementation of {@link SubClusterDeregisterRequest}.
+ */
+@Private
+@Unstable
+public class SubClusterDeregisterRequestPBImpl
+extends SubClusterDeregisterRequest {
+
+  private SubClusterDeregisterRequestProto proto =
+  SubClusterDeregisterRequestProto.getDefaultInstance();
+  private SubClusterDeregisterRequestProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  public SubClusterDeregisterRequestPBImpl() {
+builder = SubClusterDeregisterRequestProto.newBuilder();
+  }
+
+  public SubClusterDeregisterRequestPBImpl(
+  SubClusterDeregisterRequestProto proto) {
+this.proto = proto;
+viaProto = true;
+  }
+
+  public SubClusterDeregisterRequestProto getProto() {
+mergeLocalToProto();
+proto = viaProto ? proto : builder.build();
+viaProto = true;
+return proto;
+  }
+
+  private void mergeLocalToProto() {
+if (viaProto) {
+  maybeInitBuilder();
+}
+mergeLocalToBuilder();
+proto = builder.build();
+viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+if (viaProto || builder == null) {
+  builder = SubClusterDeregisterRequestProto.newBuilder(proto);
+}
+viaProto = false;
+  }
+
+  private void mergeLocalToBuilder() {
+  }
+
+  @Override
+  public int hashCode() {
+return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+if (other == null) {
+  return false;
+}
+if (other.getClass().isAssignableFrom(this.getClass())) {
+  return this.getProto().equals(this.getClass().cast(other).getProto());
+}
+return false;
+  }
+
+  @Override
+  public String toString() {
+return TextFormat.shortDebugString(getProto());
+  }
+
+  @Override
+  public SubClusterId getSubClusterId() {
+SubClusterDeregisterRequestProtoOrBuilder p = viaProto ? proto : builder;
+if (!p.hasSubClusterId()) {
+  return null;
+}
+return convertFromProtoFormat(p.getSubClusterId());
+  }
+
+  @Override
+  public void setSubClusterId(SubClusterId subClusterId) {
+maybeInitBuilder();
+if (subClusterId == null) {
+  builder.clearSubClusterId();
+  return;
+}
+

[19/50] [abbrv] hadoop git commit: YARN-5872. Add AlwaysReject policies for router and amrmproxy. (Carlo Curino via Subru).

2017-04-03 Thread subru
YARN-5872. Add AlwaysReject policies for router and amrmproxy. (Carlo Curino via 
Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8737d15e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8737d15e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8737d15e

Branch: refs/heads/YARN-2915
Commit: 8737d15e3452160440e9125c3abe6475a96eab02
Parents: 1bb520e
Author: Subru Krishnan 
Authored: Tue Nov 22 18:37:30 2016 -0800
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../amrmproxy/RejectAMRMProxyPolicy.java| 67 +
 .../manager/RejectAllPolicyManager.java | 40 ++
 .../policies/router/RejectRouterPolicy.java | 66 +
 .../amrmproxy/TestRejectAMRMProxyPolicy.java| 78 
 .../manager/TestRejectAllPolicyManager.java | 40 ++
 .../policies/router/TestRejectRouterPolicy.java | 63 
 6 files changed, 354 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8737d15e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
new file mode 100644
index 000..3783df6
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies.amrmproxy;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
+import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContextValidator;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+
+/**
+ * An implementation of the {@link FederationAMRMProxyPolicy} that simply
+ * rejects all requests. Useful to prevent apps from accessing any sub-cluster.
+ */
+public class RejectAMRMProxyPolicy extends AbstractAMRMProxyPolicy {
+
+  private Set knownClusterIds = new HashSet<>();
+
+  @Override
+  public void reinitialize(FederationPolicyInitializationContext policyContext)
+  throws FederationPolicyInitializationException {
+// overrides initialize to avoid weight checks that do not apply for
+// this policy.
+FederationPolicyInitializationContextValidator.validate(policyContext,
+this.getClass().getCanonicalName());
+setPolicyContext(policyContext);
+  }
+
+  @Override
+  public Map splitResourceRequests(
+  List resourceRequests) throws YarnException {
+throw new FederationPolicyException("The policy configured for this queue "
++ "rejects all routing requests by construction.");
+  }
+
+  @Override
+  public void notifyOfResponse(SubClusterId subClusterId,
+  AllocateResponse response) throws YarnException {
+// This might be invoked for applications started with 

[42/50] [abbrv] hadoop git commit: YARN-5676. Add a HashBasedRouterPolicy, and small policies and test refactoring. (Carlo Curino via Subru).

2017-04-03 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7a86d91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
index 4975a9f..5fa02d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import 
org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
 import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import 
org.apache.hadoop.yarn.server.federation.policies.manager.PriorityBroadcastPolicyManager;
+import 
org.apache.hadoop.yarn.server.federation.policies.manager.UniformBroadcastPolicyManager;
 import 
org.apache.hadoop.yarn.server.federation.policies.router.PriorityRouterPolicy;
 import 
org.apache.hadoop.yarn.server.federation.policies.router.UniformRandomRouterPolicy;
 import org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7a86d91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestUniformBroadcastPolicyManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestUniformBroadcastPolicyManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestUniformBroadcastPolicyManager.java
deleted file mode 100644
index 542a5ae..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestUniformBroadcastPolicyManager.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.hadoop.yarn.server.federation.policies;
-
-import 
org.apache.hadoop.yarn.server.federation.policies.amrmproxy.BroadcastAMRMProxyPolicy;
-import 
org.apache.hadoop.yarn.server.federation.policies.router.UniformRandomRouterPolicy;
-import org.junit.Before;
-
-/**
- * Simple test of {@link UniformBroadcastPolicyManager}.
- */
-public class TestUniformBroadcastPolicyManager extends BasePolicyManagerTest {
-
-  @Before
-  public void setup() {
-//config policy
-wfp = new UniformBroadcastPolicyManager();
-wfp.setQueue("queue1");
-
-//set expected params that the base test class will use for tests
-expectedPolicyManager = UniformBroadcastPolicyManager.class;
-expectedAMRMProxyPolicy = BroadcastAMRMProxyPolicy.class;
-expectedRouterPolicy = UniformRandomRouterPolicy.class;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7a86d91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestWeightedLocalityPolicyManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestWeightedLocalityPolicyManager.java
 

[14/50] [abbrv] hadoop git commit: HDFS-11598. Improve -setrep for Erasure Coded files. Contributed by Yiqun Lin.

2017-04-03 Thread subru
HDFS-11598. Improve -setrep for Erasure Coded files. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbd68478
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbd68478
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbd68478

Branch: refs/heads/YARN-2915
Commit: bbd68478d5743b3b2911bf3febed7daa89479e45
Parents: bc7aff7
Author: Wei-Chiu Chuang 
Authored: Mon Apr 3 07:57:28 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon Apr 3 07:57:28 2017 -0700

--
 .../apache/hadoop/fs/shell/SetReplication.java  | 17 ++--
 .../hadoop/hdfs/TestSetrepIncreasing.java   | 44 
 2 files changed, 57 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd68478/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
index 2231c58..16e6e92 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
@@ -85,11 +85,20 @@ class SetReplication extends FsCommand {
 }
 
 if (item.stat.isFile()) {
-  if (!item.fs.setReplication(item.path, newRep)) {
-throw new IOException("Could not set replication for: " + item);
+  // Do the checking if the file is erasure coded since
+  // replication factor for an EC file is meaningless.
+  if (!item.stat.isErasureCoded()) {
+if (!item.fs.setReplication(item.path, newRep)) {
+  throw new IOException("Could not set replication for: " + item);
+}
+out.println("Replication " + newRep + " set: " + item);
+if (waitOpt) {
+  waitList.add(item);
+}
+  } else {
+out.println("Did not set replication for: " + item
++ ", because it's an erasure coded file.");
   }
-  out.println("Replication " + newRep + " set: " + item);
-  if (waitOpt) waitList.add(item);
 } 
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd68478/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
index fee30b5..50d7b27 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
@@ -20,7 +20,9 @@ package org.apache.hadoop.hdfs;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.io.PrintStream;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -28,6 +30,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.junit.Test;
 
@@ -102,4 +105,45 @@ public class TestSetrepIncreasing {
   cluster.shutdown();
 }
  }
+
+  @Test
+  public void testSetRepOnECFile() throws Exception {
+ClientProtocol client;
+Configuration conf = new HdfsConfiguration();
+conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
+StripedFileTestUtil.getDefaultECPolicy().getName());
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+.build();
+cluster.waitActive();
+client = NameNodeProxies.createProxy(conf,
+cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
+client.setErasureCodingPolicy("/",
+StripedFileTestUtil.getDefaultECPolicy().getName());
+
+FileSystem dfs = cluster.getFileSystem();
+try {
+  Path d = new Path("/tmp");
+  dfs.mkdirs(d);
+  Path f = new Path(d, "foo");
+  dfs.createNewFile(f);
+  FileStatus file = dfs.getFileStatus(f);
+  assertTrue(file.isErasureCoded());
+
+  ByteArrayOutputStream out = new ByteArrayOutputStream();
+  System.setOut(new PrintStream(out));
+  String[] 

[37/50] [abbrv] hadoop git commit: YARN-5406. In-memory based implementation of the FederationMembershipStateStore. Contributed by Ellen Hui.

2017-04-03 Thread subru
YARN-5406. In-memory based implementation of the 
FederationMembershipStateStore. Contributed by Ellen Hui.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3a55ba6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3a55ba6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3a55ba6

Branch: refs/heads/YARN-2915
Commit: f3a55ba655a1c4e13a0ced81b97194397d96c238
Parents: 3587929
Author: Subru Krishnan 
Authored: Thu Aug 4 15:54:38 2016 -0700
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../store/impl/MemoryFederationStateStore.java  | 138 
 .../federation/store/impl/package-info.java |  17 ++
 .../records/GetSubClustersInfoRequest.java  |   4 +
 .../store/records/SubClusterState.java  |   4 +
 .../impl/FederationStateStoreBaseTest.java  | 221 +++
 .../impl/TestMemoryFederationStateStore.java|  49 
 6 files changed, 433 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3a55ba6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
new file mode 100644
index 000..7fdc4a9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import 
org.apache.hadoop.yarn.server.federation.store.FederationMembershipStateStore;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterResponse;
+import org.apache.hadoop.yarn.server.records.Version;
+import org.apache.hadoop.yarn.util.MonotonicClock;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * In-memory implementation of FederationMembershipStateStore.
+ */
+public class MemoryFederationStateStore
+implements FederationMembershipStateStore {
+
+  private final Map<SubClusterId, SubClusterInfo> membership =
+  new ConcurrentHashMap<SubClusterId, SubClusterInfo>();
+  private final MonotonicClock clock = new MonotonicClock();
+
+  @Override
+  public Version getMembershipStateStoreVersion() {
+return null;
+  }
+
+  @Override
+  public 

[11/50] [abbrv] hadoop git commit: YARN-6377. NMTimelinePublisher#serviceStop does not stop timeline clients (Haibo Chen via Varun Saxena)

2017-04-03 Thread subru
YARN-6377. NMTimelinePublisher#serviceStop does not stop timeline clients 
(Haibo Chen via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4b5aa84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4b5aa84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4b5aa84

Branch: refs/heads/YARN-2915
Commit: a4b5aa8493e0bd9006f44291d265c28ab86497e1
Parents: b883857
Author: Varun Saxena 
Authored: Sun Apr 2 04:54:12 2017 +0530
Committer: Varun Saxena 
Committed: Sun Apr 2 04:54:12 2017 +0530

--
 .../nodemanager/timelineservice/NMTimelinePublisher.java  |  8 
 .../timelineservice/TestNMTimelinePublisher.java  | 10 +-
 2 files changed, 17 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4b5aa84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
index ce2c656..8aaae79 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
@@ -101,6 +101,14 @@ public class NMTimelinePublisher extends CompositeService {
 this.nodeId = context.getNodeId();
   }
 
+  @Override
+  protected void serviceStop() throws Exception {
+for(ApplicationId app : appToClientMap.keySet()) {
+  stopTimelineClient(app);
+}
+super.serviceStop();
+  }
+
   @VisibleForTesting
   Map getAppToClientMap() {
 return appToClientMap;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4b5aa84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
index e116122..0b8eaa9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.client.api.impl.TimelineV2ClientImpl;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
@@ -53,14 +54,21 @@ public class TestNMTimelinePublisher {
 final DummyTimelineClient timelineClient = new DummyTimelineClient(null);
 when(context.getNodeId()).thenReturn(NodeId.newInstance("localhost", 0));
 when(context.getHttpPort()).thenReturn(0);
+
+Configuration conf = new Configuration();
+conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
+
 NMTimelinePublisher publisher = new NMTimelinePublisher(context) {
   public void createTimelineClient(ApplicationId appId) {
 if (!getAppToClientMap().containsKey(appId)) {
+  timelineClient.init(getConfig());
+  timelineClient.start();
   getAppToClientMap().put(appId, timelineClient);
 }
   }
 };
-

[34/50] [abbrv] hadoop git commit: YARN-6281. Cleanup when AMRMProxy fails to initialize a new interceptor chain. (Botong Huang via Subru)

2017-04-03 Thread subru
YARN-6281. Cleanup when AMRMProxy fails to initialize a new interceptor chain. 
(Botong Huang via Subru)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b942762
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b942762
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b942762

Branch: refs/heads/YARN-2915
Commit: 4b9427629bf700cb025e967ee5aa67ac7c8792fd
Parents: 3a840ae
Author: Subru Krishnan 
Authored: Fri Mar 10 18:13:29 2017 -0800
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../nodemanager/amrmproxy/AMRMProxyService.java | 25 ++--
 .../amrmproxy/BaseAMRMProxyTest.java| 21 -
 .../amrmproxy/TestAMRMProxyService.java | 31 
 3 files changed, 61 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b942762/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index 5e91a20..c17d8ca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -297,11 +297,16 @@ public class AMRMProxyService extends AbstractService 
implements
 + " ApplicationId:" + applicationAttemptId + " for the user: "
 + user);
 
-RequestInterceptor interceptorChain =
-this.createRequestInterceptorChain();
-interceptorChain.init(createApplicationMasterContext(
-applicationAttemptId, user, amrmToken, localToken));
-chainWrapper.init(interceptorChain, applicationAttemptId);
+try {
+  RequestInterceptor interceptorChain =
+  this.createRequestInterceptorChain();
+  interceptorChain.init(createApplicationMasterContext(this.nmContext,
+  applicationAttemptId, user, amrmToken, localToken));
+  chainWrapper.init(interceptorChain, applicationAttemptId);
+} catch (Exception e) {
+  this.applPipelineMap.remove(applicationAttemptId.getApplicationId());
+  throw e;
+}
   }
 
   /**
@@ -317,8 +322,10 @@ public class AMRMProxyService extends AbstractService 
implements
 this.applPipelineMap.remove(applicationId);
 
 if (pipeline == null) {
-  LOG.info("Request to stop an application that does not exist. Id:"
-  + applicationId);
+  LOG.info(
+  "No interceptor pipeline for application {},"
+  + " likely because its AM is not run in this node.",
+  applicationId);
 } else {
   LOG.info("Stopping the request processing pipeline for application: "
   + applicationId);
@@ -387,11 +394,11 @@ public class AMRMProxyService extends AbstractService 
implements
   }
 
   private AMRMProxyApplicationContext createApplicationMasterContext(
-  ApplicationAttemptId applicationAttemptId, String user,
+  Context context, ApplicationAttemptId applicationAttemptId, String user,
   Token amrmToken,
   Token localToken) {
 AMRMProxyApplicationContextImpl appContext =
-new AMRMProxyApplicationContextImpl(this.nmContext, getConfig(),
+new AMRMProxyApplicationContextImpl(context, getConfig(),
 applicationAttemptId, user, amrmToken, localToken);
 return appContext;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b942762/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
index 7f96947..6f5009e 100644
--- 

[48/50] [abbrv] hadoop git commit: YARN-5408. Compose Federation membership/application/policy APIs into an uber FederationStateStore API. (Ellen Hui via Subru).

2017-04-03 Thread subru
YARN-5408. Compose Federation membership/application/policy APIs into an uber 
FederationStateStore API. (Ellen Hui via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d046d9ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d046d9ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d046d9ea

Branch: refs/heads/YARN-2915
Commit: d046d9ea9403902ad3d91d5c07f64db78e5ddbb4
Parents: ed1868a
Author: Subru Krishnan 
Authored: Mon Aug 8 14:53:38 2016 -0700
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 ...ederationApplicationHomeSubClusterStore.java | 18 ++
 .../store/FederationMembershipStateStore.java   | 14 +
 .../federation/store/FederationStateStore.java  | 64 
 .../store/impl/MemoryFederationStateStore.java  | 19 --
 .../impl/FederationStateStoreBaseTest.java  | 57 +
 .../impl/TestMemoryFederationStateStore.java| 21 +--
 6 files changed, 99 insertions(+), 94 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d046d9ea/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
index 217ee2e..22bb88a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
@@ -30,7 +30,6 @@ import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHom
 import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterResponse;
 import 
org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest;
 import 
org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterResponse;
-import org.apache.hadoop.yarn.server.records.Version;
 
 /**
  * FederationApplicationHomeSubClusterStore maintains the state of all
@@ -50,15 +49,6 @@ import org.apache.hadoop.yarn.server.records.Version;
 public interface FederationApplicationHomeSubClusterStore {
 
   /**
-   * Get the {@link Version} of the underlying federation application state
-   * store.
-   *
-   * @return the {@link Version} of the underlying federation application state
-   * store
-   */
-  Version getApplicationStateStoreVersion();
-
-  /**
* Register the home {@code SubClusterId} of the newly submitted
* {@code ApplicationId}. Currently response is empty if the operation was
* successful, if not an exception reporting reason for a failure.
@@ -91,16 +81,16 @@ public interface FederationApplicationHomeSubClusterStore {
* {@code ApplicationId}.
*
* @param request contains the application queried
-   * @return {@code ApplicationHomeSubCluster} containing the application's
-   * home subcluster
+   * @return {@code ApplicationHomeSubCluster} containing the application's 
home
+   * subcluster
* @throws YarnException if the request is invalid/fails
*/
   GetApplicationHomeSubClusterResponse getApplicationHomeSubClusterMap(
   GetApplicationHomeSubClusterRequest request) throws YarnException;
 
   /**
-   * Get the {@code ApplicationHomeSubCluster} list representing the mapping
-   * of all submitted applications to it's home sub-cluster.
+   * Get the {@code ApplicationHomeSubCluster} list representing the mapping of
+   * all submitted applications to its home sub-cluster.
*
* @param request empty representing all applications
* @return the mapping of all submitted application to it's home sub-cluster

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d046d9ea/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java
--
diff --git 

[04/50] [abbrv] hadoop git commit: YARN-6411. Clean up the overwrite of createDispatcher() in subclass of MockRM. Contributed by Yufei Gu

2017-04-03 Thread subru
YARN-6411. Clean up the overwrite of createDispatcher() in subclass of MockRM. 
Contributed by Yufei Gu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d1fac5d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d1fac5d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d1fac5d

Branch: refs/heads/YARN-2915
Commit: 4d1fac5df2508011adfc4c5d683beb00fd5ced45
Parents: 28cdc5a
Author: Jason Lowe 
Authored: Fri Mar 31 10:05:34 2017 -0500
Committer: Jason Lowe 
Committed: Fri Mar 31 10:05:34 2017 -0500

--
 .../v2/app/rm/TestRMContainerAllocator.java | 453 +--
 .../api/impl/TestAMRMClientOnRMRestart.java |  59 +--
 .../server/resourcemanager/ACLsTestBase.java|  10 -
 .../server/resourcemanager/RMHATestBase.java|  20 +-
 .../ReservationACLsTestBase.java|   5 +-
 .../resourcemanager/TestApplicationCleanup.java |  44 +-
 .../TestApplicationMasterLauncher.java  |  11 +-
 .../TestApplicationMasterService.java   |  19 +-
 .../TestNodeBlacklistingOnAMFailures.java   |  41 +-
 .../TestReservationSystemWithRMHA.java  |   5 +-
 .../TestAMRMRPCNodeUpdates.java |  18 +-
 .../resourcetracker/TestNMReconnect.java|  14 +-
 .../rmcontainer/TestRMContainerImpl.java|   1 -
 .../capacity/TestApplicationPriority.java   |  29 +-
 .../security/TestClientToAMTokens.java  |  23 +-
 15 files changed, 277 insertions(+), 475 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d1fac5d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index e6aee6e..933bd01 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -179,21 +179,19 @@ public class TestRMContainerAllocator {
 Configuration conf = new Configuration();
 MyResourceManager rm = new MyResourceManager(conf);
 rm.start();
-DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
-.getDispatcher();
 
 // Submit the application
 RMApp app = rm.submitApp(1024);
-dispatcher.await();
+rm.drainEvents();
 
 MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
 amNodeManager.nodeHeartbeat(true);
-dispatcher.await();
+rm.drainEvents();
 
 ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
 .getAppAttemptId();
 rm.sendAMLaunched(appAttemptId);
-dispatcher.await();
+rm.drainEvents();
 
 JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
 Job mockJob = mock(Job.class);
@@ -207,7 +205,7 @@ public class TestRMContainerAllocator {
 MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
 MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
 MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
-dispatcher.await();
+rm.drainEvents();
 
 // create the container request
 ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
@@ -222,7 +220,7 @@ public class TestRMContainerAllocator {
 // this tells the scheduler about the requests
 // as nodes are not added, no allocations
 List assigned = allocator.schedule();
-dispatcher.await();
+rm.drainEvents();
 Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
 Assert.assertEquals(4, rm.getMyFifoScheduler().lastAsk.size());
 
@@ -234,7 +232,7 @@ public class TestRMContainerAllocator {
 // this tells the scheduler about the requests
 // as nodes are not added, no allocations
 assigned = allocator.schedule();
-dispatcher.await();
+rm.drainEvents();
 Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
 Assert.assertEquals(3, rm.getMyFifoScheduler().lastAsk.size());
 
@@ -242,18 +240,18 @@ public class TestRMContainerAllocator {
 nodeManager1.nodeHeartbeat(true); // Node heartbeat
 nodeManager2.nodeHeartbeat(true); // Node heartbeat
 nodeManager3.nodeHeartbeat(true); // Node heartbeat
-

[44/50] [abbrv] hadoop git commit: YARN-5324. Stateless Federation router policies implementation. (Carlo Curino via Subru).

2017-04-03 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d8a5eac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java
index 8c2115b..f901329 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java
@@ -19,13 +19,20 @@ package org.apache.hadoop.yarn.server.federation.utils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.policies.ConfigurableFederationPolicy;
+import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
+import 
org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
 import 
org.apache.hadoop.yarn.server.federation.resolver.DefaultSubClusterResolverImpl;
 import org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver;
 import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
-import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.*;
 
 import java.net.URL;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
@@ -41,6 +48,41 @@ public final class FederationPoliciesTestUtil {
 // disabled.
   }
 
+
+  public static void initializePolicyContext(
+  FederationPolicyInitializationContext fpc, ConfigurableFederationPolicy
+  policy, WeightedPolicyInfo policyInfo,
+  Map activeSubclusters)
+  throws YarnException {
+ByteBuffer buf = policyInfo.toByteBuffer();
+fpc.setSubClusterPolicyConfiguration(SubClusterPolicyConfiguration
+.newInstance("queue1", policy.getClass().getCanonicalName(), buf));
+FederationStateStoreFacade facade = FederationStateStoreFacade
+.getInstance();
+FederationStateStore fss = mock(FederationStateStore.class);
+
+if (activeSubclusters == null) {
+  activeSubclusters = new HashMap();
+}
+GetSubClustersInfoResponse response = GetSubClustersInfoResponse
+.newInstance(new 
ArrayList(activeSubclusters.values()));
+
+when(fss.getSubClusters(any())).thenReturn(response);
+facade.reinitialize(fss, new Configuration());
+fpc.setFederationStateStoreFacade(facade);
+policy.reinitialize(fpc);
+  }
+
+  public static void initializePolicyContext(
+  ConfigurableFederationPolicy policy,
+  WeightedPolicyInfo policyInfo, Map activeSubclusters) throws YarnException {
+FederationPolicyInitializationContext context =
+new FederationPolicyInitializationContext(null, initResolver(),
+initFacade());
+initializePolicyContext(context, policy, policyInfo, activeSubclusters);
+  }
+
   /**
* Initialize a {@link SubClusterResolver}.
*
@@ -66,18 +108,52 @@ public final class FederationPoliciesTestUtil {
* Initialize a main-memory {@link FederationStateStoreFacade} used for
* testing, with a mock resolver.
*
+   * @param subClusterInfos the list of subclusters to be served on
+   *getSubClusters invocations.
+   *
* @return the facade.
*
* @throws YarnException in case the initialization is not successful.
*/
-  public static FederationStateStoreFacade initFacade() throws YarnException {
+
+  public static FederationStateStoreFacade initFacade(
+  List subClusterInfos, SubClusterPolicyConfiguration
+  policyConfiguration) throws YarnException {
 FederationStateStoreFacade goodFacade = FederationStateStoreFacade
 .getInstance();
 FederationStateStore fss = mock(FederationStateStore.class);
 GetSubClustersInfoResponse response = GetSubClustersInfoResponse
-.newInstance(new ArrayList<>());
+.newInstance(subClusterInfos);
 when(fss.getSubClusters(any())).thenReturn(response);
+
+List configurations = new ArrayList<>();
+

[02/50] [abbrv] hadoop git commit: HDFS-11551. Handle SlowDiskReport from DataNode at the NameNode. Contributed by Hanisha Koneru.

2017-04-03 Thread subru
HDFS-11551. Handle SlowDiskReport from DataNode at the NameNode. Contributed by 
Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28cdc5a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28cdc5a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28cdc5a8

Branch: refs/heads/YARN-2915
Commit: 28cdc5a8dc37ade1f45bda3aede589ee8593945e
Parents: bf3fb58
Author: Hanisha Koneru 
Authored: Thu Mar 30 22:41:26 2017 -0700
Committer: Arpit Agarwal 
Committed: Thu Mar 30 22:41:26 2017 -0700

--
 .../hdfs/server/protocol/SlowDiskReports.java   |  28 +-
 .../server/blockmanagement/DatanodeManager.java |  34 +-
 .../server/blockmanagement/SlowDiskTracker.java | 291 
 .../datanode/metrics/DataNodeDiskMetrics.java   |  35 +-
 .../blockmanagement/TestSlowDiskTracker.java| 448 +++
 5 files changed, 812 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28cdc5a8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
index ef4d09e..8095c2a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
@@ -48,7 +48,7 @@ public final class SlowDiskReports {
   private final Map> slowDisks;
 
   /**
-   * An object representing a SlowPeerReports with no entries. Should
+   * An object representing a SlowDiskReports with no entries. Should
* be used instead of null or creating new objects when there are
* no slow peers to report.
*/
@@ -119,8 +119,28 @@ public final class SlowDiskReports {
* Lists the types of operations on which disk latencies are measured.
*/
   public enum DiskOp {
-METADATA,
-READ,
-WRITE
+METADATA("MetadataOp"),
+READ("ReadIO"),
+WRITE("WriteIO");
+
+private final String value;
+
+DiskOp(final String v) {
+  this.value = v;
+}
+
+@Override
+public String toString() {
+  return value;
+}
+
+public static DiskOp fromValue(final String value) {
+  for (DiskOp as : DiskOp.values()) {
+if (as.value.equals(value)) {
+  return as;
+}
+  }
+  return null;
+}
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28cdc5a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index e22b7af..18135a8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -38,6 +38,7 @@ import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList;
+import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
@@ -180,9 +181,15 @@ public class DatanodeManager {
* True if we should process latency metrics from downstream peers.
*/
   private final boolean dataNodePeerStatsEnabled;
+  /**
+   *  True if we should process latency metrics from individual DN disks.
+   */
+  private final boolean dataNodeDiskStatsEnabled;
 
   @Nullable
   private final SlowPeerTracker slowPeerTracker;
+  @Nullable
+  private final SlowDiskTracker slowDiskTracker;
   
   /**
* The minimum time between resending caching directives to Datanodes,
@@ -208,9 +215,16 @@ public class DatanodeManager {
 this.dataNodePeerStatsEnabled = conf.getBoolean(
 DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_KEY,
 

[20/50] [abbrv] hadoop git commit: YARN-5300. Exclude generated federation protobuf sources from YARN Javadoc/findbugs build

2017-04-03 Thread subru
YARN-5300. Exclude generated federation protobuf sources from YARN 
Javadoc/findbugs build


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/704b36e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/704b36e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/704b36e3

Branch: refs/heads/YARN-2915
Commit: 704b36e345dfeee8fb22d86ea6c9f9a5d53b63b8
Parents: 5faa949
Author: Subru Krishnan 
Authored: Tue Jul 19 15:08:25 2016 -0700
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml | 3 +++
 hadoop-yarn-project/hadoop-yarn/pom.xml  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/704b36e3/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index c090749..2f5451d 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -21,6 +21,9 @@
 
   
   
+
+  
+  
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/704b36e3/hadoop-yarn-project/hadoop-yarn/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/pom.xml
index c43588a..99b8b5f 100644
--- a/hadoop-yarn-project/hadoop-yarn/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/pom.xml
@@ -75,7 +75,7 @@
 org.apache.maven.plugins
 maven-javadoc-plugin
 
-  
org.apache.hadoop.yarn.proto
+  
org.apache.hadoop.yarn.proto:org.apache.hadoop.yarn.federation.proto
 
   
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[31/50] [abbrv] hadoop git commit: YARN-5410. Bootstrap Router server module. (Giovanni Matteo Fumarola via Subru).

2017-04-03 Thread subru
YARN-5410. Bootstrap Router server module. (Giovanni Matteo Fumarola via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2fcd3475
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2fcd3475
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2fcd3475

Branch: refs/heads/YARN-2915
Commit: 2fcd34751875b4eaffae1a7a8be1539b74ac86cf
Parents: f455a0f
Author: Subru Krishnan 
Authored: Fri Feb 24 12:08:53 2017 -0800
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 hadoop-project/pom.xml  |  6 ++
 .../hadoop-yarn-server-router/pom.xml   | 62 
 .../hadoop/yarn/server/router/Router.java   | 38 
 .../hadoop/yarn/server/router/package-info.java | 20 +++
 .../hadoop/yarn/server/router/TestRouter.java   | 26 
 .../hadoop-yarn/hadoop-yarn-server/pom.xml  |  1 +
 hadoop-yarn-project/pom.xml |  4 ++
 7 files changed, 157 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fcd3475/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
old mode 100644
new mode 100755
index 8214caf..3c3e7cf
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -397,6 +397,12 @@
 
   
 org.apache.hadoop
+hadoop-yarn-server-router
+${project.version}
+  
+
+  
+org.apache.hadoop
  hadoop-mapreduce-client-jobclient
 ${project.version}
 test-jar

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fcd3475/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
new file mode 100644
index 000..25afa5c
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
@@ -0,0 +1,62 @@
+
+
+http://maven.apache.org/POM/4.0.0;
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance;
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+  http://maven.apache.org/xsd/maven-4.0.0.xsd;>
+  
+hadoop-yarn-server
+org.apache.hadoop
+3.0.0-alpha3-SNAPSHOT
+  
+  4.0.0
+  org.apache.hadoop
+  hadoop-yarn-server-router
+  3.0.0-alpha3-SNAPSHOT
+  Apache Hadoop YARN Router
+
+  
+
+${project.parent.parent.basedir}
+  
+
+  
+
+  org.apache.hadoop
+  hadoop-yarn-api
+
+
+
+  org.apache.hadoop
+  hadoop-common
+
+
+
+  org.apache.hadoop
+  hadoop-yarn-common
+
+
+
+  org.apache.hadoop
+  hadoop-yarn-server-common
+
+  
+
+  
+
+
+  
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fcd3475/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java
new file mode 100644
index 000..7be8a59
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.router;
+
+/**
+ * The router is a stateless YARN component which is the entry point to the
+ * cluster. It can be deployed on multiple nodes behind a Virtual IP (VIP) with
+ * a LoadBalancer.

[26/50] [abbrv] hadoop git commit: YARN-5325. Stateless ARMRMProxy policies implementation. (Carlo Curino via Subru).

2017-04-03 Thread subru
YARN-5325. Stateless ARMRMProxy policies implementation. (Carlo Curino via 
Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/baac9273
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/baac9273
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/baac9273

Branch: refs/heads/YARN-2915
Commit: baac927382757d78eee14a8b6812a387170e1f02
Parents: 4d8a5ea
Author: Subru Krishnan 
Authored: Thu Oct 13 17:59:13 2016 -0700
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../AbstractConfigurableFederationPolicy.java   | 155 +
 .../policies/ConfigurableFederationPolicy.java  |   9 +-
 .../FederationPolicyInitializationContext.java  |  37 +-
 ...ionPolicyInitializationContextValidator.java |  28 +-
 .../policies/FederationPolicyManager.java   |  59 +-
 .../amrmproxy/AbstractAMRMProxyPolicy.java  |  47 ++
 .../amrmproxy/BroadcastAMRMProxyPolicy.java |  85 +++
 .../amrmproxy/FederationAMRMProxyPolicy.java|  25 +-
 .../LocalityMulticastAMRMProxyPolicy.java   | 583 +++
 .../policies/amrmproxy/package-info.java|   1 -
 .../policies/dao/WeightedPolicyInfo.java| 180 +++---
 .../federation/policies/dao/package-info.java   |   1 -
 .../policies/exceptions/package-info.java   |   1 -
 .../federation/policies/package-info.java   |   1 -
 .../policies/router/AbstractRouterPolicy.java   |  47 ++
 .../router/BaseWeightedRouterPolicy.java| 150 -
 .../policies/router/FederationRouterPolicy.java |   5 +-
 .../policies/router/LoadBasedRouterPolicy.java  |  36 +-
 .../policies/router/PriorityRouterPolicy.java   |  19 +-
 .../router/UniformRandomRouterPolicy.java   |  28 +-
 .../router/WeightedRandomRouterPolicy.java  |  32 +-
 .../policies/router/package-info.java   |   1 -
 .../resolver/AbstractSubClusterResolver.java|   4 +-
 .../policies/BaseFederationPoliciesTest.java|  28 +-
 ...ionPolicyInitializationContextValidator.java |  25 +-
 .../TestBroadcastAMRMProxyFederationPolicy.java | 112 
 .../TestLocalityMulticastAMRMProxyPolicy.java   | 566 ++
 .../router/TestLoadBasedRouterPolicy.java   |  18 +-
 .../router/TestPriorityRouterPolicy.java|  15 +-
 .../router/TestWeightedRandomRouterPolicy.java  |  35 +-
 .../utils/FederationPoliciesTestUtil.java   |  64 ++
 .../src/test/resources/nodes|   6 +-
 32 files changed, 1950 insertions(+), 453 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/baac9273/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
new file mode 100644
index 000..4cb9bbe
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies;
+
+import java.util.Map;
+
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.NoActiveSubclustersException;
+import 

[07/50] [abbrv] hadoop git commit: HDFS-11560. Expose slow disks via NameNode JMX. Contributed by Hanisha Koneru.

2017-04-03 Thread subru
HDFS-11560. Expose slow disks via NameNode JMX. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73835c73
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73835c73
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73835c73

Branch: refs/heads/YARN-2915
Commit: 73835c73e2d34b3854a71dd29d88c8303d698ac8
Parents: 5485d93
Author: Hanisha Koneru 
Authored: Fri Mar 31 13:50:29 2017 -0700
Committer: Arpit Agarwal 
Committed: Fri Mar 31 13:50:29 2017 -0700

--
 .../server/blockmanagement/DatanodeManager.java |  9 +++
 .../server/blockmanagement/SlowDiskTracker.java |  3 +
 .../hadoop/hdfs/server/namenode/NameNode.java   |  6 ++
 .../server/namenode/NameNodeStatusMXBean.java   |  8 +++
 .../blockmanagement/TestSlowDiskTracker.java| 13 +
 .../namenode/TestNameNodeStatusMXBean.java  | 59 +++-
 6 files changed, 85 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73835c73/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 18135a8..c7bdca9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1907,5 +1907,14 @@ public class DatanodeManager {
   public SlowDiskTracker getSlowDiskTracker() {
 return slowDiskTracker;
   }
+  /**
+   * Retrieve information about slow disks as a JSON.
+   * Returns null if we are not tracking slow disks.
+   * @return a JSON string describing slow disks, or null if slow disks are
+   *         not being tracked.
+   */
+  public String getSlowDisksReport() {
+return slowDiskTracker != null ?
+slowDiskTracker.getSlowDiskReportAsJsonString() : null;
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73835c73/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
index 25920a2..52fce5d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
@@ -256,6 +256,9 @@ public class SlowDiskTracker {
   public String getSlowDiskReportAsJsonString() {
 ObjectMapper objectMapper = new ObjectMapper();
 try {
+  if (slowDisksReport.isEmpty()) {
+return null;
+  }
   return objectMapper.writeValueAsString(slowDisksReport);
 } catch (JsonProcessingException e) {
   // Failed to serialize. Don't log the exception call stack.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73835c73/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index e7841f0..32d268a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -1826,6 +1826,12 @@ public class NameNode extends ReconfigurableBase 
implements
 .getSlowPeersReport();
   }
 
+  @Override //NameNodeStatusMXBean
+  public String getSlowDisksReport() {
+return namesystem.getBlockManager().getDatanodeManager()
+.getSlowDisksReport();
+  }
+
   /**
* Shutdown the NN immediately in an ungraceful way. Used when it would be
* unsafe for the NN to continue operating, e.g. during a failed HA state

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73835c73/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeStatusMXBean.java
--
diff --git 

[06/50] [abbrv] hadoop git commit: HDFS-11603. Improve slow mirror/disk warnings in BlockReceiver.

2017-04-03 Thread subru
HDFS-11603. Improve slow mirror/disk warnings in BlockReceiver.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5485d93b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5485d93b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5485d93b

Branch: refs/heads/YARN-2915
Commit: 5485d93bda3329a7c80767c3723cc6e1a9233dbc
Parents: 318bfb0
Author: Arpit Agarwal 
Authored: Fri Mar 31 12:10:20 2017 -0700
Committer: Arpit Agarwal 
Committed: Fri Mar 31 12:10:27 2017 -0700

--
 .../hadoop/hdfs/protocol/DatanodeInfo.java  |  1 +
 .../hdfs/server/datanode/BlockReceiver.java | 61 +++-
 2 files changed, 47 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5485d93b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index e1698c9..0a8c915 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -55,6 +55,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   private String softwareVersion;
   private List dependentHostNames = new LinkedList<>();
   private String upgradeDomain;
+  public static final DatanodeInfo[] EMPTY_ARRAY = {};
 
   // Datanode administrative states
   public enum AdminStates {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5485d93b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index bb6bd55..00109e0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -100,6 +100,7 @@ class BlockReceiver implements Closeable {
   private DataTransferThrottler throttler;
   private ReplicaOutputStreams streams;
   private DatanodeInfo srcDataNode = null;
+  private DatanodeInfo[] downstreamDNs = DatanodeInfo.EMPTY_ARRAY;
   private final DataNode datanode;
   volatile private boolean mirrorError;
 
@@ -424,10 +425,10 @@ class BlockReceiver implements Closeable {
   }
 }
 long duration = Time.monotonicNow() - begin;
-if (duration > datanodeSlowLogThresholdMs) {
+if (duration > datanodeSlowLogThresholdMs && LOG.isWarnEnabled()) {
   LOG.warn("Slow flushOrSync took " + duration + "ms (threshold="
   + datanodeSlowLogThresholdMs + "ms), isSync:" + isSync + ", 
flushTotalNanos="
-  + flushTotalNanos + "ns");
+  + flushTotalNanos + "ns, volume=" + getVolumeBaseUri());
 }
   }
 
@@ -578,9 +579,10 @@ class BlockReceiver implements Closeable {
 mirrorAddr,
 duration);
 trackSendPacketToLastNodeInPipeline(duration);
-if (duration > datanodeSlowLogThresholdMs) {
+if (duration > datanodeSlowLogThresholdMs && LOG.isWarnEnabled()) {
   LOG.warn("Slow BlockReceiver write packet to mirror took " + duration
-  + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms)");
+  + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms), "
+  + "downstream DNs=" + Arrays.toString(downstreamDNs));
 }
   } catch (IOException e) {
 handleMirrorOutError(e);
@@ -711,9 +713,10 @@ class BlockReceiver implements Closeable {
   streams.writeDataToDisk(dataBuf.array(),
   startByteToDisk, numBytesToDisk);
   long duration = Time.monotonicNow() - begin;
-  if (duration > datanodeSlowLogThresholdMs) {
+  if (duration > datanodeSlowLogThresholdMs && LOG.isWarnEnabled()) {
 LOG.warn("Slow BlockReceiver write data to disk cost:" + duration
-+ "ms (threshold=" + datanodeSlowLogThresholdMs + "ms)");
++ "ms (threshold=" + datanodeSlowLogThresholdMs + "ms), "
++ "volume=" + getVolumeBaseUri());
   }
 
   if (duration > maxWriteToDiskMs) {
@@ -902,9 +905,10 @@ class BlockReceiver implements Closeable {
 }
   

[16/50] [abbrv] hadoop git commit: YARN-6370. Properly handle rack requests for non-active subclusters in LocalityMulticastAMRMProxyPolicy. (Contributed by Botong Huang via curino).

2017-04-03 Thread subru
YARN-6370. Properly handle rack requests for non-active subclusters in 
LocalityMulticastAMRMProxyPolicy. (Contributed by Botong Huang via curino).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e34518b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e34518b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e34518b

Branch: refs/heads/YARN-2915
Commit: 6e34518b9f8ee1a5311945399229ad57b838aa93
Parents: 4b94276
Author: Carlo Curino 
Authored: Wed Mar 22 13:53:47 2017 -0700
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../LocalityMulticastAMRMProxyPolicy.java   |  6 ++-
 .../TestLocalityMulticastAMRMProxyPolicy.java   | 53 +---
 2 files changed, 41 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e34518b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
index 6f97a51..454962f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
@@ -261,7 +261,11 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
 
   // If home-subcluster is not active, ignore node/rack request
   if (bookkeeper.isActiveAndEnabled(homeSubcluster)) {
-bookkeeper.addLocalizedNodeRR(homeSubcluster, rr);
+if (targetIds != null && targetIds.size() > 0) {
+  bookkeeper.addRackRR(homeSubcluster, rr);
+} else {
+  bookkeeper.addLocalizedNodeRR(homeSubcluster, rr);
+}
   } else {
 if (LOG.isDebugEnabled()) {
   LOG.debug("The homeSubCluster (" + homeSubcluster + ") we are "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e34518b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestLocalityMulticastAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestLocalityMulticastAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestLocalityMulticastAMRMProxyPolicy.java
index 5b3cf74..6e3a2f1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestLocalityMulticastAMRMProxyPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestLocalityMulticastAMRMProxyPolicy.java
@@ -339,19 +339,20 @@ public class TestLocalityMulticastAMRMProxyPolicy
 validateSplit(response, resourceRequests);
 prettyPrintRequests(response);
 
-// we expect 4 entry for home subcluster (3 for request-id 4, and a part
-// of the broadcast of request-id 2
-checkExpectedAllocation(response, getHomeSubCluster().getId(), 4, 23);
+// we expect 7 entries for home subcluster (2 for request-id 4, 3 for
+// request-id 5, and a part of the broadcast of request-id 2
+checkExpectedAllocation(response, getHomeSubCluster().getId(), 7, 29);
 
-// for subcluster0 we expect 3 entry from request-id 0, and 3 from
-// request-id 3, as well as part of the request-id 2 broadast
-checkExpectedAllocation(response, "subcluster0", 7, 26);
+// for subcluster0 we expect 10 entries, 3 from request-id 0, and 3 from
+// request-id 3, 3 entries from request-id 5, as well as part of the
+// request-id 2 broadcast
+checkExpectedAllocation(response, "subcluster0", 10, 32);
 
-// we expect 5 entry for subcluster1 (4 from 

[33/50] [abbrv] hadoop git commit: YARN-3673. Create a FailoverProxy for Federation services. Contributed by Subru Krishnan

2017-04-03 Thread subru
YARN-3673. Create a FailoverProxy for Federation services. Contributed by Subru 
Krishnan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/683b08e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/683b08e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/683b08e0

Branch: refs/heads/YARN-2915
Commit: 683b08e0671cfec66375730b9e24132fa2a545a7
Parents: 2c62eb3
Author: Jian He 
Authored: Mon Aug 22 14:43:07 2016 +0800
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../org/apache/hadoop/yarn/conf/HAUtil.java |  30 ++-
 .../hadoop/yarn/conf/YarnConfiguration.java |  10 +
 .../yarn/conf/TestYarnConfigurationFields.java  |   4 +
 .../TestFederationRMFailoverProxyProvider.java  | 154 ++
 .../hadoop/yarn/client/ClientRMProxy.java   |   4 +-
 .../org/apache/hadoop/yarn/client/RMProxy.java  |  23 +-
 .../src/main/resources/yarn-default.xml |   7 +
 .../hadoop-yarn-server-common/pom.xml   |   2 -
 .../hadoop/yarn/server/api/ServerRMProxy.java   |   4 +-
 .../failover/FederationProxyProviderUtil.java   | 163 ++
 .../FederationRMFailoverProxyProvider.java  | 211 +++
 .../federation/failover/package-info.java   |  17 ++
 12 files changed, 613 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/683b08e0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
index 133b377..528b642 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
@@ -18,7 +18,9 @@
 
 package org.apache.hadoop.yarn.conf;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.net.InetSocketAddress;
+import java.util.Collection;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -27,8 +29,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 
-import java.net.InetSocketAddress;
-import java.util.Collection;
+import com.google.common.annotations.VisibleForTesting;
 
 @InterfaceAudience.Private
 public class HAUtil {
@@ -45,6 +46,29 @@ public class HAUtil {
   }
 
   /**
+   * Returns true if Federation is configured.
+   *
+   * @param conf Configuration
+   * @return true if federation is configured in the configuration; else false.
+   */
+  public static boolean isFederationEnabled(Configuration conf) {
+return conf.getBoolean(YarnConfiguration.FEDERATION_ENABLED,
+YarnConfiguration.DEFAULT_FEDERATION_ENABLED);
+  }
+
+  /**
+   * Returns true if RM failover is enabled in a Federation setting.
+   *
+   * @param conf Configuration
+   * @return if RM failover is enabled in conjunction with Federation in the
+   * configuration; else false.
+   */
+  public static boolean isFederationFailoverEnabled(Configuration conf) {
+return conf.getBoolean(YarnConfiguration.FEDERATION_FAILOVER_ENABLED,
+YarnConfiguration.DEFAULT_FEDERATION_FAILOVER_ENABLED);
+  }
+
+  /**
* Returns true if Resource Manager HA is configured.
*
* @param conf Configuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/683b08e0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 4db637e..570ec4a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2534,6 +2534,16 @@ public class YarnConfiguration extends Configuration {
 
   public static final String FEDERATION_PREFIX = YARN_PREFIX + "federation.";
 
+  public static final String FEDERATION_ENABLED = FEDERATION_PREFIX + 
"enabled";
+  public static final boolean DEFAULT_FEDERATION_ENABLED = false;
+
+  public 

[27/50] [abbrv] hadoop git commit: YARN-6093. Minor bugs with AMRMtoken renewal and state store availability when using FederationRMFailoverProxyProvider during RM failover. (Botong Huang via Subru).

2017-04-03 Thread subru
YARN-6093. Minor bugs with AMRMtoken renewal and state store availability when 
using FederationRMFailoverProxyProvider during RM failover. (Botong Huang via 
Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f455a0f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f455a0f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f455a0f3

Branch: refs/heads/YARN-2915
Commit: f455a0f3e4bf507b85ffb023399e8d35216383e9
Parents: 8737d15
Author: Subru Krishnan 
Authored: Wed Feb 22 13:16:22 2017 -0800
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../TestFederationRMFailoverProxyProvider.java  | 69 +++
 .../FederationRMFailoverProxyProvider.java  | 88 +++-
 2 files changed, 118 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f455a0f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
index fa3523c..e3f9155 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java
@@ -19,17 +19,21 @@ package org.apache.hadoop.yarn.client;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.io.retry.FailoverProxyProvider.ProxyInfo;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
 import 
org.apache.hadoop.yarn.server.federation.failover.FederationProxyProviderUtil;
+import 
org.apache.hadoop.yarn.server.federation.failover.FederationRMFailoverProxyProvider;
 import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
 import 
org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
@@ -44,6 +48,10 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
 /**
  * Unit tests for FederationRMFailoverProxyProvider.
  */
@@ -151,4 +159,65 @@ public class TestFederationRMFailoverProxyProvider {
 }
   }
 
+  @SuppressWarnings({ "rawtypes", "unchecked" })
+  @Test
+  public void testUGIForProxyCreation()
+  throws IOException, InterruptedException {
+conf.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1");
+
+UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+UserGroupInformation user1 =
+UserGroupInformation.createProxyUser("user1", currentUser);
+UserGroupInformation user2 =
+UserGroupInformation.createProxyUser("user2", currentUser);
+
+final TestableFederationRMFailoverProxyProvider provider =
+new TestableFederationRMFailoverProxyProvider();
+
+InetSocketAddress addr =
+conf.getSocketAddr(YarnConfiguration.RM_SCHEDULER_ADDRESS,
+YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
+YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
+final ClientRMProxy rmProxy = mock(ClientRMProxy.class);
+when(rmProxy.getRMAddress(any(YarnConfiguration.class), any(Class.class)))
+.thenReturn(addr);
+
+user1.doAs(new PrivilegedExceptionAction() {
+  @Override
+  public Object run() {
+provider.init(conf, rmProxy, ApplicationMasterProtocol.class);
+return null;
+  }
+});
+
+final ProxyInfo currentProxy = provider.getProxy();
+Assert.assertEquals("user1", provider.getLastProxyUGI().getUserName());
+
+user2.doAs(new PrivilegedExceptionAction() {
+  @Override
+  

[39/50] [abbrv] hadoop git commit: YARN-5307. Federation Application State Store internal APIs

2017-04-03 Thread subru
YARN-5307. Federation Application State Store internal APIs


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9bdcc8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9bdcc8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9bdcc8b

Branch: refs/heads/YARN-2915
Commit: a9bdcc8b93c6cf3a7312efbac6bf9d4109656796
Parents: caf8814
Author: Subru Krishnan 
Authored: Fri Aug 5 11:52:44 2016 -0700
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 ...ederationApplicationHomeSubClusterStore.java | 126 
 .../AddApplicationHomeSubClusterRequest.java|  72 +++
 .../AddApplicationHomeSubClusterResponse.java   |  44 +
 .../records/ApplicationHomeSubCluster.java  | 124 
 .../DeleteApplicationHomeSubClusterRequest.java |  65 +++
 ...DeleteApplicationHomeSubClusterResponse.java |  43 +
 .../GetApplicationHomeSubClusterRequest.java|  64 +++
 .../GetApplicationHomeSubClusterResponse.java   |  73 +++
 .../GetApplicationsHomeSubClusterRequest.java   |  40 
 .../GetApplicationsHomeSubClusterResponse.java  |  75 
 .../UpdateApplicationHomeSubClusterRequest.java |  74 
 ...UpdateApplicationHomeSubClusterResponse.java |  43 +
 ...dApplicationHomeSubClusterRequestPBImpl.java | 132 +
 ...ApplicationHomeSubClusterResponsePBImpl.java |  78 
 .../pb/ApplicationHomeSubClusterPBImpl.java | 167 
 ...eApplicationHomeSubClusterRequestPBImpl.java | 130 +
 ...ApplicationHomeSubClusterResponsePBImpl.java |  78 
 ...tApplicationHomeSubClusterRequestPBImpl.java | 135 +
 ...ApplicationHomeSubClusterResponsePBImpl.java | 132 +
 ...ApplicationsHomeSubClusterRequestPBImpl.java |  78 
 ...pplicationsHomeSubClusterResponsePBImpl.java | 190 +++
 .../pb/GetSubClustersInfoResponsePBImpl.java|   6 +-
 ...eApplicationHomeSubClusterRequestPBImpl.java | 132 +
 ...ApplicationHomeSubClusterResponsePBImpl.java |  78 
 .../proto/yarn_server_federation_protos.proto   |  45 -
 .../records/TestFederationProtocolRecords.java  |  81 
 26 files changed, 2301 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9bdcc8b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
new file mode 100644
index 000..217ee2e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterResponse;
+import 

[41/50] [abbrv] hadoop git commit: YARN-5519. Add SubClusterId in AddApplicationHomeSubClusterResponse for Router Failover. (Ellen Hui via Subru)

2017-04-03 Thread subru
YARN-5519. Add SubClusterId in AddApplicationHomeSubClusterResponse for Router 
Failover. (Ellen Hui via Subru)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39544851
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39544851
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39544851

Branch: refs/heads/YARN-2915
Commit: 39544851ee8f1098c2235f65abb19bdd8089d15b
Parents: 49a6c58
Author: Subru Krishnan 
Authored: Mon Aug 15 14:47:02 2016 -0700
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 ...ederationApplicationHomeSubClusterStore.java | 21 +++---
 .../store/impl/MemoryFederationStateStore.java  | 22 +++---
 .../AddApplicationHomeSubClusterResponse.java   | 29 ++--
 ...ApplicationHomeSubClusterResponsePBImpl.java | 39 +++
 .../proto/yarn_server_federation_protos.proto   |  1 +
 .../impl/FederationStateStoreBaseTest.java  | 71 +---
 6 files changed, 120 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39544851/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
index 22bb88a..ace2457 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
@@ -51,15 +51,20 @@ public interface FederationApplicationHomeSubClusterStore {
   /**
* Register the home {@code SubClusterId} of the newly submitted
* {@code ApplicationId}. Currently response is empty if the operation was
-   * successful, if not an exception reporting reason for a failure.
+   * successful, if not an exception reporting reason for a failure. If a
+   * mapping for the application already existed, the {@code SubClusterId} in
+   * this response will return the existing mapping which might be different
+   * from that in the {@code AddApplicationHomeSubClusterRequest}.
*
* @param request the request to register a new application with its home
*  sub-cluster
-   * @return empty on successful registration of the application in the
-   * StateStore, if not an exception reporting reason for a failure
+   * @return upon successful registration of the application in the StateStore,
+   * {@code AddApplicationHomeSubClusterRequest} containing the home
+   * sub-cluster of the application. Otherwise, an exception reporting
+   * reason for a failure
* @throws YarnException if the request is invalid/fails
*/
-  AddApplicationHomeSubClusterResponse addApplicationHomeSubClusterMap(
+  AddApplicationHomeSubClusterResponse addApplicationHomeSubCluster(
   AddApplicationHomeSubClusterRequest request) throws YarnException;
 
   /**
@@ -73,7 +78,7 @@ public interface FederationApplicationHomeSubClusterStore {
* not an exception reporting reason for a failure
* @throws YarnException if the request is invalid/fails
*/
-  UpdateApplicationHomeSubClusterResponse updateApplicationHomeSubClusterMap(
+  UpdateApplicationHomeSubClusterResponse updateApplicationHomeSubCluster(
   UpdateApplicationHomeSubClusterRequest request) throws YarnException;
 
   /**
@@ -85,7 +90,7 @@ public interface FederationApplicationHomeSubClusterStore {
* subcluster
* @throws YarnException if the request is invalid/fails
*/
-  GetApplicationHomeSubClusterResponse getApplicationHomeSubClusterMap(
+  GetApplicationHomeSubClusterResponse getApplicationHomeSubCluster(
   GetApplicationHomeSubClusterRequest request) throws YarnException;
 
   /**
@@ -96,7 +101,7 @@ public interface FederationApplicationHomeSubClusterStore {
* @return the mapping of all submitted applications to their home sub-clusters
* @throws YarnException if the request is invalid/fails
*/
-  GetApplicationsHomeSubClusterResponse getApplicationsHomeSubClusterMap(
+  GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster(
   

[17/50] [abbrv] hadoop git commit: YARN-6247. Share a single instance of SubClusterResolver instead of instantiating one per AM. (Botong Huang via Subru)

2017-04-03 Thread subru
YARN-6247. Share a single instance of SubClusterResolver instead of 
instantiating one per AM. (Botong Huang via Subru)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a840ae5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a840ae5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a840ae5

Branch: refs/heads/YARN-2915
Commit: 3a840ae56d5b9aba08c0338ab5aabb6c97b878cb
Parents: eae5ad6
Author: Subru Krishnan 
Authored: Thu Mar 2 18:54:53 2017 -0800
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  6 +++
 .../src/main/resources/yarn-default.xml |  7 +++
 .../resolver/AbstractSubClusterResolver.java|  6 +--
 .../federation/resolver/SubClusterResolver.java |  4 +-
 .../utils/FederationStateStoreFacade.java   | 48 +---
 5 files changed, 59 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a840ae5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ff19554..856beb9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2567,6 +2567,12 @@ public class YarnConfiguration extends Configuration {
   public static final String FEDERATION_MACHINE_LIST =
   FEDERATION_PREFIX + "machine-list";
 
+  public static final String FEDERATION_CLUSTER_RESOLVER_CLASS =
+  FEDERATION_PREFIX + "subcluster-resolver.class";
+  public static final String DEFAULT_FEDERATION_CLUSTER_RESOLVER_CLASS =
+  "org.apache.hadoop.yarn.server.federation.resolver."
+  + "DefaultSubClusterResolverImpl";
+
   public static final String DEFAULT_FEDERATION_POLICY_KEY = "*";
 
   public static final String FEDERATION_POLICY_MANAGER = FEDERATION_PREFIX

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a840ae5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 29a9dec..e1b3700 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2663,6 +2663,13 @@
 
 yarn.federation.machine-list
   
+  
+
+  Class name for SubClusterResolver
+
+yarn.federation.subcluster-resolver.class
+
org.apache.hadoop.yarn.server.federation.resolver.DefaultSubClusterResolverImpl
+  
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a840ae5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/AbstractSubClusterResolver.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/AbstractSubClusterResolver.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/AbstractSubClusterResolver.java
index 6b4f60c..bccff2d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/AbstractSubClusterResolver.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/AbstractSubClusterResolver.java
@@ -21,8 +21,8 @@ package org.apache.hadoop.yarn.server.federation.resolver;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
 
-import java.util.HashMap;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.Map;
 
 /**
@@ -31,9 +31,9 @@ import java.util.Map;
  */
 public abstract class AbstractSubClusterResolver implements SubClusterResolver 
{
   private Map

[47/50] [abbrv] hadoop git commit: YARN-5391. PolicyManager to tie together Router/AMRM Federation policies. (Carlo Curino via Subru).

2017-04-03 Thread subru
YARN-5391. PolicyManager to tie together Router/AMRM Federation policies. 
(Carlo Curino via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a916b340
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a916b340
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a916b340

Branch: refs/heads/YARN-2915
Commit: a916b34088f6c83e21c1e12ac4116c72cf4c8c17
Parents: baac927
Author: Subru Krishnan 
Authored: Tue Nov 1 19:54:18 2016 -0700
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../policies/AbstractPolicyManager.java | 175 +++
 .../FederationPolicyInitializationContext.java  |   3 +-
 .../policies/UniformBroadcastPolicyManager.java |  56 ++
 .../policies/WeightedLocalityPolicyManager.java |  67 +++
 .../records/SubClusterPolicyConfiguration.java  |  13 ++
 .../policies/BasePolicyManagerTest.java | 108 
 ...ionPolicyInitializationContextValidator.java |   5 +-
 .../TestUniformBroadcastPolicyManager.java  |  40 +
 .../TestWeightedLocalityPolicyManager.java  |  79 +
 .../utils/FederationPoliciesTestUtil.java   |   2 +-
 10 files changed, 545 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a916b340/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractPolicyManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractPolicyManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractPolicyManager.java
new file mode 100644
index 000..e77f2e3
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractPolicyManager.java
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies;
+
+import 
org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import 
org.apache.hadoop.yarn.server.federation.policies.router.FederationRouterPolicy;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class provides basic implementation for common methods that multiple
+ * policies will need to implement.
+ */
+public abstract class AbstractPolicyManager implements
+FederationPolicyManager {
+
+  private String queue;
+  @SuppressWarnings("checkstyle:visibilitymodifier")
+  protected Class routerFederationPolicy;
+  @SuppressWarnings("checkstyle:visibilitymodifier")
+  protected Class amrmProxyFederationPolicy;
+
+  public static final Logger LOG =
+  LoggerFactory.getLogger(AbstractPolicyManager.class);
+  /**
+   * This default implementation validates the
+   * {@link FederationPolicyInitializationContext},
+   * then checks whether it needs to reinstantiate the class (null or
+   * mismatching type), and reinitialize the policy.
+   *
+   * @param federationPolicyContext the current context
+   * @param oldInstance the existing (possibly null) instance.
+   *
+   * @return a valid and fully reinitialized {@link FederationAMRMProxyPolicy}
+   * instance
+   *
+   * @throws FederationPolicyInitializationException if the reinitialization is
+   * not valid, and ensure
+   * previous state is 
preserved
+   */
+  public FederationAMRMProxyPolicy 

[25/50] [abbrv] hadoop git commit: YARN-5325. Stateless ARMRMProxy policies implementation. (Carlo Curino via Subru).

2017-04-03 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/baac9273/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
index e57709f..5de749f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
@@ -17,8 +17,8 @@
 
 package org.apache.hadoop.yarn.server.federation.policies.router;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import java.util.Map;
+
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
@@ -30,34 +30,27 @@ import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 
-import java.util.Map;
-
 /**
 * This implements a simple load-balancing policy. The policy "weights" are
 * binary 0/1 values that enable/disable each sub-cluster, and the policy picks
 * the sub-cluster with the least load to forward this application.
 */
-public class LoadBasedRouterPolicy
-extends BaseWeightedRouterPolicy {
-
-  private static final Log LOG =
-  LogFactory.getLog(LoadBasedRouterPolicy.class);
+public class LoadBasedRouterPolicy extends AbstractRouterPolicy {
 
   @Override
-  public void reinitialize(FederationPolicyInitializationContext
-  federationPolicyContext)
+  public void reinitialize(FederationPolicyInitializationContext policyContext)
   throws FederationPolicyInitializationException {
 
 // remember old policyInfo
 WeightedPolicyInfo tempPolicy = getPolicyInfo();
 
-//attempt new initialization
-super.reinitialize(federationPolicyContext);
+// attempt new initialization
+super.reinitialize(policyContext);
 
-//check extra constraints
+// check extra constraints
 for (Float weight : getPolicyInfo().getRouterPolicyWeights().values()) {
   if (weight != 0 && weight != 1) {
-//reset to old policyInfo if check fails
+// reset to old policyInfo if check fails
 setPolicyInfo(tempPolicy);
 throw new FederationPolicyInitializationException(
 this.getClass().getCanonicalName()
@@ -69,18 +62,16 @@ public class LoadBasedRouterPolicy
 
   @Override
   public SubClusterId getHomeSubcluster(
-  ApplicationSubmissionContext appSubmissionContext)
-  throws YarnException {
+  ApplicationSubmissionContext appSubmissionContext) throws YarnException {
 
 Map activeSubclusters =
 getActiveSubclusters();
 
-Map weights = getPolicyInfo()
-.getRouterPolicyWeights();
+Map weights =
+getPolicyInfo().getRouterPolicyWeights();
 SubClusterIdInfo chosen = null;
 long currBestMem = -1;
-for (Map.Entry entry :
-activeSubclusters
+for (Map.Entry entry : activeSubclusters
 .entrySet()) {
   SubClusterIdInfo id = new SubClusterIdInfo(entry.getKey());
   if (weights.containsKey(id) && weights.get(id) > 0) {
@@ -95,8 +86,7 @@ public class LoadBasedRouterPolicy
 return chosen.toId();
   }
 
-  private long getAvailableMemory(SubClusterInfo value)
-  throws YarnException {
+  private long getAvailableMemory(SubClusterInfo value) throws YarnException {
 try {
   long mem = -1;
   JSONObject obj = new JSONObject(value.getCapability());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/baac9273/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java
 

[15/50] [abbrv] hadoop git commit: HADOOP-14268. Fix markdown itemization in hadoop-aws documents. Contributed by Akira Ajisaka

2017-04-03 Thread subru
HADOOP-14268. Fix markdown itemization in hadoop-aws documents. Contributed by 
Akira Ajisaka


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5faa949b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5faa949b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5faa949b

Branch: refs/heads/YARN-2915
Commit: 5faa949b782be48ef400d2eb1695f420455de764
Parents: bbd6847
Author: Mingliang Liu 
Authored: Mon Apr 3 11:07:14 2017 -0700
Committer: Mingliang Liu 
Committed: Mon Apr 3 11:07:14 2017 -0700

--
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md | 2 ++
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md   | 1 +
 2 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5faa949b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 82c3588..18c0ceb 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -41,6 +41,7 @@ The specifics of using these filesystems are documented in 
this section.
 
 
 See also:
+
 * [Testing](testing.html)
 * [Troubleshooting S3a](troubleshooting_s3a.html)
 
@@ -99,6 +100,7 @@ access to the data. Anyone with the credentials can not only 
read your datasets
 —they can delete them.
 
 Do not inadvertently share these credentials through means such as
+
 1. Checking in to SCM any configuration files containing the secrets.
 1. Logging them to a console, as they invariably end up being seen.
 1. Defining filesystem URIs with the credentials in the URL, such as

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5faa949b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
index 79551a3..39ca8f4 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
@@ -643,6 +643,7 @@ located.
 
 New tests are always welcome. Bear in mind that we need to keep costs
 and test time down, which is done by
+
 * Not duplicating tests.
 * Being efficient in your use of Hadoop API calls.
 * Isolating large/slow tests into the "scale" test group.


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[28/50] [abbrv] hadoop git commit: YARN-5612. Return SubClusterId in FederationStateStoreFacade#addApplicationHomeSubCluster for Router Failover. (Giovanni Matteo Fumarola via Subru).

2017-04-03 Thread subru
YARN-5612. Return SubClusterId in 
FederationStateStoreFacade#addApplicationHomeSubCluster for Router Failover. 
(Giovanni Matteo Fumarola via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72294e3e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72294e3e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72294e3e

Branch: refs/heads/YARN-2915
Commit: 72294e3e56278269c27b317e92ab0e75ea14e0fa
Parents: 81c9448
Author: Subru Krishnan 
Authored: Thu Sep 1 13:55:54 2016 -0700
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../utils/FederationStateStoreFacade.java   | 11 ---
 .../utils/TestFederationStateStoreFacade.java   | 30 
 2 files changed, 37 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72294e3e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
index f1c8218..66a0b60 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
 import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse;
 import 
org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
 import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest;
 import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterResponse;
@@ -298,13 +299,15 @@ public final class FederationStateStoreFacade {
*
* @param appHomeSubCluster the mapping of the application to it's home
*  sub-cluster
+   * @return the stored Subcluster from StateStore
* @throws YarnException if the call to the state store is unsuccessful
*/
-  public void addApplicationHomeSubCluster(
+  public SubClusterId addApplicationHomeSubCluster(
   ApplicationHomeSubCluster appHomeSubCluster) throws YarnException {
-stateStore.addApplicationHomeSubCluster(
-AddApplicationHomeSubClusterRequest.newInstance(appHomeSubCluster));
-return;
+AddApplicationHomeSubClusterResponse response =
+stateStore.addApplicationHomeSubCluster(
+
AddApplicationHomeSubClusterRequest.newInstance(appHomeSubCluster));
+return response.getHomeSubCluster();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72294e3e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java
index 53f4f84..d46bef0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
 import 
org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore;
+import 

[24/50] [abbrv] hadoop git commit: YARN-5390. Federation Subcluster Resolver. Contributed by Ellen Hui.

2017-04-03 Thread subru
YARN-5390. Federation Subcluster Resolver. Contributed by Ellen Hui.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/caf88143
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/caf88143
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/caf88143

Branch: refs/heads/YARN-2915
Commit: caf88143697e107a62cc1578381db351d2d002d9
Parents: f3a55ba
Author: Subru Krishnan 
Authored: Thu Aug 4 15:58:31 2016 -0700
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   8 +
 .../src/main/resources/yarn-default.xml |   7 +
 .../hadoop-yarn-server-common/pom.xml   |  10 +
 .../resolver/AbstractSubClusterResolver.java|  67 +++
 .../resolver/DefaultSubClusterResolverImpl.java | 164 +
 .../federation/resolver/SubClusterResolver.java |  58 ++
 .../federation/resolver/package-info.java   |  17 ++
 .../resolver/TestDefaultSubClusterResolver.java | 184 +++
 .../src/test/resources/nodes|   4 +
 .../src/test/resources/nodes-malformed  |   3 +
 10 files changed, 522 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/caf88143/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 81cb8c6..504c9e1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2529,6 +2529,14 @@ public class YarnConfiguration extends Configuration {
   public static final int DEFAULT_SHARED_CACHE_NM_UPLOADER_THREAD_COUNT = 20;
 
   
+  // Federation Configs
+  
+
+  public static final String FEDERATION_PREFIX = YARN_PREFIX + "federation.";
+  public static final String FEDERATION_MACHINE_LIST =
+  FEDERATION_PREFIX + "machine-list";
+
+  
   // Other Configs
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/caf88143/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index bdd4de5..fc478bd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2651,6 +2651,13 @@
   
 
   
+
+  Machine list file to be loaded by the FederationSubCluster Resolver
+
+yarn.federation.machine-list
+  
+
+  
 The interval that the yarn client library uses to poll the
 completion status of the asynchronous API of application client protocol.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/caf88143/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index 9cc3cae..6d2fbef 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -177,6 +177,16 @@
   
 
   
+  
+org.apache.rat
+apache-rat-plugin
+
+  
+src/test/resources/nodes
+src/test/resources/nodes-malformed
+  
+
+  
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/caf88143/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/AbstractSubClusterResolver.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/AbstractSubClusterResolver.java
 

[30/50] [abbrv] hadoop git commit: YARN-5634. Simplify initialization/use of RouterPolicy via a RouterPolicyFacade. (Carlo Curino via Subru).

2017-04-03 Thread subru
YARN-5634. Simplify initialization/use of RouterPolicy via a 
RouterPolicyFacade. (Carlo Curino via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b081b1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b081b1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b081b1f

Branch: refs/heads/YARN-2915
Commit: 8b081b1f3aed8952079a21a2d4421c81c1d1a5bb
Parents: a916b34
Author: Subru Krishnan 
Authored: Wed Nov 16 19:39:25 2016 -0800
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../dev-support/findbugs-exclude.xml|   9 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 +
 .../yarn/conf/TestYarnConfigurationFields.java  |  12 +
 ...ionPolicyInitializationContextValidator.java |   2 +-
 .../PriorityBroadcastPolicyManager.java |  66 +
 .../federation/policies/RouterPolicyFacade.java | 266 +++
 .../policies/dao/WeightedPolicyInfo.java|   6 +-
 .../utils/FederationStateStoreFacade.java   |  16 +-
 .../TestPriorityBroadcastPolicyManager.java |  72 +
 .../policies/TestRouterPolicyFacade.java| 220 +++
 .../utils/FederationStateStoreTestUtil.java |  22 +-
 11 files changed, 693 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b081b1f/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index bbd03a9..ee51094 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -310,6 +310,15 @@
 
   
 
+  
+
+
+  
+  
+
+
+  
+
   
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b081b1f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 1cab595..67a6f3c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2567,6 +2567,19 @@ public class YarnConfiguration extends Configuration {
   public static final String FEDERATION_MACHINE_LIST =
   FEDERATION_PREFIX + "machine-list";
 
+  public static final String DEFAULT_FEDERATION_POLICY_KEY = "*";
+
+  public static final String FEDERATION_POLICY_MANAGER = FEDERATION_PREFIX
+  + "policy-manager";
+
+  public static final String DEFAULT_FEDERATION_POLICY_MANAGER = "org.apache"
+  + 
".hadoop.yarn.server.federation.policies.UniformBroadcastPolicyManager";
+
+  public static final String FEDERATION_POLICY_MANAGER_PARAMS =
+  FEDERATION_PREFIX + "policy-manager-params";
+
+  public static final String DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS = "";
+
   
   // Other Configs
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b081b1f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 3f3a06c..6e33c0a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -78,6 +78,18 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 configurationPropsToSkipCompare
 .add(YarnConfiguration.RM_EPOCH);
 
+// Federation policies configs to be ignored
+configurationPropsToSkipCompare
+.add(YarnConfiguration.FEDERATION_POLICY_MANAGER);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.FEDERATION_POLICY_MANAGER_PARAMS);
+configurationPropsToSkipCompare
+

[13/50] [abbrv] hadoop git commit: HDFS-11515. -du throws ConcurrentModificationException. Contributed by Istvan Fajth, Wei-Chiu Chuang.

2017-04-03 Thread subru
HDFS-11515. -du throws ConcurrentModificationException. Contributed by Istvan 
Fajth, Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc7aff7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc7aff7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc7aff7c

Branch: refs/heads/YARN-2915
Commit: bc7aff7cec07bbc3fed63a00c8f1584c34670998
Parents: 845529b
Author: Wei-Chiu Chuang 
Authored: Mon Apr 3 07:32:27 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon Apr 3 07:35:09 2017 -0700

--
 .../snapshot/DirectoryWithSnapshotFeature.java  |  5 ++
 .../snapshot/TestRenameWithSnapshots.java   |  6 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 75 
 3 files changed, 84 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc7aff7c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 9addbfa..9840679 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -633,6 +633,11 @@ public class DirectoryWithSnapshotFeature implements 
INode.Feature {
 for(DirectoryDiff d : diffs) {
   for(INode deletedNode : d.getChildrenDiff().getList(ListType.DELETED)) {
 context.reportDeletedSnapshottedNode(deletedNode);
+if (deletedNode.isDirectory()){
+  DirectoryWithSnapshotFeature sf =
+  deletedNode.asDirectory().getDirectoryWithSnapshotFeature();
+  sf.computeContentSummary4Snapshot(context);
+}
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc7aff7c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index d1b3aa6..d06c384 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -26,6 +26,7 @@ import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.spy;
+import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
 
 import java.io.File;
 import java.io.IOException;
@@ -2429,7 +2430,7 @@ public class TestRenameWithSnapshots {
*/
   @Test (timeout=30)
   public void testDu() throws Exception {
-File tempFile = File.createTempFile("testDu-", ".tmp");
+File tempFile = File.createTempFile("testDu-", ".tmp", getTestDir());
 tempFile.deleteOnExit();
 
 final FileSystem localfs = FileSystem.getLocal(conf);
@@ -2539,7 +2540,8 @@ public class TestRenameWithSnapshots {
*/
   @Test (timeout=30)
   public void testDuMultipleDirs() throws Exception {
-File tempFile = File.createTempFile("testDuMultipleDirs-", "" + ".tmp");
+File tempFile = File.createTempFile("testDuMultipleDirs-", ".tmp",
+getTestDir());
 tempFile.deleteOnExit();
 
 final FileSystem localfs = FileSystem.getLocal(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc7aff7c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index ca53788..7926e44 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ 

[36/50] [abbrv] hadoop git commit: YARN-6190. Validation and synchronization fixes in LocalityMulticastAMRMProxyPolicy. (Botong Huang via curino)

2017-04-03 Thread subru
YARN-6190. Validation and synchronization fixes in 
LocalityMulticastAMRMProxyPolicy. (Botong Huang via curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eae5ad68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eae5ad68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eae5ad68

Branch: refs/heads/YARN-2915
Commit: eae5ad68643f6a0d887685f7f5a7502dea77c235
Parents: 2fcd347
Author: Carlo Curino 
Authored: Tue Feb 28 17:04:20 2017 -0800
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../LocalityMulticastAMRMProxyPolicy.java   | 63 +---
 .../TestLocalityMulticastAMRMProxyPolicy.java   | 21 ++-
 .../policies/manager/BasePolicyManagerTest.java |  3 -
 .../resolver/TestDefaultSubClusterResolver.java |  9 ++-
 .../utils/FederationPoliciesTestUtil.java   |  6 +-
 5 files changed, 73 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eae5ad68/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
index 283f89e..6f97a51 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
@@ -32,6 +32,7 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
 import 
org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
 import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
@@ -143,10 +144,9 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
 Map newWeightsConverted = new HashMap<>();
 boolean allInactive = true;
 WeightedPolicyInfo policy = getPolicyInfo();
-if (policy.getAMRMPolicyWeights() == null
-|| policy.getAMRMPolicyWeights().size() == 0) {
-  allInactive = false;
-} else {
+
+if (policy.getAMRMPolicyWeights() != null
+&& policy.getAMRMPolicyWeights().size() > 0) {
   for (Map.Entry e : policy.getAMRMPolicyWeights()
   .entrySet()) {
 if (e.getValue() > 0) {
@@ -180,7 +180,6 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
 
 this.federationFacade =
 policyContext.getFederationStateStoreFacade();
-this.bookkeeper = new AllocationBookkeeper();
 this.homeSubcluster = policyContext.getHomeSubcluster();
 
   }
@@ -197,7 +196,9 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
   List resourceRequests) throws YarnException {
 
 // object used to accumulate statistics about the answer, initialize with
-// active subclusters.
+// active subclusters. Create a new instance per call because this method
+// can be called concurrently.
+bookkeeper = new AllocationBookkeeper();
 bookkeeper.reinitialize(federationFacade.getSubClusters(true));
 
 List nonLocalizedRequests =
@@ -238,12 +239,16 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
 // we log altogether later
   }
   if (targetIds != null && targetIds.size() > 0) {
+boolean hasActive = false;
 for (SubClusterId tid : targetIds) {
   if (bookkeeper.isActiveAndEnabled(tid)) {
 bookkeeper.addRackRR(tid, rr);
+hasActive = true;
   }
 }
-continue;
+if (hasActive) {
+  continue;
+}
   }
 
   // Handle node/rack requests that the SubClusterResolver cannot map to
@@ -347,7 +352,7 

[12/50] [abbrv] hadoop git commit: MAPREDUCE-6824. TaskAttemptImpl#createCommonContainerLaunchContext is longer than 150 lines. Contributed by Chris Trezzo.

2017-04-03 Thread subru
MAPREDUCE-6824. TaskAttemptImpl#createCommonContainerLaunchContext is longer 
than 150 lines. Contributed by Chris Trezzo.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/845529b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/845529b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/845529b3

Branch: refs/heads/YARN-2915
Commit: 845529b3ab338e759665a687eb525fb2cccde7bf
Parents: a4b5aa8
Author: Akira Ajisaka 
Authored: Mon Apr 3 13:06:24 2017 +0900
Committer: Akira Ajisaka 
Committed: Mon Apr 3 13:06:54 2017 +0900

--
 .../v2/app/job/impl/TaskAttemptImpl.java| 285 ++-
 1 file changed, 153 insertions(+), 132 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/845529b3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index 4305824..9ea1b9a 100755
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -755,7 +755,7 @@ public abstract class TaskAttemptImpl implements
 new HashMap();
 
 // Application environment
-Map environment = new HashMap();
+Map environment;
 
 // Service data
 Map serviceData = new HashMap();
@@ -763,157 +763,178 @@ public abstract class TaskAttemptImpl implements
 // Tokens
 ByteBuffer taskCredentialsBuffer = ByteBuffer.wrap(new byte[]{});
 try {
-  FileSystem remoteFS = FileSystem.get(conf);
-
-  //  Set up JobJar to be localized properly on the remote NM.
-  String jobJar = conf.get(MRJobConfig.JAR);
-  if (jobJar != null) {
-final Path jobJarPath = new Path(jobJar);
-final FileSystem jobJarFs = FileSystem.get(jobJarPath.toUri(), conf);
-Path remoteJobJar = jobJarPath.makeQualified(jobJarFs.getUri(),
-jobJarFs.getWorkingDirectory());
-LocalResource rc = createLocalResource(jobJarFs, remoteJobJar,
-LocalResourceType.PATTERN, LocalResourceVisibility.APPLICATION);
-String pattern = conf.getPattern(JobContext.JAR_UNPACK_PATTERN, 
-JobConf.UNPACK_JAR_PATTERN_DEFAULT).pattern();
-rc.setPattern(pattern);
-localResources.put(MRJobConfig.JOB_JAR, rc);
-LOG.info("The job-jar file on the remote FS is "
-+ remoteJobJar.toUri().toASCIIString());
-  } else {
-// Job jar may be null. For e.g, for pipes, the job jar is the hadoop
-// mapreduce jar itself which is already on the classpath.
-LOG.info("Job jar is not present. "
-+ "Not adding any jar to the list of resources.");
-  }
-  //  End of JobJar setup
-
-  //  Set up JobConf to be localized properly on the remote NM.
-  Path path =
-  MRApps.getStagingAreaDir(conf, UserGroupInformation
-  .getCurrentUser().getShortUserName());
-  Path remoteJobSubmitDir =
-  new Path(path, oldJobId.toString());
-  Path remoteJobConfPath = 
-  new Path(remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);
-  localResources.put(
-  MRJobConfig.JOB_CONF_FILE,
-  createLocalResource(remoteFS, remoteJobConfPath,
-  LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
-  LOG.info("The job-conf file on the remote FS is "
-  + remoteJobConfPath.toUri().toASCIIString());
-  //  End of JobConf setup
 
-  // Setup DistributedCache
-  MRApps.setupDistributedCache(conf, localResources);
+  configureJobJar(conf, localResources);
 
-  // Setup up task credentials buffer
-  LOG.info("Adding #" + credentials.numberOfTokens()
-  + " tokens and #" + credentials.numberOfSecretKeys()
-  + " secret keys for NM use for launching container");
-  Credentials taskCredentials = new Credentials(credentials);
+  configureJobConf(conf, localResources, oldJobId);
 
-  // LocalStorageToken is 

[32/50] [abbrv] hadoop git commit: YARN-5905. Update the RM webapp host that is reported as part of Federation membership to current primary RM's IP.

2017-04-03 Thread subru
YARN-5905. Update the RM webapp host that is reported as part of Federation 
membership to current primary RM's IP.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1bb520ee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1bb520ee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1bb520ee

Branch: refs/heads/YARN-2915
Commit: 1bb520ee3f9844f4af7f9296bf478cbd7e5e312e
Parents: c7a86d9
Author: Subru Krishnan 
Authored: Tue Nov 22 18:30:40 2016 -0800
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../federation/FederationStateStoreService.java  |  4 ++--
 .../federation/TestFederationRMStateStoreService.java| 11 ++-
 2 files changed, 12 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bb520ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java
index 9a01d7e..530184f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java
@@ -177,8 +177,8 @@ public class FederationStateStoreService extends 
AbstractService
 config.getSocketAddr(YarnConfiguration.RM_ADMIN_ADDRESS,
 YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
 YarnConfiguration.DEFAULT_RM_ADMIN_PORT));
-String webAppAddress =
-WebAppUtils.getResolvedRemoteRMWebAppURLWithoutScheme(config);
+String webAppAddress = getServiceAddress(NetUtils
+.createSocketAddr(WebAppUtils.getRMWebAppURLWithScheme(config)));
 
 SubClusterInfo subClusterInfo = SubClusterInfo.newInstance(subClusterId,
 amRMAddress, clientRMAddress, rmAdminAddress, webAppAddress,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bb520ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
index 30f69b5..d92a793 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
@@ -19,6 +19,7 @@ package 
org.apache.hadoop.yarn.server.resourcemanager.federation;
 
 import java.io.IOException;
 import java.io.StringReader;
+import java.net.UnknownHostException;
 
 import javax.xml.bind.JAXBException;
 
@@ -157,12 +158,20 @@ public class TestFederationRMStateStoreService {
   }
 
   private String checkSubClusterInfo(SubClusterState state)
-  throws YarnException {
+  throws YarnException, UnknownHostException {
 Assert.assertNotNull(stateStore.getSubCluster(request));
 SubClusterInfo response =
 stateStore.getSubCluster(request).getSubClusterInfo();
 Assert.assertEquals(state, response.getState());
 Assert.assertTrue(response.getLastHeartBeat() >= lastHearbeatTS);
+String expectedAddress =
+(response.getClientRMServiceAddress().split(":"))[0];
+Assert.assertEquals(expectedAddress,
+(response.getAMRMServiceAddress().split(":"))[0]);
+Assert.assertEquals(expectedAddress,
+(response.getRMAdminServiceAddress().split(":"))[0]);
+

[23/50] [abbrv] hadoop git commit: YARN-3664. Federation PolicyStore internal APIs

2017-04-03 Thread subru
YARN-3664. Federation PolicyStore internal APIs


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed1868a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed1868a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed1868a7

Branch: refs/heads/YARN-2915
Commit: ed1868a7b99feb64a7bb791284f9f361139b311c
Parents: a9bdcc8
Author: Subru Krishnan 
Authored: Fri Aug 5 12:34:58 2016 -0700
Committer: Subru Krishnan 
Committed: Mon Apr 3 13:54:17 2017 -0700

--
 .../federation/store/FederationPolicyStore.java |  76 
 ...SubClusterPoliciesConfigurationsRequest.java |  35 
 ...ubClusterPoliciesConfigurationsResponse.java |  66 +++
 ...GetSubClusterPolicyConfigurationRequest.java |  62 ++
 ...etSubClusterPolicyConfigurationResponse.java |  65 +++
 ...SetSubClusterPolicyConfigurationRequest.java |  79 
 ...etSubClusterPolicyConfigurationResponse.java |  36 
 .../records/SubClusterPolicyConfiguration.java  | 130 +
 ...sterPoliciesConfigurationsRequestPBImpl.java |  95 +
 ...terPoliciesConfigurationsResponsePBImpl.java | 191 +++
 ...ClusterPolicyConfigurationRequestPBImpl.java | 103 ++
 ...lusterPolicyConfigurationResponsePBImpl.java | 143 ++
 .../pb/GetSubClustersInfoResponsePBImpl.java|   4 +-
 ...ClusterPolicyConfigurationRequestPBImpl.java | 159 +++
 ...lusterPolicyConfigurationResponsePBImpl.java |  93 +
 .../pb/SubClusterPolicyConfigurationPBImpl.java | 121 
 .../proto/yarn_server_federation_protos.proto   |  28 +++
 .../records/TestFederationProtocolRecords.java  |  53 -
 18 files changed, 1536 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed1868a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationPolicyStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationPolicyStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationPolicyStore.java
new file mode 100644
index 000..9d9bd9b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationPolicyStore.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationResponse;
+
+/**
+ * The FederationPolicyStore provides a key-value interface to access the
+ * policies configured for the system. The key is a "queue" name, i.e., the
+ * system allows to configure a different policy for each queue in the system
+ * (though each policy can make dynamic run-time decisions on a 
per-job/per-task
+ * basis). The value is a {@code SubClusterPolicyConfiguration}, a serialized
+ * representation of the policy type and its 

[2/2] hadoop git commit: HDFS-11564. Ozone: SCM: Add Comparable Metric Support. Contributed by Anu Engineer.

2017-04-03 Thread aengineer
HDFS-11564. Ozone: SCM: Add Comparable Metric Support. Contributed by Anu 
Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68eab679
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68eab679
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68eab679

Branch: refs/heads/HDFS-7240
Commit: 68eab679e3e7bec657bb9a4a0feb15fe56cc33fa
Parents: 558b478
Author: Anu Engineer 
Authored: Mon Apr 3 13:33:11 2017 -0700
Committer: Anu Engineer 
Committed: Mon Apr 3 13:33:11 2017 -0700

--
 .../ozone/scm/container/ContainerMapping.java   |  20 +-
 .../scm/container/ContainerPlacementPolicy.java |   2 +-
 .../SCMContainerPlacementCapacity.java  | 207 -
 .../container/SCMContainerPlacementRandom.java  | 146 
 .../placement/algorithms/SCMCommonPolicy.java   | 192 
 .../SCMContainerPlacementCapacity.java  | 133 +++
 .../algorithms/SCMContainerPlacementRandom.java |  89 
 .../placement/algorithms/package-info.java  |  18 ++
 .../placement/metrics/DatanodeMetric.java   |  91 
 .../container/placement/metrics/LongMetric.java | 158 +
 .../placement/metrics/SCMNodeMetric.java| 223 +++
 .../placement/metrics/SCMNodeStat.java  | 139 
 .../placement/metrics/package-info.java |  20 ++
 .../scm/container/placement/package-info.java   |  19 ++
 .../ozone/scm/exceptions/SCMException.java  |  34 ++-
 .../hadoop/ozone/scm/node/NodeManager.java  |   4 +-
 .../apache/hadoop/ozone/scm/node/NodeStat.java  |  12 +-
 .../hadoop/ozone/scm/node/SCMNodeManager.java   |  16 +-
 .../ozone/scm/node/SCMNodePoolManager.java  |   6 +-
 .../hadoop/ozone/scm/node/SCMNodeStat.java  | 100 -
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  72 +++---
 .../hadoop/ozone/TestContainerOperations.java   |   6 +-
 .../placement/TestContainerPlacement.java   | 130 +++
 .../placement/TestDatanodeMetrics.java  |  59 +
 .../ozone/scm/TestContainerSmallFile.java   |   4 +-
 .../ozone/scm/container/MockNodeManager.java| 147 +++-
 .../scm/container/TestContainerMapping.java |   8 +-
 .../ozone/scm/node/TestContainerPlacement.java  |  28 +--
 .../hadoop/ozone/scm/node/TestNodeManager.java  | 124 +++
 .../ozone/scm/node/TestSCMNodePoolManager.java  |   4 +-
 30 files changed, 1595 insertions(+), 616 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68eab679/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerMapping.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerMapping.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerMapping.java
index cb6a3cd..53f37b5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerMapping.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerMapping.java
@@ -23,11 +23,14 @@ import 
org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
+import 
org.apache.hadoop.ozone.scm.container.placement.algorithms.ContainerPlacementPolicy;
+import 
org.apache.hadoop.ozone.scm.container.placement.algorithms.SCMContainerPlacementRandom;
 import org.apache.hadoop.ozone.scm.node.NodeManager;
 import org.apache.hadoop.scm.ScmConfigKeys;
 import org.apache.hadoop.scm.client.ScmClient;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.utils.LevelDBStore;
+import org.iq80.leveldb.Options;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -39,7 +42,6 @@ import java.nio.charset.Charset;
 import java.util.List;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
-import org.iq80.leveldb.Options;
 
 /**
  * Mapping class contains the mapping from a name to a pipeline mapping. This 
is
@@ -94,8 +96,7 @@ public class ContainerMapping implements Mapping {
 this.containerSize = OzoneConsts.GB * conf.getInt(
 ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB,
 ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
-
-this.placementPolicy =  createContainerPlacementPolicy(nodeManager, conf);
+this.placementPolicy = createContainerPlacementPolicy(nodeManager, conf);
   }
 
   /**
@@ -105,9 +106,10 @@ public class ContainerMapping implements Mapping {
* @param conf - 

[1/2] hadoop git commit: HDFS-11564. Ozone: SCM: Add Comparable Metric Support. Contributed by Anu Engineer.

2017-04-03 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 558b478ff -> 68eab679e


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68eab679/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
new file mode 100644
index 000..d798c61
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.placement;
+
+import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.scm.container.MockNodeManager;
+import 
org.apache.hadoop.ozone.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
+import 
org.apache.hadoop.ozone.scm.container.placement.algorithms.SCMContainerPlacementRandom;
+import org.apache.hadoop.ozone.scm.exceptions.SCMException;
+import org.apache.hadoop.ozone.scm.node.NodeManager;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Random;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Asserts that allocation strategy works as expected.
+ */
+public class TestContainerPlacement {
+
+  private DescriptiveStatistics computeStatistics(NodeManager nodeManager) {
+DescriptiveStatistics descriptiveStatistics = new DescriptiveStatistics();
+for (DatanodeID id : nodeManager.getNodes(NodeManager.NODESTATE.HEALTHY)) {
+  float weightedValue =
+  nodeManager.getNodeStat(id).get().getScmUsed().get() / (float)
+  nodeManager.getNodeStat(id).get().getCapacity().get();
+  descriptiveStatistics.addValue(weightedValue);
+}
+return descriptiveStatistics;
+  }
+
+  /**
+   * This test simulates lots of Cluster I/O and updates the metadata in SCM.
+   * We simulate adding and removing containers from the cluster. It asserts
+   * that our placement algorithm has taken the capacity of nodes into
+   * consideration by asserting that standard deviation of used space on these
+   * has improved.
+   */
+  @Test
+  public void testCapacityPlacementYieldsBetterDataDistribution() throws
+  SCMException {
+final int opsCount = 200 * 1000;
+final int nodesRequired = 3;
+Random random = new Random();
+
+// The nature of init code in MockNodeManager yields similar clusters.
+MockNodeManager nodeManagerCapacity = new MockNodeManager(true, 100);
+MockNodeManager nodeManagerRandom = new MockNodeManager(true, 100);
+DescriptiveStatistics beforeCapacity =
+computeStatistics(nodeManagerCapacity);
+DescriptiveStatistics beforeRandom = computeStatistics(nodeManagerRandom);
+
+//Assert that our initial layout of clusters are similar.
+assertEquals(beforeCapacity.getStandardDeviation(), beforeRandom
+.getStandardDeviation(), 0.001);
+
+SCMContainerPlacementCapacity capacityPlacer = new
+SCMContainerPlacementCapacity(nodeManagerCapacity, new 
Configuration());
+SCMContainerPlacementRandom randomPlacer = new
+SCMContainerPlacementRandom(nodeManagerRandom, new Configuration());
+
+for (int x = 0; x < opsCount; x++) {
+  long containerSize = random.nextInt(100) * OzoneConsts.GB;
+  List nodesCapacity =
+  capacityPlacer.chooseDatanodes(nodesRequired, containerSize);
+  assertEquals(nodesRequired, nodesCapacity.size());
+
+  List nodesRandom = 
randomPlacer.chooseDatanodes(nodesRequired,
+  containerSize);
+
+  // One fifth of all calls are delete
+  if (x % 5 == 0) {
+deleteContainer(nodeManagerCapacity, nodesCapacity, containerSize);
+deleteContainer(nodeManagerRandom, 

hadoop git commit: HDFS-11519. Ozone: Implement XceiverServerSpi and XceiverClientSpi using Ratis. Contributed by Tsz Wo Nicholas Sze.

2017-04-03 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 d00c07c67 -> 558b478ff


HDFS-11519. Ozone: Implement XceiverServerSpi and XceiverClientSpi using Ratis. 
Contributed by Tsz Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/558b478f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/558b478f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/558b478f

Branch: refs/heads/HDFS-7240
Commit: 558b478ff2063371dfda78e128c13883901be046
Parents: d00c07c
Author: Anu Engineer 
Authored: Mon Apr 3 12:40:06 2017 -0700
Committer: Anu Engineer 
Committed: Mon Apr 3 12:40:06 2017 -0700

--
 .../dev-support/findbugsExcludeFile.xml |   3 +
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |  25 
 .../org/apache/hadoop/scm/ScmConfigKeys.java|   9 ++
 .../apache/hadoop/scm/XceiverClientManager.java |   8 +-
 .../apache/hadoop/scm/XceiverClientRatis.java   | 111 +
 .../scm/container/common/helpers/Pipeline.java  |  10 ++
 .../com/google/protobuf/ShadedProtoUtil.java|  38 ++
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   5 +
 .../apache/hadoop/ozone/OzoneConfigKeys.java|  16 +++
 .../common/impl/ContainerManagerImpl.java   |  11 +-
 .../server/ratis/ContainerStateMachine.java | 107 +
 .../server/ratis/XceiverServerRatis.java| 119 +++
 .../container/ozoneimpl/OzoneContainer.java |   9 +-
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |  38 +++---
 .../hdfs/MiniDFSClusterWithNodeGroup.java   |   2 +-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  28 +
 .../ozone/container/ContainerTestHelper.java|  64 +-
 .../container/ozoneimpl/TestOzoneContainer.java |  89 --
 .../transport/server/TestContainerServer.java   | 108 +
 hadoop-project/pom.xml  |  44 ++-
 20 files changed, 784 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/558b478f/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 30525f1..8e5b6ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -64,4 +64,7 @@
   
 
   
+  
+
+  
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/558b478f/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index 7514eeb..cb7281e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -118,6 +118,31 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   com.fasterxml.jackson.core
   jackson-databind
 
+
+
+  org.apache.ratis
+  ratis-proto-shaded
+
+
+  ratis-common
+  org.apache.ratis
+
+
+  ratis-client
+  org.apache.ratis
+
+
+  ratis-server
+  org.apache.ratis
+
+
+  ratis-netty
+  org.apache.ratis
+
+
+  ratis-grpc
+  org.apache.ratis
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/558b478f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
index 5f3dbd5..fed4459 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
@@ -35,6 +35,15 @@ public final class ScmConfigKeys {
   public static final int SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT =
   1;
 
+  public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
+  = "dfs.container.ratis.enabled";
+  public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
+  = false;
+  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
+  = "dfs.container.ratis.rpc.type";
+  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
+  = "GRPC";
+
   // TODO : this is copied from OzoneConsts, may need to move to a better place
   public static final 

hadoop git commit: HADOOP-14268. Fix markdown itemization in hadoop-aws documents. Contributed by Akira Ajisaka

2017-04-03 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 f9d3bb3b8 -> e47c95ffb


HADOOP-14268. Fix markdown itemization in hadoop-aws documents. Contributed by 
Akira Ajisaka


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e47c95ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e47c95ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e47c95ff

Branch: refs/heads/branch-2.8
Commit: e47c95ffbfcfdefc7bfe4fb01733885c649ce1d1
Parents: f9d3bb3
Author: Mingliang Liu 
Authored: Mon Apr 3 11:15:03 2017 -0700
Committer: Mingliang Liu 
Committed: Mon Apr 3 11:15:03 2017 -0700

--
 hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e47c95ff/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 334635d..5fe0d91 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -91,6 +91,7 @@ access to the data. Anyone with the credentials can not only 
read your datasets
 —they can delete them.
 
 Do not inadvertently share these credentials through means such as
+
 1. Checking in to SCM any configuration files containing the secrets.
 1. Logging them to a console, as they invariably end up being seen.
 1. Defining filesystem URIs with the credentials in the URL, such as


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14268. Fix markdown itemization in hadoop-aws documents. Contributed by Akira Ajisaka

2017-04-03 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4f1f16f1a -> 8c21b2a25


HADOOP-14268. Fix markdown itemization in hadoop-aws documents. Contributed by 
Akira Ajisaka

(cherry picked from commit 5faa949b782be48ef400d2eb1695f420455de764)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c21b2a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c21b2a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c21b2a2

Branch: refs/heads/branch-2
Commit: 8c21b2a25933e07a492afdc49dda76acba3bc113
Parents: 4f1f16f
Author: Mingliang Liu 
Authored: Mon Apr 3 11:07:14 2017 -0700
Committer: Mingliang Liu 
Committed: Mon Apr 3 11:07:56 2017 -0700

--
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md | 2 ++
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md   | 1 +
 2 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c21b2a2/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 127abeb..07cc903 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -38,6 +38,7 @@ The specifics of using these filesystems are documented in 
this section.
 
 
 See also:
+
 * [Testing](testing.html)
 * [Troubleshooting S3a](troubleshooting_s3a.html)
 
@@ -96,6 +97,7 @@ access to the data. Anyone with the credentials can not only 
read your datasets
 —they can delete them.
 
 Do not inadvertently share these credentials through means such as
+
 1. Checking in to SCM any configuration files containing the secrets.
 1. Logging them to a console, as they invariably end up being seen.
 1. Defining filesystem URIs with the credentials in the URL, such as

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c21b2a2/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
index aaf3fca..626c066 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
@@ -651,6 +651,7 @@ located.
 
 New tests are always welcome. Bear in mind that we need to keep costs
 and test time down, which is done by
+
 * Not duplicating tests.
 * Being efficient in your use of Hadoop API calls.
 * Isolating large/slow tests into the "scale" test group.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14268. Fix markdown itemization in hadoop-aws documents. Contributed by Akira Ajisaka

2017-04-03 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/trunk bbd68478d -> 5faa949b7


HADOOP-14268. Fix markdown itemization in hadoop-aws documents. Contributed by 
Akira Ajisaka


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5faa949b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5faa949b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5faa949b

Branch: refs/heads/trunk
Commit: 5faa949b782be48ef400d2eb1695f420455de764
Parents: bbd6847
Author: Mingliang Liu 
Authored: Mon Apr 3 11:07:14 2017 -0700
Committer: Mingliang Liu 
Committed: Mon Apr 3 11:07:14 2017 -0700

--
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md | 2 ++
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md   | 1 +
 2 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5faa949b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 82c3588..18c0ceb 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -41,6 +41,7 @@ The specifics of using these filesystems are documented in 
this section.
 
 
 See also:
+
 * [Testing](testing.html)
 * [Troubleshooting S3a](troubleshooting_s3a.html)
 
@@ -99,6 +100,7 @@ access to the data. Anyone with the credentials can not only 
read your datasets
 —they can delete them.
 
 Do not inadvertently share these credentials through means such as
+
 1. Checking in to SCM any configuration files containing the secrets.
 1. Logging them to a console, as they invariably end up being seen.
 1. Defining filesystem URIs with the credentials in the URL, such as

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5faa949b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
index 79551a3..39ca8f4 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
@@ -643,6 +643,7 @@ located.
 
 New tests are always welcome. Bear in mind that we need to keep costs
 and test time down, which is done by
+
 * Not duplicating tests.
 * Being efficient in your use of Hadoop API calls.
 * Isolating large/slow tests into the "scale" test group.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-5952. Create REST API for changing YARN scheduler configurations. (Jonathan Hung via wangda) [Forced Update!]

2017-04-03 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5734 663f30a25 -> 94f81b6f6 (forced update)


YARN-5952. Create REST API for changing YARN scheduler configurations. 
(Jonathan Hung via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94f81b6f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94f81b6f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94f81b6f

Branch: refs/heads/YARN-5734
Commit: 94f81b6f6357573d8a2b3efa35ea69711ca6fec0
Parents: 25d2028
Author: Wangda Tan 
Authored: Mon Apr 3 10:12:01 2017 -0700
Committer: Wangda Tan 
Committed: Mon Apr 3 10:12:01 2017 -0700

--
 .../scheduler/MutableConfScheduler.java |  40 ++
 .../scheduler/MutableConfigurationProvider.java |   5 +-
 .../scheduler/capacity/CapacityScheduler.java   |  16 +-
 .../conf/InMemoryConfigurationStore.java|   6 +-
 .../conf/MutableCSConfigurationProvider.java|  24 +-
 .../resourcemanager/webapp/RMWebServices.java   | 169 +++
 .../webapp/dao/QueueConfigInfo.java |  57 +++
 .../webapp/dao/QueueConfigsUpdateInfo.java  |  60 +++
 .../TestMutableCSConfigurationProvider.java |   6 +-
 .../TestRMWebServicesConfigurationMutation.java | 477 +++
 10 files changed, 849 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94f81b6f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
new file mode 100644
index 000..35e36e1
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.security.UserGroupInformation;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Interface for a scheduler that supports changing configuration at runtime.
+ *
+ */
+public interface MutableConfScheduler extends ResourceScheduler {
+
+  /**
+   * Update the scheduler's configuration.
+   * @param user Caller of this update
+   * @param confUpdate key-value map of the configuration update
+   * @throws IOException if update is invalid
+   */
+  void updateConfiguration(UserGroupInformation user,
+  Map<String, String> confUpdate) throws IOException;
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94f81b6f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index da30a2b..889c3bc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ 

hadoop git commit: YARN-5952. Create REST API for changing YARN scheduler configurations

2017-04-03 Thread jhung
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5734 25d2028be -> 663f30a25


YARN-5952. Create REST API for changing YARN scheduler configurations


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/663f30a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/663f30a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/663f30a2

Branch: refs/heads/YARN-5734
Commit: 663f30a253182de8599ff294aee1eb0c28eb1dce
Parents: 25d2028
Author: Jonathan Hung 
Authored: Mon Apr 3 10:15:25 2017 -0700
Committer: Jonathan Hung 
Committed: Mon Apr 3 10:15:25 2017 -0700

--
 .../scheduler/MutableConfScheduler.java |  40 ++
 .../scheduler/MutableConfigurationProvider.java |   5 +-
 .../scheduler/capacity/CapacityScheduler.java   |  16 +-
 .../conf/InMemoryConfigurationStore.java|   6 +-
 .../conf/MutableCSConfigurationProvider.java|  24 +-
 .../resourcemanager/webapp/RMWebServices.java   | 169 +++
 .../webapp/dao/QueueConfigInfo.java |  57 +++
 .../webapp/dao/QueueConfigsUpdateInfo.java  |  60 +++
 .../TestMutableCSConfigurationProvider.java |   6 +-
 .../TestRMWebServicesConfigurationMutation.java | 477 +++
 10 files changed, 849 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/663f30a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
new file mode 100644
index 000..35e36e1
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.security.UserGroupInformation;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Interface for a scheduler that supports changing configuration at runtime.
+ *
+ */
+public interface MutableConfScheduler extends ResourceScheduler {
+
+  /**
+   * Update the scheduler's configuration.
+   * @param user Caller of this update
+   * @param confUpdate key-value map of the configuration update
+   * @throws IOException if update is invalid
+   */
+  void updateConfiguration(UserGroupInformation user,
+  Map<String, String> confUpdate) throws IOException;
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/663f30a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index da30a2b..889c3bc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ 

hadoop git commit: HDFS-11598. Improve -setrep for Erasure Coded files. Contributed by Yiqun Lin.

2017-04-03 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/trunk bc7aff7ce -> bbd68478d


HDFS-11598. Improve -setrep for Erasure Coded files. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbd68478
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbd68478
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbd68478

Branch: refs/heads/trunk
Commit: bbd68478d5743b3b2911bf3febed7daa89479e45
Parents: bc7aff7
Author: Wei-Chiu Chuang 
Authored: Mon Apr 3 07:57:28 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon Apr 3 07:57:28 2017 -0700

--
 .../apache/hadoop/fs/shell/SetReplication.java  | 17 ++--
 .../hadoop/hdfs/TestSetrepIncreasing.java   | 44 
 2 files changed, 57 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd68478/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
index 2231c58..16e6e92 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
@@ -85,11 +85,20 @@ class SetReplication extends FsCommand {
 }
 
 if (item.stat.isFile()) {
-  if (!item.fs.setReplication(item.path, newRep)) {
-throw new IOException("Could not set replication for: " + item);
+  // Do the checking if the file is erasure coded since
+  // replication factor for an EC file is meaningless.
+  if (!item.stat.isErasureCoded()) {
+if (!item.fs.setReplication(item.path, newRep)) {
+  throw new IOException("Could not set replication for: " + item);
+}
+out.println("Replication " + newRep + " set: " + item);
+if (waitOpt) {
+  waitList.add(item);
+}
+  } else {
+out.println("Did not set replication for: " + item
++ ", because it's an erasure coded file.");
   }
-  out.println("Replication " + newRep + " set: " + item);
-  if (waitOpt) waitList.add(item);
 } 
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd68478/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
index fee30b5..50d7b27 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
@@ -20,7 +20,9 @@ package org.apache.hadoop.hdfs;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.io.PrintStream;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -28,6 +30,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.junit.Test;
 
@@ -102,4 +105,45 @@ public class TestSetrepIncreasing {
   cluster.shutdown();
 }
  }
+
+  @Test
+  public void testSetRepOnECFile() throws Exception {
+ClientProtocol client;
+Configuration conf = new HdfsConfiguration();
+conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
+StripedFileTestUtil.getDefaultECPolicy().getName());
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+.build();
+cluster.waitActive();
+client = NameNodeProxies.createProxy(conf,
+cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
+client.setErasureCodingPolicy("/",
+StripedFileTestUtil.getDefaultECPolicy().getName());
+
+FileSystem dfs = cluster.getFileSystem();
+try {
+  Path d = new Path("/tmp");
+  dfs.mkdirs(d);
+  Path f = new Path(d, "foo");
+  dfs.createNewFile(f);
+  FileStatus file = dfs.getFileStatus(f);
+  assertTrue(file.isErasureCoded());
+
+  ByteArrayOutputStream out = new 

hadoop git commit: HDFS-11515. -du throws ConcurrentModificationException. Contributed by Istvan Fajth, Wei-Chiu Chuang.

2017-04-03 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 800d632ca -> f9d3bb3b8


HDFS-11515. -du throws ConcurrentModificationException. Contributed by Istvan 
Fajth, Wei-Chiu Chuang.

(cherry picked from commit bc7aff7cec07bbc3fed63a00c8f1584c34670998)
(cherry picked from commit 4f1f16f1a67dcb6db705a73bf7fcf3f0f9d7094c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9d3bb3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9d3bb3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9d3bb3b

Branch: refs/heads/branch-2.8
Commit: f9d3bb3b8335889b30691baca4331f6f9ed28f69
Parents: 800d632
Author: Wei-Chiu Chuang 
Authored: Mon Apr 3 07:32:27 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon Apr 3 07:39:41 2017 -0700

--
 .../snapshot/DirectoryWithSnapshotFeature.java  |  5 ++
 .../snapshot/TestRenameWithSnapshots.java   |  6 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 75 
 3 files changed, 84 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9d3bb3b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 9addbfa..9840679 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -633,6 +633,11 @@ public class DirectoryWithSnapshotFeature implements 
INode.Feature {
 for(DirectoryDiff d : diffs) {
   for(INode deletedNode : d.getChildrenDiff().getList(ListType.DELETED)) {
 context.reportDeletedSnapshottedNode(deletedNode);
+if (deletedNode.isDirectory()){
+  DirectoryWithSnapshotFeature sf =
+  deletedNode.asDirectory().getDirectoryWithSnapshotFeature();
+  sf.computeContentSummary4Snapshot(context);
+}
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9d3bb3b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index 143a7b6..9b5c0b0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -26,6 +26,7 @@ import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.spy;
+import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
 
 import java.io.File;
 import java.io.IOException;
@@ -2427,7 +2428,7 @@ public class TestRenameWithSnapshots {
*/
   @Test (timeout=30)
   public void testDu() throws Exception {
-File tempFile = File.createTempFile("testDu-", ".tmp");
+File tempFile = File.createTempFile("testDu-", ".tmp", getTestDir());
 tempFile.deleteOnExit();
 
 final FileSystem localfs = FileSystem.getLocal(conf);
@@ -2537,7 +2538,8 @@ public class TestRenameWithSnapshots {
*/
   @Test (timeout=30)
   public void testDuMultipleDirs() throws Exception {
-File tempFile = File.createTempFile("testDuMultipleDirs-", "" + ".tmp");
+File tempFile = File.createTempFile("testDuMultipleDirs-", ".tmp",
+getTestDir());
 tempFile.deleteOnExit();
 
 final FileSystem localfs = FileSystem.getLocal(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9d3bb3b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
 

hadoop git commit: HDFS-11515. -du throws ConcurrentModificationException. Contributed by Istvan Fajth, Wei-Chiu Chuang.

2017-04-03 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4396c8e93 -> 4f1f16f1a


HDFS-11515. -du throws ConcurrentModificationException. Contributed by Istvan 
Fajth, Wei-Chiu Chuang.

(cherry picked from commit bc7aff7cec07bbc3fed63a00c8f1584c34670998)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f1f16f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f1f16f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f1f16f1

Branch: refs/heads/branch-2
Commit: 4f1f16f1a67dcb6db705a73bf7fcf3f0f9d7094c
Parents: 4396c8e
Author: Wei-Chiu Chuang 
Authored: Mon Apr 3 07:32:27 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon Apr 3 07:36:08 2017 -0700

--
 .../snapshot/DirectoryWithSnapshotFeature.java  |  5 ++
 .../snapshot/TestRenameWithSnapshots.java   |  6 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 75 
 3 files changed, 84 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f1f16f1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 9addbfa..9840679 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -633,6 +633,11 @@ public class DirectoryWithSnapshotFeature implements 
INode.Feature {
 for(DirectoryDiff d : diffs) {
   for(INode deletedNode : d.getChildrenDiff().getList(ListType.DELETED)) {
 context.reportDeletedSnapshottedNode(deletedNode);
+if (deletedNode.isDirectory()){
+  DirectoryWithSnapshotFeature sf =
+  deletedNode.asDirectory().getDirectoryWithSnapshotFeature();
+  sf.computeContentSummary4Snapshot(context);
+}
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f1f16f1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index aa532d2..2d388d9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -26,6 +26,7 @@ import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.spy;
+import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
 
 import java.io.File;
 import java.io.IOException;
@@ -2427,7 +2428,7 @@ public class TestRenameWithSnapshots {
*/
   @Test (timeout=30)
   public void testDu() throws Exception {
-File tempFile = File.createTempFile("testDu-", ".tmp");
+File tempFile = File.createTempFile("testDu-", ".tmp", getTestDir());
 tempFile.deleteOnExit();
 
 final FileSystem localfs = FileSystem.getLocal(conf);
@@ -2537,7 +2538,8 @@ public class TestRenameWithSnapshots {
*/
   @Test (timeout=30)
   public void testDuMultipleDirs() throws Exception {
-File tempFile = File.createTempFile("testDuMultipleDirs-", "" + ".tmp");
+File tempFile = File.createTempFile("testDuMultipleDirs-", ".tmp",
+getTestDir());
 tempFile.deleteOnExit();
 
 final FileSystem localfs = FileSystem.getLocal(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f1f16f1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index ca53788..7926e44 100644
--- 

hadoop git commit: HDFS-11515. -du throws ConcurrentModificationException. Contributed by Istvan Fajth, Wei-Chiu Chuang.

2017-04-03 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 845529b3a -> bc7aff7ce


HDFS-11515. -du throws ConcurrentModificationException. Contributed by Istvan 
Fajth, Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc7aff7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc7aff7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc7aff7c

Branch: refs/heads/trunk
Commit: bc7aff7cec07bbc3fed63a00c8f1584c34670998
Parents: 845529b
Author: Wei-Chiu Chuang 
Authored: Mon Apr 3 07:32:27 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon Apr 3 07:35:09 2017 -0700

--
 .../snapshot/DirectoryWithSnapshotFeature.java  |  5 ++
 .../snapshot/TestRenameWithSnapshots.java   |  6 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 75 
 3 files changed, 84 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc7aff7c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 9addbfa..9840679 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -633,6 +633,11 @@ public class DirectoryWithSnapshotFeature implements 
INode.Feature {
 for(DirectoryDiff d : diffs) {
   for(INode deletedNode : d.getChildrenDiff().getList(ListType.DELETED)) {
 context.reportDeletedSnapshottedNode(deletedNode);
+if (deletedNode.isDirectory()){
+  DirectoryWithSnapshotFeature sf =
+  deletedNode.asDirectory().getDirectoryWithSnapshotFeature();
+  sf.computeContentSummary4Snapshot(context);
+}
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc7aff7c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index d1b3aa6..d06c384 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -26,6 +26,7 @@ import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.spy;
+import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
 
 import java.io.File;
 import java.io.IOException;
@@ -2429,7 +2430,7 @@ public class TestRenameWithSnapshots {
*/
   @Test (timeout=30)
   public void testDu() throws Exception {
-File tempFile = File.createTempFile("testDu-", ".tmp");
+File tempFile = File.createTempFile("testDu-", ".tmp", getTestDir());
 tempFile.deleteOnExit();
 
 final FileSystem localfs = FileSystem.getLocal(conf);
@@ -2539,7 +2540,8 @@ public class TestRenameWithSnapshots {
*/
   @Test (timeout=30)
   public void testDuMultipleDirs() throws Exception {
-File tempFile = File.createTempFile("testDuMultipleDirs-", "" + ".tmp");
+File tempFile = File.createTempFile("testDuMultipleDirs-", ".tmp",
+getTestDir());
 tempFile.deleteOnExit();
 
 final FileSystem localfs = FileSystem.getLocal(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc7aff7c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index ca53788..7926e44 100644
---