[hadoop] branch trunk updated: YARN-9335 [atsv2] Restrict the number of elements held in timeline collector when backend is unreachable for async calls. Contributed by Abhishek Modi.

2019-04-05 Thread vrushali
This is an automated email from the ASF dual-hosted git repository.

vrushali pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 22362c8  YARN-9335 [atsv2] Restrict the number of elements held in 
timeline collector when backend is unreachable for async calls. Contributed by 
Abhishek Modi.
22362c8 is described below

commit 22362c876d28c081c37dd74f6f1ae8139695e254
Author: Vrushali C 
AuthorDate: Fri Apr 5 12:06:51 2019 -0700

YARN-9335 [atsv2] Restrict the number of elements held in timeline 
collector when backend is unreachable for async calls. Contributed by Abhishek 
Modi.
---
 .../apache/hadoop/yarn/conf/YarnConfiguration.java |  9 +
 .../src/main/resources/yarn-default.xml            |  7 +
 .../collector/TimelineCollector.java   | 24 +++-
 .../collector/TestTimelineCollector.java   | 43 --
 4 files changed, 79 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index fa75eb4..34f1e93 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2767,6 +2767,15 @@ public class YarnConfiguration extends Configuration {
   public static final int
   DEFAULT_TIMELINE_SERVICE_WRITER_FLUSH_INTERVAL_SECONDS = 60;
 
+  /** The setting that controls the capacity of the queue for async writes
+   * to timeline collector.
+   */
+  public static final String TIMELINE_SERVICE_WRITER_ASYNC_QUEUE_CAPACITY =
+  TIMELINE_SERVICE_PREFIX + "writer.async.queue.capacity";
+
+  public static final int
+  DEFAULT_TIMELINE_SERVICE_WRITER_ASYNC_QUEUE_CAPACITY = 100;
+
   /**
* The name for setting that controls how long the final value of
* a metric of a completed app is retained before merging
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index dfbffd4..004af7c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2591,6 +2591,13 @@
   
 
   
+The setting that decides the capacity of the queue to hold
+asynchronous timeline entities.
+yarn.timeline-service.writer.async.queue.capacity
+100
+  
+
+  
 Time period till which the application collector will be alive
  in NM, after the  application master container finishes.
 yarn.timeline-service.app-collector.linger-period.ms
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
index 6c83665..0c54ed0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
@@ -23,8 +23,11 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -37,6 +40,7 @@ import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -61,6 +65,7 @@ public abstract class TimelineCollector extends 
CompositeService {
   = new ConcurrentHashMap<>();
  private static Set<String> entityTypesSkipAggregat
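
For context on the fix itself: the imports added above (ArrayBlockingQueue, ThreadPoolExecutor, TimeUnit) point at a bounded executor work queue. A minimal sketch of that approach, assuming a drop-oldest rejection policy (the patch's actual policy may differ):

    // Cap the async-write work queue at the configured capacity so queued
    // entities cannot grow without bound while the backend is unreachable.
    // DiscardOldestPolicy is an illustrative assumption, not from the patch.
    int capacity = conf.getInt(
        YarnConfiguration.TIMELINE_SERVICE_WRITER_ASYNC_QUEUE_CAPACITY,
        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WRITER_ASYNC_QUEUE_CAPACITY);
    ThreadPoolExecutor executor = new ThreadPoolExecutor(1, 1,
        0L, TimeUnit.MILLISECONDS,
        new ArrayBlockingQueue<Runnable>(capacity),
        new ThreadPoolExecutor.DiscardOldestPolicy());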

[hadoop] branch trunk updated: YARN-9382 Publish container killed, paused and resumed events to ATSv2. Contributed by Abhishek Modi.

2019-04-05 Thread vrushali
This is an automated email from the ASF dual-hosted git repository.

vrushali pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 27039a2  YARN-9382 Publish container killed, paused and resumed events 
to ATSv2. Contributed by Abhishek Modi.
27039a2 is described below

commit 27039a29ae403398182e615fa5c1d0cb91a54268
Author: Vrushali C 
AuthorDate: Fri Apr 5 12:02:43 2019 -0700

YARN-9382 Publish container killed, paused and resumed events to ATSv2. 
Contributed by Abhishek Modi.
---
 .../server/metrics/ContainerMetricsConstants.java  |   9 ++
 .../timelineservice/NMTimelinePublisher.java   | 102 ++-
 .../timelineservice/TestNMTimelinePublisher.java   | 137 +
 3 files changed, 247 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ContainerMetricsConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ContainerMetricsConstants.java
index 7d6fc92..8b2fb85 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ContainerMetricsConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ContainerMetricsConstants.java
@@ -35,6 +35,15 @@ public class ContainerMetricsConstants {
   "YARN_RM_CONTAINER_CREATED";
 
   // Event of this type will be emitted by NM.
+  public static final String PAUSED_EVENT_TYPE = "YARN_CONTAINER_PAUSED";
+
+  // Event of this type will be emitted by NM.
+  public static final String RESUMED_EVENT_TYPE = "YARN_CONTAINER_RESUMED";
+
+  // Event of this type will be emitted by NM.
+  public static final String KILLED_EVENT_TYPE = "YARN_CONTAINER_KILLED";
+
+  // Event of this type will be emitted by NM.
   public static final String FINISHED_EVENT_TYPE = "YARN_CONTAINER_FINISHED";
 
   // Event of this type will be emitted by RM.
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
index b2d9376..ba57495 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
@@ -25,6 +25,9 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerKillEvent;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerPauseEvent;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResumeEvent;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -253,6 +256,95 @@ public class NMTimelinePublisher extends CompositeService {
   }
 
   @SuppressWarnings("unchecked")
+  private void publishContainerResumedEvent(
+  ContainerEvent event) {
+if (publishNMContainerEvents) {
+  ContainerResumeEvent resumeEvent = (ContainerResumeEvent) event;
+  ContainerId containerId = resumeEvent.getContainerID();
+  ContainerEntity entity = createContainerEntity(containerId);
+
+  Map<String, Object> entityInfo = new HashMap<String, Object>();
+  entityInfo.put(ContainerMetricsConstants.DIAGNOSTICS_INFO,
+  resumeEvent.getDiagnostic());
+  entity.setInfo(entityInfo);
+
+  Container container = context.getContainers().get(containerId);
+  if (container != null) {
+TimelineEvent tEvent = new TimelineEvent();
+tEvent.setId(ContainerMetricsConstants.RESUMED_EVENT_TYPE);
+tEvent.setTimestamp(event.getTimestamp());
+
+long containerStartTime = container.getContainerStartTime();
+entity.addEvent(tEvent);
+entity
+.setIdPrefix(TimelineServiceHelper.invertLong(containerStartTime));
+dispatcher.getEventHandler().handle(new TimelinePublishEvent(entity,
+containerId.getApplicationAttemptId().getApplicationId()));
+  }
+}
+  }
+
+  @SuppressWarnings("unchecked")
+  private void publishContainerPausedEvent(
+  ContainerEvent event) {
+if (publishNMContainerEvents) {
+  ContainerPauseEvent pa
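
The paused-event publisher is cut off above. Assuming it mirrors publishContainerResumedEvent with the PAUSED event type, its body would look roughly like this sketch (getDiagnostic on ContainerPauseEvent is assumed by analogy):

    if (publishNMContainerEvents) {
      ContainerPauseEvent pauseEvent = (ContainerPauseEvent) event;
      ContainerId containerId = pauseEvent.getContainerID();
      ContainerEntity entity = createContainerEntity(containerId);

      // Same diagnostics handling as the resumed case above.
      Map<String, Object> entityInfo = new HashMap<String, Object>();
      entityInfo.put(ContainerMetricsConstants.DIAGNOSTICS_INFO,
          pauseEvent.getDiagnostic());
      entity.setInfo(entityInfo);

      Container container = context.getContainers().get(containerId);
      if (container != null) {
        TimelineEvent tEvent = new TimelineEvent();
        tEvent.setId(ContainerMetricsConstants.PAUSED_EVENT_TYPE);
        tEvent.setTimestamp(event.getTimestamp());
        entity.addEvent(tEvent);
        entity.setIdPrefix(
            TimelineServiceHelper.invertLong(container.getContainerStartTime()));
        dispatcher.getEventHandler().handle(new TimelinePublishEvent(entity,
            containerId.getApplicationAttemptId().getApplicationId()));
      }
    }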

[hadoop] branch trunk updated: YARN-9303 Username splits won't help timelineservice.app_flow table. Contributed by Prabhu Joseph.

2019-04-03 Thread vrushali
This is an automated email from the ASF dual-hosted git repository.

vrushali pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new eb03f7c  YARN-9303  Username splits won't help 
timelineservice.app_flow table. Contributed by Prabhu Joseph.
eb03f7c is described below

commit eb03f7c4192d662ae06797acca7f67bc253440cb
Author: Vrushali C 
AuthorDate: Wed Apr 3 22:53:05 2019 -0700

YARN-9303  Username splits won't help timelineservice.app_flow table. 
Contributed by Prabhu Joseph.
---
 .../timelineservice/storage/apptoflow/AppToFlowTableRW.java   | 8 +---
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTableRW.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTableRW.java
index 6460203..05c4e57 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTableRW.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTableRW.java
@@ -79,13 +79,7 @@ public class AppToFlowTableRW extends 
BaseTableRW<AppToFlowTable> {
 mappCF.setBloomFilterType(BloomType.ROWCOL);
 appToFlowTableDescp.addFamily(mappCF);
 
-appToFlowTableDescp
-.setRegionSplitPolicyClassName(
-"org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy");
-appToFlowTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length",
-TimelineHBaseSchemaConstants.USERNAME_SPLIT_KEY_PREFIX_LENGTH);
-admin.createTable(appToFlowTableDescp,
-TimelineHBaseSchemaConstants.getUsernameSplits());
+admin.createTable(appToFlowTableDescp);
 LOG.info("Status of table creation for " + table.getNameAsString() + "="
 + admin.tableExists(table));
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9303 Username splits won't help timelineservice.app_flow table. Contributed by Prabhu Joseph.

2019-04-03 Thread vrushali
This is an automated email from the ASF dual-hosted git repository.

vrushali pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new df4a14d  YARN-9303  Username splits won't help 
timelineservice.app_flow table. Contributed by Prabhu Joseph.
df4a14d is described below

commit df4a14d20cf4d8d814c37c8b772b322a536c52b5
Author: Vrushali C 
AuthorDate: Wed Apr 3 22:50:18 2019 -0700

YARN-9303  Username splits won't help timelineservice.app_flow table. 
Contributed by Prabhu Joseph.
---
 .../timelineservice/storage/apptoflow/AppToFlowTableRW.java   | 8 +---
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTableRW.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTableRW.java
index 6460203..05c4e57 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTableRW.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTableRW.java
@@ -79,13 +79,7 @@ public class AppToFlowTableRW extends 
BaseTableRW<AppToFlowTable> {
 mappCF.setBloomFilterType(BloomType.ROWCOL);
 appToFlowTableDescp.addFamily(mappCF);
 
-appToFlowTableDescp
-.setRegionSplitPolicyClassName(
-"org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy");
-appToFlowTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length",
-TimelineHBaseSchemaConstants.USERNAME_SPLIT_KEY_PREFIX_LENGTH);
-admin.createTable(appToFlowTableDescp,
-TimelineHBaseSchemaConstants.getUsernameSplits());
+admin.createTable(appToFlowTableDescp);
 LOG.info("Status of table creation for " + table.getNameAsString() + "="
 + admin.tableExists(table));
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9338 Timeline related testcases are failing. Contributed by Abhishek Modi.

2019-03-12 Thread vrushali
This is an automated email from the ASF dual-hosted git repository.

vrushali pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 17a3e14  YARN-9338 Timeline related testcases are failing. Contributed 
by Abhishek Modi.
17a3e14 is described below

commit 17a3e14d25877af90ef6655750ce2b035c2982b5
Author: Vrushali C 
AuthorDate: Tue Mar 12 21:33:17 2019 -0700

YARN-9338 Timeline related testcases are failing. Contributed by Abhishek 
Modi.
---
 .../security/TestTimelineAuthFilterForV2.java  |   4 +
 .../storage/FileSystemTimelineWriterImpl.java  |  30 +++---
 .../storage/TestFileSystemTimelineWriterImpl.java  | 119 +
 3 files changed, 139 insertions(+), 14 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
index c353cf0..95a008a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
@@ -44,7 +44,9 @@ import java.util.concurrent.Callable;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.RawLocalFileSystem;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.minikdc.MiniKdc;
@@ -144,6 +146,8 @@ public class TestTimelineAuthFilterForV2 {
 // Setup timeline service v2.
 try {
   conf = new Configuration(false);
+  conf.setClass("fs.file.impl", RawLocalFileSystem.class,
+  FileSystem.class);
   conf.setStrings(TimelineAuthenticationFilterInitializer.PREFIX + "type",
   "kerberos");
   conf.set(TimelineAuthenticationFilterInitializer.PREFIX +
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
index c284f8f..023d496 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
@@ -24,6 +24,7 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -78,6 +79,7 @@ public class FileSystemTimelineWriterImpl extends 
AbstractService
   private int fsNumRetries;
   private long fsRetryInterval;
   private Path entitiesPath;
+  private Configuration config;
 
   /** default value for storage location on local disk. */
   private static final String STORAGE_DIR_ROOT = "timeline_service_data";
@@ -122,17 +124,13 @@ public class FileSystemTimelineWriterImpl extends 
AbstractService
   TimelineEntity entity,
   TimelineWriteResponse response)
   throws IOException {
-Path clusterIdPath = new Path(entitiesPath, clusterId);
-Path userIdPath = new Path(clusterIdPath, userId);
-Path flowNamePath = new Path(userIdPath, escape(flowName));
-Path flowVersionPath = new Path(flowNamePath, escape(flowVersion));
-Path flowRunPath = new Path(flowVersionPath, String.valueOf(flowRun));
-Path appIdPath = new Path(flowRunPath, appId);
-Path entityTypePath = new Path(appIdPath, entity.getType());
+String entityTypePathStr = clusterId + File.separator + userId +
+File.separator + escape(flowName) + File.separator +
+escape(flowVersion) + File.separator + flowRun + File.sep
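
One detail worth noting in this fix: the test now pins file:// URIs to RawLocalFileSystem. A standalone illustration of that override, matching the two lines added to TestTimelineAuthFilterForV2 above (the surrounding test wiring is omitted):

    // Make file:// resolve to RawLocalFileSystem for this configuration.
    Configuration conf = new Configuration(false);
    conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);
    FileSystem fs = FileSystem.get(java.net.URI.create("file:///"), conf);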

[hadoop] branch branch-2 updated: YARN-9150 Making TimelineSchemaCreator support different backends for Timeline Schema Creation in ATSv2. Contributed by Sushil Ks

2019-03-07 Thread vrushali
This is an automated email from the ASF dual-hosted git repository.

vrushali pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 396fcee  YARN-9150 Making TimelineSchemaCreator support different 
backends for Timeline Schema Creation in ATSv2. Contributed by Sushil Ks
396fcee is described below

commit 396fcee0b04cc0938e88914ff993c0dab7d3f93a
Author: Vrushali C 
AuthorDate: Thu Mar 7 23:04:36 2019 -0800

YARN-9150 Making TimelineSchemaCreator support different backends for 
Timeline Schema Creation in ATSv2. Contributed by Sushil Ks
---
 .../apache/hadoop/yarn/conf/YarnConfiguration.java |  6 ++
 .../storage/DataGeneratorForTest.java  |  2 +-
 ...reator.java => HBaseTimelineSchemaCreator.java} | 10 +--
 .../timelineservice/storage/SchemaCreator.java | 28 
 .../storage/TimelineSchemaCreator.java | 80 ++
 .../storage/DummyTimelineSchemaCreator.java| 29 
 .../storage/TestTimelineSchemaCreator.java | 41 +++
 7 files changed, 190 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 2883705..668f994 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2242,6 +2242,12 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_TIMELINE_SERVICE_READER_CLASS =
   "org.apache.hadoop.yarn.server.timelineservice" +
   ".storage.HBaseTimelineReaderImpl";
+  public static final String TIMELINE_SERVICE_SCHEMA_CREATOR_CLASS =
+  TIMELINE_SERVICE_PREFIX + "schema-creator.class";
+
+  public static final String DEFAULT_TIMELINE_SERVICE_SCHEMA_CREATOR_CLASS =
+  "org.apache.hadoop.yarn.server.timelineservice.storage" +
+  ".HBaseTimelineSchemaCreator";
 
 
   /**
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
index cf6a854..476021a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
@@ -57,7 +57,7 @@ public final class DataGeneratorForTest {
 // the coprocessor class is loaded from classpath
 conf.set(YarnConfiguration.FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION, " ");
 // now create all tables
-TimelineSchemaCreator.createAllTables(conf, false);
+HBaseTimelineSchemaCreator.createAllTables(conf, false);
   }
 
   public static void loadApps(HBaseTestingUtility util, long ts)
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineSchemaCreator.java
similarity index 97%
rename from 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
rename to 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineSchemaCreator.java
index e9e4770..41164e7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-ti
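
Per the diffstat, TimelineSchemaCreator becomes a thin dispatcher and the old HBase logic moves behind a new SchemaCreator interface. A rough sketch of how such a dispatcher selects its backend (the createTimelineSchema signature is an assumption, not copied from the patch):

    // Load the configured schema creator reflectively; the default is the
    // HBase implementation named in YarnConfiguration above.
    Configuration conf = new YarnConfiguration();
    String creatorClass = conf.get(
        YarnConfiguration.TIMELINE_SERVICE_SCHEMA_CREATOR_CLASS,
        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_SCHEMA_CREATOR_CLASS);
    SchemaCreator creator =
        (SchemaCreator) Class.forName(creatorClass).newInstance();
    creator.createTimelineSchema(args);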

[hadoop] branch branch-2 updated: YARN-8549 Adding a NoOp timeline writer and reader plugin classes for ATSv2. Contributed by Prabha Manepalli

2019-03-07 Thread vrushali
This is an automated email from the ASF dual-hosted git repository.

vrushali pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new b83ec96  YARN-8549 Adding a NoOp timeline writer and reader plugin 
classes for ATSv2. Contributed by Prabha Manepalli
b83ec96 is described below

commit b83ec96767bc97569674151e52a89d60313856f1
Author: Vrushali C 
AuthorDate: Thu Mar 7 22:54:32 2019 -0800

YARN-8549 Adding a NoOp timeline writer and reader plugin classes for 
ATSv2. Contributed by Prabha Manepalli
---
 .../storage/NoOpTimelineReaderImpl.java            | 72 ++
 .../storage/NoOpTimelineWriterImpl.java            | 64 +++
 2 files changed, 136 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineReaderImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineReaderImpl.java
new file mode 100644
index 000..042718b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineReaderImpl.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * Stub based implementation for TimelineReader. This implementation will
+ * not provide a complete implementation of all the necessary features. This
+ * implementation is provided solely for basic testing purposes.
+ */
+
+public class NoOpTimelineReaderImpl extends AbstractService
+implements TimelineReader {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(NoOpTimelineReaderImpl.class);
+
+  public NoOpTimelineReaderImpl() {
+super(NoOpTimelineReaderImpl.class.getName());
+LOG.info("NoOpTimelineReader is configured. Responses to all read "
++ "requests would be empty");
+  }
+
+  @Override public TimelineEntity getEntity(TimelineReaderContext context,
+  TimelineDataToRetrieve dataToRetrieve) throws IOException {
+LOG.debug("NoOpTimelineReader is configured. Response to all the read "
++ "requests would be empty");
+return new TimelineEntity();
+  }
+
+  @Override public Set<TimelineEntity> getEntities(
+  TimelineReaderContext context, TimelineEntityFilters filters,
+  TimelineDataToRetrieve dataToRetrieve) throws IOException {
+LOG.debug("NoOpTimelineReader is configured. Response to all the read "
++ "requests would be empty");
+return new HashSet<>();
+  }
+
+  @Override public Set<String> getEntityTypes(TimelineReaderContext context)
+  throws IOException {
+LOG.debug("NoOpTimelineReader is configured. Response to all the read "
++ "requests would be empty");
+return new HashSet<>();
+  }
+}
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineWriterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineWriterImpl.java
new file mode 100644
index 000..247e0d9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/h
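
The NoOpTimelineWriterImpl diff is truncated above. Mirroring the reader, a no-op writer acknowledges every request without persisting anything; an abridged sketch under that assumption (the write signature follows the TimelineWriter interface as used elsewhere in this digest and may not match the patch exactly):

    public class NoOpTimelineWriterImpl extends AbstractService
        implements TimelineWriter {

      public NoOpTimelineWriterImpl() {
        super(NoOpTimelineWriterImpl.class.getName());
      }

      @Override public TimelineWriteResponse write(
          TimelineCollectorContext context, TimelineEntities data,
          UserGroupInformation callerUgi) throws IOException {
        // Drop the entities and report success to the caller.
        return new TimelineWriteResponse();
      }

      @Override public void flush() throws IOException {
        // Nothing is buffered, so there is nothing to flush.
      }
    }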

[hadoop] branch trunk updated: YARN-8218 Add application launch time to ATSV1. Contributed by Abhishek Modi

2019-03-06 Thread vrushali
This is an automated email from the ASF dual-hosted git repository.

vrushali pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 491313a  YARN-8218 Add application launch time to ATSV1. Contributed 
by Abhishek Modi
491313a is described below

commit 491313ab84cc76683d0ef93a1ac17d8ecc8c430c
Author: Vrushali C 
AuthorDate: Wed Mar 6 21:47:29 2019 -0800

YARN-8218 Add application launch time to ATSV1. Contributed by Abhishek Modi
---
 .../ApplicationHistoryManagerOnTimelineStore.java   |  7 ++-
 .../resourcemanager/metrics/TimelineServiceV1Publisher.java | 13 +
 .../resourcemanager/metrics/TestSystemMetricsPublisher.java | 10 +-
 3 files changed, 28 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 62d8769..cc277ed 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -250,6 +250,7 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
 String type = null;
 boolean unmanagedApplication = false;
 long createdTime = 0;
+long launchTime = 0;
 long submittedTime = 0;
 long finishedTime = 0;
 float progress = 0.0f;
@@ -379,6 +380,9 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
 ApplicationMetricsConstants.CREATED_EVENT_TYPE)) {
   createdTime = event.getTimestamp();
 } else if (event.getEventType().equals(
+ApplicationMetricsConstants.LAUNCHED_EVENT_TYPE)) {
+  launchTime = event.getTimestamp();
+} else if (event.getEventType().equals(
 ApplicationMetricsConstants.UPDATED_EVENT_TYPE)) {
   // This type of events are parsed in time-stamp descending order
   // which means the previous event could override the information
@@ -454,7 +458,8 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
 return new ApplicationReportExt(ApplicationReport.newInstance(
 ApplicationId.fromString(entity.getEntityId()),
 latestApplicationAttemptId, user, queue, name, null, -1, null, state,
-diagnosticsInfo, null, createdTime, submittedTime, 0, finishedTime,
+diagnosticsInfo, null, createdTime,
+submittedTime, launchTime, finishedTime,
 finalStatus, appResources, null, progress, type, null, appTags,
 unmanagedApplication, Priority.newInstance(applicationPriority),
 appNodeLabelExpression, amNodeLabelExpression), appViewACLs);
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
index 7ad826f..5a5fa11 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
@@ -122,6 +122,19 @@ public class TimelineServiceV1Publisher extends 
AbstractSystemMetricsPublisher {
   }
 
   @Override
+  public void appLaunched(RMApp app, long launchTime) {
+TimelineEntity entity = createApplicationEntity(app.getApplicationId());
+
+TimelineEvent tEvent = new TimelineEvent();
+tEvent.setEventType(ApplicationMetricsConstants.LAUNCHED_EVENT_TYPE);
+tEvent.setTimestamp(launchTime);
+entity.addEvent(tEvent);
+
+getDispatcher().getEventHandler().handle(new TimelineV1PublishEvent(
+SystemMetricsEventType.PUBLISH_ENTITY, entity, 
app.getApplicationId()));
+  }
+
+  @Override
   public void appFinished(RMApp app, RMAppState state, long finishedTime) {
 TimelineEntity entity = createApplicationEntity(app.getApplicationId());
 
diff --git 
a
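
Usage-wise the new hook is small: the RM calls appLaunched once the AM container starts, and the LAUNCHED event's timestamp is what ApplicationHistoryManagerOnTimelineStore reads back as the report's launch time above. A hypothetical call site:

    // The V1 publisher turns this into a LAUNCHED_EVENT_TYPE timeline event.
    publisher.appLaunched(rmApp, System.currentTimeMillis());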

[hadoop] branch trunk updated: YARN-3841 [atsv2 Storage implementation] Adding retry semantics to HDFS backing storage. Contributed by Abhishek Modi.

2019-02-27 Thread vrushali
This is an automated email from the ASF dual-hosted git repository.

vrushali pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ea3cdc6  YARN-3841 [atsv2 Storage implementation] Adding retry 
semantics to HDFS backing storage. Contributed by Abhishek Modi.
ea3cdc6 is described below

commit ea3cdc60b39d96702c0bce292829914c25bc0d8e
Author: Vrushali C 
AuthorDate: Wed Feb 27 14:55:35 2019 -0800

YARN-3841 [atsv2 Storage implementation] Adding retry semantics to HDFS 
backing storage. Contributed by Abhishek Modi.
---
 .../storage/FileSystemTimelineWriterImpl.java  | 217 -
 .../storage/TestFileSystemTimelineWriterImpl.java  |  51 +++--
 2 files changed, 207 insertions(+), 61 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
index ac0902f..c284f8f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
@@ -18,16 +18,16 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.storage;
 
-import java.io.BufferedWriter;
 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.io.PrintWriter;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineDomain;
@@ -35,14 +35,17 @@ import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
 import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse.TimelineWriteError;
+import org.apache.hadoop.yarn.client.api.impl.FileSystemTimelineWriter;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import 
org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
- * This implements a local file based backend for storing application timeline
+ * This implements a FileSystem based backend for storing application timeline
  * information. This implementation may not provide a complete implementation 
of
  * all the necessary features. This implementation is provided solely for basic
  * testing purposes, and should not be used in a non-test situation.
@@ -52,20 +55,36 @@ import com.google.common.annotations.VisibleForTesting;
 public class FileSystemTimelineWriterImpl extends AbstractService
 implements TimelineWriter {
 
-  private String outputRoot;
-
   /** Config param for timeline service storage tmp root for FILE YARN-3264. */
-  public static final String TIMELINE_SERVICE_STORAGE_DIR_ROOT
-  = YarnConfiguration.TIMELINE_SERVICE_PREFIX + "fs-writer.root-dir";
+  public static final String TIMELINE_SERVICE_STORAGE_DIR_ROOT =
+  YarnConfiguration.TIMELINE_SERVICE_PREFIX + "fs-writer.root-dir";
+
+  public static final String TIMELINE_FS_WRITER_NUM_RETRIES =
+  YarnConfiguration.TIMELINE_SERVICE_PREFIX + "fs-writer.num-retries";
+  public static final int DEFAULT_TIMELINE_FS_WRITER_NUM_RETRIES = 0;
+
+  public static final String TIMELINE_FS_WRITER_RETRY_INTERVAL_MS =
+   YarnConfiguration.TIMELINE_SERVICE_PREFIX +
+   "fs-writer.retry-interval-ms";
+  public static final long DEFAULT_TIMELINE_FS_WRITER_RETRY_INTERVAL_MS = 
1000L;
 
   public static final String ENTITIES_DIR = "entities";
 
   /** Default extension for output files. */
   public static final String TIMELINE_SERVICE_STORAGE_EXTENSION = ".thist";
 
+  private FileSystem fs;
+  private Path rootPath;
+  private int fsNumRetries;
+  private long fsRetryInterval;
+  p
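
The retry loop itself is cut off above; the two new knobs imply a shape like the following sketch (the helper name and functional interface are illustrative, not from the patch):

    interface CheckedOp<T> { T run() throws IOException; }

    private <T> T withRetries(CheckedOp<T> op) throws IOException {
      // Up to fs-writer.num-retries extra attempts, sleeping
      // fs-writer.retry-interval-ms between them.
      for (int attempt = 0; ; attempt++) {
        try {
          return op.run();
        } catch (IOException e) {
          if (attempt >= fsNumRetries) {
            throw e;
          }
          try {
            Thread.sleep(fsRetryInterval);
          } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            throw new IOException("interrupted while retrying", ie);
          }
        }
      }
    }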

[hadoop] branch trunk updated: YARN-5336 Limit the flow name size & consider cleanup for hex chars. Contributed by Sushil Ks

2019-02-27 Thread vrushali
This is an automated email from the ASF dual-hosted git repository.

vrushali pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0ec962a  YARN-5336 Limit the flow name size & consider cleanup for hex 
chars. Contributed by Sushil Ks
0ec962a is described below

commit 0ec962ac8f0fa2a7a1811efaa0258c3e2564c79a
Author: Vrushali C 
AuthorDate: Wed Feb 27 14:43:39 2019 -0800

YARN-5336 Limit the flow name size & consider cleanup for hex chars. 
Contributed by Sushil Ks
---
 .../apache/hadoop/yarn/conf/YarnConfiguration.java | 13 ++
 .../hadoop/yarn/util/timeline/TimelineUtils.java   | 32 +
 .../src/main/resources/yarn-default.xml            | 11 +
 .../yarn/util/timeline/TestShortenedFlowName.java  | 52 ++
 .../server/timelineservice/TimelineContext.java    |  9 +++-
 5 files changed, 115 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 4cd4cca..a6d1dc5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2782,6 +2782,19 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_HDFS_LOCATION_FLOW_RUN_COPROCESSOR_JAR =
   "/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar";
 
+  /**
+   * This setting controls the max size of the flow name getting generated
+   * in ATSv2 after removing UUID if present.
+   * */
+  public static final String FLOW_NAME_MAX_SIZE =
+  TIMELINE_SERVICE_PREFIX + "flowname.max-size";
+
+  /**
+   * Default setting for flow name size has no size restriction
+   * after removing UUID if present.
+   */
+  public static final int FLOW_NAME_DEFAULT_MAX_SIZE = 0;
+
 /**
* The name for setting that points to an optional HBase configuration
* (hbase-site.xml file) with settings that will override the ones found on
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtils.java
index 800e8ca..63a9ba5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtils.java
@@ -24,6 +24,8 @@ import java.net.InetSocketAddress;
 import com.fasterxml.jackson.core.JsonGenerationException;
 import com.fasterxml.jackson.databind.JsonMappingException;
 import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configuration;
@@ -183,6 +185,36 @@ public class TimelineUtils {
   }
 
   /**
+   * Shortens the flow name for the configured size by removing UUID if 
present.
+   *
+   * @param flowName which has to be shortened
+   * @param conf to resize the flow name
+   * @return shortened flowName
+   */
+  public static String shortenFlowName(String flowName, Configuration conf) {
+if (flowName == null) {
+  return null;
+}
+// remove UUID inside flowname if present
+flowName = removeUUID(flowName);
+// resize flowname
+int length = conf.getInt(YarnConfiguration.FLOW_NAME_MAX_SIZE,
+YarnConfiguration.FLOW_NAME_DEFAULT_MAX_SIZE);
+if (length <= 0) {
+  return flowName;
+}
+return StringUtils.substring(flowName, 0, length);
+  }
+
+  @VisibleForTesting
+  static String removeUUID(String flowName) {
+flowName = StringUtils.replaceAll(flowName,
+"-?([a-fA-F0-9]{8}-[a-fA-F0-9]{4}-" +
+"[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}){1}", "");
+return flowName;
+  }
+
+  /**
* Generate flow version tag.
*
* @param flowVersion flow version that keeps track of the changes made to 
the
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index db29fb9..1a5c35a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-c

[hadoop] branch trunk updated: YARN-8549 Adding a NoOp timeline writer and reader plugin classes for ATSv2. Contributed by Prabha Manepalli.

2019-02-01 Thread vrushali
This is an automated email from the ASF dual-hosted git repository.

vrushali pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2c13513  YARN-8549 Adding a NoOp timeline writer and reader plugin 
classes for ATSv2. Contributed by Prabha Manepalli.
2c13513 is described below

commit 2c135130402255ce41e1ef958989e746f21ae1ab
Author: Vrushali C 
AuthorDate: Fri Feb 1 14:26:50 2019 -0800

YARN-8549 Adding a NoOp timeline writer and reader plugin classes for 
ATSv2. Contributed by Prabha Manepalli.
---
 .../storage/NoOpTimelineReaderImpl.java            | 80 +
 .../storage/NoOpTimelineWriterImpl.java            | 88 ++
 2 files changed, 168 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineReaderImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineReaderImpl.java
new file mode 100644
index 000..53bf058
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineReaderImpl.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * Stub based implementation for TimelineReader. This implementation will
+ * not provide a complete implementation of all the necessary features. This
+ * implementation is provided solely for basic testing purposes.
+ */
+
+public class NoOpTimelineReaderImpl extends AbstractService
+implements TimelineReader {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(NoOpTimelineReaderImpl.class);
+
+  public NoOpTimelineReaderImpl() {
+super(NoOpTimelineReaderImpl.class.getName());
+LOG.info("NoOpTimelineReader is configured. Response to all the read " +
+"requests would be empty");
+  }
+
+  @Override
+  public TimelineEntity getEntity(TimelineReaderContext context,
+   TimelineDataToRetrieve dataToRetrieve) throws IOException {
+if(LOG.isDebugEnabled()){
+  LOG.debug("NoOpTimelineReader is configured. Response to all the read " +
+  "requests would be empty");
+}
+return new TimelineEntity();
+  }
+
+  @Override
+  public Set<TimelineEntity> getEntities(TimelineReaderContext context,
+  TimelineEntityFilters filters, TimelineDataToRetrieve dataToRetrieve)
+  throws IOException {
+if(LOG.isDebugEnabled()){
+  LOG.debug("NoOpTimelineReader is configured. Response to all the read " +
+  "requests would be empty");
+}
+return new HashSet<>();
+  }
+
+  @Override
+  public Set<String> getEntityTypes(TimelineReaderContext context)
+  throws IOException {
+if(LOG.isDebugEnabled()){
+  LOG.debug("NoOpTimelineReader is configured. Response to all the read " +
+  "requests would be empty");
+}
+return new HashSet<>();
+  }
+}
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineWriterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/ha

[hadoop] branch trunk updated: YARN-9150 Making TimelineSchemaCreator support different backends for Timeline Schema Creation in ATSv2. Contributed by Sushil Ks

2019-01-15 Thread vrushali
This is an automated email from the ASF dual-hosted git repository.

vrushali pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 713ded6  YARN-9150 Making TimelineSchemaCreator support different 
backends for Timeline Schema Creation in ATSv2. Contributed by Sushil Ks
713ded6 is described below

commit 713ded6b15dc0b5e4205a7812a62225377e0b32b
Author: Vrushali C 
AuthorDate: Tue Jan 15 21:28:10 2019 -0800

YARN-9150 Making TimelineSchemaCreator support different backends for 
Timeline Schema Creation in ATSv2. Contributed by Sushil Ks
---
 .../storage/HBaseTimelineSchemaCreator.java        | 378 +
 .../timelineservice/storage/SchemaCreator.java |  28 ++
 .../storage/TimelineSchemaCreator.java |  80 +
 .../storage/DummyTimelineSchemaCreator.java|  29 ++
 .../storage/TestTimelineSchemaCreator.java |  41 +++
 5 files changed, 556 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineSchemaCreator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineSchemaCreator.java
new file mode 100644
index 000..b1593c5
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineSchemaCreator.java
@@ -0,0 +1,378 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTableRW;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTableRW;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTableRW;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTableRW;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTableRW;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.domain.DomainTableRW;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This creates the schema for a hbase based backend for storing application
+ * timeline information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class HBaseTimelineSchemaCreator implements SchemaCreator {
+  public HBaseTimelineSchemaCreator() {
+  }
+
+  final static String NAME = HBaseTimelineSchemaCreator.class.getSimpleName();
+  private static final Logger LOG =
+  LoggerFactory.getLogger(HBaseTimelineSchemaCreator.class);
+  private

[hadoop] branch trunk updated: YARN-9150 Making TimelineSchemaCreator support different backends for Timeline Schema Creation in ATSv2. Contributed by Sushil Ks

2019-01-15 Thread vrushali
This is an automated email from the ASF dual-hosted git repository.

vrushali pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 6be3923  YARN-9150 Making TimelineSchemaCreator support different 
backends for Timeline Schema Creation in ATSv2. Contributed by Sushil Ks
6be3923 is described below

commit 6be39230a67098e7d157925575b3b18bbf947717
Author: Vrushali C 
AuthorDate: Tue Jan 15 21:25:37 2019 -0800

YARN-9150 Making TimelineSchemaCreator support different backends for 
Timeline Schema Creation in ATSv2. Contributed by Sushil Ks
---
 .../apache/hadoop/yarn/conf/YarnConfiguration.java |   7 +
 .../storage/DataGeneratorForTest.java  |   2 +-
 .../storage/TimelineSchemaCreator.java | 378 -
 3 files changed, 8 insertions(+), 379 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index c29707c..e1980c3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2697,6 +2697,13 @@ public class YarnConfiguration extends Configuration {
   "org.apache.hadoop.yarn.server.timelineservice.storage" +
   ".HBaseTimelineReaderImpl";
 
+  public static final String TIMELINE_SERVICE_SCHEMA_CREATOR_CLASS =
+  TIMELINE_SERVICE_PREFIX + "schema-creator.class";
+
+  public static final String DEFAULT_TIMELINE_SERVICE_SCHEMA_CREATOR_CLASS =
+  "org.apache.hadoop.yarn.server.timelineservice.storage" +
+  ".HBaseTimelineSchemaCreator";
+
   /**
* default schema prefix for hbase tables.
*/
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
index cf6a854..476021a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
@@ -57,7 +57,7 @@ public final class DataGeneratorForTest {
 // the coprocessor class is loaded from classpath
 conf.set(YarnConfiguration.FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION, " ");
 // now create all tables
-TimelineSchemaCreator.createAllTables(conf, false);
+HBaseTimelineSchemaCreator.createAllTables(conf, false);
   }
 
   public static void loadApps(HBaseTestingUtility util, long ts)
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
deleted file mode 100644
index af6f915..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
+++ /dev/null
@@ -1,378 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ha
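
A minimal sketch of selecting the schema creator through the new key (illustrative only; both constants appear in the YarnConfiguration hunk above, and any class implementing SchemaCreator could be substituted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class SchemaCreatorConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // Select the HBase-backed creator; this is also the shipped default.
        conf.set(YarnConfiguration.TIMELINE_SERVICE_SCHEMA_CREATOR_CLASS,
            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_SCHEMA_CREATOR_CLASS);
        System.out.println(conf.get(YarnConfiguration.TIMELINE_SERVICE_SCHEMA_CREATOR_CLASS));
      }
    }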

hadoop git commit: YARN-3879 [Storage implementation] Create HDFS backing storage implementation for ATS reads. Contributed by Abhishek Modi.

2018-10-11 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 91f18a514 -> 7ed627af6


YARN-3879 [Storage implementation] Create HDFS backing storage implementation 
for ATS reads. Contributed by Abhishek Modi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ed627af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ed627af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ed627af

Branch: refs/heads/branch-2
Commit: 7ed627af6b3503e2b5446e582c83678218996d72
Parents: 91f18a5
Author: Vrushali C 
Authored: Thu Oct 11 21:23:34 2018 -0700
Committer: Vrushali C 
Committed: Thu Oct 11 21:23:34 2018 -0700

--
 .../storage/FileSystemTimelineReaderImpl.java   | 87 +++-
 1 file changed, 50 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ed627af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
index a0ee2be..6260c75 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.yarn.server.timelineservice.storage;
 
 import java.io.BufferedReader;
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStreamReader;
@@ -38,6 +37,11 @@ import org.apache.commons.csv.CSVFormat;
 import org.apache.commons.csv.CSVParser;
 import org.apache.commons.csv.CSVRecord;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
@@ -68,7 +72,9 @@ public class FileSystemTimelineReaderImpl extends 
AbstractService
   private static final Logger LOG =
   LoggerFactory.getLogger(FileSystemTimelineReaderImpl.class);
 
-  private String rootPath;
+  private FileSystem fs;
+  private Path rootPath;
+  private Path entitiesPath;
   private static final String ENTITIES_DIR = "entities";
 
   /** Default extension for output files. */
@@ -94,7 +100,7 @@ public class FileSystemTimelineReaderImpl extends 
AbstractService
 
   @VisibleForTesting
   String getRootPath() {
-return rootPath;
+return rootPath.toString();
   }
 
   private static ObjectMapper mapper;
@@ -162,12 +168,12 @@ public class FileSystemTimelineReaderImpl extends 
AbstractService
 if (clusterId == null || appId == null) {
   throw new IOException("Unable to get flow info");
 }
-String appFlowMappingFile = rootPath + File.separator +  ENTITIES_DIR +
-File.separator + clusterId + File.separator + APP_FLOW_MAPPING_FILE;
+Path clusterIdPath = new Path(entitiesPath, clusterId);
+Path appFlowMappingFilePath = new Path(clusterIdPath,
+APP_FLOW_MAPPING_FILE);
 try (BufferedReader reader =
  new BufferedReader(new InputStreamReader(
- new FileInputStream(
- appFlowMappingFile), Charset.forName("UTF-8")));
+ fs.open(appFlowMappingFilePath), Charset.forName("UTF-8")));
  CSVParser parser = new CSVParser(reader, csvFormat)) {
   for (CSVRecord record : parser.getRecords()) {
 if (record.size() < 4) {
@@ -266,7 +272,7 @@ public class FileSystemTimelineReaderImpl extends 
AbstractService
 return entity;
   }
 
-  private Set<TimelineEntity> getEntities(File dir, String entityType,
+  private Set<TimelineEntity> getEntities(Path dir, String entityType,
   TimelineEntityFilters filters, TimelineDataToRetrieve dataToRetrieve)
   throws IOException {
 // First sort the selected entities based on created/
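
The substance of this patch is the move from java.io file access to the Hadoop FileSystem API, which lets the same reader code serve a local directory or HDFS. A minimal sketch of the new read pattern under that assumption (the path is a placeholder):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FsReadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Resolves to whatever fs.defaultFS points at: local FS, HDFS, etc.
        FileSystem fs = FileSystem.get(conf);
        Path file = new Path("/tmp/entities/cluster1/app_flow_mapping.csv"); // placeholder
        try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(fs.open(file), StandardCharsets.UTF_8))) {
          String line;
          while ((line = reader.readLine()) != null) {
            System.out.println(line);
          }
        }
      }
    }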

hadoop git commit: YARN-3879 [Storage implementation] Create HDFS backing storage implementation for ATS reads. Contributed by Abhishek Modi.

2018-10-11 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/trunk 74db993a6 -> bca928d3c


YARN-3879 [Storage implementation] Create HDFS backing storage implementation 
for ATS reads. Contributed by Abhishek Modi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bca928d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bca928d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bca928d3

Branch: refs/heads/trunk
Commit: bca928d3c7b88f39e9bc1784889596f0b00964d4
Parents: 74db993
Author: Vrushali C 
Authored: Thu Oct 11 21:13:52 2018 -0700
Committer: Vrushali C 
Committed: Thu Oct 11 21:14:06 2018 -0700

--
 .../storage/FileSystemTimelineReaderImpl.java   | 87 +++-
 1 file changed, 50 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bca928d3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
index adb8821..ef08a9d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.yarn.server.timelineservice.storage;
 
 import java.io.BufferedReader;
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStreamReader;
@@ -41,6 +40,11 @@ import org.apache.commons.csv.CSVFormat;
 import org.apache.commons.csv.CSVParser;
 import org.apache.commons.csv.CSVRecord;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
@@ -68,7 +72,9 @@ public class FileSystemTimelineReaderImpl extends 
AbstractService
   private static final Logger LOG =
   LoggerFactory.getLogger(FileSystemTimelineReaderImpl.class);
 
-  private String rootPath;
+  private FileSystem fs;
+  private Path rootPath;
+  private Path entitiesPath;
   private static final String ENTITIES_DIR = "entities";
 
   /** Default extension for output files. */
@@ -94,7 +100,7 @@ public class FileSystemTimelineReaderImpl extends 
AbstractService
 
   @VisibleForTesting
   String getRootPath() {
-return rootPath;
+return rootPath.toString();
   }
 
   private static ObjectMapper mapper;
@@ -162,12 +168,12 @@ public class FileSystemTimelineReaderImpl extends 
AbstractService
 if (clusterId == null || appId == null) {
   throw new IOException("Unable to get flow info");
 }
-String appFlowMappingFile = rootPath + File.separator +  ENTITIES_DIR +
-File.separator + clusterId + File.separator + APP_FLOW_MAPPING_FILE;
+Path clusterIdPath = new Path(entitiesPath, clusterId);
+Path appFlowMappingFilePath = new Path(clusterIdPath,
+APP_FLOW_MAPPING_FILE);
 try (BufferedReader reader =
  new BufferedReader(new InputStreamReader(
- new FileInputStream(
- appFlowMappingFile), Charset.forName("UTF-8")));
+ fs.open(appFlowMappingFilePath), Charset.forName("UTF-8")));
  CSVParser parser = new CSVParser(reader, csvFormat)) {
   for (CSVRecord record : parser.getRecords()) {
 if (record.size() < 4) {
@@ -266,7 +272,7 @@ public class FileSystemTimelineReaderImpl extends 
AbstractService
 return entity;
   }
 
-  private Set<TimelineEntity> getEntities(File dir, String entityType,
+  private Set<TimelineEntity> getEntities(Path dir, String entityType,
   TimelineEntityFilters filters, TimelineDataToRetrieve dataToRetrieve)
   throws IOException {
 // First sort the selected entities based on created/

hadoop git commit: YARN-8834 Provide Java client for fetching Yarn specific entities from TimelineReader. Contributed by Abhishek Modi

2018-10-11 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/trunk fb18cc5ea -> a3edfddcf


YARN-8834 Provide Java client for fetching Yarn specific entities from 
TimelineReader. Contributed by Abhishek Modi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3edfddc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3edfddc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3edfddc

Branch: refs/heads/trunk
Commit: a3edfddcf7822ea13bdf4858672eb82cea5e0b5f
Parents: fb18cc5
Author: Vrushali C 
Authored: Thu Oct 11 20:21:00 2018 -0700
Committer: Vrushali C 
Committed: Thu Oct 11 20:21:00 2018 -0700

--
 .../yarn/client/api/TimelineReaderClient.java   | 120 ++
 .../api/impl/TimelineReaderClientImpl.java  | 239 +++
 .../api/impl/TestTimelineReaderClientImpl.java  | 157 
 3 files changed, 516 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3edfddc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineReaderClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineReaderClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineReaderClient.java
new file mode 100644
index 000..f73c2d3
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineReaderClient.java
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.api;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.client.api.impl.TimelineReaderClientImpl;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A client library that can be used to get Timeline Entities associated with
+ * an application, an application attempt, or containers. This client library
+ * needs to be used along with the timeline service v.2 server.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class TimelineReaderClient extends CompositeService {
+
+  /**
+   * Create a new instance of Timeline Reader Client.
+   */
+  @InterfaceAudience.Public
+  public static TimelineReaderClient createTimelineReaderClient() {
+return new TimelineReaderClientImpl();
+  }
+
+  @InterfaceAudience.Private
+  public TimelineReaderClient(String name) {
+super(name);
+  }
+
+  /**
+   * Gets application entity.
+   * @param appId application id
+   * @param fields Fields to be fetched. Defaults to INFO.
+   * @param filters Filters to be applied while fetching entities.
+   * @return entity of the application
+   * @throws IOException
+   */
+  public abstract TimelineEntity getApplicationEntity(
+  ApplicationId appId, String fields, Map<String, String> filters)
+  throws IOException;
+
+  /**
+   * Gets application attempt entity.
+   * @param appAttemptId application attempt id
+   * @param fields Fields to be fetched. Defaults to INFO.
+   * @param filters Filters to be applied while fetching entities.
+   * @return entity associated with application attempt
+   * @throws IOException
+   */
+  public abstract TimelineEntity getApplicationAttemptEntity(
+  ApplicationAttemptId appAttemptId, String fields,
+  Map<String, String> filters) throws IOException;
+
+  /**
+   * Gets application attempt entities.
+   * @param appId application id
+   * @param fields Fields to be fetched. Defaults to INFO.
+   * @para
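
A rough usage sketch for the new client (illustrative: the application id is a placeholder, "ALL" follows the reader REST API's fields syntax, and a null filter map simply means no filters):

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
    import org.apache.hadoop.yarn.client.api.TimelineReaderClient;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class ReaderClientSketch {
      public static void main(String[] args) throws Exception {
        TimelineReaderClient client = TimelineReaderClient.createTimelineReaderClient();
        client.init(new YarnConfiguration());
        client.start();
        try {
          ApplicationId appId = ApplicationId.fromString("application_1538000000000_0001");
          TimelineEntity app = client.getApplicationEntity(appId, "ALL", null);
          System.out.println(app);
        } finally {
          client.stop();
        }
      }
    }

Because TimelineReaderClient extends CompositeService, the usual init/start/stop service lifecycle applies.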

hadoop git commit: YARN-5742 Serve aggregated logs of historical apps from timeline service. Contributed by Rohith Sharma KS

2018-10-11 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/trunk d91d47bc7 -> 8d1981806


YARN-5742 Serve aggregated logs of historical apps from timeline service. 
Contributed by Rohith Sharma KS


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d198180
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d198180
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d198180

Branch: refs/heads/trunk
Commit: 8d1981806feb8278966c02a9eff42d72541bb35e
Parents: d91d47b
Author: Vrushali C 
Authored: Thu Oct 11 16:26:07 2018 -0700
Committer: Vrushali C 
Committed: Thu Oct 11 16:26:07 2018 -0700

--
 .../webapp/AHSWebServices.java  | 250 ++---
 .../webapp/TestAHSWebServices.java  |   7 +-
 .../yarn/server/webapp/LogWebService.java   | 506 +++
 .../yarn/server/webapp/LogWebServiceUtils.java  | 258 ++
 .../server/webapp/YarnWebServiceParams.java |   1 +
 .../yarn/server/webapp/TestLogWebService.java   | 126 +
 .../reader/TimelineReaderServer.java|   4 +-
 7 files changed, 945 insertions(+), 207 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d198180/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index 9aa71a7..d94605f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -18,13 +18,7 @@
 
 package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
 
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.charset.Charset;
-import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
 import java.util.Set;
 
 import javax.servlet.http.HttpServletRequest;
@@ -35,15 +29,16 @@ import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
 import javax.ws.rs.Produces;
 import javax.ws.rs.QueryParam;
-import javax.ws.rs.WebApplicationException;
 import javax.ws.rs.core.Context;
-import javax.ws.rs.core.GenericEntity;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
-import javax.ws.rs.core.StreamingOutput;
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.Response.Status;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.sun.jersey.api.client.ClientHandlerException;
+import com.sun.jersey.api.client.UniformInterfaceException;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
@@ -54,10 +49,8 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
-import org.apache.hadoop.yarn.logaggregation.ContainerLogMeta;
-import org.apache.hadoop.yarn.logaggregation.ContainerLogsRequest;
 import 
org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;
-import org.apache.hadoop.yarn.logaggregation.ContainerLogAggregationType;
+import org.apache.hadoop.yarn.server.webapp.LogWebServiceUtils;
 import org.apache.hadoop.yarn.server.webapp.WebServices;
 import org.apache.hadoop.yarn.server.webapp.YarnWebServiceParams;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
@@ -65,21 +58,14 @@ import 
org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
-import org.apache.hadoop.yarn.server.webapp.dao.ContainerLogsInfo;
 imp

hadoop git commit: YARN-6989 Ensure timeline service v2 codebase gets UGI from HttpServletRequest in a consistent way. Contributed by Abhishek Modi

2018-10-10 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 cc1bf7f89 -> 7a5d27dde


YARN-6989 Ensure timeline service v2 codebase gets UGI from HttpServletRequest 
in a consistent way. Contributed by Abhishek Modi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a5d27dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a5d27dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a5d27dd

Branch: refs/heads/branch-2
Commit: 7a5d27dde4abab8db35895b77d0ce93bfe99c8a1
Parents: cc1bf7f
Author: Vrushali C 
Authored: Wed Oct 10 15:25:58 2018 -0700
Committer: Vrushali C 
Committed: Wed Oct 10 15:25:58 2018 -0700

--
 .../reader/TimelineReaderWebServicesUtils.java  | 21 ++--
 ...elineReaderWhitelistAuthorizationFilter.java |  2 +-
 2 files changed, 3 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a5d27dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
index ae19b21..02f510a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
@@ -269,25 +269,8 @@ public final class TimelineReaderWebServicesUtils {
* @return UGI.
*/
   public static UserGroupInformation getUser(HttpServletRequest req) {
-return getCallerUserGroupInformation(req, false);
-  }
-
-  /**
-   * Get UGI from the HTTP request.
-   *
-   * @param hsr HTTP request.
-   * @param usePrincipal if true, use principal name else use remote user name
-   * @return UGI.
-   */
-  public static UserGroupInformation getCallerUserGroupInformation(
-  HttpServletRequest hsr, boolean usePrincipal) {
-
-String remoteUser = hsr.getRemoteUser();
-if (usePrincipal) {
-  Principal princ = hsr.getUserPrincipal();
-  remoteUser = princ == null ? null : princ.getName();
-}
-
+Principal princ = req.getUserPrincipal();
+String remoteUser = princ == null ? null : princ.getName();
 UserGroupInformation callerUGI = null;
 if (remoteUser != null) {
   callerUGI = UserGroupInformation.createRemoteUser(remoteUser);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a5d27dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
index d3f63e5..4ed3f23 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
@@ -67,7 +67,7 @@ public class TimelineReaderWhitelistAuthorizationFilter 
implements Filter {
 
 if (isWhitelistReadAuthEnabled) {
   UserGroupInformation callerUGI = TimelineReaderWebServicesUtils
-  .getCallerUserGroupInformation(httpRequest, true);
+  .getUser(httpRequest);
   if (callerUGI == null) {
 String msg = "Unable to obtain user name, user not authenticated";
 throw new AuthorizationException(msg);
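
A small illustrative check of the consolidated helper, using Mockito to fake an authenticated request (the user name is a placeholder):

    import java.security.Principal;
    import javax.servlet.http.HttpServletRequest;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderWebServicesUtils;
    import org.mockito.Mockito;

    public class GetUserSketch {
      public static void main(String[] args) {
        Principal alice = Mockito.mock(Principal.class);
        Mockito.when(alice.getName()).thenReturn("alice");
        HttpServletRequest req = Mockito.mock(HttpServletRequest.class);
        Mockito.when(req.getUserPrincipal()).thenReturn(alice);
        // getUser() now always reads the authenticated principal, never getRemoteUser().
        UserGroupInformation ugi = TimelineReaderWebServicesUtils.getUser(req);
        System.out.println(ugi.getShortUserName()); // prints "alice"
      }
    }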



hadoop git commit: YARN-6989 Ensure timeline service v2 codebase gets UGI from HttpServletRequest in a consistent way. Contributed by Abhishek Modi

2018-10-10 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/trunk bf3d591f0 -> 045069efe


YARN-6989 Ensure timeline service v2 codebase gets UGI from HttpServletRequest 
in a consistent way. Contributed by Abhishek Modi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/045069ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/045069ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/045069ef

Branch: refs/heads/trunk
Commit: 045069efeca07674be1571252bc4c685aa57b440
Parents: bf3d591
Author: Vrushali C 
Authored: Wed Oct 10 15:17:05 2018 -0700
Committer: Vrushali C 
Committed: Wed Oct 10 15:17:05 2018 -0700

--
 .../reader/TimelineReaderWebServicesUtils.java  | 21 ++--
 ...elineReaderWhitelistAuthorizationFilter.java |  2 +-
 2 files changed, 3 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/045069ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
index 63529a4..bea81c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
@@ -269,25 +269,8 @@ public final class TimelineReaderWebServicesUtils {
* @return UGI.
*/
   public static UserGroupInformation getUser(HttpServletRequest req) {
-return getCallerUserGroupInformation(req, false);
-  }
-
-  /**
-   * Get UGI from the HTTP request.
-   *
-   * @param hsr HTTP request.
-   * @param usePrincipal if true, use principal name else use remote user name
-   * @return UGI.
-   */
-  public static UserGroupInformation getCallerUserGroupInformation(
-  HttpServletRequest hsr, boolean usePrincipal) {
-
-String remoteUser = hsr.getRemoteUser();
-if (usePrincipal) {
-  Principal princ = hsr.getUserPrincipal();
-  remoteUser = princ == null ? null : princ.getName();
-}
-
+Principal princ = req.getUserPrincipal();
+String remoteUser = princ == null ? null : princ.getName();
 UserGroupInformation callerUGI = null;
 if (remoteUser != null) {
   callerUGI = UserGroupInformation.createRemoteUser(remoteUser);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/045069ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
index dbe391c..b549778 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
@@ -67,7 +67,7 @@ public class TimelineReaderWhitelistAuthorizationFilter 
implements Filter {
 
 if (isWhitelistReadAuthEnabled) {
   UserGroupInformation callerUGI = TimelineReaderWebServicesUtils
-  .getCallerUserGroupInformation(httpRequest, true);
+  .getUser(httpRequest);
   if (callerUGI == null) {
 String msg = "Unable to obtain user name, user not authenticated";
 throw new AuthorizationException(msg);



hadoop git commit: YARN-8270 Adding JMX Metrics for Timeline Collector and Reader. Contributed by Sushil Ks.

2018-09-27 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/trunk b237a0dd4 -> 90e2e493b


YARN-8270 Adding JMX Metrics for Timeline Collector and Reader. Contributed by 
Sushil Ks.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90e2e493
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90e2e493
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90e2e493

Branch: refs/heads/trunk
Commit: 90e2e493b3dc8be54f655b957b98a4bc0e003684
Parents: b237a0d
Author: Vrushali C 
Authored: Thu Sep 27 15:53:39 2018 -0700
Committer: Vrushali C 
Committed: Thu Sep 27 15:53:39 2018 -0700

--
 .../collector/TimelineCollectorWebService.java  |  16 +-
 .../PerNodeAggTimelineCollectorMetrics.java | 117 +
 .../metrics/TimelineReaderMetrics.java  | 113 +
 .../timelineservice/metrics/package-info.java   |  28 
 .../reader/TimelineReaderWebServices.java   | 166 +--
 .../TestPerNodeAggTimelineCollectorMetrics.java |  56 +++
 .../reader/TestTimelineReaderMetrics.java   |  56 +++
 7 files changed, 500 insertions(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90e2e493/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
index b33a0f0..5111da8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
@@ -41,6 +41,7 @@ import 
org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.http.JettyUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import 
org.apache.hadoop.yarn.api.records.timelineservice.ApplicationAttemptEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
@@ -54,6 +55,7 @@ import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.api.records.timelineservice.UserEntity;
+import 
org.apache.hadoop.yarn.server.timelineservice.metrics.PerNodeAggTimelineCollectorMetrics;
 import org.apache.hadoop.yarn.webapp.ForbiddenException;
 import org.apache.hadoop.yarn.webapp.NotFoundException;
 
@@ -78,6 +80,8 @@ public class TimelineCollectorWebService {
   LoggerFactory.getLogger(TimelineCollectorWebService.class);
 
   private @Context ServletContext context;
+  private static final PerNodeAggTimelineCollectorMetrics METRICS =
+  PerNodeAggTimelineCollectorMetrics.getInstance();
 
   /**
* Gives information about timeline collector.
@@ -152,12 +156,15 @@ public class TimelineCollectorWebService {
   TimelineEntities entities) {
 init(res);
 UserGroupInformation callerUgi = getUser(req);
+boolean isAsync = async != null && async.trim().equalsIgnoreCase("true");
 if (callerUgi == null) {
   String msg = "The owner of the posted timeline entities is not set";
   LOG.error(msg);
   throw new ForbiddenException(msg);
 }
 
+long startTime = Time.monotonicNow();
+boolean succeeded = false;
 try {
   ApplicationId appID = parseApplicationId(appId);
   if (appID == null) {
@@ -172,7 +179,6 @@ public class TimelineCollectorWebService {
 throw new NotFoundException("Application: "+ appId + " is not found");
   }
 
-  boolean isAsync = async != null && async.trim().equalsIgnoreCase("true");
   if (isAsync) {
 collector.putEntitiesAsync(processTimelineEntities(entities, appId,
 Boolean.valueOf(isSubAppEntities)), callerUgi);
@@ -181,6 +187,7 @@ public class TimelineCollectorWe
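
The timing pattern the excerpt above introduces, shown in isolation (a sketch; doPut and recordLatency are hypothetical stand-ins for the entity write and for the update the real code makes on the PerNodeAggTimelineCollectorMetrics singleton):

    import org.apache.hadoop.util.Time;

    public class TimedPutSketch {
      public static void run() {
        long startTime = Time.monotonicNow();
        boolean succeeded = false;
        try {
          doPut(); // placeholder for the guarded operation
          succeeded = true;
        } finally {
          recordLatency(Time.monotonicNow() - startTime, succeeded);
        }
      }

      private static void doPut() { /* placeholder */ }

      private static void recordLatency(long millis, boolean ok) {
        System.out.println((ok ? "ok " : "failed ") + millis + " ms");
      }
    }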

hadoop git commit: HADOOP-15657 Registering MutableQuantiles via Metric annotation. Contributed by Sushil Ks

2018-09-05 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0468b6e73 -> f6e4b13d5


HADOOP-15657 Registering MutableQuantiles via Metric annotation. Contributed by 
Sushil Ks


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6e4b13d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6e4b13d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6e4b13d

Branch: refs/heads/branch-2
Commit: f6e4b13d5f1b254a009f21c064f48c818f241a0d
Parents: 0468b6e
Author: Vrushali C 
Authored: Wed Sep 5 21:09:16 2018 -0700
Committer: Vrushali C 
Committed: Wed Sep 5 21:09:16 2018 -0700

--
 .../main/java/org/apache/hadoop/metrics2/annotation/Metric.java | 5 +
 .../org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java   | 4 
 2 files changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6e4b13d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/Metric.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/Metric.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/Metric.java
index 88fb070..516a3b8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/Metric.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/Metric.java
@@ -67,4 +67,9 @@ public @interface Metric {
* @return optional type (counter|gauge) of the metric
*/
   Type type() default Type.DEFAULT;
+
+  /**
+   * @return optional roll-over interval in seconds for MutableQuantiles
+   */
+  int interval() default 10;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6e4b13d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
index 3209990..c415dd5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
@@ -83,6 +83,10 @@ public class MutableMetricsFactory {
   return registry.newMutableRollingAverages(info.name(),
   annotation.valueName());
 }
+if (cls == MutableQuantiles.class) {
+  return registry.newQuantiles(info.name(), annotation.about(),
+  annotation.sampleName(), annotation.valueName(), 
annotation.interval());
+}
 throw new MetricsException("Unsupported metric field "+ field.getName() +
" of type "+ field.getType().getName());
   }
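
With this factory change, a MutableQuantiles metric can be declared purely through the annotation. A hedged sketch (class, context, and metric names are illustrative):

    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.lib.MutableQuantiles;

    @Metrics(about = "Example source", context = "example")
    public class QuantilesSourceSketch {
      // Instantiated by MutableMetricsFactory; quantiles roll over every 60 seconds.
      @Metric(about = "Op latency", sampleName = "Ops", valueName = "Latency", interval = 60)
      MutableQuantiles opLatency;

      public static void main(String[] args) {
        QuantilesSourceSketch source = DefaultMetricsSystem.initialize("example")
            .register("QuantilesSourceSketch", "Example source", new QuantilesSourceSketch());
        source.opLatency.add(42); // record one latency sample
      }
    }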


hadoop git commit: HADOOP-15657 Registering MutableQuantiles via Metric annotation. Contributed by Sushil Ks

2018-09-05 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8286bf2d1 -> 962089ab5


HADOOP-15657 Registering MutableQuantiles via Metric annotation. Contributed by 
Sushil Ks


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/962089ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/962089ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/962089ab

Branch: refs/heads/trunk
Commit: 962089ab5978291f609656fa1c8a685c683ed83f
Parents: 8286bf2
Author: Vrushali C 
Authored: Wed Sep 5 20:53:21 2018 -0700
Committer: Vrushali C 
Committed: Wed Sep 5 20:53:21 2018 -0700

--
 .../main/java/org/apache/hadoop/metrics2/annotation/Metric.java | 5 +
 .../org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java   | 4 
 2 files changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/962089ab/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/Metric.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/Metric.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/Metric.java
index b8e6a8a..37fa760 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/Metric.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/Metric.java
@@ -68,4 +68,9 @@ public @interface Metric {
* @return optional type (counter|gauge) of the metric
*/
   Type type() default Type.DEFAULT;
+
+  /**
+   * @return optional roll-over interval in seconds for MutableQuantiles
+   */
+  int interval() default 10;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/962089ab/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
index a3ca98d0..c7adaa5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
@@ -83,6 +83,10 @@ public class MutableMetricsFactory {
   return registry.newMutableRollingAverages(info.name(),
   annotation.valueName());
 }
+if (cls == MutableQuantiles.class) {
+  return registry.newQuantiles(info.name(), annotation.about(),
+  annotation.sampleName(), annotation.valueName(), 
annotation.interval());
+}
 throw new MetricsException("Unsupported metric field "+ field.getName() +
" of type "+ field.getType().getName());
   }


hadoop git commit: YARN-8247 Incorrect HTTP status code returned by ATSv2 for non-whitelisted users. Contributed by Rohith Sharma K S

2018-05-09 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 b29479c93 -> ad4d4153d


YARN-8247 Incorrect HTTP status code returned by ATSv2 for non-whitelisted 
users.  Contributed by Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ad4d4153
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ad4d4153
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ad4d4153

Branch: refs/heads/branch-3.1
Commit: ad4d4153da27334bd27abff2280ebe47f0acecd3
Parents: b29479c
Author: Vrushali C 
Authored: Wed May 9 22:30:59 2018 -0700
Committer: Vrushali C 
Committed: Wed May 9 22:30:59 2018 -0700

--
 ...elineReaderWhitelistAuthorizationFilter.java | 14 ++---
 ...elineReaderWhitelistAuthorizationFilter.java | 58 +---
 2 files changed, 44 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad4d4153/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
index 8093fcf..dbe391c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
@@ -27,15 +27,13 @@ import javax.servlet.ServletException;
 import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletRequest;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.Status;
+import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.webapp.ForbiddenException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderWebServicesUtils;
@@ -64,9 +62,12 @@ public class TimelineReaderWhitelistAuthorizationFilter 
implements Filter {
   @Override
   public void doFilter(ServletRequest request, ServletResponse response,
   FilterChain chain) throws IOException, ServletException {
+HttpServletRequest httpRequest = (HttpServletRequest) request;
+HttpServletResponse httpResponse = (HttpServletResponse) response;
+
 if (isWhitelistReadAuthEnabled) {
   UserGroupInformation callerUGI = TimelineReaderWebServicesUtils
-  .getCallerUserGroupInformation((HttpServletRequest) request, true);
+  .getCallerUserGroupInformation(httpRequest, true);
   if (callerUGI == null) {
 String msg = "Unable to obtain user name, user not authenticated";
 throw new AuthorizationException(msg);
@@ -76,9 +77,8 @@ public class TimelineReaderWhitelistAuthorizationFilter 
implements Filter {
 String userName = callerUGI.getShortUserName();
 String msg = "User " + userName
 + " is not allowed to read TimelineService V2 data.";
-Response.status(Status.FORBIDDEN).entity(msg).build();
-throw new ForbiddenException("user " + userName
-+ " is not allowed to read TimelineService V2 data");
+httpResponse.sendError(HttpServletResponse.SC_FORBIDDEN, msg);
+return;
   }
 }
 if (chain != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad4d4153/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWhitelistAuthorizationFilter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java
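
The design choice at the heart of this fix, in isolation: a filter that denies a request should write the 403 itself and stop the chain, because an exception thrown out of doFilter tends to reach the client as a 500 rather than the intended status. A sketch (names illustrative):

    import java.io.IOException;
    import javax.servlet.FilterChain;
    import javax.servlet.ServletException;
    import javax.servlet.ServletRequest;
    import javax.servlet.ServletResponse;
    import javax.servlet.http.HttpServletResponse;

    public final class ForbidSketch {
      static void filter(ServletRequest request, ServletResponse response,
          FilterChain chain, boolean allowed, String userName)
          throws IOException, ServletException {
        if (!allowed) {
          ((HttpServletResponse) response).sendError(HttpServletResponse.SC_FORBIDDEN,
              "User " + userName + " is not allowed to read TimelineService V2 data.");
          return; // do not continue the filter chain
        }
        chain.doFilter(request, response);
      }
    }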

hadoop git commit: YARN-8247 Incorrect HTTP status code returned by ATSv2 for non-whitelisted users. Contributed by Rohith Sharma K S

2018-05-09 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/trunk cc0310a52 -> 3c95ca4f2


YARN-8247 Incorrect HTTP status code returned by ATSv2 for non-whitelisted 
users.  Contributed by Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c95ca4f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c95ca4f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c95ca4f

Branch: refs/heads/trunk
Commit: 3c95ca4f21dcfcaabdd0694e7d005a45baba953f
Parents: cc0310a
Author: Vrushali C 
Authored: Wed May 9 22:17:48 2018 -0700
Committer: Vrushali C 
Committed: Wed May 9 22:17:48 2018 -0700

--
 ...elineReaderWhitelistAuthorizationFilter.java | 14 ++---
 ...elineReaderWhitelistAuthorizationFilter.java | 58 +---
 2 files changed, 44 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c95ca4f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
index 8093fcf..dbe391c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
@@ -27,15 +27,13 @@ import javax.servlet.ServletException;
 import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletRequest;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.Status;
+import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.webapp.ForbiddenException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderWebServicesUtils;
@@ -64,9 +62,12 @@ public class TimelineReaderWhitelistAuthorizationFilter 
implements Filter {
   @Override
   public void doFilter(ServletRequest request, ServletResponse response,
   FilterChain chain) throws IOException, ServletException {
+HttpServletRequest httpRequest = (HttpServletRequest) request;
+HttpServletResponse httpResponse = (HttpServletResponse) response;
+
 if (isWhitelistReadAuthEnabled) {
   UserGroupInformation callerUGI = TimelineReaderWebServicesUtils
-  .getCallerUserGroupInformation((HttpServletRequest) request, true);
+  .getCallerUserGroupInformation(httpRequest, true);
   if (callerUGI == null) {
 String msg = "Unable to obtain user name, user not authenticated";
 throw new AuthorizationException(msg);
@@ -76,9 +77,8 @@ public class TimelineReaderWhitelistAuthorizationFilter 
implements Filter {
 String userName = callerUGI.getShortUserName();
 String msg = "User " + userName
 + " is not allowed to read TimelineService V2 data.";
-Response.status(Status.FORBIDDEN).entity(msg).build();
-throw new ForbiddenException("user " + userName
-+ " is not allowed to read TimelineService V2 data");
+httpResponse.sendError(HttpServletResponse.SC_FORBIDDEN, msg);
+return;
   }
 }
 if (chain != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c95ca4f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWhitelistAuthorizationFilter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reade

hadoop git commit: YARN-8073 TimelineClientImpl doesn't honor yarn.timeline-service.versions configuration. Contributed by Rohith Sharma K S

2018-04-04 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3087e8913 -> 345e7624d


YARN-8073 TimelineClientImpl doesn't honor yarn.timeline-service.versions 
configuration. Contributed by Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/345e7624
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/345e7624
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/345e7624

Branch: refs/heads/trunk
Commit: 345e7624d58a058a1bad666bd1e5ce4b346a9056
Parents: 3087e89
Author: Vrushali C 
Authored: Wed Apr 4 15:08:03 2018 -0700
Committer: Vrushali C 
Committed: Wed Apr 4 15:08:03 2018 -0700

--
 .../jobhistory/JobHistoryEventHandler.java  |  2 +-
 .../hadoop/yarn/conf/YarnConfiguration.java | 21 
 .../client/api/impl/TimelineClientImpl.java | 23 +
 .../yarn/util/timeline/TimelineUtils.java   |  3 +--
 .../TestCombinedSystemMetricsPublisher.java | 26 
 .../reader/TimelineReaderServer.java|  2 +-
 6 files changed, 61 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/345e7624/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index fd93d07..52c13f1 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -269,7 +269,7 @@ public class JobHistoryEventHandler extends AbstractService
   LOG.info("Emitting job history data to the timeline service is enabled");
   if (YarnConfiguration.timelineServiceEnabled(conf)) {
 boolean timelineServiceV2Enabled =
-((int) YarnConfiguration.getTimelineServiceVersion(conf) == 2);
+YarnConfiguration.timelineServiceV2Enabled(conf);
 if(timelineServiceV2Enabled) {
   timelineV2Client =
   ((MRAppMaster.RunningAppContext)context).getTimelineV2Client();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/345e7624/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 42f2cae..41755e2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3797,6 +3797,27 @@ public class YarnConfiguration extends Configuration {
   }
 
   /**
+   * Returns whether the timeline service v.1.5 is enabled via configuration.
+   *
+   * @param conf the configuration
+   * @return whether the timeline service v.1.5 is enabled. V.1.5 refers to a
+   * version equal to 1.5.
+   */
+  public static boolean timelineServiceV15Enabled(Configuration conf) {
+boolean enabled = false;
+if (timelineServiceEnabled(conf)) {
+  Collection<Float> versions = getTimelineServiceVersions(conf);
+  for (Float version : versions) {
+if (Float.compare(version, 1.5f) == 0) {
+  enabled = true;
+  break;
+}
+  }
+}
+return enabled;
+  }
+
+  /**
* Returns all the active timeline service versions. It does not check
* whether the timeline service itself is enabled.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/345e7624/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index 44d6d
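
A sketch of the configuration this patch honors (illustrative; it assumes the YarnConfiguration.TIMELINE_SERVICE_VERSIONS constant backing yarn.timeline-service.versions, whose value is a comma-separated list such as "1.5,2.0f"):

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class VersionsSketch {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
        // Publish to both the v1.5 and v2 timeline services.
        conf.set(YarnConfiguration.TIMELINE_SERVICE_VERSIONS, "1.5,2.0f");
        System.out.println(YarnConfiguration.timelineServiceV15Enabled(conf)); // true
        System.out.println(YarnConfiguration.timelineServiceV2Enabled(conf));  // true
      }
    }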

hadoop git commit: HADOOP-15166 CLI MiniCluster fails with ClassNotFoundException o.a.h.yarn.server.timelineservice.collector.TimelineCollectorManager. Contributed by Gera Shegalov

2018-01-19 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/trunk ec8f47e7f -> c191538ed


HADOOP-15166 CLI MiniCluster fails with ClassNotFoundException 
o.a.h.yarn.server.timelineservice.collector.TimelineCollectorManager. 
Contributed by Gera Shegalov


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c191538e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c191538e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c191538e

Branch: refs/heads/trunk
Commit: c191538ed18e12fff157e88a3203b23b20c10d83
Parents: ec8f47e
Author: Vrushali C 
Authored: Fri Jan 19 16:15:55 2018 -0800
Committer: Vrushali C 
Committed: Fri Jan 19 16:15:55 2018 -0800

--
 .../hadoop-common/src/site/markdown/CLIMiniCluster.md.vm   | 2 +-
 hadoop-mapreduce-project/bin/mapred| 6 ++
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c191538e/hadoop-common-project/hadoop-common/src/site/markdown/CLIMiniCluster.md.vm
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/CLIMiniCluster.md.vm 
b/hadoop-common-project/hadoop-common/src/site/markdown/CLIMiniCluster.md.vm
index 806df0a..9aa9ad2 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CLIMiniCluster.md.vm
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CLIMiniCluster.md.vm
@@ -41,7 +41,7 @@ Running the MiniCluster
 
 From inside the root directory of the extracted tarball, you can start the CLI 
MiniCluster using the following command:
 
-$ HADOOP_CLASSPATH=share/hadoop/yarn/test/hadoop-yarn-server-tests-${project.version}-tests.jar bin/hadoop jar ./share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-${project.version}-tests.jar minicluster -rmport RM_PORT -jhsport JHS_PORT
+$ bin/mapred minicluster -rmport RM_PORT -jhsport JHS_PORT
 
 In the example command above, `RM_PORT` and `JHS_PORT` should be replaced by 
the user's choice of these port numbers. If not specified, random free ports 
will be used.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c191538e/hadoop-mapreduce-project/bin/mapred
--
diff --git a/hadoop-mapreduce-project/bin/mapred 
b/hadoop-mapreduce-project/bin/mapred
index 44f6216..9773ec8 100755
--- a/hadoop-mapreduce-project/bin/mapred
+++ b/hadoop-mapreduce-project/bin/mapred
@@ -36,6 +36,7 @@ function hadoop_usage
   hadoop_add_subcommand "sampler" client "sampler"
   hadoop_add_subcommand "frameworkuploader" admin "mapreduce framework upload"
   hadoop_add_subcommand "version" client "print the version"
+  hadoop_add_subcommand "minicluster" client "CLI MiniCluster"
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true
 }
 
@@ -101,6 +102,11 @@ function mapredcmd_case
 version)
   HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
 ;;
+minicluster)
+  hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}/timelineservice"'/*'
+  hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}/test"'/*'
+  HADOOP_CLASSNAME=org.apache.hadoop.mapreduce.MiniHadoopClusterManager
+;;
 *)
   HADOOP_CLASSNAME="${subcmd}"
   if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then





hadoop git commit: YARN-7537 [Atsv2] load hbase configuration from filesystem rather than URL. Contributed by Rohith Sharma

2018-01-19 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a286acd7b -> 41e10161b


YARN-7537 [Atsv2] load hbase configuration from filesystem rather than URL. 
Contributed by Rohith Sharma


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41e10161
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41e10161
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41e10161

Branch: refs/heads/branch-2
Commit: 41e10161b6227b994079c603e9a0a350f0d055ea
Parents: a286acd
Author: Vrushali C 
Authored: Fri Jan 19 15:59:47 2018 -0800
Committer: Vrushali C 
Committed: Fri Jan 19 15:59:47 2018 -0800

--
 .../src/main/resources/yarn-default.xml |  2 +-
 .../common/HBaseTimelineStorageUtils.java   | 40 ++--
 2 files changed, 29 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41e10161/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 7f2c36c..905f4da 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2426,7 +2426,7 @@
   
 
   
- Optional URL to an hbase-site.xml configuration file to be
+ Optional FS path to an hbase-site.xml configuration file to be
 used to connect to the timeline-service hbase cluster. If empty or not
 specified, then the HBase configuration will be loaded from the classpath.
 When specified the values in the specified configuration file will override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41e10161/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
index 0e5ff59..c1d5e8e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
@@ -18,13 +18,14 @@
 package org.apache.hadoop.yarn.server.timelineservice.storage.common;
 
 import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URL;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -269,28 +270,43 @@ public final class HBaseTimelineStorageUtils {
* @return a configuration with the HBase configuration from the classpath,
* optionally overwritten by the timeline service configuration URL 
if
* specified.
-   * @throws MalformedURLException if a timeline service HBase configuration URL
-   *   is specified but is a malformed URL.
+   * @throws IOException if a timeline service HBase configuration path
+   *   is specified but cannot be read.
*/
   public static Configuration getTimelineServiceHBaseConf(Configuration conf)
-  throws MalformedURLException {
+  throws IOException {
 if (conf == null) {
   throw new NullPointerException();
 }
 
 Configuration hbaseConf;
-String timelineServiceHBaseConfFileURL =
+String timelineServiceHBaseConfFilePath =
 conf.get(YarnConfiguration.TIMELINE_SERVICE_HBASE_CONFIGURATION_FILE);
-if (timelineServiceHBaseConfFileURL != null
-&& timelineServiceHBaseConfFileURL.length() > 0) {
+if (timelineServiceHBaseConfFilePath != null
+&& timelineServiceHBaseConfFilePath.length() > 0) {
   LOG.info("
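
The renamed variables and new imports above show the shape of the change: the hbase-site.xml location is now treated as a Hadoop filesystem path rather than a URL, so it can live on HDFS as well as on a local disk. A minimal sketch of that loading pattern, with an illustrative class and method that are not the committed code:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class HBaseConfLoadingSketch {

  // Start from the classpath HBase configuration, then overlay the
  // hbase-site.xml found at confPath on any Hadoop filesystem.
  static Configuration loadHBaseConf(Configuration conf, String confPath)
      throws IOException {
    Configuration hbaseConf = HBaseConfiguration.create(conf);
    if (confPath != null && !confPath.isEmpty()) {
      Path path = new Path(confPath);
      // Resolves the scheme (hdfs://, file://, ...) from the path and conf.
      FileSystem fs = path.getFileSystem(conf);
      FSDataInputStream in = fs.open(path);
      // Configuration reads the stream lazily and closes it once parsed,
      // so it is handed over rather than closed here.
      hbaseConf.addResource(in);
    }
    return hbaseConf;
  }
}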

hadoop git commit: YARN-7537 [Atsv2] load hbase configuration from filesystem rather than URL. Contributed by Rohith Sharma

2018-01-19 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/trunk cce71dcee -> ec8f47e7f


YARN-7537 [Atsv2] load hbase configuration from filesystem rather than URL. 
Contributed by Rohith Sharma


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec8f47e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec8f47e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec8f47e7

Branch: refs/heads/trunk
Commit: ec8f47e7fadbe62c0c39390d0a46cefd50e98492
Parents: cce71dc
Author: Vrushali C 
Authored: Fri Jan 19 15:34:40 2018 -0800
Committer: Vrushali C 
Committed: Fri Jan 19 15:34:40 2018 -0800

--
 .../src/main/resources/yarn-default.xml |  2 +-
 .../common/HBaseTimelineStorageUtils.java   | 40 ++--
 2 files changed, 29 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec8f47e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 017799a..b83673f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2455,7 +2455,7 @@
   
 
   
- Optional URL to an hbase-site.xml configuration file to be
+ Optional FS path to an hbase-site.xml configuration file to be
 used to connect to the timeline-service hbase cluster. If empty or not
 specified, then the HBase configuration will be loaded from the classpath.
 When specified the values in the specified configuration file will override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec8f47e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
index c115b18..c25a0d2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
@@ -18,13 +18,14 @@
 package org.apache.hadoop.yarn.server.timelineservice.storage.common;
 
 import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URL;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -269,28 +270,43 @@ public final class HBaseTimelineStorageUtils {
* @return a configuration with the HBase configuration from the classpath,
* optionally overwritten by the timeline service configuration URL 
if
* specified.
-   * @throws MalformedURLException if a timeline service HBase configuration URL
-   *   is specified but is a malformed URL.
+   * @throws IOException if a timeline service HBase configuration path
+   *   is specified but cannot be read.
*/
   public static Configuration getTimelineServiceHBaseConf(Configuration conf)
-  throws MalformedURLException {
+  throws IOException {
 if (conf == null) {
   throw new NullPointerException();
 }
 
 Configuration hbaseConf;
-String timelineServiceHBaseConfFileURL =
+String timelineServiceHBaseConfFilePath =
 conf.get(YarnConfiguration.TIMELINE_SERVICE_HBASE_CONFIGURATION_FILE);
-if (timelineServiceHBaseConfFileURL != null
-&& timelineServiceHBaseConfFileURL.length() > 0) {
+if (timelineServiceHBaseConfFilePath != null
+&& timelineServiceHBaseConfFilePath.length() > 0) {
   LOG.info("

[3/3] hadoop git commit: YARN-7378 Documentation changes post branch-2 merge (Vrushali C with Varun Saxena)

2017-10-30 Thread vrushali
YARN-7378 Documentation changes post branch-2 merge (Vrushali C with Varun 
Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3454c02c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3454c02c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3454c02c

Branch: refs/heads/branch-2
Commit: 3454c02c96e8d5dc97b6c83ab05bc510feb647fe
Parents: 2afb8ba
Author: Vrushali C 
Authored: Mon Oct 30 14:09:29 2017 -0700
Committer: Vrushali C 
Committed: Mon Oct 30 14:09:29 2017 -0700

--
 .../src/site/markdown/TimelineServiceV2.md  | 1567 -
 .../src/site/markdown/TimelineServiceV2.md.vm   | 1588 ++
 2 files changed, 1588 insertions(+), 1567 deletions(-)
--






[1/3] hadoop git commit: YARN-7378 Documentation changes post branch-2 merge (Vrushali C with Varun Saxena)

2017-10-30 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2afb8ba14 -> 3454c02c9


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3454c02c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md.vm
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md.vm
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md.vm
new file mode 100644
index 000..307224a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md.vm
@@ -0,0 +1,1588 @@
+
+
+The YARN Timeline Service v.2
+=============================
+
+
+
+## Overview
+
+### Introduction
+
+YARN Timeline Service v.2 is the next major iteration of Timeline Server, 
following v.1 and v.1.5.
+V.2 is created to address two major challenges of v.1.
+
+#### Scalability
+V.1 is limited to a single instance of writer/reader and storage, and does not 
scale well beyond
+small clusters. V.2 uses a more scalable distributed writer architecture and a 
scalable backend
+storage.
+
+YARN Timeline Service v.2 separates the collection (writes) of data from 
serving (reads) of data.
+It uses distributed collectors, essentially one collector for each YARN 
application. The readers
+are separate instances that are dedicated to serving queries via REST API.
+
+YARN Timeline Service v.2 chooses Apache HBase as the primary backing storage, 
as Apache HBase
+scales well to a large size while maintaining good response times for reads 
and writes.
+
+#### Usability improvements
+In many cases, users are interested in information at the level of "flows" or 
logical groups of
+YARN applications. It is much more common to launch a set or series of YARN 
applications to
+complete a logical application. Timeline Service v.2 supports the notion of 
flows explicitly. In
+addition, it supports aggregating metrics at the flow level.
+
+Also, information such as configuration and metrics is treated and supported 
as first-class
+citizens.
+
+The following diagram illustrates the relationship between different YARN
entities modelling flows.
+
+![Flow Hierarchy](./images/flow_hierarchy.png)
+
+### Architecture
+
+YARN Timeline Service v.2 uses a set of collectors (writers) to write data to 
the backend storage.
+The collectors are distributed and co-located with the application masters to 
which they are
+dedicated. All data that belong to that application are sent to the 
application level timeline
+collectors with the exception of the resource manager timeline collector.
+
+For a given application, the application master can write data for the 
application to the
+co-located timeline collectors (which is an NM auxiliary service in this 
release). In addition,
+node managers of other nodes that are running the containers for the 
application also write data
+to the timeline collector on the node that is running the application master.
+
+The resource manager also maintains its own timeline collector. It emits only 
YARN-generic
+lifecycle events to keep its volume of writes reasonable.
+
+The timeline readers are separate daemons separate from the timeline 
collectors, and they are
+dedicated to serving queries via REST API.
+
+The following diagram illustrates the design at a high level.
+
+![Timeline Service v.2  architecture](./images/timeline_v2.jpg)
+
+### Current Status and Future Plans
+
+YARN Timeline Service v.2 is currently in alpha ("alpha 2"). It is a work in 
progress, and
+many things can and will change rapidly.
+
+A complete end-to-end flow of writes and reads is functional, with Apache 
HBase as the backend.
+You should be able to start generating data. When enabled, all YARN-generic 
events are
+published as well as YARN system metrics such as CPU and memory. Furthermore, 
some applications
+including Distributed Shell and MapReduce can write per-framework data to YARN 
Timeline Service
+v.2.
+
+The basic mode of accessing data is via REST. Currently there is no support 
for command line
+access. The REST API comes with a good number of useful and flexible query 
patterns (see below for
+more information).
+
+The collectors (writers) are currently embedded in the node managers as 
auxiliary services. The
+resource manager also has its dedicated in-process collector. The reader is 
currently a single
+instance. Currently, it is not possible to write to Timeline Service outside 
the context of a YARN
+application (i.e. no off-cluster client).
+
+Starting from alpha2, Timeline Service v.2 supports simple authorization in 
terms of a
+configurable whitelist of users and groups who can read timeline data. Cluster 
admins are
+allowed by default to read timeline data.
+
+When YARN Timeline Service v.2 is disabled, one can expect no functional or 
performance impact
+on any other existing functionality.
+
+The work to make it truly production-ready continues.
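
To make the collector write path described above concrete, here is a hedged sketch of publishing one entity with one metric through the v.2 client API. The entity type and metric name are invented, and a real application master would first receive the collector address from the ResourceManager (via TimelineV2Client#setTimelineCollectorInfo), which this standalone snippet omits:

import java.io.IOException;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
import org.apache.hadoop.yarn.client.api.TimelineV2Client;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class TimelineV2WriteSketch {
  public static void main(String[] args) throws IOException, YarnException {
    ApplicationId appId =
        ApplicationId.newInstance(System.currentTimeMillis(), 1);
    // One client per application; it talks to that application's collector.
    TimelineV2Client client = TimelineV2Client.createTimelineClient(appId);
    client.init(new YarnConfiguration());
    client.start();

    TimelineEntity entity = new TimelineEntity();
    entity.setType("MY_APP_EVENT"); // hypothetical entity type
    entity.setId("event-1");

    TimelineMetric metric = new TimelineMetric();
    metric.setId("MAP_TASKS_DONE"); // hypothetical metric name
    metric.addValue(System.currentTimeMillis(), 42);
    entity.addMetric(metric);

    // Async put: buffered and flushed to the collector in the background.
    client.putEntitiesAsync(entity);
    client.stop();
  }
}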

[2/3] hadoop git commit: YARN-7378 Documentation changes post branch-2 merge (Vrushali C with Varun Saxena)

2017-10-30 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3454c02c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
deleted file mode 100644
index 6a0971a..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ /dev/null
@@ -1,1567 +0,0 @@
-
-
-The YARN Timeline Service v.2
-=============================
-
-
-
-## Overview
-
-### Introduction
-
-YARN Timeline Service v.2 is the next major iteration of Timeline Server, 
following v.1 and v.1.5.
-V.2 is created to address two major challenges of v.1.
-
-#### Scalability
-V.1 is limited to a single instance of writer/reader and storage, and does not 
scale well beyond
-small clusters. V.2 uses a more scalable distributed writer architecture and a 
scalable backend
-storage.
-
-YARN Timeline Service v.2 separates the collection (writes) of data from 
serving (reads) of data.
-It uses distributed collectors, essentially one collector for each YARN 
application. The readers
-are separate instances that are dedicated to serving queries via REST API.
-
-YARN Timeline Service v.2 chooses Apache HBase as the primary backing storage, 
as Apache HBase
-scales well to a large size while maintaining good response times for reads 
and writes.
-
-#### Usability improvements
-In many cases, users are interested in information at the level of "flows" or 
logical groups of
-YARN applications. It is much more common to launch a set or series of YARN 
applications to
-complete a logical application. Timeline Service v.2 supports the notion of 
flows explicitly. In
-addition, it supports aggregating metrics at the flow level.
-
-Also, information such as configuration and metrics is treated and supported 
as first-class
-citizens.
-
-The following diagram illustrates the relationship between different YARN entities modelling flows.
-
-![Flow Hierarchy](./images/flow_hierarchy.png)
-
-### Architecture
-
-YARN Timeline Service v.2 uses a set of collectors (writers) to write data to 
the backend storage.
-The collectors are distributed and co-located with the application masters to 
which they are
-dedicated. All data that belong to that application are sent to the 
application level timeline
-collectors with the exception of the resource manager timeline collector.
-
-For a given application, the application master can write data for the 
application to the
-co-located timeline collectors (which is an NM auxiliary service in this 
release). In addition,
-node managers of other nodes that are running the containers for the 
application also write data
-to the timeline collector on the node that is running the application master.
-
-The resource manager also maintains its own timeline collector. It emits only 
YARN-generic
-lifecycle events to keep its volume of writes reasonable.
-
-The timeline readers are separate daemons separate from the timeline 
collectors, and they are
-dedicated to serving queries via REST API.
-
-The following diagram illustrates the design at a high level.
-
-![Timeline Service v.2  architecture](./images/timeline_v2.jpg)
-
-### Current Status and Future Plans
-
-YARN Timeline Service v.2 is currently in alpha ("alpha 2"). It is a work in 
progress, and
-many things can and will change rapidly.
-
-A complete end-to-end flow of writes and reads is functional, with Apache 
HBase as the backend.
-You should be able to start generating data. When enabled, all YARN-generic 
events are
-published as well as YARN system metrics such as CPU and memory. Furthermore, 
some applications
-including Distributed Shell and MapReduce can write per-framework data to YARN 
Timeline Service
-v.2.
-
-The basic mode of accessing data is via REST. Currently there is no support 
for command line
-access. The REST API comes with a good number of useful and flexible query 
patterns (see below for
-more information).
-
-The collectors (writers) are currently embedded in the node managers as 
auxiliary services. The
-resource manager also has its dedicated in-process collector. The reader is 
currently a single
-instance. Currently, it is not possible to write to Timeline Service outside 
the context of a YARN
-application (i.e. no off-cluster client).
-
-Starting from alpha2, Timeline Service v.2 supports simple authorization in 
terms of a
-configurable whitelist of users and groups who can read timeline data. Cluster 
admins are
-allowed by default to read timeline data.
-
-When YARN Timeline Service v.2 is disabled, one can expect no functional or 
performance impact
-on any other existing functionality.
-
-The work to make it truly production-ready continues. Some key items include
-
-* More robust storage fault tolerance
-* Support for 

[29/54] [abbrv] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6989725/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
new file mode 100644
index 000..3a32be1
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -0,0 +1,856 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.net.InetSocketAddress;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadFactory;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+/**
+ * A client proxy for Router -> NN communication using the NN ClientProtocol.
+ * <p>
+ * Provides routers to invoke remote ClientProtocol methods and handle
+ * retries/failover.
+ * <ul>
+ * <li>invokeSingle Make a single request to a single namespace
+ * <li>invokeSequential Make a sequential series of requests to multiple
+ * ordered namespaces until a condition is met.
+ * <li>invokeConcurrent Make concurrent requests to multiple namespaces and
+ * return all of the results.
+ * </ul>
+ * Also maintains a cached pool of connections to NNs. Connections are managed
+ * by the ConnectionManager and are unique to each user + NN. The size of the
+ * connection pool can be configured. Larger pools allow for more simultaneous
+ * requests to a single NN from a single user.
+ */
+public class RouterRpcClient {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(RouterRpcClient.class);
+
+
+  /** Router identifier. */
+  private final String routerId;
+
+  /** Interface to identify the active NN for a nameservice or blockpool ID. */
+  private final ActiveNamenodeResolver namenodeResolver;
+
+  /** Connection pool to the Namenodes per user for performance. */
+  private final ConnectionManager connectionManager;
+  /** Service to run asynchronous calls. */
+  private final ExecutorService executorService;
+  /** Retry policy for router -> NN communication. */
+  private final RetryPolicy retryPolicy;
+
+  /** Pattern to parse a stack trace line. */
+  private static final Pattern STACK_TRACE_PATTERN =
+  Pattern.compile("\\tat (.*)\\.(.*)\\((.*):(\\d*)\\)");
+
+
+  /**
+   * Create a router RPC clie
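
The javadoc above names three invocation patterns. The fan-out (invokeConcurrent-style) pattern can be conveyed with a self-contained sketch using hypothetical types; the real class adds retries, failover, and per-user connection pooling on top:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class InvokeConcurrentSketch {

  // Stand-in for a remote ClientProtocol call against one namespace.
  interface RemoteOp<T> {
    T call(String namespace) throws Exception;
  }

  private final ExecutorService pool = Executors.newFixedThreadPool(8);

  // Issue the same operation against every namespace and collect all results.
  <T> Map<String, T> invokeConcurrent(List<String> namespaces, RemoteOp<T> op)
      throws Exception {
    List<Future<T>> futures = new ArrayList<>();
    for (String ns : namespaces) {
      futures.add(pool.submit(() -> op.call(ns))); // one task per namespace
    }
    Map<String, T> results = new LinkedHashMap<>(); // keep namespace order
    for (int i = 0; i < namespaces.size(); i++) {
      results.put(namespaces.get(i), futures.get(i).get());
    }
    return results;
  }
}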

[51/54] [abbrv] hadoop git commit: YARN-4090. Make Collections.sort() more efficient by caching resource usage. (Contributed by Yufei Gu, Shilong Zhang and Xianyin Xin)

2017-10-20 Thread vrushali
YARN-4090. Make Collections.sort() more efficient by caching resource usage. 
(Contributed by Yufei Gu, Shilong Zhang and Xianyin Xin)

(cherry picked from commit 1f4cdf10681b6903207a63fb5c306c9665ed9464)
(cherry picked from commit 96106b8f5fe50e2d5c0c4df5dbddea4f89f278d9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c4f28c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c4f28c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c4f28c9

Branch: refs/heads/branch-2
Commit: 4c4f28c9e7f76762346d990b043c6b77d3ac627a
Parents: c9f6a98
Author: Yufei Gu 
Authored: Fri Oct 20 01:32:20 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:49:33 2017 -0700

--
 .../scheduler/fair/FSAppAttempt.java| 19 ++
 .../scheduler/fair/FSLeafQueue.java | 19 +-
 .../scheduler/fair/FSParentQueue.java   | 14 ---
 .../resourcemanager/scheduler/fair/FSQueue.java | 34 +
 .../scheduler/fair/TestFairScheduler.java   | 39 +++-
 5 files changed, 93 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c4f28c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 06ba4e3..006acea 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -49,6 +49,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEven
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerFinishedEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
+import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
@@ -174,6 +175,7 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   rmContainer.getNodeLabelExpression(),
   getUser(), 1, containerResource);
   this.attemptResourceUsage.decUsed(containerResource);
+  getQueue().decUsedResource(containerResource);
 
   // Clear resource utilization metrics cache.
   lastMemoryAggregateAllocationUpdateTime = -1;
@@ -468,6 +470,7 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   List resourceRequestList = appSchedulingInfo.allocate(
   type, node, schedulerKey, container);
   this.attemptResourceUsage.incUsed(container.getResource());
+  getQueue().incUsedResource(container.getResource());
 
   // Update resource requests related to "request" and store in RMContainer
   ((RMContainerImpl) rmContainer).setResourceRequests(resourceRequestList);
@@ -651,6 +654,22 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 schedulerKey.getAllocationRequestId());
   }
 
+  @Override
+  public synchronized void recoverContainer(SchedulerNode node,
+  RMContainer rmContainer) {
+try {
+  writeLock.lock();
+
+  super.recoverContainer(node, rmContainer);
+
+  if (!rmContainer.getState().equals(RMContainerState.COMPLETED)) {
+getQueue().incUsedResource(rmContainer.getContainer().getResource());
+  }
+} finally {
+  writeLock.unlock();
+}
+  }
+
   /**
* Reserve a spot for {@code container} on this {@code node}. If
* the container is {@code alreadyReserved} on the node, simply

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c4f28c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 
a/hadoop-ya
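
The essence of the change is visible in the hunks above: every allocation and release now also updates a usage total cached on the queue (incUsedResource/decUsedResource), so sorting queues by usage no longer recomputes aggregates on every comparison. A toy sketch of the idea, with hypothetical names and a memory-only resource:

import java.util.Comparator;
import java.util.concurrent.atomic.AtomicLong;

public class CachedUsageQueue {

  private final AtomicLong usedMemoryMb = new AtomicLong();

  // Called on container allocation/recovery.
  void incUsedResource(long mb) {
    usedMemoryMb.addAndGet(mb);
  }

  // Called on container release.
  void decUsedResource(long mb) {
    usedMemoryMb.addAndGet(-mb);
  }

  long getUsedMemoryMb() {
    return usedMemoryMb.get();
  }

  // Cheap comparator: Collections.sort() reads the cached totals in O(1)
  // per comparison instead of walking each queue's applications.
  static final Comparator<CachedUsageQueue> BY_USAGE =
      Comparator.comparingLong(CachedUsageQueue::getUsedMemoryMb);
}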

[20/54] [abbrv] hadoop git commit: HDFS-10882. Federation State Store Interface API. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-10882. Federation State Store Interface API. Contributed by Jason Kace and 
Inigo Goiri.

(cherry picked from commit 6d94c90ece1c1d23d4c97e72c54e9991f5dbc481)
(cherry picked from commit 2c740a684a23663962119726bf0e7ecef173f6f1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04e3f380
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04e3f380
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04e3f380

Branch: refs/heads/branch-2
Commit: 04e3f380674fd257e805c699c7fe55f20435ef11
Parents: 2ab2452
Author: Inigo 
Authored: Thu Apr 6 19:18:52 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:29 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  11 ++
 .../server/federation/store/RecordStore.java| 100 
 .../store/driver/StateStoreSerializer.java  | 119 +++
 .../driver/impl/StateStoreSerializerPBImpl.java | 115 ++
 .../store/records/impl/pb/PBRecord.java |  47 
 .../store/records/impl/pb/package-info.java |  29 +
 .../src/main/resources/hdfs-default.xml |   8 ++
 7 files changed, 429 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04e3f380/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 912307f..b645347 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import 
org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -1018,6 +1019,16 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final String FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS_DEFAULT 
=
   "org.apache.hadoop.hdfs.server.federation.MockResolver";
 
+  // HDFS Router-based federation State Store
+  public static final String FEDERATION_STORE_PREFIX =
+  FEDERATION_ROUTER_PREFIX + "store.";
+
+  public static final String FEDERATION_STORE_SERIALIZER_CLASS =
+  DFSConfigKeys.FEDERATION_STORE_PREFIX + "serializer";
+  public static final Class<StateStoreSerializerPBImpl>
+      FEDERATION_STORE_SERIALIZER_CLASS_DEFAULT =
+      StateStoreSerializerPBImpl.class;
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

http://git-wip-us.apache.org/repos/asf/hadoop/blob/04e3f380/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
new file mode 100644
index 000..524f432
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store;
+
+import java.lang.reflect.Constructor;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apach
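
The new FEDERATION_STORE_SERIALIZER_CLASS key follows Hadoop's usual pluggable-implementation pattern: read a Class from the configuration and instantiate it reflectively. A hedged sketch with simplified types (the real interfaces live in the store and store.driver packages):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

public final class SerializerFactorySketch {

  public abstract static class Serializer {
    // serialize/deserialize methods elided in this sketch
  }

  public static class PBSerializer extends Serializer {
  }

  static Serializer create(Configuration conf) {
    Class<? extends Serializer> clazz = conf.getClass(
        "dfs.federation.router.store.serializer", // key per the diff above
        PBSerializer.class, Serializer.class);
    // Instantiates via reflection, injecting conf if the class is Configurable.
    return ReflectionUtils.newInstance(clazz, conf);
  }
}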

[23/54] [abbrv] hadoop git commit: HDFS-10881. Federation State Store Driver API. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-10881. Federation State Store Driver API. Contributed by Jason Kace and 
Inigo Goiri.

(cherry picked from commit 0f88e049156dce173afc0dbda864e29190dd2210)
(cherry picked from commit 533b986633e0a9076cf3918fba3e3b591c6f65f2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ab2452b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ab2452b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ab2452b

Branch: refs/heads/branch-2
Commit: 2ab2452b72e35a39cfc773455f519d7a737702b9
Parents: c51de70
Author: Inigo 
Authored: Wed Mar 29 19:35:06 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:29 2017 -0700

--
 .../store/StateStoreUnavailableException.java   |  33 
 .../federation/store/StateStoreUtils.java   |  72 +++
 .../store/driver/StateStoreDriver.java  | 172 +
 .../driver/StateStoreRecordOperations.java  | 164 
 .../store/driver/impl/StateStoreBaseImpl.java   |  69 +++
 .../store/driver/impl/package-info.java |  39 
 .../federation/store/driver/package-info.java   |  37 
 .../federation/store/protocol/package-info.java |  31 +++
 .../federation/store/records/BaseRecord.java| 189 +++
 .../federation/store/records/QueryResult.java   |  56 ++
 .../federation/store/records/package-info.java  |  36 
 11 files changed, 898 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ab2452b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java
new file mode 100644
index 000..4e6f8c8
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store;
+
+import java.io.IOException;
+
+/**
+ * Thrown when the state store is not reachable or available. Cached APIs and
+ * queries may succeed. Clients should retry later.
+ */
+public class StateStoreUnavailableException extends IOException {
+
+  private static final long serialVersionUID = 1L;
+
+  public StateStoreUnavailableException(String msg) {
+super(msg);
+  }
+}
\ No newline at end of file
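
The contract in the javadoc ("retry later") suggests a thin retry wrapper around state store calls; a hedged, self-contained sketch with hypothetical names:

import java.io.IOException;
import java.util.concurrent.Callable;

public final class StateStoreRetrySketch {

  // Run op, backing off and retrying on IOException subclasses such as
  // StateStoreUnavailableException, up to maxAttempts tries.
  static <T> T withRetries(Callable<T> op, int maxAttempts, long backoffMs)
      throws Exception {
    for (int attempt = 1; ; attempt++) {
      try {
        return op.call();
      } catch (IOException e) {
        if (attempt >= maxAttempts) {
          throw e; // give up after the last attempt
        }
        Thread.sleep(backoffMs * attempt); // linear backoff between attempts
      }
    }
  }
}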

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ab2452b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java
new file mode 100644
index 000..8c681df
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,

[31/54] [abbrv] hadoop git commit: HDFS-12223. Rebasing HDFS-10467. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-12223. Rebasing HDFS-10467. Contributed by Inigo Goiri.

(cherry picked from commit 47db6e9d8e2c264671c89fdd6cb11a7c762d2cce)
(cherry picked from commit 0ec82b8cdfaaa5f23d1a0f7f7fb8c9187c5e309b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65593d58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65593d58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65593d58

Branch: refs/heads/branch-2
Commit: 65593d58c4c13b171f0bada878dd94e808152962
Parents: e698972
Author: Inigo Goiri 
Authored: Fri Jul 28 15:55:10 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:30 2017 -0700

--
 .../federation/router/RouterRpcServer.java  | 59 +---
 1 file changed, 51 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65593d58/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 4bae71e..eaaab39 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -64,8 +64,9 @@ import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
-import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.BlocksStats;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
@@ -75,6 +76,7 @@ import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -85,6 +87,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -1736,13 +1739,6 @@ public class RouterRpcServer extends AbstractService 
implements ClientProtocol {
   }
 
   @Override // ClientProtocol
-  public AddingECPolicyResponse[] addErasureCodingPolicies(
-  ErasureCodingPolicy[] policies) throws IOException {
-checkOperation(OperationCategory.WRITE, false);
-return null;
-  }
-
-  @Override // ClientProtocol
   public void unsetErasureCodingPolicy(String src) throws IOException {
 checkOperation(OperationCategory.WRITE, false);
   }
@@ -1808,6 +1804,53 @@ public class RouterRpcServer extends AbstractService 
implements ClientProtocol {
 return null;
   }
 
+  @Override
+  public AddECPolicyResponse[] addErasureCodingPolicies(
+  ErasureCodingPolicy[] arg0) throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+return null;
+  }
+
+  @Override
+  public void removeErasureCodingPolicy(String arg0) throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override
+  public void disableErasureCodingPolicy(String arg0) throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override
+  public void enableErasureCodingPolicy(String arg0) throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override
+  public ECBlockGroupsStats getECBlockGroupsStats() throws IOException {
+checkOperation(OperationCategory.READ, false);
+return null;
+  }
+
+  @Override
+  public HashMap getErasureCodingCodecs() throws IOException {
+checkOperation(OperationCategory.READ, false);
+return null;
+  }
+
+  @Override
+  public BlocksStats getBlocksStats() throws IOException {
+checkOperation
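
All of these new overrides share one stub pattern: validate the operation category, then flag the call as unsupported. A self-contained sketch of that guard; the real helper's exact behavior is assumed, not quoted:

import java.io.IOException;

public class RouterStubSketch {

  enum OperationCategory { READ, WRITE }

  // Mimics the guard used above: when supported is false, reject the call
  // instead of silently doing nothing (assumption about the real helper).
  void checkOperation(OperationCategory op, boolean supported)
      throws IOException {
    if (!supported) {
      throw new UnsupportedOperationException(
          "Operation (" + op + ") is not supported by the Router yet");
    }
  }

  public void unsetErasureCodingPolicy(String src) throws IOException {
    checkOperation(OperationCategory.WRITE, false);
  }
}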

[09/54] [abbrv] hadoop git commit: HADOOP-14958. Fix source-level compatibility after HADOOP-11252. Contributed by Junping Du.

2017-10-20 Thread vrushali
HADOOP-14958. Fix source-level compatibility after HADOOP-11252. Contributed by 
Junping Du.

(cherry picked from commit b016f08f67830ed3ca741bc6a10c3f5164781be5)
(cherry picked from commit 9433f9eb09af0ca61f01a2eb42ff3ffe31a94d5f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8c6ef01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8c6ef01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8c6ef01

Branch: refs/heads/branch-2
Commit: e8c6ef01d382dd49d7324b87fb020516ac432697
Parents: bc3ca4c
Author: Junping Du 
Authored: Wed Oct 18 15:06:30 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:27 2017 -0700

--
 .../src/main/java/org/apache/hadoop/ipc/Client.java | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8c6ef01/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 05928f3..533b6ca 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -214,7 +214,8 @@ public class Client implements AutoCloseable {
* @param conf Configuration
* @param pingInterval the ping interval
*/
-  static final void setPingInterval(Configuration conf, int pingInterval) {
+  public static final void setPingInterval(Configuration conf,
+  int pingInterval) {
 conf.setInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, pingInterval);
   }
 
@@ -225,7 +226,7 @@ public class Client implements AutoCloseable {
* @param conf Configuration
* @return the ping interval
*/
-  static final int getPingInterval(Configuration conf) {
+  public static final int getPingInterval(Configuration conf) {
 return conf.getInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY,
 CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT);
   }





[34/54] [abbrv] hadoop git commit: HDFS-11554. [Documentation] Router-based federation documentation. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-11554. [Documentation] Router-based federation documentation. Contributed 
by Inigo Goiri.

(cherry picked from commit ee3260211d94aed223dd6f2386a166eb2c7d67af)
(cherry picked from commit 67d10087aff9d4ab2748aefc1b97522495c148f8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/568a32e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/568a32e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/568a32e5

Branch: refs/heads/branch-2
Commit: 568a32e5fe9f6dde21ea3de0456bda7807051261
Parents: 89c8018
Author: Inigo Goiri 
Authored: Wed Aug 16 17:23:29 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:31 2017 -0700

--
 .../src/site/markdown/HDFSRouterFederation.md   | 170 +++
 .../site/resources/images/routerfederation.png  | Bin 0 -> 24961 bytes
 2 files changed, 170 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/568a32e5/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
new file mode 100644
index 000..f094238
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -0,0 +1,170 @@
+
+
+HDFS Router-based Federation
+============================
+
+
+
+Introduction
+------------
+
+NameNodes have scalability limits because of the metadata overhead comprised 
of inodes (files and directories) and file blocks, the number of Datanode 
heartbeats, and the number of HDFS RPC client requests.
+The common solution is to split the filesystem into smaller subclusters [HDFS Federation](./Federation.html) and provide a federated view [ViewFs](./ViewFs.html).
+The problem is how to maintain the split of the subclusters (e.g., namespace 
partition), which forces users to connect to multiple subclusters and manage 
the allocation of folders/files to them.
+
+
+Architecture
+------------
+
+A natural extension to this partitioned federation is to add a layer of 
software responsible for federating the namespaces.
+This extra layer allows users to access any subcluster transparently, lets 
subclusters manage their own block pools independently, and supports 
rebalancing of data across subclusters.
+To accomplish these goals, the federation layer directs block accesses to the 
proper subcluster, maintains the state of the namespaces, and provides 
mechanisms for data rebalancing.
+This layer must be scalable, highly available, and fault tolerant.
+
+This federation layer comprises multiple components.
+The _Router_ component that has the same interface as a NameNode, and forwards 
the client requests to the correct subcluster, based on ground-truth 
information from a State Store.
+The _State Store_ combines a remote _Mount Table_ (in the flavor of [ViewFs](./ViewFs.html), but shared between clients) and utilization (load/capacity) information about the subclusters.
+This approach has the same architecture as [YARN 
federation](../hadoop-yarn/Federation.html).
+
+![Router-based Federation Sequence Diagram | 
width=800](./images/routerfederation.png)
+
+
+### Example flow
+The simplest configuration deploys a Router on each NameNode machine.
+The Router monitors the local NameNode and heartbeats the state to the State 
Store.
+When a regular DFS client contacts any of the Routers to access a file in the 
federated filesystem, the Router checks the Mount Table in the State Store 
(i.e., the local cache) to find out which subcluster contains the file.
+Then it checks the Membership table in the State Store (i.e., the local cache) 
for the NameNode responsible for the subcluster.
+After it has identified the correct NameNode, the Router proxies the request.
+The client accesses Datanodes directly.
+
+
+### Router
+There can be multiple Routers in the system with soft state.
+Each Router has two roles:
+
+* Federated interface: expose a single, global NameNode interface to the 
clients and forward the requests to the active NameNode in the correct 
subcluster
+* NameNode heartbeat: maintain the information about a NameNode in the State 
Store
+
+#### Federated interface
+The Router receives a client request, checks the State Store for the correct 
subcluster, and forwards the request to the active NameNode of that subcluster.
+The reply from the NameNode then flows in the opposite direction.
+The Routers are stateless and can be behind a load balancer.
+For performance, the Router also caches remote mount table entries and the 
state of the subclusters.
+To make sure that changes have been propagated to all Routers, each Router 
heartbeats its state to the St
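
The Mount Table lookup described above amounts to a longest-prefix match from a federated path to the subcluster that owns it. A simplified, hypothetical sketch (the real resolver also caches entries and handles multiple destinations):

import java.util.TreeMap;

public class MountTableSketch {

  private final TreeMap<String, String> mounts = new TreeMap<>();

  void addEntry(String mountPoint, String subcluster) {
    mounts.put(mountPoint, subcluster);
  }

  // Walk up from the full path; the deepest matching mount point wins.
  String resolve(String path) {
    for (String candidate = path; candidate != null;
        candidate = parent(candidate)) {
      String subcluster = mounts.get(candidate);
      if (subcluster != null) {
        return subcluster;
      }
    }
    return null; // unmounted path
  }

  private static String parent(String path) {
    if (path.equals("/")) {
      return null;
    }
    int idx = path.lastIndexOf('/');
    return idx <= 0 ? "/" : path.substring(0, idx);
  }
}

For example, with entries {"/data" -> "ns1", "/" -> "ns0"}, resolve("/data/app1/part-0") returns "ns1" while resolve("/tmp") falls back to "ns0".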

[47/54] [abbrv] hadoop git commit: HDFS-12577. Rename Router tooling. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-12577. Rename Router tooling. Contributed by Inigo Goiri.

(cherry picked from commit 53e8d0d030525e4c7f3875e23807c6dbe778890f)
(cherry picked from commit 5d63a388d1c3ec8a658cb2fd9b34c240bddf15a0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c954e6b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c954e6b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c954e6b7

Branch: refs/heads/branch-2
Commit: c954e6b7b2ba5e24f8c5cc432b99ab9d084819cc
Parents: 1772d45
Author: Inigo Goiri 
Authored: Fri Oct 6 17:31:53 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:34 2017 -0700

--
 .../hadoop-hdfs/src/main/bin/hdfs.cmd   | 11 +--
 .../server/federation/router/DFSRouter.java | 76 
 .../hdfs/server/federation/router/Router.java   | 39 --
 .../src/site/markdown/HDFSRouterFederation.md   | 12 ++--
 4 files changed, 88 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c954e6b7/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index 53bdf70..a9a7852 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -59,7 +59,7 @@ if "%1" == "--loglevel" (
 )
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode 
dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups 
snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath 
crypto router federation debug
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode 
dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups 
snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath 
crypto dfsrouter dfsrouteradmin debug
   for %%i in ( %hdfscommands% ) do (
 if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -179,12 +179,12 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
   goto :eof
 
-:router
-  set CLASS=org.apache.hadoop.hdfs.server.federation.router.Router
+:dfsrouter
+  set CLASS=org.apache.hadoop.hdfs.server.federation.router.DFSRouter
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
   goto :eof
 
-:federation
+:dfsrouteradmin
   set CLASS=org.apache.hadoop.hdfs.tools.federation.RouterAdmin
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
   goto :eof
@@ -229,7 +229,8 @@ goto :eof
   @echo   secondarynamenoderun the DFS secondary namenode
   @echo   namenode run the DFS namenode
   @echo   journalnode  run the DFS journalnode
-  @echo   router   run the DFS router
+  @echo   dfsrouterrun the DFS router
+  @echo   dfsrouteradmin   manage Router-based federation
   @echo   zkfc run the ZK Failover Controller daemon
   @echo   datanode run a DFS datanode
   @echo   dfsadmin run a DFS admin client

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c954e6b7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/DFSRouter.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/DFSRouter.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/DFSRouter.java
new file mode 100644
index 0000000..a2ac258
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/DFSRouter.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
+import org.apache.hadoop.conf.Configuration

[48/54] [abbrv] hadoop git commit: HDFS-12620. Backporting HDFS-10467 to branch-2. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-12620. Backporting HDFS-10467 to branch-2. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b60c658b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b60c658b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b60c658b

Branch: refs/heads/branch-2
Commit: b60c658b048226fba95c6b62e1a1d541170e20f4
Parents: c954e6b
Author: Inigo Goiri 
Authored: Thu Oct 19 17:40:42 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:34 2017 -0700

--
 .../hadoop-hdfs/src/main/bin/hdfs   |   7 +
 .../federation/metrics/FederationMetrics.java   | 139 +--
 .../federation/metrics/NamenodeBeanMetrics.java |  61 
 .../federation/resolver/MountTableResolver.java |  16 ++-
 .../federation/router/ConnectionManager.java|   2 +-
 .../federation/router/ConnectionPool.java   |   2 +-
 .../federation/router/RouterRpcServer.java  | 115 +--
 .../hdfs/server/federation/MockResolver.java|   5 +-
 .../server/federation/router/TestRouterRpc.java |   2 +-
 9 files changed, 161 insertions(+), 188 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b60c658b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index fbfbaf2..0b96ec2 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -47,6 +47,8 @@ function print_usage(){
   echo "  datanode run a DFS datanode"
   echo "  debugrun a Debug Admin to execute HDFS debug 
commands"
   echo "  dfsadmin run a DFS admin client"
+  echo "  dfsrouterrun the DFS router"
+  echo "  dfsrouteradmin   manage Router-based federation"
   echo "  haadmin  run a DFS HA admin client"
   echo "  fsck run a DFS filesystem checking utility"
   echo "  balancer run a cluster balancing utility"
@@ -157,6 +159,11 @@ elif [ "$COMMAND" = "dfs" ] ; then
 elif [ "$COMMAND" = "dfsadmin" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
   HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "dfsrouter" ] ; then
+  CLASS='org.apache.hadoop.hdfs.server.federation.router.DFSRouter'
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_ROUTER_OPTS"
+elif [ "$COMMAND" = "dfsrouteradmin" ] ; then
+  CLASS='org.apache.hadoop.hdfs.tools.federation.RouterAdmin'
 elif [ "$COMMAND" = "haadmin" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
   CLASSPATH=${CLASSPATH}:${TOOL_PATH}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b60c658b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
index 7844a2e..685c585 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
@@ -31,6 +31,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
 import java.util.LinkedList;
@@ -38,10 +39,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
-import java.util.function.Function;
-import java.util.function.ToIntFunction;
-import java.util.function.ToLongFunction;
-import java.util.stream.Collectors;
 
 import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
@@ -72,7 +69,7 @@ import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.VersionInfo;
 import org.codehaus.jettison.json.JSONObject;
-import org.eclipse.jetty.util.ajax.JSON;
+import org.mortbay.util.ajax.JSON;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -263,12 +260,12 @@ public class FederationMetrics implements FederationMBean 
{
 
   @Override
   public long getTo

[35/54] [abbrv] hadoop git commit: HDFS-11826. Federation Namenode Heartbeat. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-11826. Federation Namenode Heartbeat. Contributed by Inigo Goiri.

(cherry picked from commit 928f8dab52191e733984d37f47b69719ccf11313)
(cherry picked from commit d8c81073320320a019fb3868be4f06f46aebea43)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f787d26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f787d26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f787d26

Branch: refs/heads/branch-2
Commit: 6f787d262c0a1174a88fac0f77a61a5212275c59
Parents: 366bf3c
Author: Inigo Goiri 
Authored: Tue Aug 1 14:40:27 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:31 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  14 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  38 ++
 .../resolver/NamenodeStatusReport.java  | 193 ++
 .../federation/router/FederationUtil.java   |  66 
 .../router/NamenodeHeartbeatService.java| 350 +++
 .../hdfs/server/federation/router/Router.java   | 112 ++
 .../src/main/resources/hdfs-default.xml |  32 ++
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |   8 +
 .../hdfs/server/federation/MockResolver.java|   9 +-
 .../server/federation/RouterConfigBuilder.java  |  22 ++
 .../server/federation/RouterDFSCluster.java |  43 +++
 .../router/TestNamenodeHeartbeat.java   | 168 +
 .../server/federation/router/TestRouter.java|   3 +
 13 files changed, 1057 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f787d26/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 2e16a50..6d06bf2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1039,6 +1039,20 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   FEDERATION_ROUTER_PREFIX + "rpc.enable";
   public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true;
 
+  // HDFS Router heartbeat
+  public static final String DFS_ROUTER_HEARTBEAT_ENABLE =
+  FEDERATION_ROUTER_PREFIX + "heartbeat.enable";
+  public static final boolean DFS_ROUTER_HEARTBEAT_ENABLE_DEFAULT = true;
+  public static final String DFS_ROUTER_HEARTBEAT_INTERVAL_MS =
+  FEDERATION_ROUTER_PREFIX + "heartbeat.interval";
+  public static final long DFS_ROUTER_HEARTBEAT_INTERVAL_MS_DEFAULT =
+  TimeUnit.SECONDS.toMillis(5);
+  public static final String DFS_ROUTER_MONITOR_NAMENODE =
+  FEDERATION_ROUTER_PREFIX + "monitor.namenode";
+  public static final String DFS_ROUTER_MONITOR_LOCAL_NAMENODE =
+  FEDERATION_ROUTER_PREFIX + "monitor.localnamenode.enable";
+  public static final boolean DFS_ROUTER_MONITOR_LOCAL_NAMENODE_DEFAULT = true;
+
   // HDFS Router NN client
   public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE =
   FEDERATION_ROUTER_PREFIX + "connection.pool-size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f787d26/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 7d1102f..d4fd5f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1239,6 +1239,44 @@ public class DFSUtil {
   }
 
   /**
+   * Map a logical namenode ID to its web address. Use the given nameservice if
+   * specified, or the configured one if none is given.
+   *
+   * @param conf Configuration
+   * @param nsId which nameservice nnId is a part of, optional
+   * @param nnId the namenode ID to get the service addr for
+   * @return the service addr, null if it could not be determined
+   */
+  public static String getNamenodeWebAddr(final Configuration conf, String 
nsId,
+  String nnId) {
+
+if (nsId == null) {
+  nsId = getOnlyNameServiceIdOrNull(conf);
+}
+
+String webAddrKey = DFSUtilClient.concatSuffixes(
+DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, nsId, nnId);
+
+String webAddr =
+conf.get(webAddrKey, DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT);
+return webAddr;
+  }
+
+  
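
For reference, a small hypothetical driver for the getNamenodeWebAddr() helper added above (not part of the patch; assumes the branch-2 HDFS jars on the classpath, where the default web address is 0.0.0.0:50070):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;

public class WebAddrSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Key layout: dfs.namenode.http-address.<nsId>.<nnId>
    conf.set("dfs.namenode.http-address.ns1.nn1", "nn1.example.com:50070");
    // Resolves the suffixed key; falls back to the default when it is unset.
    System.out.println(DFSUtil.getNamenodeWebAddr(conf, "ns1", "nn1"));
  }
}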

[05/54] [abbrv] hadoop git commit: YARN-6608. Backport all SLS improvements from trunk to branch-2. (Carlo Curino via wangda)

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a3929f2/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index f40f47d..f999dce 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -31,60 +31,77 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
+import java.util.Collections;
+import java.util.concurrent.ConcurrentHashMap;
+
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.databind.ObjectMapper;
 
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.tools.rumen.JobTraceReader;
 import org.apache.hadoop.tools.rumen.LoggedJob;
 import org.apache.hadoop.tools.rumen.LoggedTask;
 import org.apache.hadoop.tools.rumen.LoggedTaskAttempt;
+import org.apache.hadoop.tools.rumen.TaskAttemptInfo;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
 import 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.sls.appmaster.AMSimulator;
 import org.apache.hadoop.yarn.sls.conf.SLSConfiguration;
 import org.apache.hadoop.yarn.sls.nodemanager.NMSimulator;
-import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator;
-import org.apache.hadoop.yarn.sls.scheduler.ResourceSchedulerWrapper;
+import org.apache.hadoop.yarn.sls.resourcemanager.MockAMLauncher;
 import org.apache.hadoop.yarn.sls.scheduler.SLSCapacityScheduler;
 import org.apache.hadoop.yarn.sls.scheduler.TaskRunner;
+import org.apache.hadoop.yarn.sls.scheduler.SLSFairScheduler;
+import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator;
 import org.apache.hadoop.yarn.sls.scheduler.SchedulerWrapper;
+import org.apache.hadoop.yarn.sls.synthetic.SynthJob;
+import org.apache.hadoop.yarn.sls.synthetic.SynthTraceJobProducer;
 import org.apache.hadoop.yarn.sls.utils.SLSUtils;
+import org.apache.hadoop.yarn.util.UTCClock;
 import org.apache.hadoop.yarn.util.resource.Resources;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.map.ObjectMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @Private
 @Unstable
-public class SLSRunner {
+public class SLSRunner extends Configured implements Tool {
   // RM, Runner
   private ResourceManager rm;
   private static TaskRunner runner = new TaskRunner();
   private String[] inputTraces;
-  private Configuration conf;
   private Map queueAppNumMap;
-  
+
   // NM simulator
   private HashMap nmMap;
   private int nmMemoryMB, nmVCores;
   private String nodeFile;
-  
+
   // AM simulator
   private int AM_ID;
   private Map amMap;
@@ -99,49 +116,92 @@ public class SLSRunner {
   // other simulation information
   private int numNMs, numRacks, numAMs, numTasks;
   private long maxRuntime;
-  public final static Map simulateInfoMap =
+
+  private final static Map simulateInfoMap =
   new HashMap();
 
   // logger
   public final static Logger LOG = LoggerFactory.getLogger(SLSRunner.class);
 
-  // input traces, input-rumen or input-sls
-  private boolean isSLS;
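
Because SLSRunner now extends Configured and implements Tool, it can be launched through ToolRunner. A hedged sketch of such a launcher (the patch's actual main() may differ; the no-argument constructor is assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.yarn.sls.SLSRunner;

public class RunSls {
  public static void main(String[] args) throws Exception {
    // ToolRunner parses the generic Hadoop options (-conf, -D, ...) and hands
    // the resulting Configuration to SLSRunner before run(args) is invoked.
    int rc = ToolRunner.run(new Configuration(), new SLSRunner(), args);
    System.exit(rc);
  }
}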

[02/54] [abbrv] hadoop git commit: YARN-6608. Backport all SLS improvements from trunk to branch-2. (Carlo Curino via wangda)

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a3929f2/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
index e152696..154bcc9 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.yarn.sls.web;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.ObjectInputStream;
 import java.text.MessageFormat;
@@ -26,11 +25,12 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 
+import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
-import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
@@ -38,12 +38,12 @@ import org.apache.hadoop.yarn.sls.SLSRunner;
 import org.apache.hadoop.yarn.sls.scheduler.FairSchedulerMetrics;
 import org.apache.hadoop.yarn.sls.scheduler.SchedulerMetrics;
 import org.apache.hadoop.yarn.sls.scheduler.SchedulerWrapper;
+
 import org.mortbay.jetty.Handler;
 import org.mortbay.jetty.Request;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.handler.AbstractHandler;
 import org.mortbay.jetty.handler.ResourceHandler;
-
 import com.codahale.metrics.Counter;
 import com.codahale.metrics.Gauge;
 import com.codahale.metrics.Histogram;
@@ -84,12 +84,12 @@ public class SLSWebApp extends HttpServlet {
 // load templates
 ClassLoader cl = Thread.currentThread().getContextClassLoader();
 try {
-  simulateInfoTemplate = FileUtils.readFileToString(new File(
-  cl.getResource("simulate.info.html.template").getFile()));
-  simulateTemplate = FileUtils.readFileToString(new File(
-  cl.getResource("simulate.html.template").getFile()));
-  trackTemplate = FileUtils.readFileToString(new File(
-  cl.getResource("track.html.template").getFile()));
+  simulateInfoTemplate = IOUtils.toString(
+  cl.getResourceAsStream("html/simulate.info.html.template"));
+  simulateTemplate = IOUtils.toString(
+  cl.getResourceAsStream("html/simulate.html.template"));
+  trackTemplate = IOUtils.toString(
+  cl.getResourceAsStream("html/track.html.template"));
 } catch (IOException e) {
   e.printStackTrace();
 }
@@ -105,24 +105,23 @@ public class SLSWebApp extends HttpServlet {
 
   public SLSWebApp(SchedulerWrapper wrapper, int metricsAddressPort) {
 this.wrapper = wrapper;
-metrics = wrapper.getMetrics();
-handleOperTimecostHistogramMap =
-new HashMap();
-queueAllocatedMemoryCounterMap = new HashMap();
-queueAllocatedVCoresCounterMap = new HashMap();
+handleOperTimecostHistogramMap = new HashMap<>();
+queueAllocatedMemoryCounterMap = new HashMap<>();
+queueAllocatedVCoresCounterMap = new HashMap<>();
 schedulerMetrics = wrapper.getSchedulerMetrics();
+metrics = schedulerMetrics.getMetrics();
 port = metricsAddressPort;
   }
 
   public void start() throws Exception {
-// static files
 final ResourceHandler staticHandler = new ResourceHandler();
 staticHandler.setResourceBase("html");
 
 Handler handler = new AbstractHandler() {
   @Override
   public void handle(String target, HttpServletRequest request,
- HttpServletResponse response, int dispatch) {
+  HttpServletResponse response, int dispatch)
+  throws IOException, ServletException {
 try{
   // timeunit
   int timeunit = 1000;   // seconds: divide milliseconds by 1000
@@ -183,14 +182,14 @@ public class SLSWebApp extends HttpServlet {
 response.setStatus(HttpServletResponse.SC_OK);
 
 String simulateInfo;
-if (SLSRunner.simulateInfoMap.isEmpty()) {
+if (SLSRunner.getSimulateInfoMap().isEmpty()) {
   String empty = "" +
   "No information available";
   simulateInfo = MessageFormat.format(simulateInfoTemplate, empty);
 } else {
   StringBuilder info = new StringBuilder();
   for (Map.Entry entry :
-  SLSRunner.simulateInfoMap.entrySet()) {
+  SLSRunner.getSimulateInfoMap().entrySet()) {
 info.append("");
 info.append("").append(entry.getKey()).append("");
 info.append("").append(entry.getValue())
@@ -221,7 +220,7 @@ public class SLSWebApp extends HttpSer

[45/54] [abbrv] hadoop git commit: HDFS-12450. Fixing TestNamenodeHeartbeat and support non-HA. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-12450. Fixing TestNamenodeHeartbeat and support non-HA. Contributed by 
Inigo Goiri.

(cherry picked from commit 928d1e87f9dbe64f89b858ccc1780723f3af58e7)
(cherry picked from commit 67785fe0063cfbfecab0e8deaae4a45d9c7c0073)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07b2da96
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07b2da96
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07b2da96

Branch: refs/heads/branch-2
Commit: 07b2da96a669f557cb5acd6a993200cf0f54d51c
Parents: be43786
Author: Inigo Goiri 
Authored: Fri Sep 15 16:02:12 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:33 2017 -0700

--
 .../router/NamenodeHeartbeatService.java| 47 
 .../server/federation/RouterDFSCluster.java | 23 +-
 2 files changed, 50 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07b2da96/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
index fe4f939..38f63e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
@@ -94,8 +94,9 @@ public class NamenodeHeartbeatService extends PeriodicService 
{
*/
   public NamenodeHeartbeatService(
   ActiveNamenodeResolver resolver, String nsId, String nnId) {
-super(NamenodeHeartbeatService.class.getSimpleName() + " " + nsId + " " +
-nnId);
+super(NamenodeHeartbeatService.class.getSimpleName() +
+(nsId == null ? "" : " " + nsId) +
+(nnId == null ? "" : " " + nnId));
 
 this.resolver = resolver;
 
@@ -109,28 +110,28 @@ public class NamenodeHeartbeatService extends 
PeriodicService {
 
 this.conf = configuration;
 
+String nnDesc = nameserviceId;
 if (this.namenodeId != null && !this.namenodeId.isEmpty()) {
   this.localTarget = new NNHAServiceTarget(
   conf, nameserviceId, namenodeId);
+  nnDesc += "-" + namenodeId;
 } else {
   this.localTarget = null;
 }
 
 // Get the RPC address for the clients to connect
 this.rpcAddress = getRpcAddress(conf, nameserviceId, namenodeId);
-LOG.info("{}-{} RPC address: {}",
-nameserviceId, namenodeId, rpcAddress);
+LOG.info("{} RPC address: {}", nnDesc, rpcAddress);
 
 // Get the Service RPC address for monitoring
 this.serviceAddress =
 DFSUtil.getNamenodeServiceAddr(conf, nameserviceId, namenodeId);
 if (this.serviceAddress == null) {
-  LOG.error("Cannot locate RPC service address for NN {}-{}, " +
-  "using RPC address {}", nameserviceId, namenodeId, this.rpcAddress);
+  LOG.error("Cannot locate RPC service address for NN {}, " +
+  "using RPC address {}", nnDesc, this.rpcAddress);
   this.serviceAddress = this.rpcAddress;
 }
-LOG.info("{}-{} Service RPC address: {}",
-nameserviceId, namenodeId, serviceAddress);
+LOG.info("{} Service RPC address: {}", nnDesc, serviceAddress);
 
 // Get the Lifeline RPC address for faster monitoring
 this.lifelineAddress =
@@ -138,13 +139,12 @@ public class NamenodeHeartbeatService extends 
PeriodicService {
 if (this.lifelineAddress == null) {
   this.lifelineAddress = this.serviceAddress;
 }
-LOG.info("{}-{} Lifeline RPC address: {}",
-nameserviceId, namenodeId, lifelineAddress);
+LOG.info("{} Lifeline RPC address: {}", nnDesc, lifelineAddress);
 
 // Get the Web address for UI
 this.webAddress =
 DFSUtil.getNamenodeWebAddr(conf, nameserviceId, namenodeId);
-LOG.info("{}-{} Web address: {}", nameserviceId, namenodeId, webAddress);
+LOG.info("{} Web address: {}", nnDesc, webAddress);
 
 this.setIntervalMs(conf.getLong(
 DFS_ROUTER_HEARTBEAT_INTERVAL_MS,
@@ -173,7 +173,7 @@ public class NamenodeHeartbeatService extends 
PeriodicService {
 String confKey = DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 String ret = conf.get(confKey);
 
-if (nsId != null && nnId != null) {
+if (nsId != null || nnId != null) {
   // Ge
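
A toy illustration (not the service code itself) of the null-tolerant naming this patch introduces, which is what lets the heartbeat service describe non-HA targets where nsId or nnId may be absent:

public class HeartbeatNameSketch {
  static String serviceName(String nsId, String nnId) {
    // Mirrors the constructor change above: skip whichever id is null.
    return "NamenodeHeartbeatService"
        + (nsId == null ? "" : " " + nsId)
        + (nnId == null ? "" : " " + nnId);
  }

  public static void main(String[] args) {
    System.out.println(serviceName("ns1", "nn1")); // HA: "NamenodeHeartbeatService ns1 nn1"
    System.out.println(serviceName("ns1", null));  // non-HA: "NamenodeHeartbeatService ns1"
  }
}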

[53/54] [abbrv] hadoop git commit: YARN-7355. TestDistributedShell should be scheduler agnostic. (Contributed by Haibo)

2017-10-20 Thread vrushali
YARN-7355. TestDistributedShell should be scheduler agnostic. (Contributed by 
Haibo)

(cherry picked from commit 6b7c87c94592606966a4229313b3d0da48f16158)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93d71d97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93d71d97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93d71d97

Branch: refs/heads/branch-2
Commit: 93d71d97d4eb29d6d95922efca3e10ddf89f8493
Parents: 378fa3a
Author: Yufei Gu 
Authored: Fri Oct 20 11:15:20 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:49:34 2017 -0700

--
 .../yarn/applications/distributedshell/TestDistributedShell.java| 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93d71d97/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
index 47485ae..af7d21e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
@@ -149,7 +149,6 @@ public class TestDistributedShell {
 conf.setBoolean(YarnConfiguration.SYSTEM_METRICS_PUBLISHER_ENABLED, true);
 
 conf.set(YarnConfiguration.NM_VMEM_PMEM_RATIO, "8");
-conf.set(YarnConfiguration.RM_SCHEDULER, 
CapacityScheduler.class.getName());
 conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
 conf.set("mapreduce.jobhistory.address",
 "0.0.0.0:" + ServerSocketUtil.getPort(10021, 10));





[52/54] [abbrv] hadoop git commit: YARN-7261. Add debug message for better download latency monitoring. (Yufei Gu)

2017-10-20 Thread vrushali
YARN-7261. Add debug message for better download latency monitoring. (Yufei Gu)

(cherry picked from commit 0799fde35e7f3b9e8a85284ac0b30f6bdcbffad1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/378fa3ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/378fa3ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/378fa3ac

Branch: refs/heads/branch-2
Commit: 378fa3ac7db79675db2f71c66ec7601eac5e204f
Parents: 4c4f28c
Author: Yufei Gu 
Authored: Fri Oct 20 09:59:07 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:49:34 2017 -0700

--
 .../org/apache/hadoop/yarn/util/FSDownload.java| 17 +++--
 .../localizer/ResourceLocalizationService.java | 11 ++-
 2 files changed, 25 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/378fa3ac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
index f34c16c..6e59574 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
@@ -350,6 +350,11 @@ public class FSDownload implements Callable {
 } catch (URISyntaxException e) {
   throw new IOException("Invalid resource", e);
 }
+
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Starting to download " + sCopy);
+}
+
 createDir(destDirPath, cachePerms);
 final Path dst_work = new Path(destDirPath + "_tmp");
 createDir(dst_work, cachePerms);
@@ -364,6 +369,11 @@ public class FSDownload implements Callable {
   unpack(new File(dTmp.toUri()), new File(dFinal.toUri()));
   changePermissions(dFinal.getFileSystem(conf), dFinal);
   files.rename(dst_work, destDirPath, Rename.OVERWRITE);
+
+  if (LOG.isDebugEnabled()) {
+LOG.debug("File has been downloaded to " +
+new Path(destDirPath, sCopy.getName()));
+  }
 } catch (Exception e) {
   try {
 files.delete(destDirPath, true);
@@ -409,8 +419,11 @@ public class FSDownload implements Callable {
   // APPLICATION:
   perm = isDir ? PRIVATE_DIR_PERMS : PRIVATE_FILE_PERMS;
 }
-LOG.debug("Changing permissions for path " + path
-+ " to perm " + perm);
+
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Changing permissions for path " + path + " to perm " + perm);
+}
+
 final FsPermission fPerm = perm;
 if (null == userUgi) {
   files.setPermission(path, perm);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/378fa3ac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index c37f2e3..0e7861f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -859,7 +859,7 @@ public class ResourceLocalizationService extends 
CompositeService
   // TODO handle failures, cancellation, requests by other containers
   LocalizedResource rsrc = request.getResource();
   LocalResourceRequest key = rsrc.getRequest();
-  LOG.info("Downloading public rsrc:" + key);
+  LOG.info("Downloading public resource: " + key);
   /*
* Here multiple containers may request the same resource. So we need
* to start downloading only when
@@ -918,8 +918,17 @@ public class ResourceLocalizationService extends 
CompositeService
 + " Either queue i
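
The new debug statements above are wrapped in isDebugEnabled() guards. A minimal standalone illustration of why (commons-logging style, as in FSDownload; the path is a made-up example):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class GuardedDebugSketch {
  private static final Log LOG = LogFactory.getLog(GuardedDebugSketch.class);

  public static void main(String[] args) {
    String resource = "hdfs://nn1.example.com/apps/job.jar";
    // Without the guard, the message string is concatenated on every call,
    // even when DEBUG is off; the guard makes that cost conditional.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Starting to download " + resource);
    }
  }
}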

[26/54] [abbrv] hadoop git commit: HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-10687. Federation Membership State Store internal API. Contributed by 
Jason Kace and Inigo Goiri.

(cherry picked from commit 95cae08849d23cf12c5d280c29f90908e6a90d40)
(cherry picked from commit 55da7fd7ebe2f3fa1c1c828dda727fddc75a1b81)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/366bf3c5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/366bf3c5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/366bf3c5

Branch: refs/heads/branch-2
Commit: 366bf3c5dc7591c1295383e6d404ddfe7822f731
Parents: 65593d5
Author: Inigo Goiri 
Authored: Mon Jul 31 10:55:21 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:30 2017 -0700

--
 .../dev-support/findbugsExcludeFile.xml |   3 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   1 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  17 +-
 .../resolver/MembershipNamenodeResolver.java| 290 
 .../federation/router/FederationUtil.java   |  42 +-
 .../federation/store/CachedRecordStore.java | 237 ++
 .../federation/store/MembershipStore.java   | 126 +
 .../federation/store/StateStoreCache.java   |  36 ++
 .../store/StateStoreCacheUpdateService.java |  67 +++
 .../federation/store/StateStoreService.java | 202 +++-
 .../store/impl/MembershipStoreImpl.java | 311 +
 .../federation/store/impl/package-info.java |  31 ++
 .../GetNamenodeRegistrationsRequest.java|  52 +++
 .../GetNamenodeRegistrationsResponse.java   |  55 +++
 .../store/protocol/GetNamespaceInfoRequest.java |  30 ++
 .../protocol/GetNamespaceInfoResponse.java  |  52 +++
 .../protocol/NamenodeHeartbeatRequest.java  |  52 +++
 .../protocol/NamenodeHeartbeatResponse.java |  49 ++
 .../UpdateNamenodeRegistrationRequest.java  |  72 +++
 .../UpdateNamenodeRegistrationResponse.java |  51 ++
 .../impl/pb/FederationProtocolPBTranslator.java | 145 ++
 .../GetNamenodeRegistrationsRequestPBImpl.java  |  87 
 .../GetNamenodeRegistrationsResponsePBImpl.java |  99 
 .../impl/pb/GetNamespaceInfoRequestPBImpl.java  |  60 +++
 .../impl/pb/GetNamespaceInfoResponsePBImpl.java |  95 
 .../impl/pb/NamenodeHeartbeatRequestPBImpl.java |  93 
 .../pb/NamenodeHeartbeatResponsePBImpl.java |  71 +++
 ...UpdateNamenodeRegistrationRequestPBImpl.java |  95 
 ...pdateNamenodeRegistrationResponsePBImpl.java |  73 +++
 .../store/protocol/impl/pb/package-info.java|  29 ++
 .../store/records/MembershipState.java  | 329 +
 .../store/records/MembershipStats.java  | 126 +
 .../records/impl/pb/MembershipStatePBImpl.java  | 334 +
 .../records/impl/pb/MembershipStatsPBImpl.java  | 191 
 .../src/main/proto/FederationProtocol.proto | 107 +
 .../src/main/resources/hdfs-default.xml |  18 +-
 .../resolver/TestNamenodeResolver.java  | 284 
 .../store/FederationStateStoreTestUtils.java|  23 +-
 .../federation/store/TestStateStoreBase.java|  81 
 .../store/TestStateStoreMembershipState.java| 463 +++
 .../store/driver/TestStateStoreDriverBase.java  |  69 ++-
 .../store/records/TestMembershipState.java  | 129 ++
 42 files changed, 4745 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/366bf3c5/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 53897ac..ce96062 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -15,6 +15,9 @@

  
  
+   
+ 
+ 

  
  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/366bf3c5/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 87d1942..4f9b782 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -348,6 +348,7 @@
   <include>QJournalProtocol.proto</include>
   <include>editlog.proto</include>
   <include>fsimage.proto</include>
+  <include>FederationProtocol.proto</include>
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/366bf3c5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff 

[25/54] [abbrv] hadoop git commit: HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/366bf3c5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java
new file mode 100644
index 0000000..1f0d556
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import 
org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+
+/**
+ * API response for overriding an existing namenode registration in the state
+ * store.
+ */
+public abstract class UpdateNamenodeRegistrationResponse {
+
+  public static UpdateNamenodeRegistrationResponse newInstance() {
+return StateStoreSerializer.newRecord(
+UpdateNamenodeRegistrationResponse.class);
+  }
+
+  public static UpdateNamenodeRegistrationResponse newInstance(boolean status)
+  throws IOException {
+UpdateNamenodeRegistrationResponse response = newInstance();
+response.setResult(status);
+return response;
+  }
+
+  @Private
+  @Unstable
+  public abstract boolean getResult();
+
+  @Private
+  @Unstable
+  public abstract void setResult(boolean result);
+}
\ No newline at end of file
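
A hedged usage fragment for the record above (not a test from the patch; it assumes a concrete StateStoreSerializer is configured so that newInstance() can build a backing implementation):

// Illustrative fragment only.
UpdateNamenodeRegistrationResponse response =
    UpdateNamenodeRegistrationResponse.newInstance(true);
boolean overridden = response.getResult(); // true: the registration was overridden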

http://git-wip-us.apache.org/repos/asf/hadoop/blob/366bf3c5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java
new file mode 100644
index 0000000..baad113
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import java.io.IOException;
+import java.lang.reflect.Method;
+
+import org.apache.commons.codec.binary.Base64;
+
+import com.google.protobuf.GeneratedMessage;
+import com.google.protobuf.Message;
+import com.google.protobuf.Message.Builder;
+import com.google.protobuf.MessageOrBuilder;
+
+/**
+ * Helper class for setting/getting data elements in an object backed by a
+ * protobuf implementation.
+ */
+public class FederationProtocolPBTranslator {
+
+  /** Optional proto byte stream used to create this object. */
+  private P proto;
+  /** The class of the proto handler for this trans

[14/54] [abbrv] hadoop git commit: YARN-6623. Add support to turn off launching privileged containers in the container-executor. (Varun Vasudev via wangda)

2017-10-20 Thread vrushali
YARN-6623. Add support to turn off launching privileged containers in the 
container-executor. (Varun Vasudev via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2f476f4b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2f476f4b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2f476f4b

Branch: refs/heads/branch-2
Commit: 2f476f4b2c78768b4a1dab8e9ce7a5b20b52acf2
Parents: 3ed7a2c
Author: Wangda Tan 
Authored: Thu Oct 19 15:11:05 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:28 2017 -0700

--
 .../hadoop-yarn/conf/container-executor.cfg |   12 +
 .../src/CMakeLists.txt  |5 +-
 .../runtime/DockerLinuxContainerRuntime.java|   92 +-
 .../linux/runtime/docker/DockerClient.java  |   25 +-
 .../linux/runtime/docker/DockerCommand.java |   55 +-
 .../runtime/docker/DockerCommandExecutor.java   |3 +-
 .../runtime/docker/DockerInspectCommand.java|   13 +-
 .../linux/runtime/docker/DockerLoadCommand.java |2 +-
 .../linux/runtime/docker/DockerPullCommand.java |2 +-
 .../linux/runtime/docker/DockerRmCommand.java   |4 +-
 .../linux/runtime/docker/DockerRunCommand.java  |   69 +-
 .../linux/runtime/docker/DockerStopCommand.java |6 +-
 .../container-executor/impl/configuration.c |   17 +
 .../container-executor/impl/configuration.h |   19 +-
 .../impl/container-executor.c   |  326 +
 .../impl/container-executor.h   |9 -
 .../container-executor/impl/get_executable.c|3 -
 .../container-executor/impl/get_executable.h|   29 +
 .../main/native/container-executor/impl/main.c  |   32 +-
 .../impl/modules/common/module-configs.c|3 +-
 .../impl/modules/common/module-configs.h|1 +
 .../main/native/container-executor/impl/util.c  |   60 +-
 .../main/native/container-executor/impl/util.h  |   46 +-
 .../container-executor/impl/utils/docker-util.c |  998 
 .../container-executor/impl/utils/docker-util.h |  147 +++
 .../impl/utils/string-utils.c   |1 -
 .../docker-container-executor.cfg   |   13 +
 .../test/test-container-executor.c  |  203 +---
 .../native/container-executor/test/test_util.cc |   37 +-
 .../test/utils/test-string-utils.cc |   37 +-
 .../test/utils/test_docker_util.cc  | 1122 ++
 .../runtime/TestDockerContainerRuntime.java |  399 ---
 .../docker/TestDockerCommandExecutor.java   |   35 +-
 .../docker/TestDockerInspectCommand.java|   29 +-
 .../runtime/docker/TestDockerLoadCommand.java   |9 +-
 .../runtime/docker/TestDockerPullCommand.java   |8 +-
 .../runtime/docker/TestDockerRmCommand.java |8 +-
 .../runtime/docker/TestDockerRunCommand.java|   25 +-
 .../runtime/docker/TestDockerStopCommand.java   |   15 +-
 .../src/site/markdown/DockerContainers.md   |   33 +-
 40 files changed, 3192 insertions(+), 760 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f476f4b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
--
diff --git a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg 
b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
index d68cee8..023654b 100644
--- a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
+++ b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
@@ -2,3 +2,15 @@ yarn.nodemanager.linux-container-executor.group=#configured 
value of yarn.nodema
 banned.users=#comma separated list of users who can not run applications
 min.user.id=1000#Prevent other super-users
 allowed.system.users=##comma separated list of system users who CAN run 
applications
+feature.tc.enabled=0
+
+# The configs below deal with settings for Docker
+#[docker]
+#  module.enabled=## enable/disable the module; set to "true" to enable, 
disabled by default
+#  docker.binary=/usr/bin/docker
+#  docker.allowed.capabilities=## comma separated capabilities that can be 
granted, e.g. 
CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE
+#  docker.allowed.devices=## comma separated list of devices that can be 
mounted into a container
+#  docker.allowed.networks=## comma separated networks that can be used, e.g. 
bridge,host,none
+#  docker.allowed.ro-mounts=## comma separated volumes that can be mounted as 
read-only
+#  docker.allowed.rw-mounts=## comma separated volumes that can be mounted as 
read-write; add the yarn local and log dirs to this list to run Hadoop jobs
+#  docker.privileged-containers.enabled=0

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f476f4b/hadoop-yarn-project/h

[30/54] [abbrv] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo 
Goiri.

(cherry picked from commit 8a9cdebebf26841a0f1e99fb08135f4597f2eba2)
(cherry picked from commit ca4f209b49e3aad6a80306f7342c9b6b560a79a7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6989725
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6989725
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6989725

Branch: refs/heads/branch-2
Commit: e69897253daa4749153143935459543e8ecadb6e
Parents: 93687da
Author: Inigo Goiri 
Authored: Thu May 11 09:57:03 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:30 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   38 +
 .../resolver/FederationNamespaceInfo.java   |   46 +-
 .../federation/resolver/RemoteLocation.java |   46 +-
 .../federation/router/ConnectionContext.java|  104 +
 .../federation/router/ConnectionManager.java|  408 
 .../federation/router/ConnectionPool.java   |  314 +++
 .../federation/router/ConnectionPoolId.java |  117 ++
 .../router/RemoteLocationContext.java   |   38 +-
 .../server/federation/router/RemoteMethod.java  |  164 ++
 .../server/federation/router/RemoteParam.java   |   71 +
 .../hdfs/server/federation/router/Router.java   |   58 +-
 .../federation/router/RouterRpcClient.java  |  856 
 .../federation/router/RouterRpcServer.java  | 1867 +-
 .../src/main/resources/hdfs-default.xml |   95 +
 .../server/federation/FederationTestUtils.java  |   80 +-
 .../hdfs/server/federation/MockResolver.java|   90 +-
 .../server/federation/RouterConfigBuilder.java  |   20 +-
 .../server/federation/RouterDFSCluster.java |  535 +++--
 .../server/federation/router/TestRouter.java|   31 +-
 .../server/federation/router/TestRouterRpc.java |  869 
 .../router/TestRouterRpcMultiDestination.java   |  216 ++
 21 files changed, 5675 insertions(+), 388 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6989725/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1b66ead..5d6c467 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1012,6 +1012,44 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   // HDFS Router-based federation
   public static final String FEDERATION_ROUTER_PREFIX =
   "dfs.federation.router.";
+  public static final String DFS_ROUTER_DEFAULT_NAMESERVICE =
+  FEDERATION_ROUTER_PREFIX + "default.nameserviceId";
+  public static final String DFS_ROUTER_HANDLER_COUNT_KEY =
+  FEDERATION_ROUTER_PREFIX + "handler.count";
+  public static final int DFS_ROUTER_HANDLER_COUNT_DEFAULT = 10;
+  public static final String DFS_ROUTER_READER_QUEUE_SIZE_KEY =
+  FEDERATION_ROUTER_PREFIX + "reader.queue.size";
+  public static final int DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT = 100;
+  public static final String DFS_ROUTER_READER_COUNT_KEY =
+  FEDERATION_ROUTER_PREFIX + "reader.count";
+  public static final int DFS_ROUTER_READER_COUNT_DEFAULT = 1;
+  public static final String DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY =
+  FEDERATION_ROUTER_PREFIX + "handler.queue.size";
+  public static final int DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT = 100;
+  public static final String DFS_ROUTER_RPC_BIND_HOST_KEY =
+  FEDERATION_ROUTER_PREFIX + "rpc-bind-host";
+  public static final int DFS_ROUTER_RPC_PORT_DEFAULT = 8888;
+  public static final String DFS_ROUTER_RPC_ADDRESS_KEY =
+  FEDERATION_ROUTER_PREFIX + "rpc-address";
+  public static final String DFS_ROUTER_RPC_ADDRESS_DEFAULT =
+  "0.0.0.0:" + DFS_ROUTER_RPC_PORT_DEFAULT;
+  public static final String DFS_ROUTER_RPC_ENABLE =
+  FEDERATION_ROUTER_PREFIX + "rpc.enable";
+  public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true;
+
+  // HDFS Router NN client
+  public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE =
+  FEDERATION_ROUTER_PREFIX + "connection.pool-size";
+  public static final int DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE_DEFAULT =
+  64;
+  public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN =
+  FEDERATION_ROUTER_PREFIX + "connection.pool.clean.ms";
+  public static final lon
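
An illustrative read of the new Router keys (fragment, not from the patch; assumes the HDFS jars on the classpath):

Configuration conf = new Configuration();
int handlers = conf.getInt(
    DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY,      // "dfs.federation.router.handler.count"
    DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_DEFAULT); // 10
boolean rpcEnabled = conf.getBoolean(
    DFSConfigKeys.DFS_ROUTER_RPC_ENABLE,             // "dfs.federation.router.rpc.enable"
    DFSConfigKeys.DFS_ROUTER_RPC_ENABLE_DEFAULT);    // true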

[54/54] [abbrv] hadoop git commit: YARN-3368 YARN-7169 New YARN UI backported from trunk. See branch YARN-3368_branch2 for commits

2017-10-20 Thread vrushali
YARN-3368 YARN-7169 New YARN UI backported from trunk. See branch 
YARN-3368_branch2 for commits


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76908728
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76908728
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76908728

Branch: refs/heads/branch-2
Commit: 769087288ad753ca58509ff3fe5efcc2d2b061aa
Parents: 20f5687 93d71d9
Author: vrushali 
Authored: Fri Oct 20 11:51:33 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:51:33 2017 -0700

--
 .gitignore  |   13 +
 BUILDING.txt|   91 +-
 LICENSE.txt |  645 ++-
 dev-support/bin/create-release  |2 +-
 dev-support/docker/Dockerfile   |   22 +-
 .../resources/assemblies/hadoop-yarn-dist.xml   |7 +
 hadoop-project/pom.xml  |6 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |   12 +-
 .../org/apache/hadoop/yarn/webapp/WebApps.java  |   30 +
 .../src/main/resources/yarn-default.xml |   14 +
 .../server/resourcemanager/ResourceManager.java |   91 +-
 .../src/site/markdown/YarnUI2.md|   55 +
 .../hadoop-yarn/hadoop-yarn-ui/README.md|   64 +
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  |  259 +
 .../hadoop-yarn-ui/public/crossdomain.xml   |   15 +
 .../hadoop-yarn-ui/src/main/webapp/.bowerrc |4 +
 .../hadoop-yarn-ui/src/main/webapp/.ember-cli   |9 +
 .../hadoop-yarn-ui/src/main/webapp/.jshintrc|   35 +
 .../src/main/webapp/.watchmanconfig |3 +
 .../src/main/webapp/WEB-INF/web.xml |   25 +
 .../src/main/webapp/WEB-INF/wro.xml |9 +
 .../src/main/webapp/app/adapters/abstract.js|   49 +
 .../main/webapp/app/adapters/cluster-info.js|   30 +
 .../main/webapp/app/adapters/cluster-metric.js  |   30 +
 .../webapp/app/adapters/yarn-app-attempt.js |   40 +
 .../webapp/app/adapters/yarn-app-flowrun.js |   33 +
 .../webapp/app/adapters/yarn-app-timeline.js|   35 +
 .../src/main/webapp/app/adapters/yarn-app.js|   41 +
 .../webapp/app/adapters/yarn-container-log.js   |   80 +
 .../main/webapp/app/adapters/yarn-container.js  |   49 +
 .../src/main/webapp/app/adapters/yarn-entity.js |   35 +
 .../webapp/app/adapters/yarn-flow-activity.js   |   30 +
 .../webapp/app/adapters/yarn-flowrun-brief.js   |   32 +
 .../main/webapp/app/adapters/yarn-flowrun.js|   38 +
 .../main/webapp/app/adapters/yarn-node-app.js   |   53 +
 .../webapp/app/adapters/yarn-node-container.js  |   54 +
 .../src/main/webapp/app/adapters/yarn-node.js   |   33 +
 .../app/adapters/yarn-queue/capacity-queue.js   |   23 +
 .../app/adapters/yarn-queue/fair-queue.js   |   23 +
 .../app/adapters/yarn-queue/fifo-queue.js   |   23 +
 .../app/adapters/yarn-queue/yarn-queue.js   |   30 +
 .../main/webapp/app/adapters/yarn-rm-node.js|   36 +
 .../app/adapters/yarn-timeline-appattempt.js|   41 +
 .../app/adapters/yarn-timeline-container.js |   40 +
 .../hadoop-yarn-ui/src/main/webapp/app/app.js   |   38 +
 .../webapp/app/components/app-attempt-table.js  |   22 +
 .../src/main/webapp/app/components/app-table.js |   22 +
 .../webapp/app/components/app-timeout-bar.js|   60 +
 .../app/components/app-usage-donut-chart.js |   59 +
 .../src/main/webapp/app/components/bar-chart.js |  123 +
 .../app/components/base-chart-component.js  |  148 +
 .../app/components/base-usage-donut-chart.js|   42 +
 .../webapp/app/components/breadcrumb-bar.js |   31 +
 .../webapp/app/components/container-table.js|   22 +
 .../main/webapp/app/components/donut-chart.js   |  195 +
 .../webapp/app/components/em-table-html-cell.js |   23 +
 .../main/webapp/app/components/item-selector.js |   39 +
 .../main/webapp/app/components/nodes-heatmap.js |  306 ++
 ...er-app-memusage-by-nodes-stacked-barchart.js |   88 +
 ...app-ncontainers-by-nodes-stacked-barchart.js |   67 +
 .../app/components/queue-configuration-table.js |   22 +
 .../webapp/app/components/queue-navigator.js|   22 +
 .../app/components/queue-usage-donut-chart.js   |   67 +
 .../main/webapp/app/components/queue-view.js|  288 +
 .../webapp/app/components/simple-bar-chart.js   |  206 +
 .../main/webapp/app/components/simple-table.js  |   81 +
 .../webapp/app/components/stacked-barchart.js   |  201 +
 .../webapp/app/components/sunburst-chart.js |  261 +
 .../main/webapp/app/components/timeline-view.js |  480 ++
 .../main/webapp/app/components/tree-selector.js |  304 ++
 .../src/main/webapp/app/config.js   |   24 +
 .../src/main/webapp/app/constants.js|   24 +
 .../webapp/app/controllers/app-table-columns.js |  163 +
 .../main/webapp/app/controllers/application.js  |   55 +
 .../webapp/app/controllers/cluster

[42/54] [abbrv] hadoop git commit: HDFS-12335. Federation Metrics. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-12335. Federation Metrics. Contributed by Inigo Goiri.

(cherry picked from commit 3b19e77752afce87936f5c0d1e6d272fba798d7b)
(cherry picked from commit bc9e588a19c0aaf518de8dab719362be4a8d6a54)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/673f6856
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/673f6856
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/673f6856

Branch: refs/heads/branch-2
Commit: 673f6856897c452a63bae941e238de6ffb8c1b40
Parents: c778f9d
Author: Inigo Goiri 
Authored: Fri Sep 8 09:37:10 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:32 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  11 +
 .../federation/metrics/FederationMBean.java | 204 ++
 .../federation/metrics/FederationMetrics.java   | 673 +++
 .../federation/metrics/FederationRPCMBean.java  |  90 +++
 .../metrics/FederationRPCMetrics.java   | 239 +++
 .../FederationRPCPerformanceMonitor.java| 211 ++
 .../federation/metrics/NamenodeBeanMetrics.java | 624 +
 .../federation/metrics/StateStoreMBean.java |  45 ++
 .../federation/metrics/StateStoreMetrics.java   | 144 
 .../server/federation/metrics/package-info.java |  27 +
 .../federation/router/ConnectionManager.java|  23 +
 .../federation/router/ConnectionPool.java   |  23 +
 .../hdfs/server/federation/router/Router.java   |  62 ++
 .../server/federation/router/RouterMetrics.java |  73 ++
 .../federation/router/RouterMetricsService.java | 108 +++
 .../federation/router/RouterRpcClient.java  |  39 +-
 .../federation/router/RouterRpcMonitor.java |  95 +++
 .../federation/router/RouterRpcServer.java  |  63 +-
 .../federation/store/CachedRecordStore.java |   8 +
 .../federation/store/StateStoreService.java |  42 +-
 .../store/driver/StateStoreDriver.java  |  17 +-
 .../driver/impl/StateStoreSerializableImpl.java |   6 +-
 .../driver/impl/StateStoreZooKeeperImpl.java|  26 +
 .../store/records/MembershipState.java  |   2 +-
 .../federation/store/records/MountTable.java|  23 +
 .../records/impl/pb/MembershipStatePBImpl.java  |   5 +-
 .../src/main/resources/hdfs-default.xml |  19 +-
 .../server/federation/FederationTestUtils.java  |  13 +
 .../server/federation/RouterConfigBuilder.java  |  13 +
 .../metrics/TestFederationMetrics.java  | 237 +++
 .../federation/metrics/TestMetricsBase.java | 150 +
 .../server/federation/router/TestRouter.java|  23 +-
 .../store/driver/TestStateStoreDriverBase.java  |  69 ++
 33 files changed, 3383 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/673f6856/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b161bc0..3606e7a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -28,6 +28,8 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
+import org.apache.hadoop.hdfs.server.federation.router.RouterRpcMonitor;
+import 
org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformanceMonitor;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
@@ -1041,6 +1043,15 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   FEDERATION_ROUTER_PREFIX + "rpc.enable";
   public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true;
 
+  public static final String DFS_ROUTER_METRICS_ENABLE =
+  FEDERATION_ROUTER_PREFIX + "metrics.enable";
+  public static final boolean DFS_ROUTER_METRICS_ENABLE_DEFAULT = true;
+  public static final String DFS_ROUTER_METRICS_CLASS =
+  FEDERATION_ROUTER_PREFIX + "metrics.class";
+  public static final Class<? extends RouterRpcMonitor>
+  DFS_ROUTER_METRICS_CLASS_DEFAULT =
+  FederationRPCPerformanceMonitor.class;
+
   // HDFS Router heartbeat
   public static final String DFS_ROUTER_HEARTBEAT_ENABLE =
 
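The hunk above is cut off mid-declaration. For orientation, a minimal sketch (not part of the commit) of how a Router could read the two new keys; the Configuration here is assumed to be loaded from hdfs-site.xml:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.federation.router.RouterRpcMonitor;

public class RouterMetricsConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Whether the Router collects and publishes metrics at all.
    boolean metricsEnabled = conf.getBoolean(
        DFSConfigKeys.DFS_ROUTER_METRICS_ENABLE,
        DFSConfigKeys.DFS_ROUTER_METRICS_ENABLE_DEFAULT);
    // Pluggable monitor class for the RPC metrics.
    Class<? extends RouterRpcMonitor> monitorClass = conf.getClass(
        DFSConfigKeys.DFS_ROUTER_METRICS_CLASS,
        DFSConfigKeys.DFS_ROUTER_METRICS_CLASS_DEFAULT,
        RouterRpcMonitor.class);
    System.out.println(metricsEnabled + " -> " + monitorClass.getName());
  }
}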

[28/54] [abbrv] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6989725/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 24792bb..4bae71e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -17,16 +17,109 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_COUNT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_COUNT_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_KEY;
+
+import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CryptoProtocolVersion;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
+import org.apache.hadoop.fs.CacheFlag;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.QuotaUsage;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.AddBlockFlag;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.inotify.EventBatchList;
+import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
+import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
+import 
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
+import 
org.apache.had

[06/54] [abbrv] hadoop git commit: YARN-6608. Backport all SLS improvements from trunk to branch-2. (Carlo Curino via wangda)

2017-10-20 Thread vrushali
YARN-6608. Backport all SLS improvements from trunk to branch-2. (Carlo Curino 
via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a3929f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a3929f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a3929f2

Branch: refs/heads/branch-2
Commit: 6a3929f2b2c994eec3eae17509485c619cafd4ef
Parents: bc2326f
Author: Wangda Tan 
Authored: Tue Oct 17 19:45:09 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:26 2017 -0700

--
 .../hadoop/metrics2/source/JvmMetrics.java  |  10 +
 .../org/apache/hadoop/tools/rumen/TaskInfo.java |  29 +-
 hadoop-tools/hadoop-sls/pom.xml |  48 +-
 .../hadoop-sls/src/main/assemblies/sls.xml  |   4 +-
 hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh  |  31 +-
 .../src/main/data/2jobs2min-rumen-jh.json   | 705 +-
 .../src/main/html/js/thirdparty/jquery.js   |   2 +-
 .../hadoop/yarn/sls/ReservationClientUtil.java  |  78 ++
 .../hadoop/yarn/sls/RumenToSLSConverter.java|   8 +-
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   | 932 --
 .../hadoop/yarn/sls/appmaster/AMSimulator.java  | 259 +++--
 .../yarn/sls/appmaster/MRAMSimulator.java   | 230 ++---
 .../hadoop/yarn/sls/conf/SLSConfiguration.java  |  15 +
 .../yarn/sls/nodemanager/NMSimulator.java   |   1 -
 .../sls/resourcemanager/MockAMLauncher.java | 120 +++
 .../sls/scheduler/FairSchedulerMetrics.java | 328 +++
 .../sls/scheduler/ResourceSchedulerWrapper.java | 973 ---
 .../sls/scheduler/SLSCapacityScheduler.java | 680 +++--
 .../yarn/sls/scheduler/SLSFairScheduler.java| 346 +++
 .../yarn/sls/scheduler/SchedulerMetrics.java| 605 +++-
 .../yarn/sls/scheduler/SchedulerWrapper.java|  27 +-
 .../hadoop/yarn/sls/scheduler/TaskRunner.java   |   9 +-
 .../hadoop/yarn/sls/scheduler/Tracker.java  |  46 +
 .../hadoop/yarn/sls/synthetic/SynthJob.java | 306 ++
 .../yarn/sls/synthetic/SynthJobClass.java   | 180 
 .../sls/synthetic/SynthTraceJobProducer.java| 319 ++
 .../hadoop/yarn/sls/synthetic/SynthUtils.java   | 101 ++
 .../yarn/sls/synthetic/SynthWorkload.java   | 121 +++
 .../hadoop/yarn/sls/synthetic/package-info.java |  22 +
 .../apache/hadoop/yarn/sls/utils/SLSUtils.java  |  61 +-
 .../apache/hadoop/yarn/sls/web/SLSWebApp.java   |  45 +-
 .../src/site/markdown/SchedulerLoadSimulator.md | 151 ++-
 .../hadoop/yarn/sls/BaseSLSRunnerTest.java  | 151 +++
 .../sls/TestReservationSystemInvariants.java|  77 ++
 .../apache/hadoop/yarn/sls/TestSLSRunner.java   |  95 +-
 .../hadoop/yarn/sls/TestSynthJobGeneration.java |  96 ++
 .../yarn/sls/appmaster/TestAMSimulator.java |  89 +-
 .../yarn/sls/nodemanager/TestNMSimulator.java   |  32 +-
 .../yarn/sls/scheduler/TestTaskRunner.java  |   2 +-
 .../hadoop/yarn/sls/utils/TestSLSUtils.java |  30 +
 .../hadoop/yarn/sls/web/TestSLSWebApp.java  |  28 +-
 .../src/test/resources/capacity-scheduler.xml   |  10 +
 .../src/test/resources/exit-invariants.txt  |   8 +
 .../src/test/resources/fair-scheduler.xml   |   8 +-
 .../hadoop-sls/src/test/resources/inputsls.json |  55 ++
 .../src/test/resources/log4j.properties |  19 +
 .../hadoop-sls/src/test/resources/nodes.json|  84 ++
 .../src/test/resources/ongoing-invariants.txt   |  54 +
 .../src/test/resources/sls-runner.xml   |   6 +-
 .../hadoop-sls/src/test/resources/syn.json  |  53 +
 .../hadoop-sls/src/test/resources/yarn-site.xml |  19 +-
 .../hadoop-yarn-server-resourcemanager/pom.xml  |   1 +
 .../invariants/InvariantViolationException.java |  35 +
 .../monitor/invariants/InvariantsChecker.java   |  96 ++
 .../invariants/MetricsInvariantChecker.java | 195 
 .../ReservationInvariantsChecker.java   |  63 ++
 .../monitor/invariants/package-info.java|  22 +
 .../invariants/TestMetricsInvariantChecker.java |  99 ++
 .../src/test/resources/invariants.txt   |  54 +
 59 files changed, 5172 insertions(+), 3101 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a3929f2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
index caba170..c6369cd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
@@ -27,6 +27,7

[41/54] [abbrv] hadoop git commit: HDFS-12335. Federation Metrics. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/673f6856/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java
new file mode 100644
index 000..851538a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName;
+import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+
+/**
+ * This class is for maintaining the various Router activity statistics
+ * and publishing them through the metrics interfaces.
+ */
+@Metrics(name="RouterActivity", about="Router metrics", context="dfs")
+public class RouterMetrics {
+
+  private final MetricsRegistry registry = new MetricsRegistry("router");
+
+  @Metric("Duration in SafeMode at startup in msec")
+  private MutableGaugeInt safeModeTime;
+
+  private JvmMetrics jvmMetrics = null;
+
+  RouterMetrics(
+  String processName, String sessionId, final JvmMetrics jvmMetrics) {
+this.jvmMetrics = jvmMetrics;
+registry.tag(ProcessName, processName).tag(SessionId, sessionId);
+  }
+
+  public static RouterMetrics create(Configuration conf) {
+String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
+String processName = "Router";
+MetricsSystem ms = DefaultMetricsSystem.instance();
+JvmMetrics jm = JvmMetrics.create(processName, sessionId, ms);
+
+return ms.register(new RouterMetrics(processName, sessionId, jm));
+  }
+
+  public JvmMetrics getJvmMetrics() {
+return jvmMetrics;
+  }
+
+  public void shutdown() {
+DefaultMetricsSystem.shutdown();
+  }
+
+  public void setSafeModeTime(long elapsed) {
+safeModeTime.set((int) elapsed);
+  }
+}
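RouterMetrics is created and torn down by RouterMetricsService, whose (truncated) diff follows. As a rough usage sketch outside that service — illustrative only; the startup work being timed is assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.federation.router.RouterMetrics;
import org.apache.hadoop.util.Time;

public class RouterMetricsUsageSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // create() registers the source with the default metrics system
    // and wires up JVM metrics for the "Router" process.
    RouterMetrics metrics = RouterMetrics.create(conf);

    long start = Time.monotonicNow();
    // ... startup work covered by the safe-mode gauge would run here ...
    metrics.setSafeModeTime(Time.monotonicNow() - start);

    metrics.shutdown(); // shuts down the default metrics system
  }
}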

http://git-wip-us.apache.org/repos/asf/hadoop/blob/673f6856/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java
new file mode 100644
index 000..f4debce
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */

[03/54] [abbrv] hadoop git commit: YARN-6608. Backport all SLS improvements from trunk to branch-2. (Carlo Curino via wangda)

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a3929f2/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java
new file mode 100644
index 000..81f6648
--- /dev/null
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java
@@ -0,0 +1,346 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.sls.scheduler;
+
+import com.codahale.metrics.Timer;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerUpdates;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSLeafQueue;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+import org.apache.hadoop.yarn.sls.SLSRunner;
+import org.apache.hadoop.yarn.sls.conf.SLSConfiguration;
+import org.apache.hadoop.yarn.util.resource.Resources;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+@Private
+@Unstable
+public class SLSFairScheduler extends FairScheduler
+implements SchedulerWrapper, Configurable {
+  private SchedulerMetrics schedulerMetrics;
+  private boolean metricsON;
+  private Tracker tracker;
+
+  private Map<ContainerId, SchedulerApplicationAttempt> preemptionContainerMap =
+  new ConcurrentHashMap<>();
+
+  public SchedulerMetrics getSchedulerMetrics() {
+return schedulerMetrics;
+  }
+
+  public Tracker getTracker() {
+return tracker;
+  }
+
+  public SLSFairScheduler() {
+tracker = new Tracker();
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+super.setConfig(conf);
+
+metricsON = conf.getBoolean(SLSConfiguration.METRICS_SWITCH, true);
+if (metricsON) {
+  try {
+schedulerMetrics = SchedulerMetrics.getInstance(conf,
+FairScheduler.class);
+schedulerMetrics.init(this, conf);
+  } catch (Exception e) {
+e.printStackTrace();
+  }
+}
+  }
+
+  @Override
+  public Allocation allocate(ApplicationAttemptId attemptId,
+  List<ResourceRequest> resourceRequests, List<ContainerId> containerIds,
+  List<String> blacklistAdditions, List<String> blacklistRemovals,
+  ContainerUpdates updateRequests) {
+if (metricsON) {
+  final Timer.Context context = 
schedulerMetrics.getSchedulerAllocateTimer()
+  .time();
+  Allocation allocation = null;
+  try {
+allocation = super.allocate(attemptId, resourceReque
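The allocate() body is cut off above. The visible lines follow the standard codahale wrap-in-a-Timer pattern; the self-contained sketch below shows that pattern in isolation — it is an assumption about how the truncated body completes, not the verbatim diff:

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

public class AllocateTimingSketch {
  public static void main(String[] args) throws Exception {
    MetricRegistry registry = new MetricRegistry();
    Timer allocateTimer = registry.timer("scheduler.allocate");
    final Timer.Context context = allocateTimer.time();
    try {
      Thread.sleep(5); // stands in for super.allocate(...)
    } finally {
      context.stop(); // latency is recorded even if allocate() throws
    }
    System.out.println(allocateTimer.getCount()); // 1
  }
}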

[16/54] [abbrv] hadoop git commit: YARN-7294. TestSignalContainer#testSignalRequestDeliveryToNM fails intermittently with Fair Scheduler. (Contributed by Miklos Szegedi)

2017-10-20 Thread vrushali
YARN-7294. TestSignalContainer#testSignalRequestDeliveryToNM fails 
intermittently with Fair Scheduler. (Contributed by Miklos Szegedi)

(cherry picked from commit cbd2b73ef81a7e275c5d4f842cac5b81ff2f8c84)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1000811b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1000811b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1000811b

Branch: refs/heads/branch-2
Commit: 1000811bcba6a31eab189c640c39f4b22a3561a5
Parents: 2f476f4
Author: Yufei Gu 
Authored: Thu Oct 19 16:39:25 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:28 2017 -0700

--
 .../yarn/server/resourcemanager/TestSignalContainer.java | 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1000811b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
index 2688987..fac0b96 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager;
 import java.util.ArrayList;
 import java.util.List;
 
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.junit.Assert;
 
 import org.apache.commons.logging.Log;
@@ -50,6 +51,10 @@ public class TestSignalContainer {
 Logger rootLogger = LogManager.getRootLogger();
 rootLogger.setLevel(Level.DEBUG);
 MockRM rm = new MockRM();
+FairScheduler fs = null;
+if (rm.getResourceScheduler().getClass() == FairScheduler.class) {
+  fs = (FairScheduler)rm.getResourceScheduler();
+}
 rm.start();
 
 MockNM nm1 = rm.registerNode("h1:1234", 5000);
@@ -78,6 +83,9 @@ public class TestSignalContainer {
  List<Container> allocation = am.allocate(new 
ArrayList<ResourceRequest>(),
  new ArrayList<ContainerId>()).getAllocatedContainers();
   conts.addAll(allocation);
+  if (fs != null) {
+nm1.nodeHeartbeat(true);
+  }
 }
 Assert.assertEquals(request, conts.size());
 





[12/54] [abbrv] hadoop git commit: YARN-6623. Add support to turn off launching privileged containers in the container-executor. (Varun Vasudev via wangda)

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f476f4b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
new file mode 100644
index 000..c627ca8
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
@@ -0,0 +1,1122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <fstream>
+#include "errno.h"
+
+extern "C" {
+#include "utils/docker-util.c"
+}
+
+namespace ContainerExecutor {
+
+  class TestDockerUtil : public ::testing::Test {
+  protected:
+virtual void SetUp() {
+  docker_command_file = "docker-command.cmd";
+  container_executor_cfg_file = "container-executor.cfg";
+  container_executor_cfg.size = 0;
+  container_executor_cfg.sections = NULL;
+}
+
+virtual void TearDown() {
+  remove(docker_command_file.c_str());
+  remove(container_executor_cfg_file.c_str());
+  delete_ce_file();
+}
+
+struct configuration container_executor_cfg;
+std::string docker_command_file;
+std::string container_executor_cfg_file;
+
+
+void write_file(const std::string fname, const std::string contents) {
+  std::ofstream command_file;
+  command_file.open(fname.c_str());
+  command_file << contents;
+  command_file.close();
+}
+
+int create_ce_file() {
+  int ret = 0;
+  const char *fname = HADOOP_CONF_DIR "/" CONF_FILENAME;
+  if (strcmp("../etc/hadoop/container-executor.cfg", fname) == 0) {
+ret = mkdir("../etc", 0755);
+if (ret == 0 || errno == EEXIST) {
+  ret = mkdir("../etc/hadoop", 0755);
+  if (ret == 0 || errno == EEXIST) {
+write_file("../etc/hadoop/container-executor.cfg", "");
+return 0;
+  } else {
+std::cerr << "Could not create ../etc/hadoop, " << strerror(errno) 
<< std::endl;
+  }
+} else {
+  std::cerr << "Could not create ../etc, " << strerror(errno) << 
std::endl;
+}
+  }
+  std::cerr << "Could not create " << fname << std::endl;
+  return 1;
+}
+
+void delete_ce_file() {
+  const char *fname = HADOOP_CONF_DIR "/" CONF_FILENAME;
+  if (strcmp("../etc/hadoop/container-executor.cfg", fname) == 0) {
+struct stat buffer;
+if (stat(fname, &buffer) == 0) {
+  remove("../etc/hadoop/container-executor.cfg");
+  rmdir("../etc/hadoop");
+  rmdir("../etc");
+}
+  }
+}
+
+void write_container_executor_cfg(const std::string contents) {
+  write_file(container_executor_cfg_file, contents);
+}
+
+void write_command_file(const std::string contents) {
+  write_file(docker_command_file, contents);
+}
+
+void run_docker_command_test(const std::vector<std::pair<std::string, std::string> > &file_cmd_vec,
+ const std::vector<std::pair<std::string, int> > &bad_file_cmd_vec,
+ int (*docker_func)(const char *, const struct configuration *, char *, const size_t)) {
+  char tmp[8192];
+  std::vector<std::pair<std::string, std::string> >::const_iterator itr;
+  for (itr = file_cmd_vec.begin(); itr != file_cmd_vec.end(); ++itr) {
+memset(tmp, 0, 8192);
+write_command_file(itr->first);
+int ret = (*docker_func)(docker_command_file.c_str(), 
&container_executor_cfg, tmp, 8192);
+ASSERT_EQ(0, ret) << "error message: " << 
get_docker_error_message(ret) << " for input " << itr->first;
+ASSERT_STREQ(itr->second.c_str(), tmp);
+  }
+
+  std::vector<std::pair<std::string, int> >::const_iterator itr2;
+  for (itr2 = bad_file_cmd_vec.begin(); itr2 != bad_file_cmd_vec.end(); 
++itr2) {
+memset(tmp, 0, 8192);
+write_command_file(itr2->first);
+ 

[50/54] [abbrv] hadoop git commit: HADOOP-14944. Add JvmMetrics to KMS.

2017-10-20 Thread vrushali
HADOOP-14944. Add JvmMetrics to KMS.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9f6a98b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9f6a98b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9f6a98b

Branch: refs/heads/branch-2
Commit: c9f6a98b826f6e2ff65a666d1331927c4ee485e0
Parents: b60c658
Author: Xiao Chen 
Authored: Thu Oct 19 22:36:39 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:34 2017 -0700

--
 .../hadoop/metrics2/source/JvmMetrics.java  | 16 +
 .../crypto/key/kms/server/KMSConfiguration.java |  9 +
 .../hadoop/crypto/key/kms/server/KMSWebApp.java | 23 
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 38 +++-
 4 files changed, 85 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9f6a98b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
index c6369cd..e3f8754 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
@@ -58,6 +58,11 @@ public class JvmMetrics implements MetricsSource {
   }
   return impl;
 }
+
+synchronized void shutdown() {
+  DefaultMetricsSystem.instance().unregisterSource(JvmMetrics.name());
+  impl = null;
+}
   }
 
   @VisibleForTesting
@@ -81,6 +86,7 @@ public class JvmMetrics implements MetricsSource {
  final ConcurrentHashMap<String, MetricsInfo[]> gcInfoCache =
  new ConcurrentHashMap<String, MetricsInfo[]>();
 
+  @VisibleForTesting
   JvmMetrics(String processName, String sessionId) {
 this.processName = processName;
 this.sessionId = sessionId;
@@ -104,6 +110,16 @@ public class JvmMetrics implements MetricsSource {
 return Singleton.INSTANCE.init(processName, sessionId);
   }
 
+  /**
+   * Shutdown the JvmMetrics singleton. This is not necessary if the JVM itself
+   * is shutdown, but may be necessary for scenarios where JvmMetrics instance
+   * needs to be re-created while the JVM is still around. One such scenario
+   * is unit-testing.
+   */
+  public static void shutdownSingleton() {
+Singleton.INSTANCE.shutdown();
+  }
+
   @Override
   public void getMetrics(MetricsCollector collector, boolean all) {
 MetricsRecordBuilder rb = collector.addRecord(JvmMetrics)
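Per the javadoc above, shutdownSingleton() exists so a test can re-create the JvmMetrics singleton. A minimal sketch of that pattern; the process and session names are illustrative:

import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;

public class JvmMetricsRestartSketch {
  public static void main(String[] args) {
    DefaultMetricsSystem.initialize("Test");
    JvmMetrics first = JvmMetrics.initSingleton("KMS", "session-1");
    // Without shutdownSingleton(), a second initSingleton() would return
    // the cached instance with the original process name.
    JvmMetrics.shutdownSingleton();
    JvmMetrics second = JvmMetrics.initSingleton("KMS", "session-2");
    System.out.println(first != second); // true
  }
}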

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9f6a98b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
index d825b2b..cb89561 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
@@ -63,6 +63,15 @@ public class KMSConfiguration {
   public static final String KMS_AUDIT_AGGREGATION_WINDOW = CONFIG_PREFIX +
   "audit.aggregation.window.ms";
 
+  // Process name shown in metrics
+  public static final String METRICS_PROCESS_NAME_KEY =
+  CONFIG_PREFIX + "metrics.process.name";
+  public static final String METRICS_PROCESS_NAME_DEFAULT = "KMS";
+
+  // Session id for metrics
+  public static final String METRICS_SESSION_ID_KEY =
+  CONFIG_PREFIX + "metrics.session.id";
+
   // KMS Audit logger classes to use
   public static final String KMS_AUDIT_LOGGER_KEY = CONFIG_PREFIX +
   "audit.logger";

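The KMSWebApp wiring that consumes these keys follows below (truncated). A sketch of how they are presumably read to start JVM metrics for the KMS — the plain Configuration stands in for the real KMS configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.kms.server.KMSConfiguration;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;

public class KmsJvmMetricsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(); // stand-in for kms-site.xml
    String processName = conf.get(KMSConfiguration.METRICS_PROCESS_NAME_KEY,
        KMSConfiguration.METRICS_PROCESS_NAME_DEFAULT);
    String sessionId = conf.get(KMSConfiguration.METRICS_SESSION_ID_KEY);
    DefaultMetricsSystem.initialize(processName);
    JvmMetrics jvmMetrics = JvmMetrics.create(processName, sessionId,
        DefaultMetricsSystem.instance());
    System.out.println("JVM metrics started for " + processName
        + ": " + jvmMetrics);
  }
}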
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9f6a98b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
index 5772036..80cb627 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/k

[24/54] [abbrv] hadoop git commit: HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/366bf3c5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
new file mode 100644
index 000..2d74505
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
@@ -0,0 +1,284 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.resolver;
+
+import static 
org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMENODES;
+import static 
org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMESERVICES;
+import static 
org.apache.hadoop.hdfs.server.federation.FederationTestUtils.ROUTERS;
+import static 
org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createNamenodeReport;
+import static 
org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyException;
+import static 
org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.clearRecords;
+import static 
org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration;
+import static 
org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.newStateStore;
+import static 
org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.waitStateStore;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import 
org.apache.hadoop.hdfs.server.federation.store.StateStoreUnavailableException;
+import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Test the basic {@link ActiveNamenodeResolver} functionality.
+ */
+public class TestNamenodeResolver {
+
+  private static StateStoreService stateStore;
+  private static ActiveNamenodeResolver namenodeResolver;
+
+  @BeforeClass
+  public static void create() throws Exception {
+
+Configuration conf = getStateStoreConfiguration();
+
+// Reduce expirations to 5 seconds
+conf.setLong(
+DFSConfigKeys.FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS,
+TimeUnit.SECONDS.toMillis(5));
+
+stateStore = newStateStore(conf);
+assertNotNull(stateStore);
+
+namenodeResolver = new MembershipNamenodeResolver(conf, stateStore);
+namenodeResolver.setRouterId(ROUTERS[0]);
+  }
+
+  @AfterClass
+  public static void destroy() throws Exception {
+stateStore.stop();
+stateStore.close();
+  }
+
+  @Before
+  public void setup() throws IOException, InterruptedException {
+// Wait for state store to connect
+stateStore.loadDriver();
+waitStateStore(stateStore, 1);
+
+// Clear NN registrations
+boolean cleared = clearRecords(stateStore, MembershipState.class);
+assertTrue(cleared);
+  }
+
+  @Test
+  public void testStateStoreDisconnected() throws Exception {
+
+// Add an entry to the store
+NamenodeStatusReport report = createNamenodeReport(
+NAMESERVICES[0], NAMENODES[0], HAServiceState.ACTIVE);
+assertTrue(namenodeResolver.registerNamenode(report));
+
+// Close the data store driver
+stateStore.closeDriver();
+assertFalse(stateStore.isDriverReady());
+
+// Flush the caches
+stateStore.refreshCaches(true);
+
+// Verify commands 

[17/54] [abbrv] hadoop git commit: YARN-7359. TestAppManager.testQueueSubmitWithNoPermission() should be scheduler agnostic. (Contributed by Haibo Chen)

2017-10-20 Thread vrushali
YARN-7359. TestAppManager.testQueueSubmitWithNoPermission() should be scheduler 
agnostic. (Contributed by Haibo Chen)

(cherry picked from commit 7b4b0187806601e33f5a88d48991e7c12ee4419f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f462461d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f462461d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f462461d

Branch: refs/heads/branch-2
Commit: f462461d4eeb0dc2f10e075d1a1a815537d4e471
Parents: 1000811
Author: Yufei Gu 
Authored: Thu Oct 19 16:51:29 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:28 2017 -0700

--
 .../apache/hadoop/yarn/server/resourcemanager/TestAppManager.java  | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f462461d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
index 009eb2c..8a5c730 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
@@ -301,8 +301,6 @@ public class TestAppManager{
   @Test
   public void testQueueSubmitWithNoPermission() throws IOException {
 YarnConfiguration conf = new YarnConfiguration();
-conf.set(YarnConfiguration.RM_SCHEDULER,
-CapacityScheduler.class.getCanonicalName());
 conf.set(PREFIX + "root.acl_submit_applications", " ");
 conf.set(PREFIX + "root.acl_administer_queue", " ");
 





[11/54] [abbrv] hadoop git commit: YARN-6623. Add support to turn off launching privileged containers in the container-executor. (Varun Vasudev via wangda)

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f476f4b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
index 743a07a..6f959ac 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
@@ -118,8 +118,10 @@ public class TestDockerCommandExecutor {
 assertEquals(1, ops.size());
 assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
 ops.get(0).getOperationType().name());
-assertEquals(1, dockerCommands.size());
-assertEquals("rm " + MOCK_CONTAINER_ID, dockerCommands.get(0));
+assertEquals(3, dockerCommands.size());
+assertEquals("[docker-command-execution]", dockerCommands.get(0));
+assertEquals("  docker-command=rm", dockerCommands.get(1));
+assertEquals("  name=" + MOCK_CONTAINER_ID, dockerCommands.get(2));
   }
 
   @Test
@@ -134,8 +136,10 @@ public class TestDockerCommandExecutor {
 assertEquals(1, ops.size());
 assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
 ops.get(0).getOperationType().name());
-assertEquals(1, dockerCommands.size());
-assertEquals("stop " + MOCK_CONTAINER_ID, dockerCommands.get(0));
+assertEquals(3, dockerCommands.size());
+assertEquals("[docker-command-execution]", dockerCommands.get(0));
+assertEquals("  docker-command=stop", dockerCommands.get(1));
+assertEquals("  name=" + MOCK_CONTAINER_ID, dockerCommands.get(2));
   }
 
   @Test
@@ -151,9 +155,12 @@ public class TestDockerCommandExecutor {
 assertEquals(1, ops.size());
 assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
 ops.get(0).getOperationType().name());
-assertEquals(1, dockerCommands.size());
-assertEquals("inspect --format='{{.State.Status}}' " + MOCK_CONTAINER_ID,
-dockerCommands.get(0));
+assertEquals(4, dockerCommands.size());
+assertEquals("[docker-command-execution]", dockerCommands.get(0));
+assertEquals("  docker-command=inspect", dockerCommands.get(1));
+assertEquals("  format={{.State.Status}}", dockerCommands.get(2));
+assertEquals("  name=" + MOCK_CONTAINER_ID, dockerCommands.get(3));
+
   }
 
   @Test
@@ -169,8 +176,10 @@ public class TestDockerCommandExecutor {
 assertEquals(1, ops.size());
 assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
 ops.get(0).getOperationType().name());
-assertEquals(1, dockerCommands.size());
-assertEquals("pull " + MOCK_IMAGE_NAME, dockerCommands.get(0));
+assertEquals(3, dockerCommands.size());
+assertEquals("[docker-command-execution]", dockerCommands.get(0));
+assertEquals("  docker-command=pull", dockerCommands.get(1));
+assertEquals("  image=" + MOCK_IMAGE_NAME, dockerCommands.get(2));
   }
 
   @Test
@@ -186,8 +195,12 @@ public class TestDockerCommandExecutor {
 assertEquals(1, ops.size());
 assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
 ops.get(0).getOperationType().name());
-assertEquals(1, dockerCommands.size());
-assertEquals("load --i=" + MOCK_LOCAL_IMAGE_NAME, dockerCommands.get(0));
+assertEquals(3, dockerCommands.size());
+assertEquals("[docker-command-execution]", dockerCommands.get(0));
+assertEquals("  docker-command=load", dockerCommands.get(1));
+assertEquals("  image=" + MOCK_LOCAL_IMAGE_NAME, dockerCommands.get(2));
+
+
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f476f4b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerInspectCommand.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerInspectCommand.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-ya
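The assertions above document the new on-disk format: instead of a raw docker CLI string, the Java side now writes an ini-style command file that container-executor parses and validates. For illustration, a stop command file under the new layout would look like this (the container name is a hypothetical example):

[docker-command-execution]
  docker-command=stop
  name=container_1410901177871_0001_01_000005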

[21/54] [abbrv] hadoop git commit: HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c51de708/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
new file mode 100644
index 000..ee6f57d
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.NamenodePriorityComparator;
+import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport;
+import org.apache.hadoop.hdfs.server.federation.resolver.PathLocation;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.util.Time;
+
+/**
+ * In-memory cache/mock of a namenode and file resolver. Stores the most
+ * recently updated NN information for each nameservice and block pool. Also
+ * stores a virtual mount table for resolving global namespace paths to local 
NN
+ * paths.
+ */
+public class MockResolver
+implements ActiveNamenodeResolver, FileSubclusterResolver {
+
+  private Map<String, List<? extends FederationNamenodeContext>> resolver =
+  new HashMap<String, List<? extends FederationNamenodeContext>>();
+  private Map<String, List<RemoteLocation>> locations =
+  new HashMap<String, List<RemoteLocation>>();
+  private Set<FederationNamespaceInfo> namespaces =
+  new HashSet<FederationNamespaceInfo>();
+  private String defaultNamespace = null;
+
+  public MockResolver(Configuration conf, StateStoreService store) {
+this.cleanRegistrations();
+  }
+
+  public void addLocation(String mount, String nameservice, String location) {
+RemoteLocation remoteLocation = new RemoteLocation(nameservice, location);
+List<RemoteLocation> locationsList = locations.get(mount);
+if (locationsList == null) {
+  locationsList = new LinkedList<RemoteLocation>();
+  locations.put(mount, locationsList);
+}
+if (!locationsList.contains(remoteLocation)) {
+  locationsList.add(remoteLocation);
+}
+
+if (this.defaultNamespace == null) {
+  this.defaultNamespace = nameservice;
+}
+  }
+
+  public synchronized void cleanRegistrations() {
+this.resolver =
+new HashMap<String, List<? extends FederationNamenodeContext>>();
+this.namespaces = new HashSet<FederationNamespaceInfo>();
+  }
+
+  @Override
+  public void updateActiveNamenode(
+  String ns, InetSocketAddress successfulAddress) {
+
+String address = successfulAddress.getHostName() + ":" +
+successfulAddress.getPort();
+String key = ns;
+if (key != null) {
+  // Update the active entry
+  @SuppressWarnings("unchecked")
+  List<FederationNamenodeContext> iterator =
+  (List<FederationNamenodeContext>) resolver.get(key);
+  for (FederationNamenodeContext namenode : iterator) {
+if (namenode.getRpcAddress().equals(address)) {
+  MockNamenodeContext nn = (MockNamenodeContext) namenode;
+  nn.setState(FederationNamenodeServiceState.ACTIVE);
+  break;
+}
+  }
+  Collections.sort(iterator, new NamenodePriorityComparator());
+}
+  }
+
+  @Override
+  public List<? extends FederationNamenodeContext>
+  getNamenodesForNameserviceId(String nameserviceId) {
+return resolver.get(nameserviceId);
+  }
+
+  @Override
+  public List<? extends FederationNamenodeContext> getNamenodesForBlockPoolId(
+  

[37/54] [abbrv] hadoop git commit: HDFS-10880. Federation Mount Table State Store internal API. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-10880. Federation Mount Table State Store internal API. Contributed by 
Jason Kace and Inigo Goiri.

(cherry picked from commit 58b97df661441150d35abd44b3a8606206b46441)
(cherry picked from commit 6f0de2731806628b5b01bd1350225692147590da)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c77a04b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c77a04b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c77a04b1

Branch: refs/heads/branch-2
Commit: c77a04b10f9094dab7518c4264701cae9fe02d13
Parents: 6f787d2
Author: Inigo Goiri 
Authored: Fri Aug 4 18:00:12 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:31 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   7 +-
 .../federation/resolver/MountTableManager.java  |  80 +++
 .../federation/resolver/MountTableResolver.java | 544 +++
 .../federation/resolver/PathLocation.java   | 124 -
 .../resolver/order/DestinationOrder.java|  29 +
 .../federation/resolver/order/package-info.java |  29 +
 .../federation/router/FederationUtil.java   |  56 +-
 .../hdfs/server/federation/router/Router.java   |   3 +-
 .../federation/store/MountTableStore.java   |  49 ++
 .../federation/store/StateStoreService.java |   2 +
 .../store/impl/MountTableStoreImpl.java | 116 
 .../protocol/AddMountTableEntryRequest.java |  47 ++
 .../protocol/AddMountTableEntryResponse.java|  42 ++
 .../protocol/GetMountTableEntriesRequest.java   |  49 ++
 .../protocol/GetMountTableEntriesResponse.java  |  53 ++
 .../protocol/RemoveMountTableEntryRequest.java  |  49 ++
 .../protocol/RemoveMountTableEntryResponse.java |  42 ++
 .../protocol/UpdateMountTableEntryRequest.java  |  51 ++
 .../protocol/UpdateMountTableEntryResponse.java |  43 ++
 .../pb/AddMountTableEntryRequestPBImpl.java |  84 +++
 .../pb/AddMountTableEntryResponsePBImpl.java|  76 +++
 .../pb/GetMountTableEntriesRequestPBImpl.java   |  76 +++
 .../pb/GetMountTableEntriesResponsePBImpl.java  | 104 
 .../pb/RemoveMountTableEntryRequestPBImpl.java  |  76 +++
 .../pb/RemoveMountTableEntryResponsePBImpl.java |  76 +++
 .../pb/UpdateMountTableEntryRequestPBImpl.java  |  96 
 .../pb/UpdateMountTableEntryResponsePBImpl.java |  76 +++
 .../federation/store/records/MountTable.java| 301 ++
 .../store/records/impl/pb/MountTablePBImpl.java | 213 
 .../src/main/proto/FederationProtocol.proto |  61 ++-
 .../hdfs/server/federation/MockResolver.java|   9 +-
 .../resolver/TestMountTableResolver.java| 396 ++
 .../store/FederationStateStoreTestUtils.java|  16 +
 .../store/TestStateStoreMountTable.java | 250 +
 .../store/driver/TestStateStoreDriverBase.java  |  12 +
 .../store/records/TestMountTable.java   | 176 ++
 36 files changed, 3437 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77a04b1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 6d06bf2..f3bc592 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
@@ -1070,8 +1072,9 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   // HDFS Router State Store connection
   public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS =
   FEDERATION_ROUTER_PREFIX + "file.resolver.client.class";
-  public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT =
-  "org.apache.hadoop.hdfs.server.federation.MockResolver";
+  public static final Class<? extends FileSubclusterResolver>
+  FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT =
+  MountTableResolver.class;
   public

[22/54] [abbrv] hadoop git commit: HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri.

(cherry picked from commit 6821e801724ac38e9737538b2164c9ae88792282)
(cherry picked from commit 2761bbc91a7b0a36c42b1b6569c5ecd4f236281b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c51de708
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c51de708
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c51de708

Branch: refs/heads/branch-2
Commit: c51de708abb21d8f0dd0a9c70786ac08535d0672
Parents: f462461
Author: Inigo 
Authored: Tue Mar 28 14:30:59 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:29 2017 -0700

--
 .../hadoop-hdfs/src/main/bin/hdfs.cmd   |   8 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  17 +
 .../resolver/ActiveNamenodeResolver.java| 117 +++
 .../resolver/FederationNamenodeContext.java |  87 +++
 .../FederationNamenodeServiceState.java |  46 ++
 .../resolver/FederationNamespaceInfo.java   |  99 +++
 .../resolver/FileSubclusterResolver.java|  75 ++
 .../resolver/NamenodePriorityComparator.java|  63 ++
 .../resolver/NamenodeStatusReport.java  | 195 +
 .../federation/resolver/PathLocation.java   | 122 +++
 .../federation/resolver/RemoteLocation.java |  74 ++
 .../federation/resolver/package-info.java   |  41 +
 .../federation/router/FederationUtil.java   | 117 +++
 .../router/RemoteLocationContext.java   |  38 +
 .../hdfs/server/federation/router/Router.java   | 263 +++
 .../federation/router/RouterRpcServer.java  | 102 +++
 .../server/federation/router/package-info.java  |  31 +
 .../federation/store/StateStoreService.java |  77 ++
 .../server/federation/store/package-info.java   |  62 ++
 .../src/main/resources/hdfs-default.xml |  16 +
 .../server/federation/FederationTestUtils.java  | 233 ++
 .../hdfs/server/federation/MockResolver.java| 290 +++
 .../server/federation/RouterConfigBuilder.java  |  40 +
 .../server/federation/RouterDFSCluster.java | 767 +++
 .../server/federation/router/TestRouter.java|  96 +++
 25 files changed, 3075 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c51de708/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index 2181e47..b9853d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -59,7 +59,7 @@ if "%1" == "--loglevel" (
 )
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode 
dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups 
snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath 
crypto debug
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode 
dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups 
snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath 
crypto router debug
   for %%i in ( %hdfscommands% ) do (
 if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -179,6 +179,11 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
   goto :eof
 
+:router
+  set CLASS=org.apache.hadoop.hdfs.server.federation.router.Router
+  set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
+  goto :eof
+
 :debug
   set CLASS=org.apache.hadoop.hdfs.tools.DebugAdmin
   goto :eof
@@ -219,6 +224,7 @@ goto :eof
   @echo   secondarynamenoderun the DFS secondary namenode
   @echo   namenode run the DFS namenode
   @echo   journalnode  run the DFS journalnode
+  @echo   router   run the DFS router
   @echo   zkfc run the ZK Failover Controller daemon
   @echo   datanode run a DFS datanode
   @echo   dfsadmin run a DFS admin client

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c51de708/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e4c02c2..912307f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1001,6 +1001,23 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   "dfs.use.dfs.network.topology

[13/54] [abbrv] hadoop git commit: YARN-6623. Add support to turn off launching privileged containers in the container-executor. (Varun Vasudev via wangda)

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f476f4b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
new file mode 100644
index 000..860320d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
@@ -0,0 +1,998 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include "../modules/common/module-configs.h"
+#include "docker-util.h"
+#include "string-utils.h"
+#include "util.h"
+
+static int read_and_verify_command_file(const char *command_file, const char 
*docker_command,
+struct configuration *command_config) {
+  int ret = 0;
+  ret = read_config(command_file, command_config);
+  if (ret != 0) {
+return INVALID_COMMAND_FILE;
+  }
+  char *command = get_configuration_value("docker-command", 
DOCKER_COMMAND_FILE_SECTION, command_config);
+  if (command == NULL || (strcmp(command, docker_command) != 0)) {
+ret = INCORRECT_COMMAND;
+  }
+  free(command);
+  return ret;
+}
+
+static int add_to_buffer(char *buff, const size_t bufflen, const char *string) 
{
+  size_t current_len = strlen(buff);
+  size_t string_len = strlen(string);
+  if (current_len + string_len < bufflen - 1) {
+strncpy(buff + current_len, string, string_len);
+buff[current_len + string_len] = '\0';
+return 0;
+  }
+  return -1;
+}
+
+static int add_param_to_command(const struct configuration *command_config, 
const char *key, const char *param,
+const int with_argument, char *out, const 
size_t outlen) {
+  size_t tmp_buffer_size = 4096;
+  int ret = 0;
+  char *tmp_buffer = (char *) alloc_and_clear_memory(tmp_buffer_size, 
sizeof(char));
+  char *value = get_configuration_value(key, DOCKER_COMMAND_FILE_SECTION, 
command_config);
+  if (value != NULL) {
+if (with_argument) {
+  quote_and_append_arg(&tmp_buffer, &tmp_buffer_size, param, value);
+  ret = add_to_buffer(out, outlen, tmp_buffer);
+} else if (strcmp(value, "true") == 0) {
+  ret = add_to_buffer(out, outlen, param);
+}
+free(value);
+if (ret != 0) {
+  ret = BUFFER_TOO_SMALL;
+}
+  }
+  free(tmp_buffer);
+  return ret;
+}
+
+static int add_param_to_command_if_allowed(const struct configuration 
*command_config,
+   const struct configuration 
*executor_cfg,
+   const char *key, const char 
*allowed_key, const char *param,
+   const int multiple_values, const 
char prefix,
+   char *out, const size_t outlen) {
+  size_t tmp_buffer_size = 4096;
+  char *tmp_buffer = (char *) alloc_and_clear_memory(tmp_buffer_size, 
sizeof(char));
+  char *tmp_ptr = NULL;
+  char **values = NULL;
+  char **permitted_values = get_configuration_values_delimiter(allowed_key,
+   
CONTAINER_EXECUTOR_CFG_DOCKER_SECTION, executor_cfg,
+   ",");
+  int i = 0, j = 0, permitted = 0, ret = 0;
+  if (multiple_values) {
+values = get_configuration_values_delimiter(key, 
DOCKER_COMMAND_FILE_SECTION, command_config, ",");
+  } else {
+values = (char **) alloc_and_clear_memory(2, sizeof(char *));
+values[0] = get_configuration_value(key, DOCKER_COMMAND_FILE_SECTION, 
command_config);
+values[1] = NULL;
+if (values[0] == NULL) {
+  ret = 0;
+  goto free_and_exit;
+}
+  }
+
+  if (values != NULL) {
+if (permitted_values != NULL) {
+  for (i = 0; values[i] != NULL; ++i) {
+memset(tmp

[18/54] [abbrv] hadoop git commit: HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/93687da4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
new file mode 100644
index 000..7f0b36a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
@@ -0,0 +1,483 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.driver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.conf.Configuration;
+import 
org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
+import org.apache.hadoop.hdfs.server.federation.store.records.Query;
+import org.apache.hadoop.hdfs.server.federation.store.records.QueryResult;
+import org.junit.AfterClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Base tests for the driver. The particular implementations will use this to
+ * test their functionality.
+ */
+public class TestStateStoreDriverBase {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestStateStoreDriverBase.class);
+
+  private static StateStoreService stateStore;
+  private static Configuration conf;
+
+
+  /**
+   * Get the State Store driver.
+   * @return State Store driver.
+   */
+  protected StateStoreDriver getStateStoreDriver() {
+return stateStore.getDriver();
+  }
+
+  @AfterClass
+  public static void tearDownCluster() {
+if (stateStore != null) {
+  stateStore.stop();
+}
+  }
+
+  /**
+   * Get a new State Store using this configuration.
+   *
+   * @param config Configuration for the State Store.
+   * @throws Exception If we cannot get the State Store.
+   */
+  public static void getStateStore(Configuration config) throws Exception {
+conf = config;
+stateStore = FederationStateStoreTestUtils.getStateStore(conf);
+  }
+
+  private <T extends BaseRecord> T generateFakeRecord(Class<T> recordClass)
+  throws IllegalArgumentException, IllegalAccessException, IOException {
+
+// TODO add record
+return null;
+  }
+
+  /**
+   * Validate if a record is the same.
+   *
+   * @param original
+   * @param committed
+   * @param assertEquals Assert if the records are equal or just return.
+   * @return
+   * @throws IllegalArgumentException
+   * @throws IllegalAccessException
+   */
+  private boolean validateRecord(
+  BaseRecord original, BaseRecord committed, boolean assertEquals)
+  throws IllegalArgumentException, IllegalAccessException {
+
+boolean ret = true;
+
+Map<String, Class<?>> fields = getFields(original);
+for (String key : fields.keySet()) {
+  if (key.equals("dateModified") ||
+  key.equals("dateCreated") ||
+  key.equals("proto")) {
+// Fields are updated/set on commit and fetch and may not match
+// the fields that are initialized in a non-committed object.
+continue;
+  }
+  Object data1 = getField(original, key);
+  Object data2 = getField(committed, key);
+  if (assertEquals) {
+assertEquals("Field " + key + " does not match", data1, data2);
+  } else if (!data1.equals(data2)) {
+ret = false;
+  }
+}
+
+long now = stateStore.getDriver().getTime();
+asser
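
A sketch of how a concrete driver test would plug into this base class. Only
getStateStore(conf) and getStateStoreDriver() come from the code above; the
class name and configuration helper below are hypothetical:

  public class TestStateStoreMyDriver extends TestStateStoreDriverBase {
    @BeforeClass
    public static void setup() throws Exception {
      // Hypothetical helper returning a Configuration that selects a
      // concrete StateStoreDriver implementation.
      Configuration conf = createDriverConfiguration();
      getStateStore(conf);
    }
    // Tests would then exercise getStateStoreDriver() directly.
  }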

[49/54] [abbrv] hadoop git commit: HDFS-12273. Federation UI. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-12273. Federation UI. Contributed by Inigo Goiri.

(cherry picked from commit adbb2e00c7b85524fd43bd68895d49814c16680a)
(cherry picked from commit 81601dac8ec7650bec14700b174910390a92fe1f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1772d456
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1772d456
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1772d456

Branch: refs/heads/branch-2
Commit: 1772d4563db69eee057fa01655c77de757b8541a
Parents: 9920a89
Author: Inigo Goiri 
Authored: Thu Oct 5 17:26:43 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:34 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  19 +
 .../federation/metrics/FederationMBean.java |   7 +
 .../federation/metrics/FederationMetrics.java   |  25 +-
 .../resolver/MembershipNamenodeResolver.java|  23 ++
 .../hdfs/server/federation/router/Router.java   |  36 ++
 .../federation/router/RouterHttpServer.java | 124 +++
 .../federation/router/RouterRpcClient.java  |  45 ++-
 .../federation/router/RouterRpcServer.java  |  15 +-
 .../src/main/resources/hdfs-default.xml |  56 +++
 .../main/webapps/router/federationhealth.html   | 371 +++
 .../src/main/webapps/router/federationhealth.js | 313 
 .../src/main/webapps/router/index.html  |  24 ++
 .../server/federation/RouterConfigBuilder.java  |  13 +
 .../server/federation/RouterDFSCluster.java |  29 +-
 .../federation/metrics/TestMetricsBase.java |   1 +
 .../server/federation/router/TestRouter.java|   9 +-
 17 files changed, 1102 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1772d456/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 8ae3db8..154e4f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -276,6 +276,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
 
+
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1772d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3606e7a..3f967da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1141,6 +1141,25 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   FEDERATION_ROUTER_PREFIX + "admin.enable";
   public static final boolean DFS_ROUTER_ADMIN_ENABLE_DEFAULT = true;
 
+  // HDFS Router-based federation web
+  public static final String DFS_ROUTER_HTTP_ENABLE =
+  FEDERATION_ROUTER_PREFIX + "http.enable";
+  public static final boolean DFS_ROUTER_HTTP_ENABLE_DEFAULT = true;
+  public static final String DFS_ROUTER_HTTP_ADDRESS_KEY =
+  FEDERATION_ROUTER_PREFIX + "http-address";
+  public static final intDFS_ROUTER_HTTP_PORT_DEFAULT = 50071;
+  public static final String DFS_ROUTER_HTTP_BIND_HOST_KEY =
+  FEDERATION_ROUTER_PREFIX + "http-bind-host";
+  public static final String DFS_ROUTER_HTTP_ADDRESS_DEFAULT =
+  "0.0.0.0:" + DFS_ROUTER_HTTP_PORT_DEFAULT;
+  public static final String DFS_ROUTER_HTTPS_ADDRESS_KEY =
+  FEDERATION_ROUTER_PREFIX + "https-address";
+  public static final intDFS_ROUTER_HTTPS_PORT_DEFAULT = 50072;
+  public static final String DFS_ROUTER_HTTPS_BIND_HOST_KEY =
+  FEDERATION_ROUTER_PREFIX + "https-bind-host";
+  public static final String DFS_ROUTER_HTTPS_ADDRESS_DEFAULT =
+  "0.0.0.0:" + DFS_ROUTER_HTTPS_PORT_DEFAULT;
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY
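
A short sketch of wiring the new Router web UI keys through the usual
Configuration API (illustrative only; the port value here is arbitrary, not
part of the commit):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.DFSConfigKeys;
  import org.apache.hadoop.hdfs.HdfsConfiguration;

  Configuration conf = new HdfsConfiguration();
  // The HTTP endpoint is on by default; override the default 0.0.0.0:50071.
  conf.setBoolean(DFSConfigKeys.DFS_ROUTER_HTTP_ENABLE, true);
  conf.set(DFSConfigKeys.DFS_ROUTER_HTTP_ADDRESS_KEY, "0.0.0.0:8080");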

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1772d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/

[01/54] [abbrv] hadoop git commit: YARN-6608. Backport all SLS improvements from trunk to branch-2. (Carlo Curino via wangda)

2017-10-20 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 20f5687e5 -> 769087288


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a3929f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
new file mode 100644
index 000..35cf1e4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.monitor.invariants;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.log4j.Logger;
+import org.junit.Before;
+import org.junit.Test;
+
+import static junit.framework.TestCase.fail;
+
+/**
+ * This class tests the {@code MetricsInvariantChecker} by running it multiple
+ * times and reporting the time it takes to execute, as well as verifying that
+ * the checker throws when the invariants are not respected.
+ */
+public class TestMetricsInvariantChecker {
+  public final static Logger LOG =
+  Logger.getLogger(TestMetricsInvariantChecker.class);
+
+  private MetricsSystem metricsSystem;
+  private MetricsInvariantChecker ic;
+  private Configuration conf;
+
+  @Before
+  public void setup() {
+this.metricsSystem = DefaultMetricsSystem.instance();
+JvmMetrics.initSingleton("ResourceManager", null);
+this.ic = new MetricsInvariantChecker();
+this.conf = new Configuration();
+conf.set(MetricsInvariantChecker.INVARIANTS_FILE,
+"src/test/resources/invariants.txt");
+conf.setBoolean(MetricsInvariantChecker.THROW_ON_VIOLATION, true);
+ic.init(conf, null, null);
+  }
+
+  @Test(timeout = 5000)
+  public void testManyRuns() {
+
+QueueMetrics qm =
+QueueMetrics.forQueue(metricsSystem, "root", null, false, conf);
+qm.setAvailableResourcesToQueue(Resource.newInstance(1, 1));
+
+int numIterations = 1000;
+long start = System.currentTimeMillis();
+for (int i = 0; i < numIterations; i++) {
+  ic.editSchedule();
+}
+long end = System.currentTimeMillis();
+
+System.out.println("Runtime per iteration (avg of " + numIterations
++ " iterations): " + (end - start) + " tot time");
+
+  }
+
+  @Test
+  public void testViolation() {
+
+// create a "wrong" condition in which the invariants are not respected
+QueueMetrics qm =
+QueueMetrics.forQueue(metricsSystem, "root", null, false, conf);
+qm.setAvailableResourcesToQueue(Resource.newInstance(-1, -1));
+
+// test with throwing exception turned on
+try {
+  ic.editSchedule();
+  fail();
+} catch (InvariantViolationException i) {
+  // expected
+}
+
+// test log-only mode
+conf.setBoolean(MetricsInvariantChecker.THROW_ON_VIOLATION, false);
+ic.init(conf, null, null);
+ic.editSchedule();
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a3929f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/invariants.txt
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/invarian

[10/54] [abbrv] hadoop git commit: HDFS-12420. Add an option to disallow 'namenode format -force'. Contributed by Ajay Kumar.

2017-10-20 Thread vrushali
HDFS-12420. Add an option to disallow 'namenode format -force'. Contributed by 
Ajay Kumar.

(cherry picked from commit b6942cbe9b8c9469e8c2b64c3268d671f5a43e75)
(cherry picked from commit 5897095d539be086ed37df011f024e37eb37b0cd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc3ca4c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc3ca4c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc3ca4c1

Branch: refs/heads/branch-2
Commit: bc3ca4c1061f222188007e3b07f64afc2b91ea35
Parents: 6a3929f
Author: Arpit Agarwal 
Authored: Thu Oct 5 15:26:52 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:27 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 ++
 .../hadoop/hdfs/server/namenode/NameNode.java   | 16 +
 .../namenode/NameNodeFormatException.java   | 37 
 .../src/main/resources/hdfs-default.xml | 11 ++
 .../src/site/markdown/HDFSCommands.md   |  2 +-
 .../hdfs/server/namenode/TestClusterId.java | 34 ++
 6 files changed, 101 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc3ca4c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 6bec228..e4c02c2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -244,6 +244,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT = 
"supergroup";
   public static final String  DFS_NAMENODE_ACLS_ENABLED_KEY = 
"dfs.namenode.acls.enabled";
   public static final boolean DFS_NAMENODE_ACLS_ENABLED_DEFAULT = false;
+  public static final String DFS_REFORMAT_DISABLED = "dfs.reformat.disabled";
+  public static final boolean DFS_REFORMAT_DISABLED_DEFAULT = false;
   public static final String  DFS_NAMENODE_XATTRS_ENABLED_KEY = 
"dfs.namenode.xattrs.enabled";
   public static final boolean DFS_NAMENODE_XATTRS_ENABLED_DEFAULT = true;
   public static final String  DFS_ADMIN = "dfs.cluster.administrators";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc3ca4c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index f4097bb..1826bce 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -50,6 +50,7 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.MetricsLoggerTask;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.ha.ActiveState;
 import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
@@ -1149,6 +1150,21 @@ public class NameNode extends ReconfigurableBase 
implements
   FSNamesystem fsn = new FSNamesystem(conf, fsImage);
   fsImage.getEditLog().initJournalsForWrite();
 
+  // Abort NameNode format if reformat is disabled and if
+  // meta-dir already exists
+  if (conf.getBoolean(DFSConfigKeys.DFS_REFORMAT_DISABLED,
+  DFSConfigKeys.DFS_REFORMAT_DISABLED_DEFAULT)) {
+force = false;
+isInteractive = false;
+for (StorageDirectory sd : fsImage.storage.dirIterable(null)) {
+  if (sd.hasSomeData()) {
+throw new NameNodeFormatException(
+"NameNode format aborted as reformat is disabled for "
++ "this cluster.");
+  }
+}
+  }
+
   if (!fsImage.confirmFormat(force, isInteractive)) {
 return true; // aborted
   }
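
A minimal sketch of turning the new guard on programmatically, equivalent to
setting dfs.reformat.disabled=true in hdfs-site.xml (illustrative, not part
of the commit):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.DFSConfigKeys;
  import org.apache.hadoop.hdfs.HdfsConfiguration;

  // Once set, "hdfs namenode -format" aborts if any metadata dir has data.
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_REFORMAT_DISABLED, true); // default: false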

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc3ca4c1/hado

[04/54] [abbrv] hadoop git commit: YARN-6608. Backport all SLS improvements from trunk to branch-2. (Carlo Curino via wangda)

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a3929f2/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java
index 3b539fa..420a1c9 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java
@@ -18,16 +18,17 @@
 
 package org.apache.hadoop.yarn.sls.scheduler;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair
-.FSAppAttempt;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable;
+import org.apache.hadoop.yarn.sls.SLSRunner;
 
 import com.codahale.metrics.Gauge;
-import org.apache.hadoop.yarn.sls.SLSRunner;
 
 @Private
 @Unstable
@@ -37,114 +38,131 @@ public class FairSchedulerMetrics extends 
SchedulerMetrics {
   private int totalVCores = Integer.MAX_VALUE;
   private boolean maxReset = false;
 
+  @VisibleForTesting
+  public enum Metric {
+DEMAND("demand"),
+USAGE("usage"),
+MINSHARE("minshare"),
+MAXSHARE("maxshare"),
+FAIRSHARE("fairshare");
+
+private String value;
+
+Metric(String value) {
+  this.value = value;
+}
+
+@VisibleForTesting
+public String getValue() {
+  return value;
+}
+  }
+
   public FairSchedulerMetrics() {
 super();
-appTrackedMetrics.add("demand.memory");
-appTrackedMetrics.add("demand.vcores");
-appTrackedMetrics.add("usage.memory");
-appTrackedMetrics.add("usage.vcores");
-appTrackedMetrics.add("minshare.memory");
-appTrackedMetrics.add("minshare.vcores");
-appTrackedMetrics.add("maxshare.memory");
-appTrackedMetrics.add("maxshare.vcores");
-appTrackedMetrics.add("fairshare.memory");
-appTrackedMetrics.add("fairshare.vcores");
-queueTrackedMetrics.add("demand.memory");
-queueTrackedMetrics.add("demand.vcores");
-queueTrackedMetrics.add("usage.memory");
-queueTrackedMetrics.add("usage.vcores");
-queueTrackedMetrics.add("minshare.memory");
-queueTrackedMetrics.add("minshare.vcores");
-queueTrackedMetrics.add("maxshare.memory");
-queueTrackedMetrics.add("maxshare.vcores");
-queueTrackedMetrics.add("fairshare.memory");
-queueTrackedMetrics.add("fairshare.vcores");
+
+for (Metric metric: Metric.values()) {
+  appTrackedMetrics.add(metric.value + ".memory");
+  appTrackedMetrics.add(metric.value + ".vcores");
+  queueTrackedMetrics.add(metric.value + ".memory");
+  queueTrackedMetrics.add(metric.value + ".vcores");
+}
   }
-  
-  @Override
-  public void trackApp(ApplicationAttemptId appAttemptId, String oldAppId) {
-super.trackApp(appAttemptId, oldAppId);
-FairScheduler fair = (FairScheduler) scheduler;
-final FSAppAttempt app = fair.getSchedulerApp(appAttemptId);
-metrics.register("variable.app." + oldAppId + ".demand.memory",
-  new Gauge<Long>() {
-@Override
-public Long getValue() {
-  return app.getDemand().getMemorySize();
-}
-  }
-);
-metrics.register("variable.app." + oldAppId + ".demand.vcores",
-  new Gauge<Integer>() {
-@Override
-public Integer getValue() {
-  return app.getDemand().getVirtualCores();
-}
-  }
-);
-metrics.register("variable.app." + oldAppId + ".usage.memory",
-  new Gauge<Long>() {
-@Override
-public Long getValue() {
-  return app.getResourceUsage().getMemorySize();
-}
-  }
-);
-metrics.register("variable.app." + oldAppId + ".usage.vcores",
-  new Gauge<Integer>() {
-@Override
-public Integer getValue() {
-  return app.getResourceUsage().getVirtualCores();
-}
-  }
-);
-metrics.register("variable.app." + oldAppId + ".minshare.memory",
-  new Gauge<Long>() {
-@Override
-public Long getValue() {
-  return app.getMinShare().getMemorySize();
-}
-  }
-);
-metrics.register("variable.app." + oldAppId + ".minshare.vcores",
-  new Gauge<Long>() {
-@Override
-public Long getV
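
The enum refactor above collapses twenty hand-written add() calls into one
loop; every Metric value expands to a ".memory" and a ".vcores" entry. A tiny
illustrative sketch of the resulting names:

  for (FairSchedulerMetrics.Metric m : FairSchedulerMetrics.Metric.values()) {
    System.out.println(m.getValue() + ".memory"); // demand.memory, usage.memory, ...
    System.out.println(m.getValue() + ".vcores"); // demand.vcores, usage.vcores, ...
  }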

[43/54] [abbrv] hadoop git commit: HDFS-12580. Rebasing HDFS-10467 after HDFS-12447. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-12580. Rebasing HDFS-10467 after HDFS-12447. Contributed by Inigo Goiri.

(cherry picked from commit b12440d3479f19138bc66ea59baf41eb89061906)
(cherry picked from commit 6c69e23dcdf1cdbddd47bacdf2dace5c9f06e3ad)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9920a89e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9920a89e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9920a89e

Branch: refs/heads/branch-2
Commit: 9920a89e5f0283088bec46fc998c3e7f35c9e3b5
Parents: 42e0ae0
Author: Inigo Goiri 
Authored: Mon Oct 2 18:45:06 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:33 2017 -0700

--
 .../hadoop/hdfs/server/federation/router/RouterRpcServer.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9920a89e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 1fa1720..650c6ab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -1857,8 +1857,8 @@ public class RouterRpcServer extends AbstractService 
implements ClientProtocol {
   }
 
   @Override
-  public AddECPolicyResponse[] addErasureCodingPolicies(
-  ErasureCodingPolicy[] arg0) throws IOException {
+  public AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
+  ErasureCodingPolicy[] policies) throws IOException {
 checkOperation(OperationCategory.WRITE, false);
 return null;
   }





[08/54] [abbrv] hadoop git commit: HDFS-12502. nntop should support a category based on FilesInGetListingOps.

2017-10-20 Thread vrushali
HDFS-12502. nntop should support a category based on FilesInGetListingOps.

(cherry picked from commit 60bfee270ed3a653c44c0bc92396167b5022df6e)
(cherry picked from commit aecf6c50d351e7aed2008a682677a1edb104585d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86c2adc4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86c2adc4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86c2adc4

Branch: refs/heads/branch-2
Commit: 86c2adc4528ffd27b744ee7e88109b86aaa3ec02
Parents: e8c6ef0
Author: Zhe Zhang 
Authored: Wed Oct 18 23:51:24 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:27 2017 -0700

--
 .../hdfs/server/namenode/FSNamesystem.java  |  5 
 .../server/namenode/top/metrics/TopMetrics.java | 30 +++-
 .../server/namenode/metrics/TestTopMetrics.java | 11 +--
 3 files changed, 36 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c2adc4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index e21da7f..eded007 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3619,6 +3619,11 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   readUnlock(operationName);
 }
 logAuditEvent(true, operationName, src);
+if (topConf.isEnabled && isAuditEnabled() && isExternalInvocation()
+&& dl != null && Server.getRemoteUser() != null) {
+  topMetrics.reportFilesInGetListing(Server.getRemoteUser().toString(),
+  dl.getPartialListing().length);
+}
 return dl;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c2adc4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
index 2719c88..3d8dd19 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
@@ -70,6 +70,14 @@ public class TopMetrics implements MetricsSource {
   public static final Logger LOG = LoggerFactory.getLogger(TopMetrics.class);
   public static final String TOPMETRICS_METRICS_SOURCE_NAME =
   "NNTopUserOpCounts";
+  /**
+   * In addition to counts of different RPC calls, NNTop also reports top
+   * users listing large directories (measured by the number of files involved
+   * in listing operations from the user). This is important because the CPU
+   * and GC overhead of a listing operation grows linearly with the number of
+   * files involved. This category in NNTop is {@link #FILES_IN_GETLISTING}.
+   */
+  public static final String FILES_IN_GETLISTING = "filesInGetListing";
   private final boolean isMetricsSourceEnabled;
 
   private static void logConf(Configuration conf) {
@@ -123,22 +131,30 @@ public class TopMetrics implements MetricsSource {
   public void report(boolean succeeded, String userName, InetAddress addr,
   String cmd, String src, String dst, FileStatus status) {
 // currently nntop only makes use of the username and the command
-report(userName, cmd);
+report(userName, cmd, 1);
   }
 
-  public void report(String userName, String cmd) {
+  public void reportFilesInGetListing(String userName, int numFiles) {
+report(userName, FILES_IN_GETLISTING, numFiles);
+  }
+
+  public void report(String userName, String cmd, int delta) {
 long currTime = Time.monotonicNow();
-report(currTime, userName, cmd);
+report(currTime, userName, cmd, delta);
   }
 
-  public void report(long currTime, String userName, String cmd) {
+  public void report(long currTime, String userName, String cmd, int delta) {
 LOG.debug("a metric is reported: cmd: {} user: {}", cmd, userName);
 userName = UserGroupInformation.trimLoginMethod(userName);
 for (RollingWindowManager rollingWindowManager : rollingWindowManagers
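
A rough sketch of the reporting path for the new category, reusing the names
from the FSNamesystem hunk earlier in this message (dl, topMetrics, and Server
are assumed to be in scope there; this is illustrative, not additional commit
code):

  // Charge the N files returned by a getListing call to the calling user
  // under the "filesInGetListing" nntop category.
  topMetrics.reportFilesInGetListing(Server.getRemoteUser().toString(),
      dl.getPartialListing().length);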

[40/54] [abbrv] hadoop git commit: HDFS-12312. Rebasing HDFS-10467 (2). Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-12312. Rebasing HDFS-10467 (2). Contributed by Inigo Goiri.

(cherry picked from commit 90ba6843fb3ac5dc7576535e66a75a5e3433247b)
(cherry picked from commit 346c9fce43ebf6a90fc56e0dc7c403f97cc5391f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/368a7d3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/368a7d3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/368a7d3d

Branch: refs/heads/branch-2
Commit: 368a7d3d42b0ad2a525841f1797d41abc57daef0
Parents: 568a32e
Author: Inigo Goiri 
Authored: Wed Aug 16 17:31:37 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:32 2017 -0700

--
 .../hadoop/hdfs/server/federation/router/RouterRpcServer.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/368a7d3d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index eaaab39..c77d255 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -1946,6 +1946,7 @@ public class RouterRpcServer extends AbstractService 
implements ClientProtocol {
 }
 long inodeId = 0;
 return new HdfsFileStatus(0, true, 0, 0, modTime, accessTime, permission,
+EnumSet.noneOf(HdfsFileStatus.Flags.class),
 owner, group, new byte[0], DFSUtil.string2Bytes(name), inodeId,
 childrenNum, null, (byte) 0, null);
   }





[33/54] [abbrv] hadoop git commit: HDFS-10646. Federation admin tool. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-10646. Federation admin tool. Contributed by Inigo Goiri.

(cherry picked from commit ae27e31fbcf546481db0b0345772db2e9132372e)
(cherry picked from commit b3e6bd22e3c02b3e4f50396538f56a1bcb007638)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89c80185
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89c80185
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89c80185

Branch: refs/heads/branch-2
Commit: 89c8018592cf5516ba30f579ca634fb14c15098c
Parents: c77a04b
Author: Inigo Goiri 
Authored: Tue Aug 8 14:44:43 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:31 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   1 +
 .../hadoop-hdfs/src/main/bin/hdfs.cmd   |   7 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  19 ++
 .../hdfs/protocolPB/RouterAdminProtocolPB.java  |  44 +++
 ...uterAdminProtocolServerSideTranslatorPB.java | 151 
 .../RouterAdminProtocolTranslatorPB.java| 150 
 .../resolver/MembershipNamenodeResolver.java|  34 +-
 .../hdfs/server/federation/router/Router.java   |  52 +++
 .../federation/router/RouterAdminServer.java| 183 ++
 .../server/federation/router/RouterClient.java  |  76 +
 .../hdfs/tools/federation/RouterAdmin.java  | 341 +++
 .../hdfs/tools/federation/package-info.java |  28 ++
 .../src/main/proto/RouterProtocol.proto |  47 +++
 .../src/main/resources/hdfs-default.xml |  46 +++
 .../server/federation/RouterConfigBuilder.java  |  26 ++
 .../server/federation/RouterDFSCluster.java |  43 ++-
 .../server/federation/StateStoreDFSCluster.java | 148 
 .../federation/router/TestRouterAdmin.java  | 261 ++
 18 files changed, 1639 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89c80185/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 4f9b782..453c919 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -349,6 +349,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
   editlog.proto
   fsimage.proto
   FederationProtocol.proto
+  RouterProtocol.proto
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89c80185/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index b9853d6..53bdf70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -59,7 +59,7 @@ if "%1" == "--loglevel" (
 )
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode 
dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups 
snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath 
crypto router debug
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode 
dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups 
snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath 
crypto router federation debug
   for %%i in ( %hdfscommands% ) do (
 if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -184,6 +184,11 @@ goto :eof
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
   goto :eof
 
+:federation
+  set CLASS=org.apache.hadoop.hdfs.tools.federation.RouterAdmin
+  set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
+  goto :eof
+
 :debug
   set CLASS=org.apache.hadoop.hdfs.tools.DebugAdmin
   goto :eof

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89c80185/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f3bc592..b161bc0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1085,6 +1085,10 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final String FEDERATION_STORE_PREFIX =
   FEDERATION_ROUTER_PREFIX + "store.";
 
+  public static final String DFS_ROUTER_STORE_ENABLE =
+ 
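
A hedged sketch of exercising the new admin tool from Java rather than the
'hdfs federation' shell target; the constructor and argument layout below
follow the mount-table commands in RouterAdmin but should be treated as
illustrative:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
  import org.apache.hadoop.util.ToolRunner;

  // Add a mount table entry mapping /data to ns0:/data via the admin server.
  int rc = ToolRunner.run(new RouterAdmin(new Configuration()),
      new String[] {"-add", "/data", "ns0", "/data"});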

[36/54] [abbrv] hadoop git commit: HDFS-10880. Federation Mount Table State Store internal API. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77a04b1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java
new file mode 100644
index 000..7f7c998
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import java.io.IOException;
+
+import 
org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto;
+import 
org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProtoOrBuilder;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the state store API object
+ * RemoveMountTableEntryRequest.
+ */
+public class RemoveMountTableEntryRequestPBImpl
+extends RemoveMountTableEntryRequest implements PBRecord {
+
+  private FederationProtocolPBTranslator<RemoveMountTableEntryRequestProto,
+  RemoveMountTableEntryRequestProto.Builder,
+  RemoveMountTableEntryRequestProtoOrBuilder> translator =
+  new FederationProtocolPBTranslator<RemoveMountTableEntryRequestProto,
+  RemoveMountTableEntryRequestProto.Builder,
+  RemoveMountTableEntryRequestProtoOrBuilder>(
+  RemoveMountTableEntryRequestProto.class);
+
+  public RemoveMountTableEntryRequestPBImpl() {
+  }
+
+  public RemoveMountTableEntryRequestPBImpl(
+  RemoveMountTableEntryRequestProto proto) {
+this.setProto(proto);
+  }
+
+  @Override
+  public RemoveMountTableEntryRequestProto getProto() {
+return this.translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+this.translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+this.translator.readInstance(base64String);
+  }
+
+  @Override
+  public String getSrcPath() {
+return this.translator.getProtoOrBuilder().getSrcPath();
+  }
+
+  @Override
+  public void setSrcPath(String path) {
+this.translator.getBuilder().setSrcPath(path);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c77a04b1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java
new file mode 100644
index 000..0c943ac
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language g
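
A brief sketch of how callers use one of these protobuf-backed records, based
only on the accessors shown above (illustrative; the surrounding admin wiring
is assumed):

  // The setter writes straight into the proto builder; getProto() yields the
  // serializable form handed to the state store / RPC layer.
  RemoveMountTableEntryRequestPBImpl request =
      new RemoveMountTableEntryRequestPBImpl();
  request.setSrcPath("/data");
  RemoveMountTableEntryRequestProto proto = request.getProto();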

[27/54] [abbrv] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6989725/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
index ee6f57d..2875750 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.util.Time;
 
 /**
  * In-memory cache/mock of a namenode and file resolver. Stores the most
- * recently updated NN information for each nameservice and block pool. Also
+ * recently updated NN information for each nameservice and block pool. It also
  * stores a virtual mount table for resolving global namespace paths to local 
NN
  * paths.
  */
@@ -51,82 +51,93 @@ public class MockResolver
 implements ActiveNamenodeResolver, FileSubclusterResolver {
 
  private Map<String, List<? extends FederationNamenodeContext>> resolver =
-  new HashMap<String, List<? extends FederationNamenodeContext>>();
-  private Map<String, List<RemoteLocation>> locations =
-  new HashMap<String, List<RemoteLocation>>();
-  private Set<FederationNamespaceInfo> namespaces =
-  new HashSet<FederationNamespaceInfo>();
+  new HashMap<>();
+  private Map<String, List<RemoteLocation>> locations = new HashMap<>();
+  private Set<FederationNamespaceInfo> namespaces = new HashSet<>();
   private String defaultNamespace = null;
 
+
   public MockResolver(Configuration conf, StateStoreService store) {
 this.cleanRegistrations();
   }
 
-  public void addLocation(String mount, String nameservice, String location) {
-RemoteLocation remoteLocation = new RemoteLocation(nameservice, location);
-List<RemoteLocation> locationsList = locations.get(mount);
+  public void addLocation(String mount, String nsId, String location) {
+List<RemoteLocation> locationsList = this.locations.get(mount);
 if (locationsList == null) {
-  locationsList = new LinkedList<RemoteLocation>();
-  locations.put(mount, locationsList);
+  locationsList = new LinkedList<>();
+  this.locations.put(mount, locationsList);
 }
+
+final RemoteLocation remoteLocation = new RemoteLocation(nsId, location);
 if (!locationsList.contains(remoteLocation)) {
   locationsList.add(remoteLocation);
 }
 
 if (this.defaultNamespace == null) {
-  this.defaultNamespace = nameservice;
+  this.defaultNamespace = nsId;
 }
   }
 
   public synchronized void cleanRegistrations() {
-this.resolver =
-new HashMap<String, List<? extends FederationNamenodeContext>>();
-this.namespaces = new HashSet<FederationNamespaceInfo>();
+this.resolver = new HashMap<>();
+this.namespaces = new HashSet<>();
   }
 
   @Override
   public void updateActiveNamenode(
-  String ns, InetSocketAddress successfulAddress) {
+  String nsId, InetSocketAddress successfulAddress) {
 
 String address = successfulAddress.getHostName() + ":" +
 successfulAddress.getPort();
-String key = ns;
+String key = nsId;
 if (key != null) {
   // Update the active entry
   @SuppressWarnings("unchecked")
-  List<FederationNamenodeContext> iterator =
-  (List<FederationNamenodeContext>) resolver.get(key);
-  for (FederationNamenodeContext namenode : iterator) {
+  List<FederationNamenodeContext> namenodes =
+  (List<FederationNamenodeContext>) this.resolver.get(key);
+  for (FederationNamenodeContext namenode : namenodes) {
 if (namenode.getRpcAddress().equals(address)) {
   MockNamenodeContext nn = (MockNamenodeContext) namenode;
   nn.setState(FederationNamenodeServiceState.ACTIVE);
   break;
 }
   }
-  Collections.sort(iterator, new NamenodePriorityComparator());
+  // This operation modifies the list so we need to be careful
+  synchronized(namenodes) {
+Collections.sort(namenodes, new NamenodePriorityComparator());
+  }
 }
   }
 
   @Override
  public List<? extends FederationNamenodeContext>
   getNamenodesForNameserviceId(String nameserviceId) {
-return resolver.get(nameserviceId);
+// Return a copy of the list because it is updated periodically
+List<? extends FederationNamenodeContext> namenodes =
+this.resolver.get(nameserviceId);
+return Collections.unmodifiableList(new ArrayList<>(namenodes));
   }
 
   @Override
  public List<? extends FederationNamenodeContext> getNamenodesForBlockPoolId(
   String blockPoolId) {
-return resolver.get(blockPoolId);
+// Return a copy of the list because it is updated periodically
+List<? extends FederationNamenodeContext> namenodes =
+this.resolver.get(blockPoolId);
+return Collections.unmodifiableList(new ArrayList<>(namenodes));
   }
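
Both getters now hand out an immutable snapshot rather than the live
list, so periodic updates by the resolver cannot trigger a
ConcurrentModificationException in a caller that is still iterating.
A self-contained sketch of the same copy-then-wrap pattern (class and
method names are illustrative, not from the patch):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    class Registry {
      private final List<String> entries = new ArrayList<>();

      synchronized void add(String entry) {
        entries.add(entry);
      }

      // Copy first, then wrap: callers iterate a private snapshot, and
      // the unmodifiable view stops them from mutating it back.
      synchronized List<String> snapshot() {
        return Collections.unmodifiableList(new ArrayList<>(entries));
      }
    }
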
 
   private static class MockNamenodeContext
   implements FederationNamenodeContext {
+
+private String namenodeId;
+private String nameserviceId;
+
 private String webAddress;
 private String rpcAddress;
 private String serviceAddress;
 private String lifelineAddress;
-private String namenodeId;
-private String nameserviceId;
+
 private FederationNamenodeServiceState state;
 private long da

[15/54] [abbrv] hadoop git commit: HDFS-12619. Do not catch and throw unchecked exceptions if IBRs fail to process. Contributed by Wei-Chiu Chuang.

2017-10-20 Thread vrushali
HDFS-12619. Do not catch and throw unchecked exceptions if IBRs fail to 
process. Contributed by Wei-Chiu Chuang.

(cherry picked from commit 4ab0c8f96a41c573cc1f1e71c18871d243f952b9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ed7a2ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ed7a2ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ed7a2ca

Branch: refs/heads/branch-2
Commit: 3ed7a2ca422d3007e5bfa67fd5790c2f06c23959
Parents: a47e8e6
Author: Wei-Chiu Chuang 
Authored: Thu Oct 19 06:17:59 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:28 2017 -0700

--
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java  | 10 +++---
 1 file changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ed7a2ca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 4f0ec43..a49cb24 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3443,11 +3443,15 @@ public class BlockManager implements BlockStatsMXBean {
   throw new IOException(
   "Got incremental block report from unregistered or dead node");
 }
+
+boolean successful = false;
 try {
   processIncrementalBlockReport(node, srdb);
-} catch (Exception ex) {
-  node.setForceRegistration(true);
-  throw ex;
+  successful = true;
+} finally {
+  if (!successful) {
+node.setForceRegistration(true);
+  }
 }
   }
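
The rewrite trades catch-and-rethrow for a success flag checked in a
finally block: the cleanup still runs on any failure, but the method no
longer catches Exception, so unchecked exceptions propagate unchanged
and nothing has to be re-declared or wrapped. The idiom reduced to a
sketch (the method names are illustrative, not from the patch):

    boolean successful = false;
    try {
      process();                  // may throw checked or unchecked
      successful = true;          // reached only on the success path
    } finally {
      if (!successful) {
        markForReRegistration();  // failure-only cleanup hook
      }
    }
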
 





[44/54] [abbrv] hadoop git commit: HDFS-12430. Rebasing HDFS-10467 After HDFS-12269 and HDFS-12218. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-12430. Rebasing HDFS-10467 After HDFS-12269 and HDFS-12218. Contributed by 
Inigo Goiri.

(cherry picked from commit 3302e792d469b7e8f3bfa10151e4e1c546589734)
(cherry picked from commit 1f06b81ecb14044964176dd16fafaa0ee96bfe3d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be437869
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be437869
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be437869

Branch: refs/heads/branch-2
Commit: be43786942559e7455b279bc2637db17fa65f3eb
Parents: 673f685
Author: Inigo Goiri 
Authored: Wed Sep 13 09:15:13 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:33 2017 -0700

--
 .../hdfs/server/federation/router/RouterRpcServer.java| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be437869/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 6aee1ee..1fa1720 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -66,7 +66,6 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.BlocksStats;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
@@ -76,7 +75,7 @@ import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
+import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -89,6 +88,7 @@ import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -1879,19 +1879,19 @@ public class RouterRpcServer extends AbstractService 
implements ClientProtocol {
   }
 
   @Override
-  public ECBlockGroupsStats getECBlockGroupsStats() throws IOException {
+  public ECBlockGroupStats getECBlockGroupStats() throws IOException {
 checkOperation(OperationCategory.READ, false);
 return null;
   }
 
   @Override
-  public HashMap<String, String> getErasureCodingCodecs() throws IOException {
+  public Map<String, String> getErasureCodingCodecs() throws IOException {
 checkOperation(OperationCategory.READ, false);
 return null;
   }
 
   @Override
-  public BlocksStats getBlocksStats() throws IOException {
+  public ReplicatedBlockStats getReplicatedBlockStats() throws IOException {
 checkOperation(OperationCategory.READ, false);
 return null;
   }





[39/54] [abbrv] hadoop git commit: HDFS-12384. Fixing compilation issue with BanDuplicateClasses. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-12384. Fixing compilation issue with BanDuplicateClasses. Contributed by 
Inigo Goiri.

(cherry picked from commit fabe02c8fafa807198054da0c02b2ebaafda76aa)
(cherry picked from commit cc58e7a983d8f1351089462f531993f7b4f0a9c5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c778f9dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c778f9dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c778f9dd

Branch: refs/heads/branch-2
Commit: c778f9dd3739829333679f29bdf0147823af4aea
Parents: b71a753
Author: Inigo Goiri 
Authored: Thu Sep 7 13:53:08 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:32 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  |  4 
 .../server/federation/router/RouterRpcServer.java| 15 +++
 2 files changed, 15 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c778f9dd/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 64ad6fb..8ae3db8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -222,10 +222,6 @@
 
 
   org.apache.curator
-  curator-framework
-
-
-  org.apache.curator
   curator-test
   test
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c778f9dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index c77d255..f9b4a5d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -81,6 +81,7 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -91,6 +92,7 @@ import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
 import 
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
@@ -1607,6 +1609,19 @@ public class RouterRpcServer extends AbstractService 
implements ClientProtocol {
   }
 
   @Override // ClientProtocol
+  public void reencryptEncryptionZone(String zone, ReencryptAction action)
+  throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override // ClientProtocol
+  public BatchedEntries<ZoneReencryptionStatus> listReencryptionStatus(
+  long prevId) throws IOException {
+checkOperation(OperationCategory.READ, false);
+return null;
+  }
+
+  @Override // ClientProtocol
   public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
   throws IOException {
 checkOperation(OperationCategory.WRITE);
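
Both new stubs follow the file's existing convention: validate the
operation category first, then fall through to a default result until
the call is actually wired to the subclusters. A hypothetical stub in
the same shape (the method name is invented for illustration):

    @Override // ClientProtocol
    public long someFutureCall() throws IOException {
      // 'false' marks the operation as not yet supported by the Router.
      checkOperation(OperationCategory.READ, false);
      return 0;
    }
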





[19/54] [abbrv] hadoop git commit: HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace 
and Inigo Goiri.

(cherry picked from commit c6e0bd640cdaf83a660fa050809cad6f1d4c6f4d)
(cherry picked from commit 4bf877b03f0e01c4bcedc689c66689701e62b560)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93687da4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93687da4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93687da4

Branch: refs/heads/branch-2
Commit: 93687da438f879c4c2c0016216056fb570012ee2
Parents: 04e3f38
Author: Inigo Goiri 
Authored: Tue May 2 15:49:53 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:29 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  14 +
 .../federation/router/PeriodicService.java  | 198 
 .../StateStoreConnectionMonitorService.java |  67 +++
 .../federation/store/StateStoreService.java | 152 +-
 .../federation/store/StateStoreUtils.java   |  51 +-
 .../store/driver/StateStoreDriver.java  |  31 +-
 .../driver/StateStoreRecordOperations.java  |  17 +-
 .../store/driver/impl/StateStoreBaseImpl.java   |  31 +-
 .../driver/impl/StateStoreFileBaseImpl.java | 429 
 .../store/driver/impl/StateStoreFileImpl.java   | 161 +++
 .../driver/impl/StateStoreFileSystemImpl.java   | 178 +++
 .../driver/impl/StateStoreSerializableImpl.java |  77 +++
 .../federation/store/records/BaseRecord.java|  20 +-
 .../server/federation/store/records/Query.java  |  66 +++
 .../src/main/resources/hdfs-default.xml |  16 +
 .../store/FederationStateStoreTestUtils.java| 232 +
 .../store/driver/TestStateStoreDriverBase.java  | 483 +++
 .../store/driver/TestStateStoreFile.java|  64 +++
 .../store/driver/TestStateStoreFileSystem.java  |  88 
 19 files changed, 2329 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93687da4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b645347..1b66ead 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -18,12 +18,16 @@
 
 package org.apache.hadoop.hdfs;
 
+import java.util.concurrent.TimeUnit;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
+import 
org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl;
 import 
org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
 import org.apache.hadoop.http.HttpConfig;
 
@@ -1029,6 +1033,16 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   FEDERATION_STORE_SERIALIZER_CLASS_DEFAULT =
   StateStoreSerializerPBImpl.class;
 
+  public static final String FEDERATION_STORE_DRIVER_CLASS =
+  FEDERATION_STORE_PREFIX + "driver.class";
+  public static final Class<? extends StateStoreDriver>
+  FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreFileImpl.class;
+
+  public static final String FEDERATION_STORE_CONNECTION_TEST_MS =
+  FEDERATION_STORE_PREFIX + "connection.test";
+  public static final long FEDERATION_STORE_CONNECTION_TEST_MS_DEFAULT =
+  TimeUnit.MINUTES.toMillis(1);
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY
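
The new driver key is Class-valued. A sketch of how such a key is
typically consumed, using the standard Configuration.getClass and
ReflectionUtils.newInstance calls (not code from this patch; the usual
org.apache.hadoop imports are assumed):

    Configuration conf = new Configuration();
    Class<? extends StateStoreDriver> driverClass = conf.getClass(
        DFSConfigKeys.FEDERATION_STORE_DRIVER_CLASS,
        DFSConfigKeys.FEDERATION_STORE_DRIVER_CLASS_DEFAULT,
        StateStoreDriver.class);
    StateStoreDriver driver = ReflectionUtils.newInstance(driverClass, conf);
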

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93687da4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java
new file mode 100644
index 000..5e1
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService

[32/54] [abbrv] hadoop git commit: HDFS-10646. Federation admin tool. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/89c80185/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
new file mode 100644
index 000..170247f
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
@@ -0,0 +1,261 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static 
org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.synchronizeRecords;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.impl.MountTableStoreImpl;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.util.Time;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * The administrator interface of the {@link Router} implemented by
+ * {@link RouterAdminServer}.
+ */
+public class TestRouterAdmin {
+
+  private static StateStoreDFSCluster cluster;
+  private static RouterContext routerContext;
+  public static final String RPC_BEAN =
+  "Hadoop:service=Router,name=FederationRPC";
+  private static List<MountTable> mockMountTable;
+  private static StateStoreService stateStore;
+
+  @BeforeClass
+  public static void globalSetUp() throws Exception {
+cluster = new StateStoreDFSCluster(false, 1);
+// Build and start a router with State Store + admin + RPC
+Configuration conf = new RouterConfigBuilder()
+.stateStore()
+.admin()
+.rpc()
+.build();
+cluster.addRouterOverrides(conf);
+cluster.startRouters();
+routerContext = cluster.getRandomRouter();
+mockMountTable = cluster.generateMockMountTable();
+Router router = routerContext.getRouter();
+stateStore = router.getStateStore();
+  }
+
+  @AfterClass
+  public static void tearDown() {
+cluster.stopRouter(routerContext);
+  }
+
+  @Before
+  public void testSetup() throws Exception {
+assertTrue(
+synchronizeRecords(stateStore, mockMountTable, MountTable.class));
+  }
+
+  @Test
+  public void testAddMountTable() throws IOException {
+MountTable newEntry = MountTable.newInstance(
+"/testpath", Collections.singletonMap("ns0", "/testdir"),
+Time.now(), Time.now());
+
+RouterClient client = routerContext.getAdminClient();
+MountTableManager mountTable = client.getMountTableManager();
+
+// Existing mount table size
+List<MountTable> records = getMountTableEntrie

[07/54] [abbrv] hadoop git commit: HADOOP-14880. [KMS] Document&test missing KMS client side configs. Contributed by Gabor Bota.

2017-10-20 Thread vrushali
HADOOP-14880. [KMS] Document&test missing KMS client side configs. Contributed 
by Gabor Bota.

(cherry picked from commit 97c70c7ac6881f87eee1575bcbdd28b31ecac231)

Conflicts:

hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a47e8e66
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a47e8e66
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a47e8e66

Branch: refs/heads/branch-2
Commit: a47e8e660f56a2b431d40aea5787fef092cc2c72
Parents: 86c2adc
Author: Wei-Chiu Chuang 
Authored: Thu Oct 19 06:02:13 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:27 2017 -0700

--
 .../org/apache/hadoop/crypto/key/kms/KMSClientProvider.java | 8 +++-
 .../org/apache/hadoop/fs/CommonConfigurationKeysPublic.java | 9 +
 .../hadoop-common/src/main/resources/core-default.xml   | 8 
 .../org/apache/hadoop/crypto/key/kms/server/TestKMS.java| 2 +-
 4 files changed, 21 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a47e8e66/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 77e09e3..42c97d3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -115,10 +115,6 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 
   private static final String CONFIG_PREFIX = "hadoop.security.kms.client.";
 
-  /* It's possible to specify a timeout, in seconds, in the config file */
-  public static final String TIMEOUT_ATTR = CONFIG_PREFIX + "timeout";
-  public static final int DEFAULT_TIMEOUT = 60;
-
   /* Number of times to retry authentication in the event of auth failure
* (normally happens due to stale authToken) 
*/
@@ -436,7 +432,9 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 throw new IOException(ex);
   }
 }
-int timeout = conf.getInt(TIMEOUT_ATTR, DEFAULT_TIMEOUT);
+int timeout = conf.getInt(
+CommonConfigurationKeysPublic.KMS_CLIENT_TIMEOUT_SECONDS,
+CommonConfigurationKeysPublic.KMS_CLIENT_TIMEOUT_DEFAULT);
 authRetry = conf.getInt(AUTH_RETRY, DEFAULT_AUTH_RETRY);
 configurator = new TimeoutConnConfigurator(timeout, sslFactory);
 encKeyVersionQueue =
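
With the hard-coded TIMEOUT_ATTR gone, the timeout is an ordinary
documented key. A sketch of overriding it programmatically, assuming
nothing beyond a plain Configuration object (not code from this patch):

    Configuration conf = new Configuration();
    // Raise the KMS client timeout from the 60s default to 120s.
    conf.setInt(CommonConfigurationKeysPublic.KMS_CLIENT_TIMEOUT_SECONDS, 120);
    // KMSClientProvider instances built from 'conf' now use this timeout.
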

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a47e8e66/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index d27f61d..2edb716 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -748,6 +748,15 @@ public class CommonConfigurationKeysPublic {
* 
* core-default.xml
*/
+  public static final String KMS_CLIENT_TIMEOUT_SECONDS =
+  "hadoop.security.kms.client.timeout";
+  public static final int KMS_CLIENT_TIMEOUT_DEFAULT = 60;
+
+  /**
+   * @see
+   * 
+   * core-default.xml
+   */
   /** Default value is the number of providers specified. */
   public static final String KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY =
   "hadoop.security.kms.client.failover.max.retries";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a47e8e66/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 79faddc..156b25b 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2496,6 +2496,14 @@
 key will be dropped. Default = 12hrs
   
 
+
+  hadoop.security.kms.client.timeout
+ 

[46/54] [abbrv] hadoop git commit: HDFS-12381. [Documentation] Adding configuration keys for the Router. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-12381. [Documentation] Adding configuration keys for the Router. 
Contributed by Inigo Goiri.

(cherry picked from commit c2d6aa79055ef72406fa598e1c743b0c994b5da8)
(cherry picked from commit ad41c8155940f4da0e51439c97c6cc9c808c28df)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42e0ae0b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42e0ae0b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42e0ae0b

Branch: refs/heads/branch-2
Commit: 42e0ae0b80ff191a230dd8e88aa89c1c5de90f5a
Parents: 07b2da9
Author: Inigo Goiri 
Authored: Fri Sep 22 13:06:10 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:33 2017 -0700

--
 .../src/main/resources/hdfs-default.xml |  11 +-
 .../src/site/markdown/HDFSRouterFederation.md   | 159 +--
 2 files changed, 156 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42e0ae0b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 550e5df..50ce6f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4366,7 +4366,8 @@
 dfs.federation.router.rpc.enable
 true
 
-  If the RPC service to handle client requests in the router is enabled.
+  If true, the RPC service to handle client requests in the router is
+  enabled.
 
   
 
@@ -4470,7 +4471,7 @@
 dfs.federation.router.admin.enable
 true
 
-  If the RPC admin service to handle client requests in the router is
+  If true, the RPC admin service to handle client requests in the router is
   enabled.
 
   
@@ -4524,7 +4525,7 @@
 dfs.federation.router.store.enable
 true
 
-  If the Router connects to the State Store.
+  If true, the Router connects to the State Store.
 
   
 
@@ -4572,7 +4573,7 @@
 dfs.federation.router.heartbeat.enable
 true
 
-  Enables the Router to heartbeat into the State Store.
+  If true, the Router heartbeats into the State Store.
 
   
 
@@ -4596,7 +4597,7 @@
 dfs.federation.router.monitor.localnamenode.enable
 true
 
-  If the Router should monitor the namenode in the local machine.
+  If true, the Router should monitor the namenode in the local machine.
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42e0ae0b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index f094238..1cea7f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -21,7 +21,7 @@ Introduction
 
 
 NameNodes have scalability limits because of the metadata overhead comprised 
of inodes (files and directories) and file blocks, the number of Datanode 
heartbeats, and the number of HDFS RPC client requests.
-The common solution is to split the filesystem into smaller subclusters [HDFS 
Federation](.Federation.html) and provide a federated view 
[ViewFs](.ViewFs.html).
+The common solution is to split the filesystem into smaller subclusters [HDFS 
Federation](./Federation.html) and provide a federated view 
[ViewFs](./ViewFs.html).
 The problem is how to maintain the split of the subclusters (e.g., namespace 
partition), which forces users to connect to multiple subclusters and manage 
the allocation of folders/files to them.
 
 
@@ -35,7 +35,7 @@ This layer must be scalable, highly available, and fault 
tolerant.
 
 This federation layer comprises multiple components.
 The _Router_ component that has the same interface as a NameNode, and forwards 
the client requests to the correct subcluster, based on ground-truth 
information from a State Store.
-The _State Store_ combines a remote _Mount Table_ (in the flavor of 
[ViewFs](.ViewFs.html), but shared between clients) and utilization 
(load/capacity) information about the subclusters.
+The _State Store_ combines a remote _Mount Table_ (in the flavor of 
[ViewFs](./ViewFs.html), but shared between clients) and utilization 
(load/capacity) information about the subclusters.
 This approach has the same architecture as [YARN 
federation](../hadoop-yarn/Federation.html).
 
 ![Router-based Federation Sequence Diagram | 
width=800](./images

[38/54] [abbrv] hadoop git commit: HDFS-10631. Federation State Store ZooKeeper implementation. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-10631. Federation State Store ZooKeeper implementation. Contributed by 
Jason Kace and Inigo Goiri.

(cherry picked from commit 23c4ddee11ab1300325a6361124ee8ad6f68d7a4)
(cherry picked from commit 7cb6bdf09ed361e067ebf234230babd1391a7d4b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b71a7530
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b71a7530
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b71a7530

Branch: refs/heads/branch-2
Commit: b71a75301448dade2ef051244d3972fc59389511
Parents: 368a7d3
Author: Inigo Goiri 
Authored: Mon Aug 21 11:40:41 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:32 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   9 +
 .../driver/impl/StateStoreSerializableImpl.java |  19 ++
 .../driver/impl/StateStoreZooKeeperImpl.java| 298 +++
 .../store/driver/TestStateStoreDriverBase.java  |   2 +-
 .../store/driver/TestStateStoreZK.java  | 105 +++
 5 files changed, 432 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b71a7530/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 453c919..64ad6fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -220,6 +220,15 @@
   com.fasterxml.jackson.core
   jackson-databind
 
+
+  org.apache.curator
+  curator-framework
+
+
+  org.apache.curator
+  curator-test
+  test
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b71a7530/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java
index e9b3fdf..e2038fa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java
@@ -30,6 +30,11 @@ import 
org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
  */
 public abstract class StateStoreSerializableImpl extends StateStoreBaseImpl {
 
+  /** Mark for slashes in path names. */
+  protected static final String SLASH_MARK = "0SLASH0";
+  /** Mark for colon in path names. */
+  protected static final String COLON_MARK = "_";
+
   /** Default serializer for this driver. */
   private StateStoreSerializer serializer;
 
@@ -74,4 +79,18 @@ public abstract class StateStoreSerializableImpl extends 
StateStoreBaseImpl {
  String data, Class<T> clazz, boolean includeDates) throws IOException {
 return serializer.deserialize(data, clazz);
   }
+
+  /**
+   * Get the primary key for a record. If we don't want to store in folders, we
+   * need to remove / from the name.
+   *
+   * @param record Record to get the primary key for.
+   * @return Primary key for the record.
+   */
+  protected static String getPrimaryKey(BaseRecord record) {
+String primaryKey = record.getPrimaryKey();
+primaryKey = primaryKey.replaceAll("/", SLASH_MARK);
+primaryKey = primaryKey.replaceAll(":", COLON_MARK);
+return primaryKey;
+  }
 }
\ No newline at end of file
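
The escaping exists because the file- and ZooKeeper-backed drivers use
a record's primary key as a single file or znode name, where '/' would
be read as a path separator. A worked example of the substitution (the
key value is illustrative):

    String primaryKey = "/apps/data:ns0";
    primaryKey = primaryKey.replaceAll("/", "0SLASH0");  // SLASH_MARK
    primaryKey = primaryKey.replaceAll(":", "_");        // COLON_MARK
    // primaryKey is now "0SLASH0apps0SLASH0data_ns0"
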

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b71a7530/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java
new file mode 100644
index 000..ddcd537
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java
@@ -0,0 +1,298 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional in

[2/3] hadoop git commit: YARN-7261. Add debug message for better download latency monitoring. (Yufei Gu)

2017-10-20 Thread vrushali
YARN-7261. Add debug message for better download latency monitoring. (Yufei Gu)

(cherry picked from commit 0799fde35e7f3b9e8a85284ac0b30f6bdcbffad1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/378fa3ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/378fa3ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/378fa3ac

Branch: refs/heads/YARN-3368_branch2
Commit: 378fa3ac7db79675db2f71c66ec7601eac5e204f
Parents: 4c4f28c
Author: Yufei Gu 
Authored: Fri Oct 20 09:59:07 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:49:34 2017 -0700

--
 .../org/apache/hadoop/yarn/util/FSDownload.java| 17 +++--
 .../localizer/ResourceLocalizationService.java | 11 ++-
 2 files changed, 25 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/378fa3ac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
index f34c16c..6e59574 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
@@ -350,6 +350,11 @@ public class FSDownload implements Callable<Path> {
 } catch (URISyntaxException e) {
   throw new IOException("Invalid resource", e);
 }
+
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Starting to download " + sCopy);
+}
+
 createDir(destDirPath, cachePerms);
 final Path dst_work = new Path(destDirPath + "_tmp");
 createDir(dst_work, cachePerms);
@@ -364,6 +369,11 @@ public class FSDownload implements Callable {
   unpack(new File(dTmp.toUri()), new File(dFinal.toUri()));
   changePermissions(dFinal.getFileSystem(conf), dFinal);
   files.rename(dst_work, destDirPath, Rename.OVERWRITE);
+
+  if (LOG.isDebugEnabled()) {
+LOG.debug("File has been downloaded to " +
+new Path(destDirPath, sCopy.getName()));
+  }
 } catch (Exception e) {
   try {
 files.delete(destDirPath, true);
@@ -409,8 +419,11 @@ public class FSDownload implements Callable {
   // APPLICATION:
   perm = isDir ? PRIVATE_DIR_PERMS : PRIVATE_FILE_PERMS;
 }
-LOG.debug("Changing permissions for path " + path
-+ " to perm " + perm);
+
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Changing permissions for path " + path + " to perm " + perm);
+}
+
 final FsPermission fPerm = perm;
 if (null == userUgi) {
   files.setPermission(path, perm);
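
The isDebugEnabled() guards matter here because commons-logging takes a
pre-built string, so the concatenation cost is paid even when DEBUG is
off. With a parameterized API such as SLF4J the guard becomes
unnecessary; a sketch of that alternative, assuming an org.slf4j.Logger
(which this branch-2 class does not use):

    // The message is only formatted if DEBUG is actually enabled:
    log.debug("Starting to download {}", sCopy);
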

http://git-wip-us.apache.org/repos/asf/hadoop/blob/378fa3ac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index c37f2e3..0e7861f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -859,7 +859,7 @@ public class ResourceLocalizationService extends 
CompositeService
   // TODO handle failures, cancellation, requests by other containers
   LocalizedResource rsrc = request.getResource();
   LocalResourceRequest key = rsrc.getRequest();
-  LOG.info("Downloading public rsrc:" + key);
+  LOG.info("Downloading public resource: " + key);
   /*
* Here multiple containers may request the same resource. So we need
* to start downloading only when
@@ -918,8 +918,17 @@ public class ResourceLocalizationService extends 
CompositeService
 + &

[1/3] hadoop git commit: YARN-4090. Make Collections.sort() more efficient by caching resource usage. (Contributed by Yufei Gu, Shilong Zhang and Xianyin Xin)

2017-10-20 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3368_branch2 c9f6a98b8 -> 93d71d97d


YARN-4090. Make Collections.sort() more efficient by caching resource usage. 
(Contributed by Yufei Gu, Shilong Zhang and Xianyin Xin)

(cherry picked from commit 1f4cdf10681b6903207a63fb5c306c9665ed9464)
(cherry picked from commit 96106b8f5fe50e2d5c0c4df5dbddea4f89f278d9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c4f28c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c4f28c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c4f28c9

Branch: refs/heads/YARN-3368_branch2
Commit: 4c4f28c9e7f76762346d990b043c6b77d3ac627a
Parents: c9f6a98
Author: Yufei Gu 
Authored: Fri Oct 20 01:32:20 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:49:33 2017 -0700

--
 .../scheduler/fair/FSAppAttempt.java| 19 ++
 .../scheduler/fair/FSLeafQueue.java | 19 +-
 .../scheduler/fair/FSParentQueue.java   | 14 ---
 .../resourcemanager/scheduler/fair/FSQueue.java | 34 +
 .../scheduler/fair/TestFairScheduler.java   | 39 +++-
 5 files changed, 93 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c4f28c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 06ba4e3..006acea 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -49,6 +49,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEven
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerFinishedEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
+import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
@@ -174,6 +175,7 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   rmContainer.getNodeLabelExpression(),
   getUser(), 1, containerResource);
   this.attemptResourceUsage.decUsed(containerResource);
+  getQueue().decUsedResource(containerResource);
 
   // Clear resource utilization metrics cache.
   lastMemoryAggregateAllocationUpdateTime = -1;
@@ -468,6 +470,7 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   List resourceRequestList = appSchedulingInfo.allocate(
   type, node, schedulerKey, container);
   this.attemptResourceUsage.incUsed(container.getResource());
+  getQueue().incUsedResource(container.getResource());
 
   // Update resource requests related to "request" and store in RMContainer
   ((RMContainerImpl) rmContainer).setResourceRequests(resourceRequestList);
@@ -651,6 +654,22 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 schedulerKey.getAllocationRequestId());
   }
 
+  @Override
+  public synchronized void recoverContainer(SchedulerNode node,
+  RMContainer rmContainer) {
+try {
+  writeLock.lock();
+
+  super.recoverContainer(node, rmContainer);
+
+  if (!rmContainer.getState().equals(RMContainerState.COMPLETED)) {
+getQueue().incUsedResource(rmContainer.getContainer().getResource());
+  }
+} finally {
+  writeLock.unlock();
+}
+  }
+
   /**
* Reserve a spot for {@code container} on this {@code node}. If
* the container is {@code alreadyReserved} on the node, simply

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c4f28c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSL

[3/3] hadoop git commit: YARN-7355. TestDistributedShell should be scheduler agnostic. (Contributed by Haibo)

2017-10-20 Thread vrushali
YARN-7355. TestDistributedShell should be scheduler agnostic. (Contributed by 
Haibo)

(cherry picked from commit 6b7c87c94592606966a4229313b3d0da48f16158)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93d71d97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93d71d97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93d71d97

Branch: refs/heads/YARN-3368_branch2
Commit: 93d71d97d4eb29d6d95922efca3e10ddf89f8493
Parents: 378fa3a
Author: Yufei Gu 
Authored: Fri Oct 20 11:15:20 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:49:34 2017 -0700

--
 .../yarn/applications/distributedshell/TestDistributedShell.java| 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93d71d97/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
index 47485ae..af7d21e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
@@ -149,7 +149,6 @@ public class TestDistributedShell {
 conf.setBoolean(YarnConfiguration.SYSTEM_METRICS_PUBLISHER_ENABLED, true);
 
 conf.set(YarnConfiguration.NM_VMEM_PMEM_RATIO, "8");
-conf.set(YarnConfiguration.RM_SCHEDULER, 
CapacityScheduler.class.getName());
 conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
 conf.set("mapreduce.jobhistory.address",
 "0.0.0.0:" + ServerSocketUtil.getPort(10021, 10));





[10/50] hadoop git commit: HADOOP-14880. [KMS] Document&test missing KMS client side configs. Contributed by Gabor Bota.

2017-10-20 Thread vrushali
HADOOP-14880. [KMS] Document&test missing KMS client side configs. Contributed 
by Gabor Bota.

(cherry picked from commit 97c70c7ac6881f87eee1575bcbdd28b31ecac231)

Conflicts:

hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a47e8e66
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a47e8e66
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a47e8e66

Branch: refs/heads/YARN-3368_branch2
Commit: a47e8e660f56a2b431d40aea5787fef092cc2c72
Parents: 86c2adc
Author: Wei-Chiu Chuang 
Authored: Thu Oct 19 06:02:13 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:27 2017 -0700

--
 .../org/apache/hadoop/crypto/key/kms/KMSClientProvider.java | 8 +++-
 .../org/apache/hadoop/fs/CommonConfigurationKeysPublic.java | 9 +
 .../hadoop-common/src/main/resources/core-default.xml   | 8 
 .../org/apache/hadoop/crypto/key/kms/server/TestKMS.java| 2 +-
 4 files changed, 21 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a47e8e66/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 77e09e3..42c97d3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -115,10 +115,6 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 
   private static final String CONFIG_PREFIX = "hadoop.security.kms.client.";
 
-  /* It's possible to specify a timeout, in seconds, in the config file */
-  public static final String TIMEOUT_ATTR = CONFIG_PREFIX + "timeout";
-  public static final int DEFAULT_TIMEOUT = 60;
-
   /* Number of times to retry authentication in the event of auth failure
* (normally happens due to stale authToken) 
*/
@@ -436,7 +432,9 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 throw new IOException(ex);
   }
 }
-int timeout = conf.getInt(TIMEOUT_ATTR, DEFAULT_TIMEOUT);
+int timeout = conf.getInt(
+CommonConfigurationKeysPublic.KMS_CLIENT_TIMEOUT_SECONDS,
+CommonConfigurationKeysPublic.KMS_CLIENT_TIMEOUT_DEFAULT);
 authRetry = conf.getInt(AUTH_RETRY, DEFAULT_AUTH_RETRY);
 configurator = new TimeoutConnConfigurator(timeout, sslFactory);
 encKeyVersionQueue =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a47e8e66/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index d27f61d..2edb716 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -748,6 +748,15 @@ public class CommonConfigurationKeysPublic {
* 
* core-default.xml
*/
+  public static final String KMS_CLIENT_TIMEOUT_SECONDS =
+  "hadoop.security.kms.client.timeout";
+  public static final int KMS_CLIENT_TIMEOUT_DEFAULT = 60;
+
+  /**
+   * @see
+   * 
+   * core-default.xml
+   */
   /** Default value is the number of providers specified. */
   public static final String KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY =
   "hadoop.security.kms.client.failover.max.retries";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a47e8e66/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 79faddc..156b25b 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2496,6 +2496,14 @@
 key will be dropped. Default = 12hrs
   
 
+
+  hadoop.security.kms.client

[21/50] hadoop git commit: HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c51de708/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
new file mode 100644
index 000..ee6f57d
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.NamenodePriorityComparator;
+import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport;
+import org.apache.hadoop.hdfs.server.federation.resolver.PathLocation;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.util.Time;
+
+/**
+ * In-memory cache/mock of a namenode and file resolver. Stores the most
+ * recently updated NN information for each nameservice and block pool. Also
+ * stores a virtual mount table for resolving global namespace paths to local 
NN
+ * paths.
+ */
+public class MockResolver
+implements ActiveNamenodeResolver, FileSubclusterResolver {
+
+  private Map<String, List<? extends FederationNamenodeContext>> resolver =
+  new HashMap<String, List<? extends FederationNamenodeContext>>();
+  private Map<String, List<RemoteLocation>> locations =
+  new HashMap<String, List<RemoteLocation>>();
+  private Set<FederationNamespaceInfo> namespaces =
+  new HashSet<FederationNamespaceInfo>();
+  private String defaultNamespace = null;
+
+  public MockResolver(Configuration conf, StateStoreService store) {
+this.cleanRegistrations();
+  }
+
+  public void addLocation(String mount, String nameservice, String location) {
+RemoteLocation remoteLocation = new RemoteLocation(nameservice, location);
+List<RemoteLocation> locationsList = locations.get(mount);
+if (locationsList == null) {
+  locationsList = new LinkedList<RemoteLocation>();
+  locations.put(mount, locationsList);
+}
+if (!locationsList.contains(remoteLocation)) {
+  locationsList.add(remoteLocation);
+}
+
+if (this.defaultNamespace == null) {
+  this.defaultNamespace = nameservice;
+}
+  }
+
+  public synchronized void cleanRegistrations() {
+this.resolver =
+new HashMap<String, List<? extends FederationNamenodeContext>>();
+this.namespaces = new HashSet<FederationNamespaceInfo>();
+  }
+
+  @Override
+  public void updateActiveNamenode(
+  String ns, InetSocketAddress successfulAddress) {
+
+String address = successfulAddress.getHostName() + ":" +
+successfulAddress.getPort();
+String key = ns;
+if (key != null) {
+  // Update the active entry
+  @SuppressWarnings("unchecked")
+  List<FederationNamenodeContext> iterator =
+  (List<FederationNamenodeContext>) resolver.get(key);
+  for (FederationNamenodeContext namenode : iterator) {
+if (namenode.getRpcAddress().equals(address)) {
+  MockNamenodeContext nn = (MockNamenodeContext) namenode;
+  nn.setState(FederationNamenodeServiceState.ACTIVE);
+  break;
+}
+  }
+  Collections.sort(iterator, new NamenodePriorityComparator());
+}
+  }
+
+  @Override
+  public List<? extends FederationNamenodeContext>
+  getNamenodesForNameserviceId(String nameserviceId) {
+return resolver.get(nameserviceId);
+  }
+
+  @Override
+  public List<? extends FederationNamenodeContext> getNamenodesForBlockPoolId(
+  

[35/50] hadoop git commit: HDFS-10646. Federation admin tool. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-10646. Federation admin tool. Contributed by Inigo Goiri.

(cherry picked from commit ae27e31fbcf546481db0b0345772db2e9132372e)
(cherry picked from commit b3e6bd22e3c02b3e4f50396538f56a1bcb007638)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89c80185
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89c80185
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89c80185

Branch: refs/heads/YARN-3368_branch2
Commit: 89c8018592cf5516ba30f579ca634fb14c15098c
Parents: c77a04b
Author: Inigo Goiri 
Authored: Tue Aug 8 14:44:43 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:31 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   1 +
 .../hadoop-hdfs/src/main/bin/hdfs.cmd   |   7 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  19 ++
 .../hdfs/protocolPB/RouterAdminProtocolPB.java  |  44 +++
 ...uterAdminProtocolServerSideTranslatorPB.java | 151 
 .../RouterAdminProtocolTranslatorPB.java| 150 
 .../resolver/MembershipNamenodeResolver.java|  34 +-
 .../hdfs/server/federation/router/Router.java   |  52 +++
 .../federation/router/RouterAdminServer.java| 183 ++
 .../server/federation/router/RouterClient.java  |  76 +
 .../hdfs/tools/federation/RouterAdmin.java  | 341 +++
 .../hdfs/tools/federation/package-info.java |  28 ++
 .../src/main/proto/RouterProtocol.proto |  47 +++
 .../src/main/resources/hdfs-default.xml |  46 +++
 .../server/federation/RouterConfigBuilder.java  |  26 ++
 .../server/federation/RouterDFSCluster.java |  43 ++-
 .../server/federation/StateStoreDFSCluster.java | 148 
 .../federation/router/TestRouterAdmin.java  | 261 ++
 18 files changed, 1639 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89c80185/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 4f9b782..453c919 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -349,6 +349,7 @@
                   <include>editlog.proto</include>
                   <include>fsimage.proto</include>
                   <include>FederationProtocol.proto</include>
+                  <include>RouterProtocol.proto</include>
                 </includes>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89c80185/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index b9853d6..53bdf70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -59,7 +59,7 @@ if "%1" == "--loglevel" (
 )
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto router debug
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto router federation debug
   for %%i in ( %hdfscommands% ) do (
 if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -184,6 +184,11 @@ goto :eof
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
   goto :eof
 
+:federation
+  set CLASS=org.apache.hadoop.hdfs.tools.federation.RouterAdmin
+  set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
+  goto :eof
+
 :debug
   set CLASS=org.apache.hadoop.hdfs.tools.DebugAdmin
   goto :eof
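
The new :federation target dispatches `hdfs federation` to the RouterAdmin tool added in this commit. The same tool can also be driven from Java through ToolRunner; a hedged sketch (the RouterAdmin constructor and the -ls flag are assumed from this commit, not re-verified here):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class RouterAdminLauncher {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // List mount table entries under the root (flag assumed).
        int rc = ToolRunner.run(conf, new RouterAdmin(conf),
            new String[] {"-ls", "/"});
        System.exit(rc);
      }
    }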

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89c80185/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f3bc592..b161bc0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1085,6 +1085,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String FEDERATION_STORE_PREFIX =
   FEDERATION_ROUTER_PREFIX + "store.";
 
+  public static final String DFS_ROUTER_STORE_E

[42/50] hadoop git commit: HDFS-12384. Fixing compilation issue with BanDuplicateClasses. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-12384. Fixing compilation issue with BanDuplicateClasses. Contributed by 
Inigo Goiri.

(cherry picked from commit fabe02c8fafa807198054da0c02b2ebaafda76aa)
(cherry picked from commit cc58e7a983d8f1351089462f531993f7b4f0a9c5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c778f9dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c778f9dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c778f9dd

Branch: refs/heads/YARN-3368_branch2
Commit: c778f9dd3739829333679f29bdf0147823af4aea
Parents: b71a753
Author: Inigo Goiri 
Authored: Thu Sep 7 13:53:08 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:32 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  |  4 
 .../server/federation/router/RouterRpcServer.java| 15 +++
 2 files changed, 15 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c778f9dd/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 64ad6fb..8ae3db8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -222,10 +222,6 @@
     <dependency>
       <groupId>org.apache.curator</groupId>
-      <artifactId>curator-framework</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.curator</groupId>
       <artifactId>curator-test</artifactId>
       <scope>test</scope>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c778f9dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index c77d255..f9b4a5d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -81,6 +81,7 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -91,6 +92,7 @@ import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
@@ -1607,6 +1609,19 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol {
   }
 
   @Override // ClientProtocol
+  public void reencryptEncryptionZone(String zone, ReencryptAction action)
+      throws IOException {
+    checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override // ClientProtocol
+  public BatchedEntries<ZoneReencryptionStatus> listReencryptionStatus(
+      long prevId) throws IOException {
+    checkOperation(OperationCategory.READ, false);
+    return null;
+  }
+
+  @Override // ClientProtocol
   public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
       throws IOException {
     checkOperation(OperationCategory.WRITE);





[08/50] hadoop git commit: HADOOP-14958. Fix source-level compatibility after HADOOP-11252. Contributed by Junping Du.

2017-10-20 Thread vrushali
HADOOP-14958. Fix source-level compatibility after HADOOP-11252. Contributed by 
Junping Du.

(cherry picked from commit b016f08f67830ed3ca741bc6a10c3f5164781be5)
(cherry picked from commit 9433f9eb09af0ca61f01a2eb42ff3ffe31a94d5f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8c6ef01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8c6ef01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8c6ef01

Branch: refs/heads/YARN-3368_branch2
Commit: e8c6ef01d382dd49d7324b87fb020516ac432697
Parents: bc3ca4c
Author: Junping Du 
Authored: Wed Oct 18 15:06:30 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:27 2017 -0700

--
 .../src/main/java/org/apache/hadoop/ipc/Client.java | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8c6ef01/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 05928f3..533b6ca 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -214,7 +214,8 @@ public class Client implements AutoCloseable {
    * @param conf Configuration
    * @param pingInterval the ping interval
    */
-  static final void setPingInterval(Configuration conf, int pingInterval) {
+  public static final void setPingInterval(Configuration conf,
+      int pingInterval) {
     conf.setInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, pingInterval);
   }
 
@@ -225,7 +226,7 @@ public class Client implements AutoCloseable {
    * @param conf Configuration
    * @return the ping interval
    */
-  static final int getPingInterval(Configuration conf) {
+  public static final int getPingInterval(Configuration conf) {
     return conf.getInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY,
         CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT);
   }
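
With both accessors now public, code outside org.apache.hadoop.ipc can tune the ping interval directly; an illustrative use of the two methods shown in this diff:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.Client;

    public class PingIntervalExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        Client.setPingInterval(conf, 30000);         // ping every 30 seconds
        int interval = Client.getPingInterval(conf); // reads the key back
        System.out.println("IPC ping interval: " + interval + " ms");
      }
    }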





[31/50] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo 
Goiri.

(cherry picked from commit 8a9cdebebf26841a0f1e99fb08135f4597f2eba2)
(cherry picked from commit ca4f209b49e3aad6a80306f7342c9b6b560a79a7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6989725
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6989725
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6989725

Branch: refs/heads/YARN-3368_branch2
Commit: e69897253daa4749153143935459543e8ecadb6e
Parents: 93687da
Author: Inigo Goiri 
Authored: Thu May 11 09:57:03 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:30 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   38 +
 .../resolver/FederationNamespaceInfo.java   |   46 +-
 .../federation/resolver/RemoteLocation.java |   46 +-
 .../federation/router/ConnectionContext.java|  104 +
 .../federation/router/ConnectionManager.java|  408 
 .../federation/router/ConnectionPool.java   |  314 +++
 .../federation/router/ConnectionPoolId.java |  117 ++
 .../router/RemoteLocationContext.java   |   38 +-
 .../server/federation/router/RemoteMethod.java  |  164 ++
 .../server/federation/router/RemoteParam.java   |   71 +
 .../hdfs/server/federation/router/Router.java   |   58 +-
 .../federation/router/RouterRpcClient.java  |  856 
 .../federation/router/RouterRpcServer.java  | 1867 +-
 .../src/main/resources/hdfs-default.xml |   95 +
 .../server/federation/FederationTestUtils.java  |   80 +-
 .../hdfs/server/federation/MockResolver.java|   90 +-
 .../server/federation/RouterConfigBuilder.java  |   20 +-
 .../server/federation/RouterDFSCluster.java |  535 +++--
 .../server/federation/router/TestRouter.java|   31 +-
 .../server/federation/router/TestRouterRpc.java |  869 
 .../router/TestRouterRpcMultiDestination.java   |  216 ++
 21 files changed, 5675 insertions(+), 388 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6989725/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1b66ead..5d6c467 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1012,6 +1012,44 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   // HDFS Router-based federation
   public static final String FEDERATION_ROUTER_PREFIX =
   "dfs.federation.router.";
+  public static final String DFS_ROUTER_DEFAULT_NAMESERVICE =
+      FEDERATION_ROUTER_PREFIX + "default.nameserviceId";
+  public static final String DFS_ROUTER_HANDLER_COUNT_KEY =
+      FEDERATION_ROUTER_PREFIX + "handler.count";
+  public static final int DFS_ROUTER_HANDLER_COUNT_DEFAULT = 10;
+  public static final String DFS_ROUTER_READER_QUEUE_SIZE_KEY =
+      FEDERATION_ROUTER_PREFIX + "reader.queue.size";
+  public static final int DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT = 100;
+  public static final String DFS_ROUTER_READER_COUNT_KEY =
+      FEDERATION_ROUTER_PREFIX + "reader.count";
+  public static final int DFS_ROUTER_READER_COUNT_DEFAULT = 1;
+  public static final String DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY =
+      FEDERATION_ROUTER_PREFIX + "handler.queue.size";
+  public static final int DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT = 100;
+  public static final String DFS_ROUTER_RPC_BIND_HOST_KEY =
+      FEDERATION_ROUTER_PREFIX + "rpc-bind-host";
+  public static final int DFS_ROUTER_RPC_PORT_DEFAULT = 8888;
+  public static final String DFS_ROUTER_RPC_ADDRESS_KEY =
+      FEDERATION_ROUTER_PREFIX + "rpc-address";
+  public static final String DFS_ROUTER_RPC_ADDRESS_DEFAULT =
+      "0.0.0.0:" + DFS_ROUTER_RPC_PORT_DEFAULT;
+  public static final String DFS_ROUTER_RPC_ENABLE =
+      FEDERATION_ROUTER_PREFIX + "rpc.enable";
+  public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true;
+
+  // HDFS Router NN client
+  public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE =
+      FEDERATION_ROUTER_PREFIX + "connection.pool-size";
+  public static final int DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE_DEFAULT =
+      64;
+  public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN =
+      FEDERATION_ROUTER_PREFIX + "connection.pool.clean.ms";
+  public static final lon
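
These keys all live under the dfs.federation.router. prefix; a hedged snippet overriding a few of them programmatically (values illustrative, equivalent entries can go in hdfs-site.xml):

    import org.apache.hadoop.conf.Configuration;

    public class RouterRpcConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.federation.router.rpc.enable", true);
        conf.setInt("dfs.federation.router.handler.count", 20);  // default 10
        conf.setInt("dfs.federation.router.reader.count", 2);    // default 1
        conf.set("dfs.federation.router.rpc-address", "0.0.0.0:8888");
      }
    }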

[19/50] hadoop git commit: HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace 
and Inigo Goiri.

(cherry picked from commit c6e0bd640cdaf83a660fa050809cad6f1d4c6f4d)
(cherry picked from commit 4bf877b03f0e01c4bcedc689c66689701e62b560)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93687da4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93687da4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93687da4

Branch: refs/heads/YARN-3368_branch2
Commit: 93687da438f879c4c2c0016216056fb570012ee2
Parents: 04e3f38
Author: Inigo Goiri 
Authored: Tue May 2 15:49:53 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:29 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  14 +
 .../federation/router/PeriodicService.java  | 198 
 .../StateStoreConnectionMonitorService.java |  67 +++
 .../federation/store/StateStoreService.java | 152 +-
 .../federation/store/StateStoreUtils.java   |  51 +-
 .../store/driver/StateStoreDriver.java  |  31 +-
 .../driver/StateStoreRecordOperations.java  |  17 +-
 .../store/driver/impl/StateStoreBaseImpl.java   |  31 +-
 .../driver/impl/StateStoreFileBaseImpl.java | 429 
 .../store/driver/impl/StateStoreFileImpl.java   | 161 +++
 .../driver/impl/StateStoreFileSystemImpl.java   | 178 +++
 .../driver/impl/StateStoreSerializableImpl.java |  77 +++
 .../federation/store/records/BaseRecord.java|  20 +-
 .../server/federation/store/records/Query.java  |  66 +++
 .../src/main/resources/hdfs-default.xml |  16 +
 .../store/FederationStateStoreTestUtils.java| 232 +
 .../store/driver/TestStateStoreDriverBase.java  | 483 +++
 .../store/driver/TestStateStoreFile.java|  64 +++
 .../store/driver/TestStateStoreFileSystem.java  |  88 
 19 files changed, 2329 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93687da4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b645347..1b66ead 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -18,12 +18,16 @@
 
 package org.apache.hadoop.hdfs;
 
+import java.util.concurrent.TimeUnit;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
+import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl;
 import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
 import org.apache.hadoop.http.HttpConfig;
 
@@ -1029,6 +1033,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   FEDERATION_STORE_SERIALIZER_CLASS_DEFAULT =
   StateStoreSerializerPBImpl.class;
 
+  public static final String FEDERATION_STORE_DRIVER_CLASS =
+      FEDERATION_STORE_PREFIX + "driver.class";
+  public static final Class<? extends StateStoreDriver>
+      FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreFileImpl.class;
+
+  public static final String FEDERATION_STORE_CONNECTION_TEST_MS =
+      FEDERATION_STORE_PREFIX + "connection.test";
+  public static final long FEDERATION_STORE_CONNECTION_TEST_MS_DEFAULT =
+      TimeUnit.MINUTES.toMillis(1);
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY
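
A short, hedged example of selecting the file-based driver through the key added above (Configuration.setClass keeps the value consistent with the StateStoreDriver interface):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
    import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl;

    public class StateStoreDriverConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Same key as FEDERATION_STORE_DRIVER_CLASS above.
        conf.setClass("dfs.federation.router.store.driver.class",
            StateStoreFileImpl.class, StateStoreDriver.class);
      }
    }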

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93687da4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java
new file mode 100644
index 000..5e1
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/

[34/50] hadoop git commit: HDFS-10646. Federation admin tool. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/89c80185/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
new file mode 100644
index 000..170247f
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
@@ -0,0 +1,261 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.synchronizeRecords;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.impl.MountTableStoreImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.util.Time;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * The administrator interface of the {@link Router} implemented by
+ * {@link RouterAdminServer}.
+ */
+public class TestRouterAdmin {
+
+  private static StateStoreDFSCluster cluster;
+  private static RouterContext routerContext;
+  public static final String RPC_BEAN =
+      "Hadoop:service=Router,name=FederationRPC";
+  private static List<MountTable> mockMountTable;
+  private static StateStoreService stateStore;
+
+  @BeforeClass
+  public static void globalSetUp() throws Exception {
+    cluster = new StateStoreDFSCluster(false, 1);
+    // Build and start a router with State Store + admin + RPC
+    Configuration conf = new RouterConfigBuilder()
+        .stateStore()
+        .admin()
+        .rpc()
+        .build();
+    cluster.addRouterOverrides(conf);
+    cluster.startRouters();
+    routerContext = cluster.getRandomRouter();
+    mockMountTable = cluster.generateMockMountTable();
+    Router router = routerContext.getRouter();
+    stateStore = router.getStateStore();
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    cluster.stopRouter(routerContext);
+  }
+
+  @Before
+  public void testSetup() throws Exception {
+    assertTrue(
+        synchronizeRecords(stateStore, mockMountTable, MountTable.class));
+  }
+
+  @Test
+  public void testAddMountTable() throws IOException {
+    MountTable newEntry = MountTable.newInstance(
+        "/testpath", Collections.singletonMap("ns0", "/testdir"),
+        Time.now(), Time.now());
+
+    RouterClient client = routerContext.getAdminClient();
+    MountTableManager mountTable = client.getMountTableManager();
+
+    // Existing mount table size
+    List<MountTable> records = getMountTableEntrie
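
The snippet is cut off here; a hedged sketch of how the test plausibly continues, using only the request/response classes imported above (factory-method names assumed, not copied from the commit):

    // Hedged continuation sketch, not the verbatim test body.
    AddMountTableEntryRequest addRequest =
        AddMountTableEntryRequest.newInstance(newEntry);
    AddMountTableEntryResponse addResponse =
        mountTable.addMountTableEntry(addRequest);
    assertTrue(addResponse.getStatus());

    // Re-read the mount table and expect one more record than before.
    GetMountTableEntriesRequest getRequest =
        GetMountTableEntriesRequest.newInstance("/");
    GetMountTableEntriesResponse getResponse =
        mountTable.getMountTableEntries(getRequest);
    assertEquals(mockMountTable.size() + 1, getResponse.getEntries().size());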

[36/50] hadoop git commit: HDFS-11554. [Documentation] Router-based federation documentation. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-11554. [Documentation] Router-based federation documentation. Contributed 
by Inigo Goiri.

(cherry picked from commit ee3260211d94aed223dd6f2386a166eb2c7d67af)
(cherry picked from commit 67d10087aff9d4ab2748aefc1b97522495c148f8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/568a32e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/568a32e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/568a32e5

Branch: refs/heads/YARN-3368_branch2
Commit: 568a32e5fe9f6dde21ea3de0456bda7807051261
Parents: 89c8018
Author: Inigo Goiri 
Authored: Wed Aug 16 17:23:29 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:31 2017 -0700

--
 .../src/site/markdown/HDFSRouterFederation.md   | 170 +++
 .../site/resources/images/routerfederation.png  | Bin 0 -> 24961 bytes
 2 files changed, 170 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/568a32e5/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
new file mode 100644
index 000..f094238
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -0,0 +1,170 @@
+
+
+HDFS Router-based Federation
+============================
+
+
+
+Introduction
+------------
+
+NameNodes have scalability limits because of the metadata overhead comprised of inodes (files and directories) and file blocks, the number of Datanode heartbeats, and the number of HDFS RPC client requests.
+The common solution is to split the filesystem into smaller subclusters [HDFS Federation](./Federation.html) and provide a federated view [ViewFs](./ViewFs.html).
+The problem is how to maintain the split of the subclusters (e.g., namespace partition), which forces users to connect to multiple subclusters and manage the allocation of folders/files to them.
+
+
+Architecture
+------------
+
+A natural extension to this partitioned federation is to add a layer of software responsible for federating the namespaces.
+This extra layer allows users to access any subcluster transparently, lets subclusters manage their own block pools independently, and supports rebalancing of data across subclusters.
+To accomplish these goals, the federation layer directs block accesses to the proper subcluster, maintains the state of the namespaces, and provides mechanisms for data rebalancing.
+This layer must be scalable, highly available, and fault tolerant.
+
+This federation layer comprises multiple components.
+The _Router_ component has the same interface as a NameNode and forwards the client requests to the correct subcluster, based on ground-truth information from a State Store.
+The _State Store_ combines a remote _Mount Table_ (in the flavor of [ViewFs](./ViewFs.html), but shared between clients) and utilization (load/capacity) information about the subclusters.
+This approach has the same architecture as [YARN federation](../hadoop-yarn/Federation.html).
+
+![Router-based Federation Sequence Diagram | width=800](./images/routerfederation.png)
+
+
+### Example flow
+The simplest configuration deploys a Router on each NameNode machine.
+The Router monitors the local NameNode and heartbeats the state to the State Store.
+When a regular DFS client contacts any of the Routers to access a file in the federated filesystem, the Router checks the Mount Table in the State Store (i.e., the local cache) to find out which subcluster contains the file.
+Then it checks the Membership table in the State Store (i.e., the local cache) for the NameNode responsible for the subcluster.
+After it has identified the correct NameNode, the Router proxies the request.
+The client accesses Datanodes directly.
+
+
+### Router
+There can be multiple Routers in the system with soft state.
+Each Router has two roles:
+
+* Federated interface: expose a single, global NameNode interface to the clients and forward the requests to the active NameNode in the correct subcluster
+* NameNode heartbeat: maintain the information about a NameNode in the State Store
+
+#### Federated interface
+The Router receives a client request, checks the State Store for the correct subcluster, and forwards the request to the active NameNode of that subcluster.
+The reply from the NameNode then flows in the opposite direction.
+The Routers are stateless and can be behind a load balancer.
+For performance, the Router also caches remote mount table entries and the state of the subclusters.
+To make sure that changes have been propagated to all Routers, each Router heartbeats its state
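
The transparency described above means an unmodified HDFS client only needs to point fs.defaultFS at a Router; a hedged sketch (the hostname is a placeholder, and 8888 is the default Router RPC port introduced earlier in this series):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RouterClientExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The Router speaks ClientProtocol, so no client changes are needed.
        conf.set("fs.defaultFS", "hdfs://router.example.com:8888");
        try (FileSystem fs = FileSystem.get(conf)) {
          for (FileStatus status : fs.listStatus(new Path("/"))) {
            System.out.println(status.getPath());
          }
        }
      }
    }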

[18/50] hadoop git commit: HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace and Inigo Goiri.

2017-10-20 Thread vrushali
http://git-wip-us.apache.org/repos/asf/hadoop/blob/93687da4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
new file mode 100644
index 000..7f0b36a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
@@ -0,0 +1,483 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.driver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
+import org.apache.hadoop.hdfs.server.federation.store.records.Query;
+import org.apache.hadoop.hdfs.server.federation.store.records.QueryResult;
+import org.junit.AfterClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Base tests for the driver. The particular implementations will use this to
+ * test their functionality.
+ */
+public class TestStateStoreDriverBase {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestStateStoreDriverBase.class);
+
+  private static StateStoreService stateStore;
+  private static Configuration conf;
+
+
+  /**
+   * Get the State Store driver.
+   * @return State Store driver.
+   */
+  protected StateStoreDriver getStateStoreDriver() {
+    return stateStore.getDriver();
+  }
+
+  @AfterClass
+  public static void tearDownCluster() {
+    if (stateStore != null) {
+      stateStore.stop();
+    }
+  }
+
+  /**
+   * Get a new State Store using this configuration.
+   *
+   * @param config Configuration for the State Store.
+   * @throws Exception If we cannot get the State Store.
+   */
+  public static void getStateStore(Configuration config) throws Exception {
+    conf = config;
+    stateStore = FederationStateStoreTestUtils.getStateStore(conf);
+  }
+
+  private <T extends BaseRecord> T generateFakeRecord(Class<T> recordClass)
+      throws IllegalArgumentException, IllegalAccessException, IOException {
+
+    // TODO add record
+    return null;
+  }
+
+  /**
+   * Validate if a record is the same.
+   *
+   * @param original
+   * @param committed
+   * @param assertEquals Assert if the records are equal or just return.
+   * @return
+   * @throws IllegalArgumentException
+   * @throws IllegalAccessException
+   */
+  private boolean validateRecord(
+      BaseRecord original, BaseRecord committed, boolean assertEquals)
+      throws IllegalArgumentException, IllegalAccessException {
+
+    boolean ret = true;
+
+    Map<String, Class<?>> fields = getFields(original);
+    for (String key : fields.keySet()) {
+      if (key.equals("dateModified") ||
+          key.equals("dateCreated") ||
+          key.equals("proto")) {
+        // Fields are updated/set on commit and fetch and may not match
+        // the fields that are initialized in a non-committed object.
+        continue;
+      }
+      Object data1 = getField(original, key);
+      Object data2 = getField(committed, key);
+      if (assertEquals) {
+        assertEquals("Field " + key + " does not match", data1, data2);
+      } else if (!data1.equals(data2)) {
+        ret = false;
+      }
+    }
+
+    long now = stateStore.getDriver().getTime();
+asser
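
Concrete driver tests extend this base class, build a configuration for one driver, and call getStateStore(...); a hedged sketch mirroring this commit's TestStateStoreFile (the getStateStoreConfiguration helper signature is assumed, not verified):

    import static org.junit.Assert.assertNotNull;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils;
    import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl;
    import org.junit.BeforeClass;
    import org.junit.Test;

    // Hedged sketch of a concrete driver test; helper signature assumed.
    public class TestStateStoreFileSketch extends TestStateStoreDriverBase {

      @BeforeClass
      public static void setupCluster() throws Exception {
        Configuration conf = FederationStateStoreTestUtils
            .getStateStoreConfiguration(StateStoreFileImpl.class); // assumed
        getStateStore(conf);
      }

      @Test
      public void testDriverReady() {
        assertNotNull(getStateStoreDriver());
      }
    }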

[48/50] hadoop git commit: HDFS-12273. Federation UI. Contributed by Inigo Goiri.

2017-10-20 Thread vrushali
HDFS-12273. Federation UI. Contributed by Inigo Goiri.

(cherry picked from commit adbb2e00c7b85524fd43bd68895d49814c16680a)
(cherry picked from commit 81601dac8ec7650bec14700b174910390a92fe1f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1772d456
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1772d456
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1772d456

Branch: refs/heads/YARN-3368_branch2
Commit: 1772d4563db69eee057fa01655c77de757b8541a
Parents: 9920a89
Author: Inigo Goiri 
Authored: Thu Oct 5 17:26:43 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:34 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  19 +
 .../federation/metrics/FederationMBean.java |   7 +
 .../federation/metrics/FederationMetrics.java   |  25 +-
 .../resolver/MembershipNamenodeResolver.java|  23 ++
 .../hdfs/server/federation/router/Router.java   |  36 ++
 .../federation/router/RouterHttpServer.java | 124 +++
 .../federation/router/RouterRpcClient.java  |  45 ++-
 .../federation/router/RouterRpcServer.java  |  15 +-
 .../src/main/resources/hdfs-default.xml |  56 +++
 .../main/webapps/router/federationhealth.html   | 371 +++
 .../src/main/webapps/router/federationhealth.js | 313 
 .../src/main/webapps/router/index.html  |  24 ++
 .../server/federation/RouterConfigBuilder.java  |  13 +
 .../server/federation/RouterDFSCluster.java |  29 +-
 .../federation/metrics/TestMetricsBase.java |   1 +
 .../server/federation/router/TestRouter.java|   9 +-
 17 files changed, 1102 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1772d456/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 8ae3db8..154e4f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -276,6 +276,9 @@
 
+
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1772d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3606e7a..3f967da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1141,6 +1141,25 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   FEDERATION_ROUTER_PREFIX + "admin.enable";
   public static final boolean DFS_ROUTER_ADMIN_ENABLE_DEFAULT = true;
 
+  // HDFS Router-based federation web
+  public static final String DFS_ROUTER_HTTP_ENABLE =
+      FEDERATION_ROUTER_PREFIX + "http.enable";
+  public static final boolean DFS_ROUTER_HTTP_ENABLE_DEFAULT = true;
+  public static final String DFS_ROUTER_HTTP_ADDRESS_KEY =
+      FEDERATION_ROUTER_PREFIX + "http-address";
+  public static final int    DFS_ROUTER_HTTP_PORT_DEFAULT = 50071;
+  public static final String DFS_ROUTER_HTTP_BIND_HOST_KEY =
+      FEDERATION_ROUTER_PREFIX + "http-bind-host";
+  public static final String DFS_ROUTER_HTTP_ADDRESS_DEFAULT =
+      "0.0.0.0:" + DFS_ROUTER_HTTP_PORT_DEFAULT;
+  public static final String DFS_ROUTER_HTTPS_ADDRESS_KEY =
+      FEDERATION_ROUTER_PREFIX + "https-address";
+  public static final int    DFS_ROUTER_HTTPS_PORT_DEFAULT = 50072;
+  public static final String DFS_ROUTER_HTTPS_BIND_HOST_KEY =
+      FEDERATION_ROUTER_PREFIX + "https-bind-host";
+  public static final String DFS_ROUTER_HTTPS_ADDRESS_DEFAULT =
+      "0.0.0.0:" + DFS_ROUTER_HTTPS_PORT_DEFAULT;
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY
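
An illustrative use of the new web UI keys (defaults shown above: HTTP on 50071, HTTPS on 50072):

    import org.apache.hadoop.conf.Configuration;

    public class RouterWebConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.federation.router.http.enable", true);
        conf.set("dfs.federation.router.http-address", "0.0.0.0:50071");
        conf.set("dfs.federation.router.https-address", "0.0.0.0:50072");
      }
    }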

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1772d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/

[11/50] hadoop git commit: YARN-7359. TestAppManager.testQueueSubmitWithNoPermission() should be scheduler agnostic. (Contributed by Haibo Chen)

2017-10-20 Thread vrushali
YARN-7359. TestAppManager.testQueueSubmitWithNoPermission() should be scheduler 
agnostic. (Contributed by Haibo Chen)

(cherry picked from commit 7b4b0187806601e33f5a88d48991e7c12ee4419f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f462461d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f462461d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f462461d

Branch: refs/heads/YARN-3368_branch2
Commit: f462461d4eeb0dc2f10e075d1a1a815537d4e471
Parents: 1000811
Author: Yufei Gu 
Authored: Thu Oct 19 16:51:29 2017 -0700
Committer: vrushali 
Committed: Fri Oct 20 11:22:28 2017 -0700

--
 .../apache/hadoop/yarn/server/resourcemanager/TestAppManager.java  | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f462461d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
index 009eb2c..8a5c730 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
@@ -301,8 +301,6 @@ public class TestAppManager{
   @Test
   public void testQueueSubmitWithNoPermission() throws IOException {
 YarnConfiguration conf = new YarnConfiguration();
-conf.set(YarnConfiguration.RM_SCHEDULER,
-CapacityScheduler.class.getCanonicalName());
 conf.set(PREFIX + "root.acl_submit_applications", " ");
 conf.set(PREFIX + "root.acl_administer_queue", " ");
 




