[3/3] hadoop git commit: YARN-3139. Improve locks in AbstractYarnScheduler/CapacityScheduler/FairScheduler. Contributed by Wangda Tan

2016-10-04 Thread jianhe
YARN-3139. Improve locks in 
AbstractYarnScheduler/CapacityScheduler/FairScheduler. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31f8da22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31f8da22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31f8da22

Branch: refs/heads/trunk
Commit: 31f8da22d0b8d2dcce5fbc8e45d832f40acf056f
Parents: 44f48ee
Author: Jian He 
Authored: Tue Oct 4 17:23:13 2016 -0700
Committer: Jian He 
Committed: Tue Oct 4 17:23:13 2016 -0700

--
 .../server/resourcemanager/RMServerUtils.java   |5 +-
 .../scheduler/AbstractYarnScheduler.java|  416 +++--
 .../scheduler/SchedulerApplicationAttempt.java  |8 +-
 .../scheduler/capacity/CapacityScheduler.java   | 1729 ++
 .../scheduler/capacity/LeafQueue.java   |   16 +
 .../scheduler/common/fica/FiCaSchedulerApp.java |4 +
 .../scheduler/fair/FairScheduler.java   | 1048 ++-
 7 files changed, 1745 insertions(+), 1481 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f8da22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index b90e499..b2a085a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -211,10 +211,7 @@ public class RMServerUtils {
   }
 
   /**
-   * Validate increase/decrease request. This function must be called under
-   * the queue lock to make sure that the access to container resource is
-   * atomic. Refer to LeafQueue.decreaseContainer() and
-   * CapacityScheduelr.updateIncreaseRequests()
+   * Validate increase/decrease request.
* 
* - Throw exception when any other error happens
* 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f8da22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 45415de..645e06d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -28,6 +28,7 @@ import java.util.Set;
 import java.util.Timer;
 import java.util.TimerTask;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -72,8 +73,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerReco
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity
-.LeafQueue;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import com.google.common.annotations.VisibleForTesting;
@@ -94,7 +93,7 @@ public abstract class AbstractYarnScheduler
 
   protected Resource minimumAllocation;
 
-  protected RMContext rmContext;
+  protected volatile RMContext rmContext;
   
   private volatile Priority maxClusterLevelAppPriority;
 
@@ -112,6 +111,18 @@ public abstract c

[2/3] hadoop git commit: YARN-3139. Improve locks in AbstractYarnScheduler/CapacityScheduler/FairScheduler. Contributed by Wangda Tan

2016-10-04 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f8da22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 33fe9ad..6d00bee 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -39,7 +39,6 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
@@ -267,8 +266,7 @@ public class CapacityScheduler extends
   }
 
   @Override
-  public synchronized RMContainerTokenSecretManager 
-  getContainerTokenSecretManager() {
+  public RMContainerTokenSecretManager getContainerTokenSecretManager() {
 return this.rmContext.getContainerTokenSecretManager();
   }
 
@@ -293,52 +291,62 @@ public class CapacityScheduler extends
   }
 
   @Override
-  public synchronized RMContext getRMContext() {
+  public RMContext getRMContext() {
 return this.rmContext;
   }
 
   @Override
-  public synchronized void setRMContext(RMContext rmContext) {
+  public void setRMContext(RMContext rmContext) {
 this.rmContext = rmContext;
   }
 
-  private synchronized void initScheduler(Configuration configuration) throws
+  private void initScheduler(Configuration configuration) throws
   IOException {
-this.conf = loadCapacitySchedulerConfiguration(configuration);
-validateConf(this.conf);
-this.minimumAllocation = this.conf.getMinimumAllocation();
-initMaximumResourceCapability(this.conf.getMaximumAllocation());
-this.calculator = this.conf.getResourceCalculator();
-this.usePortForNodeName = this.conf.getUsePortForNodeName();
-this.applications = new ConcurrentHashMap<>();
-this.labelManager = rmContext.getNodeLabelManager();
-authorizer = YarnAuthorizationProvider.getInstance(yarnConf);
-this.activitiesManager = new ActivitiesManager(rmContext);
-activitiesManager.init(conf);
-initializeQueues(this.conf);
-this.isLazyPreemptionEnabled = conf.getLazyPreemptionEnabled();
-
-scheduleAsynchronously = this.conf.getScheduleAynschronously();
-asyncScheduleInterval =
-this.conf.getLong(ASYNC_SCHEDULER_INTERVAL,
-DEFAULT_ASYNC_SCHEDULER_INTERVAL);
-if (scheduleAsynchronously) {
-  asyncSchedulerThread = new AsyncScheduleThread(this);
-}
-
-LOG.info("Initialized CapacityScheduler with " +
-"calculator=" + getResourceCalculator().getClass() + ", " +
-"minimumAllocation=<" + getMinimumResourceCapability() + ">, " +
-"maximumAllocation=<" + getMaximumResourceCapability() + ">, " +
-"asynchronousScheduling=" + scheduleAsynchronously + ", " +
-"asyncScheduleInterval=" + asyncScheduleInterval + "ms");
-  }
-
-  private synchronized void startSchedulerThreads() {
-if (scheduleAsynchronously) {
-  Preconditions.checkNotNull(asyncSchedulerThread,
-  "asyncSchedulerThread is null");
-  asyncSchedulerThread.start();
+try {
+  writeLock.lock();
+  this.conf = loadCapacitySchedulerConfiguration(configuration);
+  validateConf(this.conf);
+  this.minimumAllocation = this.conf.getMinimumAllocation();
+  initMaximumResourceCapability(this.conf.getMaximumAllocation());
+  this.calculator = this.conf.getResourceCalculator();
+  this.usePortForNodeName = this.conf.getUsePortForNodeName();
+  this.applications = new ConcurrentHashMap<>();
+  this.labelManager = rmContext.getNodeLabelManager();
+  authorizer = YarnAuthorizationProvider.getInstance(yarnConf);
+  this.activitiesManager = new ActivitiesManager(rmContext);
+  activitiesManager.init(conf);
+  initializeQueues(this.conf);
+  this.isLazyPreemptionEnabled = conf.getLazyPreemptionEnabled();
+
+  scheduleAsynchronously = this.conf.getScheduleAynschronously();
+  asy

[1/3] hadoop git commit: YARN-3139. Improve locks in AbstractYarnScheduler/CapacityScheduler/FairScheduler. Contributed by Wangda Tan

2016-10-04 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 44f48ee96 -> 31f8da22d


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f8da22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 6129772..eecd4ba 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -2227,6 +2227,22 @@ public class LeafQueue extends AbstractCSQueue {
 }
   }
 
+  public void updateApplicationPriority(SchedulerApplication 
app,
+  Priority newAppPriority) {
+try {
+  writeLock.lock();
+  FiCaSchedulerApp attempt = app.getCurrentAppAttempt();
+  getOrderingPolicy().removeSchedulableEntity(attempt);
+
+  // Update new priority in SchedulerApplication
+  attempt.setPriority(newAppPriority);
+
+  getOrderingPolicy().addSchedulableEntity(attempt);
+} finally {
+  writeLock.unlock();
+}
+  }
+
   public OrderingPolicy
   getPendingAppsOrderingPolicy() {
 return pendingOrderingPolicy;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f8da22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index fd43e74..aa7ad50 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -23,6 +23,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -666,6 +667,9 @@ public class FiCaSchedulerApp extends 
SchedulerApplicationAttempt {
 } finally {
   writeLock.unlock();
 }
+  }
 
+  public ReentrantReadWriteLock.WriteLock getWriteLock() {
+return this.writeLock;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f8da22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 920052f..8daf0f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -186,10 +186,13 @@ public class FairScheduler extends
   // an app can be reserved on
 
   protected boolean sizeBasedWeight; // Give larger weights to larger jobs
-  protected boolean continuousSchedulingEnabled; // Continuous Scheduling 
enabled or not
-  protected int continuousSchedulingSleepMs; // Sleep time for each pass in 
continuous scheduling
+  // Continuous Schedu

svn commit: r1763336 - in /hadoop/common/site/main: author/src/documentation/resources/images/hadoop-logo.jpg publish/images/hadoop-logo.jpg

2016-10-04 Thread cdouglas
Author: cdouglas
Date: Wed Oct  5 00:09:55 2016
New Revision: 1763336

URL: http://svn.apache.org/viewvc?rev=1763336&view=rev
Log:
Restore logo, this time in the correct place

Modified:

hadoop/common/site/main/author/src/documentation/resources/images/hadoop-logo.jpg
hadoop/common/site/main/publish/images/hadoop-logo.jpg

Modified: 
hadoop/common/site/main/author/src/documentation/resources/images/hadoop-logo.jpg
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/resources/images/hadoop-logo.jpg?rev=1763336&r1=1763335&r2=1763336&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/images/hadoop-logo.jpg
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/images/hadoop-logo.jpg?rev=1763336&r1=1763335&r2=1763336&view=diff
==
Binary files - no diff available.



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-10896. Move lock logging logic from FSNamesystem into FSNamesystemLock. Contributed by Erik Krogen.

2016-10-04 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 3eff0873f -> 5c99959b2


HDFS-10896. Move lock logging logic from FSNamesystem into FSNamesystemLock. 
Contributed by Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c99959b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c99959b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c99959b

Branch: refs/heads/branch-2.7
Commit: 5c99959b2d34bb02a1693a4c7d911718c2529ee8
Parents: 3eff087
Author: Zhe Zhang 
Authored: Tue Oct 4 16:52:56 2016 -0700
Committer: Zhe Zhang 
Committed: Tue Oct 4 16:52:56 2016 -0700

--
 .../hdfs/server/namenode/FSNamesystem.java  | 148 +
 .../hdfs/server/namenode/FSNamesystemLock.java  | 187 ++-
 .../hdfs/server/namenode/TestFSNamesystem.java  | 297 -
 .../server/namenode/TestFSNamesystemLock.java   | 317 +++
 4 files changed, 496 insertions(+), 453 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c99959b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a090748..18b0665 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -71,12 +71,6 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_DEFAULT;
@@ -130,8 +124,6 @@ import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -149,7 +141,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
 import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.crypto.CryptoCodec;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
 import org.apache.hadoop.fs.CacheFlag;
@@ -292,7 +283,6 @@ import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Timer;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.Appender;
 import org.apache.log4j.AsyncAppender;
@@ -731,12 +721,9 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   LOG.info("Enabling async auditlog");
   enableAsyncAuditLog();
 }
-boolean fair = conf.getBoolean("dfs.namenode.fslock.fair", true);
-LOG.info("fsLock is fair:" + fair);
-fsLock = new FSNamesystemLock(fair);
-cond = fsLock.writeLock().newCondition();
+fsLock = new FSNamesystemLock(conf);
+cond = fsLock.newWriteLockCondition();
 cpLock = new ReentrantLock();
-setTimer(new Timer());
 
 this.fsImage = fsImage;
 try {
@@ -830,17 +817,6 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMB

hadoop git commit: YARN-5491. Fix random failure of TestCapacityScheduler#testCSQueueBlocked (Bibin A Chundatt via Varun Saxena) (cherry picked from commit d677b68c2599445fff56db4df26448a8bad0f5dd)

2016-10-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 d03240e0b -> 3656b8408


YARN-5491. Fix random failure of TestCapacityScheduler#testCSQueueBlocked 
(Bibin A Chundatt via Varun Saxena)
(cherry picked from commit d677b68c2599445fff56db4df26448a8bad0f5dd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3656b840
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3656b840
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3656b840

Branch: refs/heads/branch-2.8
Commit: 3656b8408529dd42e14eb87dde63db7e7b403068
Parents: d03240e
Author: Varun Saxena 
Authored: Mon Aug 15 03:31:21 2016 +0530
Committer: Jason Lowe 
Committed: Tue Oct 4 20:57:01 2016 +

--
 .../scheduler/capacity/TestCapacityScheduler.java| 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3656b840/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index ceffebd..134ebd4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -3606,7 +3606,7 @@ public class TestCapacityScheduler {
 }
 assertEquals("A Used Resource should be 2 GB", 2 * GB,
 cs.getQueue("a").getUsedResources().getMemorySize());
-assertEquals("B Used Resource should be 2 GB", 13 * GB,
+assertEquals("B Used Resource should be 13 GB", 13 * GB,
 cs.getQueue("b").getUsedResources().getMemorySize());
 r1 = TestUtils.createResourceRequest(
 ResourceRequest.ANY, 2 * GB, 1, true, priority, recordFactory);
@@ -3630,10 +3630,14 @@ public class TestCapacityScheduler {
 ContainerId containerId2 =ContainerId.newContainerId(appAttemptId2, 11);
 
 cs.handle(new ContainerExpiredSchedulerEvent(containerId1));
+rm.drainEvents();
+CapacityScheduler.schedule(cs);
+
 cs.handle(new ContainerExpiredSchedulerEvent(containerId2));
 CapacityScheduler.schedule(cs);
 rm.drainEvents();
-assertEquals("A Used Resource should be 2 GB", 4 * GB,
+
+assertEquals("A Used Resource should be 4 GB", 4 * GB,
 cs.getQueue("a").getUsedResources().getMemorySize());
 assertEquals("B Used Resource should be 12 GB", 12 * GB,
 cs.getQueue("b").getUsedResources().getMemorySize());


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-10956. Remove rename/delete performance penalty when not using snapshots. Contributed by Daryn Sharp.

2016-10-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 4ea1e73e5 -> d03240e0b


HDFS-10956. Remove rename/delete performance penalty when not using snapshots. 
Contributed by Daryn Sharp.

(cherry picked from commit 44f48ee96ee6b2a3909911c37bfddb0c963d5ffc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d03240e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d03240e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d03240e0

Branch: refs/heads/branch-2.8
Commit: d03240e0bf5c5ff76b0df6a309fb29c7248377c1
Parents: 4ea1e73
Author: Kihwal Lee 
Authored: Tue Oct 4 15:29:39 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Oct 4 15:29:39 2016 -0500

--
 .../hdfs/server/namenode/FSDirDeleteOp.java |  4 ++--
 .../hdfs/server/namenode/FSDirRenameOp.java | 12 +--
 .../hdfs/server/namenode/FSDirSnapshotOp.java   | 22 ++--
 .../server/namenode/TestSnapshotPathINodes.java | 16 ++
 4 files changed, 44 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d03240e0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 8eb3a40..7d57ee0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -57,7 +57,7 @@ class FSDirDeleteOp {
 try {
   if (deleteAllowed(iip, iip.getPath()) ) {
 List snapshottableDirs = new ArrayList<>();
-FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
+FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
 ReclaimContext context = new ReclaimContext(
 fsd.getBlockStoragePolicySuite(), collectedBlocks, removedINodes,
 removedUCFiles);
@@ -140,7 +140,7 @@ class FSDirDeleteOp {
   return;
 }
 List snapshottableDirs = new ArrayList<>();
-FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
+FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
 boolean filesRemoved = unprotectedDelete(fsd, iip,
 new ReclaimContext(fsd.getBlockStoragePolicySuite(),
 collectedBlocks, removedINodes, removedUCFiles),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d03240e0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 41971a7..64cd500 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -156,7 +156,7 @@ class FSDirRenameOp {
 assert fsd.hasWriteLock();
 final INode srcInode = srcIIP.getLastINode();
 try {
-  validateRenameSource(srcIIP);
+  validateRenameSource(fsd, srcIIP);
 } catch (SnapshotException e) {
   throw e;
 } catch (IOException ignored) {
@@ -346,7 +346,7 @@ class FSDirRenameOp {
 final String dst = dstIIP.getPath();
 final String error;
 final INode srcInode = srcIIP.getLastINode();
-validateRenameSource(srcIIP);
+validateRenameSource(fsd, srcIIP);
 
 // validate the destination
 if (dst.equals(src)) {
@@ -368,7 +368,7 @@ class FSDirRenameOp {
 List snapshottableDirs = new ArrayList<>();
 if (dstInode != null) { // Destination exists
   validateOverwrite(src, dst, overwrite, srcInode, dstInode);
-  FSDirSnapshotOp.checkSnapshot(dstInode, snapshottableDirs);
+  FSDirSnapshotOp.checkSnapshot(fsd, dstIIP, snapshottableDirs);
 }
 
 INode dstParent = dstIIP.getINode(-2);
@@ -540,8 +540,8 @@ class FSDirRenameOp {
 }
   }
 
-  private static void validateRenameSource(INodesInPath srcIIP)
-  throws IOException {
+  private static void validateRenameSource(FSDirectory fsd,
+  INodesInPath srcIIP) throws IOException {
 String error;
 final INode srcInode = srcIIP.getLastINode();
 // validate source
@@ -559,7 +559,7 @@ class FSDirRenameOp {
 }
 // srcInode and its subtree cannot contain snapshot

hadoop git commit: HDFS-10956. Remove rename/delete performance penalty when not using snapshots. Contributed by Daryn Sharp.

2016-10-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 213f0ad7f -> b565fa8e0


HDFS-10956. Remove rename/delete performance penalty when not using snapshots. 
Contributed by Daryn Sharp.

(cherry picked from commit 44f48ee96ee6b2a3909911c37bfddb0c963d5ffc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b565fa8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b565fa8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b565fa8e

Branch: refs/heads/branch-2
Commit: b565fa8e05eee0ce85cc47d0d838726842d098b3
Parents: 213f0ad
Author: Kihwal Lee 
Authored: Tue Oct 4 15:26:42 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Oct 4 15:26:42 2016 -0500

--
 .../hdfs/server/namenode/FSDirDeleteOp.java |  4 ++--
 .../hdfs/server/namenode/FSDirRenameOp.java | 12 +--
 .../hdfs/server/namenode/FSDirSnapshotOp.java   | 22 ++--
 .../server/namenode/TestSnapshotPathINodes.java | 16 ++
 4 files changed, 44 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b565fa8e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 13f1092..21ee3ce 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -57,7 +57,7 @@ class FSDirDeleteOp {
 try {
   if (deleteAllowed(iip, iip.getPath()) ) {
 List snapshottableDirs = new ArrayList<>();
-FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
+FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
 ReclaimContext context = new ReclaimContext(
 fsd.getBlockStoragePolicySuite(), collectedBlocks, removedINodes,
 removedUCFiles);
@@ -140,7 +140,7 @@ class FSDirDeleteOp {
   return;
 }
 List snapshottableDirs = new ArrayList<>();
-FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
+FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
 boolean filesRemoved = unprotectedDelete(fsd, iip,
 new ReclaimContext(fsd.getBlockStoragePolicySuite(),
 collectedBlocks, removedINodes, removedUCFiles),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b565fa8e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 0fdc545..911b178 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -156,7 +156,7 @@ class FSDirRenameOp {
 assert fsd.hasWriteLock();
 final INode srcInode = srcIIP.getLastINode();
 try {
-  validateRenameSource(srcIIP);
+  validateRenameSource(fsd, srcIIP);
 } catch (SnapshotException e) {
   throw e;
 } catch (IOException ignored) {
@@ -365,7 +365,7 @@ class FSDirRenameOp {
 final String dst = dstIIP.getPath();
 final String error;
 final INode srcInode = srcIIP.getLastINode();
-validateRenameSource(srcIIP);
+validateRenameSource(fsd, srcIIP);
 
 // validate the destination
 if (dst.equals(src)) {
@@ -387,7 +387,7 @@ class FSDirRenameOp {
 List snapshottableDirs = new ArrayList<>();
 if (dstInode != null) { // Destination exists
   validateOverwrite(src, dst, overwrite, srcInode, dstInode);
-  FSDirSnapshotOp.checkSnapshot(dstInode, snapshottableDirs);
+  FSDirSnapshotOp.checkSnapshot(fsd, dstIIP, snapshottableDirs);
 }
 
 INode dstParent = dstIIP.getINode(-2);
@@ -559,8 +559,8 @@ class FSDirRenameOp {
 }
   }
 
-  private static void validateRenameSource(INodesInPath srcIIP)
-  throws IOException {
+  private static void validateRenameSource(FSDirectory fsd,
+  INodesInPath srcIIP) throws IOException {
 String error;
 final INode srcInode = srcIIP.getLastINode();
 // validate source
@@ -578,7 +578,7 @@ class FSDirRenameOp {
 }
 // srcInode and its subtree cannot contain snapshottabl

hadoop git commit: HDFS-10956. Remove rename/delete performance penalty when not using snapshots. Contributed by Daryn Sharp.

2016-10-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 88b9444a8 -> 44f48ee96


HDFS-10956. Remove rename/delete performance penalty when not using snapshots. 
Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44f48ee9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44f48ee9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44f48ee9

Branch: refs/heads/trunk
Commit: 44f48ee96ee6b2a3909911c37bfddb0c963d5ffc
Parents: 88b9444
Author: Kihwal Lee 
Authored: Tue Oct 4 15:05:09 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Oct 4 15:05:09 2016 -0500

--
 .../hdfs/server/namenode/FSDirDeleteOp.java |  4 ++--
 .../hdfs/server/namenode/FSDirRenameOp.java | 12 +--
 .../hdfs/server/namenode/FSDirSnapshotOp.java   | 22 ++--
 .../server/namenode/TestSnapshotPathINodes.java | 16 ++
 4 files changed, 44 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44f48ee9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 13f1092..21ee3ce 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -57,7 +57,7 @@ class FSDirDeleteOp {
 try {
   if (deleteAllowed(iip, iip.getPath()) ) {
 List snapshottableDirs = new ArrayList<>();
-FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
+FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
 ReclaimContext context = new ReclaimContext(
 fsd.getBlockStoragePolicySuite(), collectedBlocks, removedINodes,
 removedUCFiles);
@@ -140,7 +140,7 @@ class FSDirDeleteOp {
   return;
 }
 List snapshottableDirs = new ArrayList<>();
-FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
+FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
 boolean filesRemoved = unprotectedDelete(fsd, iip,
 new ReclaimContext(fsd.getBlockStoragePolicySuite(),
 collectedBlocks, removedINodes, removedUCFiles),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44f48ee9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 0fdc545..911b178 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -156,7 +156,7 @@ class FSDirRenameOp {
 assert fsd.hasWriteLock();
 final INode srcInode = srcIIP.getLastINode();
 try {
-  validateRenameSource(srcIIP);
+  validateRenameSource(fsd, srcIIP);
 } catch (SnapshotException e) {
   throw e;
 } catch (IOException ignored) {
@@ -365,7 +365,7 @@ class FSDirRenameOp {
 final String dst = dstIIP.getPath();
 final String error;
 final INode srcInode = srcIIP.getLastINode();
-validateRenameSource(srcIIP);
+validateRenameSource(fsd, srcIIP);
 
 // validate the destination
 if (dst.equals(src)) {
@@ -387,7 +387,7 @@ class FSDirRenameOp {
 List snapshottableDirs = new ArrayList<>();
 if (dstInode != null) { // Destination exists
   validateOverwrite(src, dst, overwrite, srcInode, dstInode);
-  FSDirSnapshotOp.checkSnapshot(dstInode, snapshottableDirs);
+  FSDirSnapshotOp.checkSnapshot(fsd, dstIIP, snapshottableDirs);
 }
 
 INode dstParent = dstIIP.getINode(-2);
@@ -559,8 +559,8 @@ class FSDirRenameOp {
 }
   }
 
-  private static void validateRenameSource(INodesInPath srcIIP)
-  throws IOException {
+  private static void validateRenameSource(FSDirectory fsd,
+  INodesInPath srcIIP) throws IOException {
 String error;
 final INode srcInode = srcIIP.getLastINode();
 // validate source
@@ -578,7 +578,7 @@ class FSDirRenameOp {
 }
 // srcInode and its subtree cannot contain snapshottable directories with
 // snapshots
-FSDirSnapshotOp.checkSnapshot(srcI

[3/3] hadoop git commit: HADOOP-13674. S3A can provide a more detailed error message when accessing a bucket through an incorrect S3 endpoint. Contributed by Chris Nauroth.

2016-10-04 Thread cnauroth
HADOOP-13674. S3A can provide a more detailed error message when accessing a 
bucket through an incorrect S3 endpoint. Contributed by Chris Nauroth.

(cherry picked from commit 88b9444a81081da9b168d2e290f9552b58a4d8c6)
(cherry picked from commit 213f0ad7f0e846cf8023b12cf8e5f804950b1aa1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4ea1e73e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4ea1e73e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4ea1e73e

Branch: refs/heads/branch-2.8
Commit: 4ea1e73e5b4057efd2b8c495ba79c1b72f83b1c1
Parents: 33cafc9
Author: Chris Nauroth 
Authored: Tue Oct 4 10:36:58 2016 -0700
Committer: Chris Nauroth 
Committed: Tue Oct 4 10:37:10 2016 -0700

--
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  17 +++
 .../src/site/markdown/tools/hadoop-aws/index.md |  42 +++---
 .../fs/s3a/TestS3AExceptionTranslation.java | 127 +++
 .../hadoop/fs/s3a/TestS3AFailureHandling.java   |  55 
 4 files changed, 162 insertions(+), 79 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ea1e73e/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index 80841f9..4b7db39 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -45,6 +45,7 @@ import java.util.concurrent.ExecutionException;
 
 import static org.apache.hadoop.fs.s3a.Constants.ACCESS_KEY;
 import static org.apache.hadoop.fs.s3a.Constants.AWS_CREDENTIALS_PROVIDER;
+import static org.apache.hadoop.fs.s3a.Constants.ENDPOINT;
 import static org.apache.hadoop.fs.s3a.Constants.SECRET_KEY;
 
 /**
@@ -61,6 +62,7 @@ public final class S3AUtils {
   = "instantiation exception";
   static final String NOT_AWS_PROVIDER =
   "does not implement AWSCredentialsProvider";
+  static final String ENDPOINT_KEY = "Endpoint";
 
   private S3AUtils() {
   }
@@ -114,6 +116,21 @@ public final class S3AUtils {
   int status = ase.getStatusCode();
   switch (status) {
 
+  case 301:
+if (s3Exception != null) {
+  if (s3Exception.getAdditionalDetails() != null &&
+  s3Exception.getAdditionalDetails().containsKey(ENDPOINT_KEY)) {
+message = String.format("Received permanent redirect response to "
++ "endpoint %s.  This likely indicates that the S3 endpoint "
++ "configured in %s does not match the AWS region containing "
++ "the bucket.",
+s3Exception.getAdditionalDetails().get(ENDPOINT_KEY), 
ENDPOINT);
+  }
+  ioe = new AWSS3IOException(message, s3Exception);
+} else {
+  ioe = new AWSServiceIOException(message, ase);
+}
+break;
   // permissions
   case 401:
   case 403:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ea1e73e/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index e1029c1..5270dfa 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -1213,33 +1213,27 @@ As an example, the endpoint for S3 Frankfurt is 
`s3.eu-central-1.amazonaws.com`:
 
 ### Error message "The bucket you are attempting to access must be addressed 
using the specified endpoint"
 
-This surfaces when `fs.s3a.endpoint` is configured to use S3 service endpoint
+This surfaces when `fs.s3a.endpoint` is configured to use an S3 service 
endpoint
 which is neither the original AWS one, `s3.amazonaws.com` , nor the one where
-the bucket is hosted.
+the bucket is hosted.  The error message contains the redirect target returned
+by S3, which can be used to determine the correct value for `fs.s3a.endpoint`.
 
 ```
-org.apache.hadoop.fs.s3a.AWSS3IOException: purging multipart uploads on 
landsat-pds:
- com.amazonaws.services.s3.model.AmazonS3Exception:
-  The bucket you are attempting to access must be addressed using the 
specified endpoint.
-  Please send all future requests to this endpoint.
-   (Service: Amazon S3; Status Code: 301; Error Code: PermanentRedirect; 
Request ID: 5B7A5D18BE596E4B),
-S3 Extended Request ID: 
uE4pbbmpxi8Nh7rycS6GfIEi9UH/SWmJfGtM9IeKvRyBPZp/hN7DbPyz272eynz3PEMM2azlhjE=:
-
-   

[1/3] hadoop git commit: HADOOP-13674. S3A can provide a more detailed error message when accessing a bucket through an incorrect S3 endpoint. Contributed by Chris Nauroth.

2016-10-04 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a9a2f64e5 -> 213f0ad7f
  refs/heads/branch-2.8 33cafc9fc -> 4ea1e73e5
  refs/heads/trunk 382307cbd -> 88b9444a8


HADOOP-13674. S3A can provide a more detailed error message when accessing a 
bucket through an incorrect S3 endpoint. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88b9444a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88b9444a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88b9444a

Branch: refs/heads/trunk
Commit: 88b9444a81081da9b168d2e290f9552b58a4d8c6
Parents: 382307c
Author: Chris Nauroth 
Authored: Tue Oct 4 10:36:58 2016 -0700
Committer: Chris Nauroth 
Committed: Tue Oct 4 10:36:58 2016 -0700

--
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  17 +++
 .../src/site/markdown/tools/hadoop-aws/index.md |  42 +++---
 .../hadoop/fs/s3a/ITestS3AFailureHandling.java  |  55 
 .../fs/s3a/TestS3AExceptionTranslation.java | 127 +++
 4 files changed, 162 insertions(+), 79 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88b9444a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index a5e8e7a..93d819b 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -48,6 +48,7 @@ import java.util.concurrent.ExecutionException;
 
 import static org.apache.hadoop.fs.s3a.Constants.ACCESS_KEY;
 import static org.apache.hadoop.fs.s3a.Constants.AWS_CREDENTIALS_PROVIDER;
+import static org.apache.hadoop.fs.s3a.Constants.ENDPOINT;
 import static org.apache.hadoop.fs.s3a.Constants.SECRET_KEY;
 
 /**
@@ -64,6 +65,7 @@ public final class S3AUtils {
   = "instantiation exception";
   static final String NOT_AWS_PROVIDER =
   "does not implement AWSCredentialsProvider";
+  static final String ENDPOINT_KEY = "Endpoint";
 
   private S3AUtils() {
   }
@@ -117,6 +119,21 @@ public final class S3AUtils {
   int status = ase.getStatusCode();
   switch (status) {
 
+  case 301:
+if (s3Exception != null) {
+  if (s3Exception.getAdditionalDetails() != null &&
+  s3Exception.getAdditionalDetails().containsKey(ENDPOINT_KEY)) {
+message = String.format("Received permanent redirect response to "
++ "endpoint %s.  This likely indicates that the S3 endpoint "
++ "configured in %s does not match the AWS region containing "
++ "the bucket.",
+s3Exception.getAdditionalDetails().get(ENDPOINT_KEY), 
ENDPOINT);
+  }
+  ioe = new AWSS3IOException(message, s3Exception);
+} else {
+  ioe = new AWSServiceIOException(message, ase);
+}
+break;
   // permissions
   case 401:
   case 403:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88b9444a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 160aa46..cf785d5 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -1184,33 +1184,27 @@ As an example, the endpoint for S3 Frankfurt is 
`s3.eu-central-1.amazonaws.com`:
 
 ### Error message "The bucket you are attempting to access must be addressed 
using the specified endpoint"
 
-This surfaces when `fs.s3a.endpoint` is configured to use S3 service endpoint
+This surfaces when `fs.s3a.endpoint` is configured to use an S3 service 
endpoint
 which is neither the original AWS one, `s3.amazonaws.com` , nor the one where
-the bucket is hosted.
+the bucket is hosted.  The error message contains the redirect target returned
+by S3, which can be used to determine the correct value for `fs.s3a.endpoint`.
 
 ```
-org.apache.hadoop.fs.s3a.AWSS3IOException: purging multipart uploads on 
landsat-pds:
- com.amazonaws.services.s3.model.AmazonS3Exception:
-  The bucket you are attempting to access must be addressed using the 
specified endpoint.
-  Please send all future requests to this endpoint.
-   (Service: Amazon S3; Status Code: 301; Error Code: PermanentRedirect; 
Request ID: 5B7A5D18BE596E4B),
-S3 Extended Request ID: 
uE4pbbmpxi8Nh7rycS6GfIEi9UH/SWmJfGtM9IeKvRyBPZp/hN7DbPyz272

[2/3] hadoop git commit: HADOOP-13674. S3A can provide a more detailed error message when accessing a bucket through an incorrect S3 endpoint. Contributed by Chris Nauroth.

2016-10-04 Thread cnauroth
HADOOP-13674. S3A can provide a more detailed error message when accessing a 
bucket through an incorrect S3 endpoint. Contributed by Chris Nauroth.

(cherry picked from commit 88b9444a81081da9b168d2e290f9552b58a4d8c6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/213f0ad7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/213f0ad7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/213f0ad7

Branch: refs/heads/branch-2
Commit: 213f0ad7f0e846cf8023b12cf8e5f804950b1aa1
Parents: a9a2f64
Author: Chris Nauroth 
Authored: Tue Oct 4 10:36:58 2016 -0700
Committer: Chris Nauroth 
Committed: Tue Oct 4 10:37:04 2016 -0700

--
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  17 +++
 .../src/site/markdown/tools/hadoop-aws/index.md |  42 +++---
 .../hadoop/fs/s3a/ITestS3AFailureHandling.java  |  55 
 .../fs/s3a/TestS3AExceptionTranslation.java | 127 +++
 4 files changed, 162 insertions(+), 79 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/213f0ad7/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index a5e8e7a..93d819b 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -48,6 +48,7 @@ import java.util.concurrent.ExecutionException;
 
 import static org.apache.hadoop.fs.s3a.Constants.ACCESS_KEY;
 import static org.apache.hadoop.fs.s3a.Constants.AWS_CREDENTIALS_PROVIDER;
+import static org.apache.hadoop.fs.s3a.Constants.ENDPOINT;
 import static org.apache.hadoop.fs.s3a.Constants.SECRET_KEY;
 
 /**
@@ -64,6 +65,7 @@ public final class S3AUtils {
   = "instantiation exception";
   static final String NOT_AWS_PROVIDER =
   "does not implement AWSCredentialsProvider";
+  static final String ENDPOINT_KEY = "Endpoint";
 
   private S3AUtils() {
   }
@@ -117,6 +119,21 @@ public final class S3AUtils {
   int status = ase.getStatusCode();
   switch (status) {
 
+  case 301:
+if (s3Exception != null) {
+  if (s3Exception.getAdditionalDetails() != null &&
+  s3Exception.getAdditionalDetails().containsKey(ENDPOINT_KEY)) {
+message = String.format("Received permanent redirect response to "
++ "endpoint %s.  This likely indicates that the S3 endpoint "
++ "configured in %s does not match the AWS region containing "
++ "the bucket.",
+s3Exception.getAdditionalDetails().get(ENDPOINT_KEY), 
ENDPOINT);
+  }
+  ioe = new AWSS3IOException(message, s3Exception);
+} else {
+  ioe = new AWSServiceIOException(message, ase);
+}
+break;
   // permissions
   case 401:
   case 403:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/213f0ad7/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index ff840da..67972ca 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -1213,33 +1213,27 @@ As an example, the endpoint for S3 Frankfurt is 
`s3.eu-central-1.amazonaws.com`:
 
 ### Error message "The bucket you are attempting to access must be addressed 
using the specified endpoint"
 
-This surfaces when `fs.s3a.endpoint` is configured to use S3 service endpoint
+This surfaces when `fs.s3a.endpoint` is configured to use an S3 service 
endpoint
 which is neither the original AWS one, `s3.amazonaws.com` , nor the one where
-the bucket is hosted.
+the bucket is hosted.  The error message contains the redirect target returned
+by S3, which can be used to determine the correct value for `fs.s3a.endpoint`.
 
 ```
-org.apache.hadoop.fs.s3a.AWSS3IOException: purging multipart uploads on 
landsat-pds:
- com.amazonaws.services.s3.model.AmazonS3Exception:
-  The bucket you are attempting to access must be addressed using the 
specified endpoint.
-  Please send all future requests to this endpoint.
-   (Service: Amazon S3; Status Code: 301; Error Code: PermanentRedirect; 
Request ID: 5B7A5D18BE596E4B),
-S3 Extended Request ID: 
uE4pbbmpxi8Nh7rycS6GfIEi9UH/SWmJfGtM9IeKvRyBPZp/hN7DbPyz272eynz3PEMM2azlhjE=:
-
-   at 
com.amazonaws.http.AmazonHttpClient.handleErrorResponse(AmazonHttpC

hadoop git commit: HDFS-10878. TestDFSClientRetries#testIdempotentAllocateBlockAndClose throws ConcurrentModificationException. Contributed by Rushabh S Shah. Modified CHANGES.txt (cherry picked from

2016-10-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 c08346ea2 -> 3eff0873f


HDFS-10878. TestDFSClientRetries#testIdempotentAllocateBlockAndClose throws 
ConcurrentModificationException. Contributed by Rushabh S Shah.
Modified CHANGES.txt
(cherry picked from commit 382307cbdd94107350fe6fad1acf87d63c9be9d6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3eff0873
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3eff0873
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3eff0873

Branch: refs/heads/branch-2.7
Commit: 3eff0873f609ff574db27956d6740a87c7b9ea02
Parents: c08346e
Author: Kihwal Lee 
Authored: Tue Oct 4 11:16:43 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Oct 4 11:17:59 2016 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java   | 4 +---
 2 files changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3eff0873/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c0ba914..2e9ce7f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -35,6 +35,9 @@ Release 2.7.4 - UNRELEASED
HDFS-10889. Remove outdated Fault Injection Framework documentation.
 (Brahma Reddy Battula)
 
+HDFS-10878. TestDFSClientRetries#testIdempotentAllocateBlockAndClose
throws ConcurrentModificationException. (Rushabh S Shah via kihwal)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3eff0873/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 599adbf..7120d78 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -102,7 +102,6 @@ import 
org.mockito.internal.stubbing.answers.ThrowsException;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
-import com.google.common.base.Joiner;
 
 /**
  * These tests make sure that DFSClient retries fetching data from DFS
@@ -481,8 +480,7 @@ public class TestDFSClientRetries {
   // complete() may return false a few times before it returns
   // true. We want to wait until it returns true, and then
   // make it retry one more time after that.
-  LOG.info("Called complete(: " +
-  Joiner.on(",").join(invocation.getArguments()) + ")");
+  LOG.info("Called complete:");
   if (!(Boolean)invocation.callRealMethod()) {
 LOG.info("Complete call returned false, not faking a retry RPC");
 return false;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-10878. TestDFSClientRetries#testIdempotentAllocateBlockAndClose throws ConcurrentModificationException. Contributed by Rushabh S Shah.

2016-10-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 253e2442e -> 33cafc9fc


HDFS-10878. TestDFSClientRetries#testIdempotentAllocateBlockAndClose throws 
ConcurrentModificationException. Contributed by Rushabh S Shah.

(cherry picked from commit 382307cbdd94107350fe6fad1acf87d63c9be9d6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33cafc9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33cafc9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33cafc9f

Branch: refs/heads/branch-2.8
Commit: 33cafc9fcdb3a7b8088daad8f9eeab978b850bf6
Parents: 253e244
Author: Kihwal Lee 
Authored: Tue Oct 4 11:13:39 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Oct 4 11:13:39 2016 -0500

--
 .../test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java   | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33cafc9f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 6325957..cf93943 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -103,7 +103,6 @@ import 
org.mockito.internal.stubbing.answers.ThrowsException;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
-import com.google.common.base.Joiner;
 
 /**
  * These tests make sure that DFSClient retries fetching data from DFS
@@ -485,8 +484,7 @@ public class TestDFSClientRetries {
   // complete() may return false a few times before it returns
   // true. We want to wait until it returns true, and then
   // make it retry one more time after that.
-  LOG.info("Called complete(: " +
-  Joiner.on(",").join(invocation.getArguments()) + ")");
+  LOG.info("Called complete:");
   if (!(Boolean)invocation.callRealMethod()) {
 LOG.info("Complete call returned false, not faking a retry RPC");
 return false;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-10878. TestDFSClientRetries#testIdempotentAllocateBlockAndClose throws ConcurrentModificationException. Contributed by Rushabh S Shah.

2016-10-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 560e524f9 -> a9a2f64e5


HDFS-10878. TestDFSClientRetries#testIdempotentAllocateBlockAndClose throws 
ConcurrentModificationException. Contributed by Rushabh S Shah.

(cherry picked from commit 382307cbdd94107350fe6fad1acf87d63c9be9d6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9a2f64e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9a2f64e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9a2f64e

Branch: refs/heads/branch-2
Commit: a9a2f64e56e1601cafbd5bde2d6aacae80e992ec
Parents: 560e524
Author: Kihwal Lee 
Authored: Tue Oct 4 11:10:29 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Oct 4 11:10:29 2016 -0500

--
 .../test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java   | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9a2f64e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 6325957..cf93943 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -103,7 +103,6 @@ import 
org.mockito.internal.stubbing.answers.ThrowsException;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
-import com.google.common.base.Joiner;
 
 /**
  * These tests make sure that DFSClient retries fetching data from DFS
@@ -485,8 +484,7 @@ public class TestDFSClientRetries {
   // complete() may return false a few times before it returns
   // true. We want to wait until it returns true, and then
   // make it retry one more time after that.
-  LOG.info("Called complete(: " +
-  Joiner.on(",").join(invocation.getArguments()) + ")");
+  LOG.info("Called complete:");
   if (!(Boolean)invocation.callRealMethod()) {
 LOG.info("Complete call returned false, not faking a retry RPC");
 return false;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-10878. TestDFSClientRetries#testIdempotentAllocateBlockAndClose throws ConcurrentModificationException. Contributed by Rushabh S Shah.

2016-10-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk ef7f06f7d -> 382307cbd


HDFS-10878. TestDFSClientRetries#testIdempotentAllocateBlockAndClose throws 
ConcurrentModificationException. Contributed by Rushabh S Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/382307cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/382307cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/382307cb

Branch: refs/heads/trunk
Commit: 382307cbdd94107350fe6fad1acf87d63c9be9d6
Parents: ef7f06f
Author: Kihwal Lee 
Authored: Tue Oct 4 11:09:03 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Oct 4 11:09:03 2016 -0500

--
 .../test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java   | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/382307cb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index c7997d7..6db70d5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -103,7 +103,6 @@ import 
org.mockito.internal.stubbing.answers.ThrowsException;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
-import com.google.common.base.Joiner;
 
 /**
  * These tests make sure that DFSClient retries fetching data from DFS
@@ -485,8 +484,7 @@ public class TestDFSClientRetries {
   // complete() may return false a few times before it returns
   // true. We want to wait until it returns true, and then
   // make it retry one more time after that.
-  LOG.info("Called complete(: " +
-  Joiner.on(",").join(invocation.getArguments()) + ")");
+  LOG.info("Called complete:");
   if (!(Boolean)invocation.callRealMethod()) {
 LOG.info("Complete call returned false, not faking a retry RPC");
 return false;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-5682. [YARN-3368] Fix maven build to keep all generated or downloaded files in target folder (Wangda Tan via Sunil G)

2016-10-04 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3368 c439ddf4a -> 424117bf3


YARN-5682. [YARN-3368] Fix maven build to keep all generated or downloaded 
files in target folder (Wangda Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/424117bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/424117bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/424117bf

Branch: refs/heads/YARN-3368
Commit: 424117bf3d3bb00c2f1b29ee3c633b2694e283a3
Parents: c439ddf
Author: sunilg 
Authored: Tue Oct 4 21:07:42 2016 +0530
Committer: sunilg 
Committed: Tue Oct 4 21:07:42 2016 +0530

--
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  | 54 
 hadoop-yarn-project/hadoop-yarn/pom.xml |  2 +-
 2 files changed, 34 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/424117bf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index b750a73..440aca9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -31,7 +31,7 @@
 
   
 war
-src/main/webapp
+${basedir}/target/src/main/webapp
 node
 v0.12.2
 2.10.0
@@ -84,10 +84,10 @@
   false
   
 
-  
${basedir}/src/main/webapp/bower_components
+  ${webappTgtDir}/bower_components
 
 
-  ${basedir}/src/main/webapp/node_modules
+  ${webappTgtDir}/node_modules
 
   
 
@@ -109,6 +109,33 @@
 
   
 
+  
+  
+org.apache.maven.plugins
+maven-antrun-plugin
+
+  
+prepare-source-code
+generate-sources
+
+  run
+
+
+  
+
+  
+
+
+
+  
+
+  
+
+  
+
+  
+
+
   
   
 exec-maven-plugin
@@ -121,7 +148,7 @@
   exec
 
 
-  ${webappDir}
+  ${webappTgtDir}
   npm
   
 install
@@ -135,7 +162,7 @@
   exec
 
 
-  ${webappDir}
+  ${webappTgtDir}
   bower
   
 --allow-root
@@ -150,7 +177,7 @@
   exec
 
 
-  ${webappDir}
+  ${webappTgtDir}
   ember
   
 build
@@ -160,21 +187,6 @@
   
 
   
-  
-cleanup tmp
-generate-sources
-
-  exec
-
-
-  ${webappDir}
-  rm
-  
--rf
-tmp
-  
-
-  
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/424117bf/hadoop-yarn-project/hadoop-yarn/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/pom.xml
index ca78ef8..70b68d7 100644
--- a/hadoop-yarn-project/hadoop-yarn/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/pom.xml
@@ -230,7 +230,6 @@
   
 
   
-hadoop-yarn-ui
 hadoop-yarn-api
 hadoop-yarn-common
 hadoop-yarn-server
@@ -238,5 +237,6 @@
 hadoop-yarn-site
 hadoop-yarn-client
 hadoop-yarn-registry
+hadoop-yarn-ui
   
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[47/50] [abbrv] hadoop git commit: YARN-5598. [YARN-3368] Fix create-release to be able to generate bits for the new yarn-ui (Wangda Tan via Sunil G)

2016-10-04 Thread sunilg
YARN-5598. [YARN-3368] Fix create-release to be able to generate bits for the 
new yarn-ui (Wangda Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88ca7e4b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88ca7e4b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88ca7e4b

Branch: refs/heads/YARN-3368
Commit: 88ca7e4b5e31d9776791f779f5651f6c92e06f12
Parents: 6552a25
Author: sunilg 
Authored: Tue Sep 6 23:15:59 2016 +0530
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 dev-support/bin/create-release |   2 +-
 dev-support/create-release.sh  | 144 
 dev-support/docker/Dockerfile  |   6 +-
 3 files changed, 6 insertions(+), 146 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88ca7e4b/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 0e0ab86..d40fffa 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -527,7 +527,7 @@ function makearelease
   # shellcheck disable=SC2046
   run_and_redirect "${LOGDIR}/mvn_install.log" \
 "${MVN}" "${MVN_ARGS[@]}" install \
-  -Pdist,src \
+  -Pdist,src,yarn-ui \
   "${signflags[@]}" \
   -DskipTests -Dtar $(hadoop_native_flags)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88ca7e4b/dev-support/create-release.sh
--
diff --git a/dev-support/create-release.sh b/dev-support/create-release.sh
deleted file mode 100755
index 792a805..000
--- a/dev-support/create-release.sh
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/bin/bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Function to probe the exit code of the script commands, 
-# and stop in the case of failure with a contextual error 
-# message.
-run() {
-  echo "\$ ${@}"
-  "${@}"
-  exitCode=$?
-  if [[ $exitCode != 0 ]]; then
-echo
-echo "Failed! running ${@} in `pwd`"
-echo
-exit $exitCode
-  fi
-}
-
-doMD5() {
-  MD5CMD="md5sum"
-  which $MD5CMD
-  if [[ $? != 0 ]]; then
-MD5CMD="md5"
-  fi
-  run $MD5CMD ${1} > ${1}.md5
-}
-
-# If provided, the created release artifacts will be tagged with it 
-# (use RC#, i.e., RC0). Do not use a label to create the final release 
-# artifact.
-RC_LABEL=$1
-
-# Extract Hadoop version from POM
-HADOOP_VERSION=`cat pom.xml | grep "<version>" | head -1 | sed 's|^ *<version>||' | sed 's|</version>.*$||'`
-
-# Setup git
-GIT=${GIT:-git}
-
-echo
-echo "*"
-echo
-echo "Hadoop version to create release artifacts: ${HADOOP_VERSION}"
-echo 
-echo "Release Candidate Label: ${RC_LABEL}"
-echo
-echo "*"
-echo
-
-if [[ ! -z ${RC_LABEL} ]]; then
-  RC_LABEL="-${RC_LABEL}"
-fi
-
-# Get Maven command
-if [ -z "$MAVEN_HOME" ]; then
-  MVN=mvn
-else
-  MVN=$MAVEN_HOME/bin/mvn
-fi
-
-ARTIFACTS_DIR="target/artifacts"
-
-# git clean to clear any remnants from previous build
-run ${GIT} clean -xdf
-
-# mvn clean for sanity
-run ${MVN} clean
-
-# Create staging dir for release artifacts
-run mkdir -p ${ARTIFACTS_DIR}
-
-# Create RAT report
-run ${MVN} apache-rat:check
-
-# Create SRC and BIN tarballs for release,
-# Using 'install' goal instead of 'package' so artifacts are available 
-# in the Maven local cache for the site generation
-run ${MVN} install -Pdist,src,native,yarn-ui -DskipTests -Dtar
-
-# Create site for release
-run ${MVN} site site:stage -Pdist -Psrc
-run mkdir -p target/staging/hadoop-project/hadoop-project-dist/hadoop-yarn
-run mkdir -p target/staging/hadoop-project/hadoop-project-dist/hadoop-mapreduce
-run cp ./hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html 
target/staging/hadoop-project/hadoop-project-dist/hadoop-common/
-run cp ./hadoop-common-project/hadoop-common/CHANGES.txt 
target/staging/hadoop-project/hadoop-project-dist/hadoop-common/
-run cp

[39/50] [abbrv] hadoop git commit: YARN-4849. Addendum patch to fix ASF warnings. (Wangda Tan via Sunil G)

2016-10-04 Thread sunilg
YARN-4849. Addendum patch to fix ASF warnings. (Wangda Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6552a25e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6552a25e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6552a25e

Branch: refs/heads/YARN-3368
Commit: 6552a25e6df523efdbcf0ff8b6483f9d63c6302e
Parents: f6195e4
Author: sunilg 
Authored: Wed Aug 31 23:43:02 2016 +0530
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 .../assets/images/datatables/Sorting icons.psd | Bin 27490 -> 0 bytes
 .../public/assets/images/datatables/favicon.ico| Bin 894 -> 0 bytes
 .../public/assets/images/datatables/sort_asc.png   | Bin 160 -> 0 bytes
 .../assets/images/datatables/sort_asc_disabled.png | Bin 148 -> 0 bytes
 .../public/assets/images/datatables/sort_both.png  | Bin 201 -> 0 bytes
 .../public/assets/images/datatables/sort_desc.png  | Bin 158 -> 0 bytes
 .../images/datatables/sort_desc_disabled.png   | Bin 146 -> 0 bytes
 7 files changed, 0 insertions(+), 0 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6552a25e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/Sorting
 icons.psd
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/Sorting
 icons.psd 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/Sorting
 icons.psd
deleted file mode 100644
index 53b2e06..000
Binary files 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/Sorting
 icons.psd and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6552a25e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/favicon.ico
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/favicon.ico
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/favicon.ico
deleted file mode 100644
index 6eeaa2a..000
Binary files 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/favicon.ico
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6552a25e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_asc.png
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_asc.png
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_asc.png
deleted file mode 100644
index e1ba61a..000
Binary files 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_asc.png
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6552a25e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_asc_disabled.png
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_asc_disabled.png
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_asc_disabled.png
deleted file mode 100644
index fb11dfe..000
Binary files 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_asc_disabled.png
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6552a25e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_both.png
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_both.png
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_both.png
deleted file mode 100644
index af5bc7c..000
Binary files 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_both.png
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6552a25e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_desc.png
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_desc.png
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_desc.png
deleted file mode 100644
index 0e156de..000
Binary files 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/assets/images/datatables/sort_desc.png
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6552a

[35/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-04 Thread sunilg
YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to 
mvn, and fix licenses. (wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6804e642
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6804e642
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6804e642

Branch: refs/heads/YARN-3368
Commit: 6804e642549c5d0aec36d9f98b2d4d312bc3f849
Parents: ade6125
Author: Wangda Tan 
Authored: Mon Mar 21 14:03:13 2016 -0700
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 .gitignore  |  13 +
 BUILDING.txt|   4 +-
 LICENSE.txt |  80 +
 dev-support/create-release.sh   | 144 +
 dev-support/docker/Dockerfile   |   5 +
 .../src/site/markdown/YarnUI2.md|  43 +++
 .../hadoop-yarn/hadoop-yarn-ui/.bowerrc |   4 -
 .../hadoop-yarn/hadoop-yarn-ui/.editorconfig|  34 ---
 .../hadoop-yarn/hadoop-yarn-ui/.ember-cli   |  11 -
 .../hadoop-yarn/hadoop-yarn-ui/.gitignore   |  17 --
 .../hadoop-yarn/hadoop-yarn-ui/.jshintrc|  32 --
 .../hadoop-yarn/hadoop-yarn-ui/.travis.yml  |  23 --
 .../hadoop-yarn/hadoop-yarn-ui/.watchmanconfig  |   3 -
 .../hadoop-yarn/hadoop-yarn-ui/README.md|  24 --
 .../hadoop-yarn-ui/app/adapters/cluster-info.js |  20 --
 .../app/adapters/cluster-metric.js  |  20 --
 .../app/adapters/yarn-app-attempt.js|  32 --
 .../hadoop-yarn-ui/app/adapters/yarn-app.js |  26 --
 .../app/adapters/yarn-container-log.js  |  74 -
 .../app/adapters/yarn-container.js  |  43 ---
 .../app/adapters/yarn-node-app.js   |  63 
 .../app/adapters/yarn-node-container.js |  64 
 .../hadoop-yarn-ui/app/adapters/yarn-node.js|  40 ---
 .../hadoop-yarn-ui/app/adapters/yarn-queue.js   |  20 --
 .../hadoop-yarn-ui/app/adapters/yarn-rm-node.js |  45 ---
 .../hadoop-yarn/hadoop-yarn-ui/app/app.js   |  20 --
 .../hadoop-yarn-ui/app/components/.gitkeep  |   0
 .../app/components/app-attempt-table.js |   4 -
 .../hadoop-yarn-ui/app/components/app-table.js  |   4 -
 .../hadoop-yarn-ui/app/components/bar-chart.js  | 104 ---
 .../app/components/base-chart-component.js  | 109 ---
 .../app/components/container-table.js   |   4 -
 .../app/components/donut-chart.js   | 148 --
 .../app/components/item-selector.js |  21 --
 .../app/components/queue-configuration-table.js |   4 -
 .../app/components/queue-navigator.js   |   4 -
 .../hadoop-yarn-ui/app/components/queue-view.js | 272 -
 .../app/components/simple-table.js  |  58 
 .../app/components/timeline-view.js | 250 
 .../app/components/tree-selector.js | 257 
 .../hadoop-yarn/hadoop-yarn-ui/app/config.js|  27 --
 .../hadoop-yarn/hadoop-yarn-ui/app/constants.js |  24 --
 .../hadoop-yarn-ui/app/controllers/.gitkeep |   0
 .../app/controllers/application.js  |  55 
 .../app/controllers/cluster-overview.js |   5 -
 .../hadoop-yarn-ui/app/controllers/yarn-apps.js |   4 -
 .../app/controllers/yarn-queue.js   |   6 -
 .../hadoop-yarn-ui/app/helpers/.gitkeep |   0
 .../hadoop-yarn-ui/app/helpers/divide.js|  31 --
 .../app/helpers/log-files-comma.js  |  48 ---
 .../hadoop-yarn-ui/app/helpers/node-link.js |  37 ---
 .../hadoop-yarn-ui/app/helpers/node-menu.js |  66 -
 .../hadoop-yarn/hadoop-yarn-ui/app/index.html   |  25 --
 .../hadoop-yarn-ui/app/models/.gitkeep  |   0
 .../hadoop-yarn-ui/app/models/cluster-info.js   |  13 -
 .../hadoop-yarn-ui/app/models/cluster-metric.js | 115 
 .../app/models/yarn-app-attempt.js  |  44 ---
 .../hadoop-yarn-ui/app/models/yarn-app.js   |  65 -
 .../app/models/yarn-container-log.js|  25 --
 .../hadoop-yarn-ui/app/models/yarn-container.js |  39 ---
 .../hadoop-yarn-ui/app/models/yarn-node-app.js  |  44 ---
 .../app/models/yarn-node-container.js   |  57 
 .../hadoop-yarn-ui/app/models/yarn-node.js  |  33 ---
 .../hadoop-yarn-ui/app/models/yarn-queue.js |  76 -
 .../hadoop-yarn-ui/app/models/yarn-rm-node.js   |  92 --
 .../hadoop-yarn-ui/app/models/yarn-user.js  |   8 -
 .../hadoop-yarn/hadoop-yarn-ui/app/router.js|  29 --
 .../hadoop-yarn-ui/app/routes/.gitkeep  |   0
 .../hadoop-yarn-ui/app/routes/application.js|  38 ---
 .../app/routes/cluster-overview.js  |  11 -
 .../hadoop-yarn-ui/app/routes/index.js  |  29 --
 .../app/routes/yarn-app-attempt.js  |  21 --
 .../hadoop-yarn-ui/app/routes/yarn-app.js   |  10 -
 .../hadoop-yarn-ui/

[48/50] [abbrv] hadoop git commit: YARN-4517. Add nodes page and fix bunch of license issues. (Varun Saxena via wangda)

2016-10-04 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade61259/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-app-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-app-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-app-test.js
new file mode 100644
index 000..21a715c
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-app-test.js
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('serializer:yarn-node-app', 'Unit | Serializer | NodeApp', {
+});
+
+test('Basic creation test', function(assert) {
+  let serializer = this.subject();
+
+  assert.ok(serializer);
+  assert.ok(serializer.normalizeSingleResponse);
+  assert.ok(serializer.normalizeArrayResponse);
+  assert.ok(serializer.internalNormalizeSingleResponse);
+});
+
+test('normalizeArrayResponse test', function(assert) {
+  let serializer = this.subject(),
+  modelClass = {
+modelName: "yarn-node-app"
+  },
+  payload = {
+apps: {
+  app: [{
+id:"application_1456251210105_0001", state:"FINISHED", user:"root"
+  },{
+id:"application_1456251210105_0002", state:"RUNNING",user:"root",
+containerids:["container_e38_1456251210105_0002_01_01",
+"container_e38_1456251210105_0002_01_02"]
+  }]
+}
+  };
+  assert.expect(15);
+  var response =
+  serializer.normalizeArrayResponse({}, modelClass, payload, null, null);
+  assert.ok(response.data);
+  assert.equal(response.data.length, 2);
+  assert.equal(response.data[0].attributes.containers, undefined);
+  assert.equal(response.data[1].attributes.containers.length, 2);
+  assert.deepEqual(response.data[1].attributes.containers,
+  payload.apps.app[1].containerids);
+  for (var i = 0; i < 2; i++) {
+assert.equal(response.data[i].type, modelClass.modelName);
+assert.equal(response.data[i].id, payload.apps.app[i].id);
+assert.equal(response.data[i].attributes.appId, payload.apps.app[i].id);
+assert.equal(response.data[i].attributes.state, payload.apps.app[i].state);
+assert.equal(response.data[i].attributes.user, payload.apps.app[i].user);
+  }
+});
+
+test('normalizeArrayResponse no apps test', function(assert) {
+  let serializer = this.subject(),
+  modelClass = {
+modelName: "yarn-node-app"
+  },
+  payload = { apps: null };
+  assert.expect(5);
+  var response =
+  serializer.normalizeArrayResponse({}, modelClass, payload, null, null);
+  assert.ok(response.data);
+  assert.equal(response.data.length, 1);
+  assert.equal(response.data[0].type, modelClass.modelName);
+  assert.equal(response.data[0].id, "dummy");
+  assert.equal(response.data[0].attributes.appId, undefined);
+});
+
+test('normalizeSingleResponse test', function(assert) {
+  let serializer = this.subject(),
+  modelClass = {
+modelName: "yarn-node-app"
+  },
+  payload = {
+app: {id:"application_1456251210105_0001", state:"FINISHED", user:"root"}
+  };
+  assert.expect(7);
+  var response =
+  serializer.normalizeSingleResponse({}, modelClass, payload, null, null);
+  assert.ok(response.data);
+  assert.equal(payload.app.id, response.data.id);
+  assert.equal(modelClass.modelName, response.data.type);
+  assert.equal(payload.app.id, response.data.attributes.appId);
+  assert.equal(payload.app.state, response.data.attributes.state);
+  assert.equal(payload.app.user, response.data.attributes.user);
+  assert.equal(response.data.attributes.containers, undefined);
+});
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade61259/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-container-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-container-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/serializers/yarn-node-container-test.js
new file mode 100644
index 000..1f08467
--- /dev/nu

[45/50] [abbrv] hadoop git commit: YARN-4514. [YARN-3368] Cleanup hardcoded configurations, such as RM/ATS addresses. (Sunil G via wangda)

2016-10-04 Thread sunilg
YARN-4514. [YARN-3368] Cleanup hardcoded configurations, such as RM/ATS 
addresses. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/506416ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/506416ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/506416ab

Branch: refs/heads/YARN-3368
Commit: 506416ab4b3bbfab0aec2771b879641bbb3ed528
Parents: 6804e64
Author: Wangda Tan 
Authored: Sat Apr 16 23:04:45 2016 -0700
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 .../src/main/webapp/app/adapters/abstract.js| 48 +
 .../main/webapp/app/adapters/cluster-info.js| 22 ++
 .../main/webapp/app/adapters/cluster-metric.js  | 22 ++
 .../webapp/app/adapters/yarn-app-attempt.js | 24 ++-
 .../src/main/webapp/app/adapters/yarn-app.js| 27 ++-
 .../webapp/app/adapters/yarn-container-log.js   | 10 ++-
 .../main/webapp/app/adapters/yarn-container.js  | 20 +++---
 .../main/webapp/app/adapters/yarn-node-app.js   | 24 +++
 .../webapp/app/adapters/yarn-node-container.js  | 24 +++
 .../src/main/webapp/app/adapters/yarn-node.js   | 23 +++---
 .../src/main/webapp/app/adapters/yarn-queue.js  | 22 ++
 .../main/webapp/app/adapters/yarn-rm-node.js| 21 ++
 .../hadoop-yarn-ui/src/main/webapp/app/app.js   |  4 +-
 .../src/main/webapp/app/config.js   |  5 +-
 .../src/main/webapp/app/index.html  |  1 +
 .../src/main/webapp/app/initializers/env.js | 29 
 .../src/main/webapp/app/initializers/hosts.js   | 28 
 .../src/main/webapp/app/services/env.js | 59 
 .../src/main/webapp/app/services/hosts.js   | 74 
 .../hadoop-yarn-ui/src/main/webapp/bower.json   | 25 +++
 .../src/main/webapp/config/configs.env  | 48 +
 .../src/main/webapp/config/default-config.js| 32 +
 .../src/main/webapp/config/environment.js   | 11 ++-
 .../src/main/webapp/ember-cli-build.js  | 10 ++-
 .../hadoop-yarn-ui/src/main/webapp/package.json | 35 -
 .../webapp/tests/unit/initializers/env-test.js  | 41 +++
 .../tests/unit/initializers/hosts-test.js   | 41 +++
 .../tests/unit/initializers/jquery-test.js  | 41 +++
 .../main/webapp/tests/unit/services/env-test.js | 30 
 .../webapp/tests/unit/services/hosts-test.js| 30 
 30 files changed, 637 insertions(+), 194 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/506416ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/abstract.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/abstract.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/abstract.js
new file mode 100644
index 000..c7e5c36
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/abstract.js
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import Ember from 'ember';
+
+export default DS.JSONAPIAdapter.extend({
+  address: null, //Must be set by inheriting classes
+  restNameSpace: null, //Must be set by inheriting classes
+  serverName: null, //Must be set by inheriting classes
+
+  headers: {
+Accept: 'application/json'
+  },
+
+  host: Ember.computed("address", function () {
+var address = this.get("address");
+return this.get(`hosts.${address}`);
+  }),
+
+  namespace: Ember.computed("restNameSpace", function () {
+var serverName = this.get("restNameSpace");
+return this.get(`env.app.namespaces.${serverName}`);
+  }),
+
+  ajax: function(url, method, options) {
+options = options || {};
+options.crossDomain = true;
+options.xhrFields = {
+  withCredentials: true
+};
+options.targetServer = this.get('serverName');
+return this._super(url, method, options);
+  }
+});
\ No newline at end of file

htt

[30/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-04 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6804e642/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
new file mode 100644
index 000..89858bf
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Route.extend({
+  model(param) {
+return Ember.RSVP.hash({
+  selected : param.queue_name,
+  queues: this.store.findAll('yarnQueue'),
+  selectedQueue : undefined,
+  apps: undefined, // apps of selected queue
+});
+  },
+
+  afterModel(model) {
+model.selectedQueue = this.store.peekRecord('yarnQueue', model.selected);
+model.apps = this.store.findAll('yarnApp');
+model.apps.forEach(function(o) {
+  console.log(o);
+})
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6804e642/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
new file mode 100644
index 000..7da6f6d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export default Ember.Route.extend({
+  beforeModel() {
+this.transitionTo('yarnQueues.root');
+  }
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6804e642/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
new file mode 100644
index 000..3686c83
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import

[46/50] [abbrv] hadoop git commit: YARN-3334. [YARN-3368] Introduce REFRESH button in various UI pages (Sreenath Somarajapuram via Sunil G)

2016-10-04 Thread sunilg
YARN-3334. [YARN-3368] Introduce REFRESH button in various UI pages (Sreenath 
Somarajapuram via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61aa1256
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61aa1256
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61aa1256

Branch: refs/heads/YARN-3368
Commit: 61aa12562f7f278c4a2c97eca91a463b33e07584
Parents: 5387460
Author: sunilg 
Authored: Wed Aug 10 06:53:13 2016 +0530
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 .../app/components/app-usage-donut-chart.js |  5 ---
 .../src/main/webapp/app/components/bar-chart.js |  4 +-
 .../webapp/app/components/breadcrumb-bar.js | 31 ++
 .../main/webapp/app/components/donut-chart.js   |  8 ++--
 .../app/components/queue-usage-donut-chart.js   |  2 +-
 .../app/controllers/yarn-container-log.js   | 40 ++
 .../webapp/app/controllers/yarn-node-app.js | 36 
 .../src/main/webapp/app/routes/abstract.js  | 32 +++
 .../main/webapp/app/routes/cluster-overview.js  | 12 +-
 .../main/webapp/app/routes/yarn-app-attempt.js  |  9 +++-
 .../main/webapp/app/routes/yarn-app-attempts.js |  8 +++-
 .../src/main/webapp/app/routes/yarn-app.js  | 11 -
 .../src/main/webapp/app/routes/yarn-apps.js |  9 +++-
 .../webapp/app/routes/yarn-container-log.js | 10 -
 .../src/main/webapp/app/routes/yarn-node-app.js |  8 +++-
 .../main/webapp/app/routes/yarn-node-apps.js|  8 +++-
 .../webapp/app/routes/yarn-node-container.js|  8 +++-
 .../webapp/app/routes/yarn-node-containers.js   |  8 +++-
 .../src/main/webapp/app/routes/yarn-node.js |  9 +++-
 .../src/main/webapp/app/routes/yarn-nodes.js|  9 +++-
 .../main/webapp/app/routes/yarn-queue-apps.js   | 12 --
 .../src/main/webapp/app/routes/yarn-queue.js| 14 ---
 .../src/main/webapp/app/routes/yarn-queues.js   | 14 ---
 .../src/main/webapp/app/styles/app.css  |  6 +++
 .../webapp/app/templates/cluster-overview.hbs   |  4 +-
 .../app/templates/components/breadcrumb-bar.hbs | 22 ++
 .../webapp/app/templates/yarn-app-attempt.hbs   |  4 +-
 .../webapp/app/templates/yarn-app-attempts.hbs  |  4 +-
 .../src/main/webapp/app/templates/yarn-app.hbs  |  4 +-
 .../src/main/webapp/app/templates/yarn-apps.hbs |  4 +-
 .../webapp/app/templates/yarn-container-log.hbs |  2 +
 .../main/webapp/app/templates/yarn-node-app.hbs |  2 +
 .../webapp/app/templates/yarn-node-apps.hbs |  4 +-
 .../app/templates/yarn-node-container.hbs   |  4 +-
 .../app/templates/yarn-node-containers.hbs  |  4 +-
 .../src/main/webapp/app/templates/yarn-node.hbs |  4 +-
 .../main/webapp/app/templates/yarn-nodes.hbs|  4 +-
 .../webapp/app/templates/yarn-queue-apps.hbs|  4 +-
 .../main/webapp/app/templates/yarn-queue.hbs|  4 +-
 .../main/webapp/app/templates/yarn-queues.hbs   |  4 +-
 .../components/breadcrumb-bar-test.js   | 43 
 .../unit/controllers/yarn-container-log-test.js | 30 ++
 .../unit/controllers/yarn-node-app-test.js  | 30 ++
 43 files changed, 417 insertions(+), 77 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61aa1256/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
index 0baf630..90f41fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
@@ -26,7 +26,6 @@ export default BaseUsageDonutChart.extend({
   colors: d3.scale.category20().range(),
 
   draw: function() {
-this.initChart();
 var usageByApps = [];
 var avail = 100;
 
@@ -60,8 +59,4 @@ export default BaseUsageDonutChart.extend({
 this.renderDonutChart(usageByApps, this.get("title"), 
this.get("showLabels"),
   this.get("middleLabel"), "100%", "%");
   },
-
-  didInsertElement: function() {
-this.draw();
-  },
 })
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61aa1256/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/bar-chart.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/bar-chart.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/bar-cha

[49/50] [abbrv] hadoop git commit: YARN-4517. Add nodes page and fix bunch of license issues. (Varun Saxena via wangda)

2016-10-04 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade61259/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/error.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/error.hbs 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/error.hbs
new file mode 100644
index 000..c546bf7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/error.hbs
@@ -0,0 +1,19 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}
+
+Sorry, Error Occured.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade61259/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/notfound.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/notfound.hbs 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/notfound.hbs
new file mode 100644
index 000..588ea44
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/notfound.hbs
@@ -0,0 +1,20 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}
+
+404, Not Found
+Please Check your URL

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade61259/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs
index e58d6bd..3a79080 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs
@@ -1,3 +1,3 @@
 {{app-table table-id="apps-table" arr=model}}
-{{simple-table table-id="apps-table" bFilter=true colTypes="elapsed-time" 
colTargets="7"}}
-{{outlet}}
\ No newline at end of file
+{{simple-table table-id="apps-table" bFilter=true colsOrder="0,desc" 
colTypes="natural elapsed-time" colTargets="0 7"}}
+{{outlet}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade61259/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-container-log.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-container-log.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-container-log.hbs
new file mode 100644
index 000..9cc3b0f
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-container-log.hbs
@@ -0,0 +1,36 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}

[25/50] [abbrv] hadoop git commit: YARN-4733. [YARN-3368] Initial commit of new YARN web UI. (wangda)

2016-10-04 Thread sunilg
YARN-4733. [YARN-3368] Initial commit of new YARN web UI. (wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/316a1a0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/316a1a0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/316a1a0c

Branch: refs/heads/YARN-3368
Commit: 316a1a0c82296f30cb9a0e54701a4346c12bac70
Parents: ef7f06f
Author: Wangda Tan 
Authored: Tue Dec 8 16:37:50 2015 -0800
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 .../hadoop-yarn/hadoop-yarn-ui/.bowerrc |   4 +
 .../hadoop-yarn/hadoop-yarn-ui/.editorconfig|  34 +++
 .../hadoop-yarn/hadoop-yarn-ui/.ember-cli   |  11 +
 .../hadoop-yarn/hadoop-yarn-ui/.gitignore   |  17 ++
 .../hadoop-yarn/hadoop-yarn-ui/.jshintrc|  32 +++
 .../hadoop-yarn/hadoop-yarn-ui/.travis.yml  |  23 ++
 .../hadoop-yarn/hadoop-yarn-ui/.watchmanconfig  |   3 +
 .../hadoop-yarn/hadoop-yarn-ui/README.md|  24 ++
 .../hadoop-yarn-ui/app/adapters/cluster-info.js |  19 ++
 .../app/adapters/cluster-metric.js  |  19 ++
 .../app/adapters/yarn-app-attempt.js|  31 +++
 .../hadoop-yarn-ui/app/adapters/yarn-app.js |  25 ++
 .../app/adapters/yarn-container.js  |  42 +++
 .../hadoop-yarn-ui/app/adapters/yarn-queue.js   |  19 ++
 .../hadoop-yarn/hadoop-yarn-ui/app/app.js   |  20 ++
 .../hadoop-yarn-ui/app/components/.gitkeep  |   0
 .../app/components/app-attempt-table.js |   4 +
 .../hadoop-yarn-ui/app/components/app-table.js  |   4 +
 .../hadoop-yarn-ui/app/components/bar-chart.js  | 104 +++
 .../app/components/base-chart-component.js  | 109 
 .../app/components/container-table.js   |   4 +
 .../app/components/donut-chart.js   | 148 ++
 .../app/components/item-selector.js |  21 ++
 .../app/components/queue-configuration-table.js |   4 +
 .../app/components/queue-navigator.js   |   4 +
 .../hadoop-yarn-ui/app/components/queue-view.js | 272 +++
 .../app/components/simple-table.js  |  30 ++
 .../app/components/timeline-view.js | 250 +
 .../app/components/tree-selector.js | 257 ++
 .../hadoop-yarn-ui/app/controllers/.gitkeep |   0
 .../app/controllers/cluster-overview.js |   5 +
 .../hadoop-yarn-ui/app/controllers/yarn-apps.js |   4 +
 .../app/controllers/yarn-queue.js   |   6 +
 .../hadoop-yarn-ui/app/helpers/.gitkeep |   0
 .../hadoop-yarn/hadoop-yarn-ui/app/index.html   |  25 ++
 .../hadoop-yarn-ui/app/models/.gitkeep  |   0
 .../hadoop-yarn-ui/app/models/cluster-info.js   |  13 +
 .../hadoop-yarn-ui/app/models/cluster-metric.js | 115 
 .../app/models/yarn-app-attempt.js  |  44 +++
 .../hadoop-yarn-ui/app/models/yarn-app.js   |  65 +
 .../hadoop-yarn-ui/app/models/yarn-container.js |  39 +++
 .../hadoop-yarn-ui/app/models/yarn-queue.js |  76 ++
 .../hadoop-yarn-ui/app/models/yarn-user.js  |   8 +
 .../hadoop-yarn/hadoop-yarn-ui/app/router.js|  16 ++
 .../hadoop-yarn-ui/app/routes/.gitkeep  |   0
 .../app/routes/cluster-overview.js  |  11 +
 .../app/routes/yarn-app-attempt.js  |  21 ++
 .../hadoop-yarn-ui/app/routes/yarn-app.js   |  10 +
 .../hadoop-yarn-ui/app/routes/yarn-apps.js  |   8 +
 .../hadoop-yarn-ui/app/routes/yarn-queue.js |  20 ++
 .../app/routes/yarn-queues/index.js |   5 +
 .../app/routes/yarn-queues/queues-selector.js   |   7 +
 .../app/serializers/cluster-info.js |  29 ++
 .../app/serializers/cluster-metric.js   |  29 ++
 .../app/serializers/yarn-app-attempt.js |  49 
 .../hadoop-yarn-ui/app/serializers/yarn-app.js  |  66 +
 .../app/serializers/yarn-container.js   |  54 
 .../app/serializers/yarn-queue.js   | 127 +
 .../hadoop-yarn-ui/app/styles/app.css   | 141 ++
 .../app/templates/application.hbs   |  25 ++
 .../app/templates/cluster-overview.hbs  |  56 
 .../app/templates/components/.gitkeep   |   0
 .../templates/components/app-attempt-table.hbs  |  28 ++
 .../app/templates/components/app-table.hbs  |  62 +
 .../templates/components/container-table.hbs|  36 +++
 .../components/queue-configuration-table.hbs|  40 +++
 .../templates/components/queue-navigator.hbs|  18 ++
 .../app/templates/components/timeline-view.hbs  |  35 +++
 .../app/templates/yarn-app-attempt.hbs  |  12 +
 .../hadoop-yarn-ui/app/templates/yarn-app.hbs   | 145 ++
 .../hadoop-yarn-ui/app/templates/yarn-apps.hbs  |   3 +
 .../hadoop-yarn-ui/app/templates/yarn-queue.hbs |  48 
 .../hadoop-yarn-ui/app/utils/converter.js   |  74 +
 .../hadoop-yarn-ui/app/utils/sorter.js  |  15 

[28/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-04 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6804e642/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
new file mode 100644
index 000..4e68da0
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+import Constants from 'yarn-ui/constants';
+
+moduleFor('route:yarn-container-log', 'Unit | Route | ContainerLog', {
+});
+
+test('Basic creation test', function(assert) {
+  let route = this.subject();
+  assert.ok(route);
+  assert.ok(route.model);
+});
+
+test('Test getting container log', function(assert) {
+  var response = {
+  logs: "This is syslog",
+  containerID: "container_e32_1456000363780_0002_01_01",
+  logFileName: "syslog"};
+  var store = {
+findRecord: function(type) {
+  return new Ember.RSVP.Promise(function(resolve) {
+resolve(response);
+  }
+)}
+  };
+  assert.expect(6);
+  var route = this.subject();
+  route.set('store', store);
+  var model = route.model({node_id: "localhost:64318",
+  node_addr: "localhost:8042",
+  container_id: "container_e32_1456000363780_0002_01_01",
+  filename: "syslog"});
+   model.then(function(value) {
+ assert.ok(value);
+ assert.ok(value.containerLog);
+ assert.deepEqual(value.containerLog, response);
+ assert.ok(value.nodeInfo);
+ assert.equal(value.nodeInfo.addr, 'localhost:8042');
+ assert.equal(value.nodeInfo.id, 'localhost:64318');
+   });
+});
+
+/**
+ * This can happen when an empty response is sent from server
+ */
+test('Test non HTTP error while getting container log', function(assert) {
+  var error = {};
+  var response = {
+  logs: "",
+  containerID: "container_e32_1456000363780_0002_01_01",
+  logFileName: "syslog"};
+  var store = {
+findRecord: function(type) {
+  return new Ember.RSVP.Promise(function(resolve, reject) {
+reject(error);
+  }
+)}
+  };
+  assert.expect(6);
+  var route = this.subject();
+  route.set('store', store);
+  var model = route.model({node_id: "localhost:64318",
+  node_addr: "localhost:8042",
+  container_id: "container_e32_1456000363780_0002_01_01",
+  filename: "syslog"});
+   model.then(function(value) {
+ assert.ok(value);
+ assert.ok(value.containerLog);
+ assert.deepEqual(value.containerLog, response);
+ assert.ok(value.nodeInfo);
+ assert.equal(value.nodeInfo.addr, 'localhost:8042');
+ assert.equal(value.nodeInfo.id, 'localhost:64318');
+   });
+});
+
+test('Test HTTP error while getting container log', function(assert) {
+  var error = {errors: [{status: 404, responseText: 'Not Found'}]};
+  var response = {
+  logs: "",
+  containerID: "container_e32_1456000363780_0002_01_01",
+  logFileName: "syslog"};
+  var store = {
+findRecord: function(type) {
+  return new Ember.RSVP.Promise(function(resolve, reject) {
+reject(error);
+  }
+)}
+  };
+  assert.expect(5);
+  var route = this.subject();
+  route.set('store', store);
+  var model = route.model({node_id: "localhost:64318",
+  node_addr: "localhost:8042",
+  container_id: "container_e32_1456000363780_0002_01_01",
+  filename: "syslog"});
+   model.then(function(value) {
+ assert.ok(value);
+ assert.ok(value.errors);
+ assert.equal(value.errors.length, 1);
+ assert.equal(value.errors[0].status, 404);
+ assert.equal(value.errors[0].responseText, 'Not Found');
+   });
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6804e642/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-app-test.js
--
diff --gi

[17/50] [abbrv] hadoop git commit: HDFS-10947. Correct the API name for truncate in webhdfs document. Contributed by Surendra Singh Lilhore

2016-10-04 Thread sunilg
HDFS-10947. Correct the API name for truncate in webhdfs document. Contributed 
by Surendra Singh Lilhore


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef7f06f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef7f06f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef7f06f7

Branch: refs/heads/YARN-3368
Commit: ef7f06f7d1561db13bd3b07a5f62815ec29e1cdf
Parents: 5ea0210
Author: Brahma Reddy Battula 
Authored: Tue Oct 4 18:46:42 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Oct 4 18:46:42 2016 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef7f06f7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 546f99e..f904bda 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -172,7 +172,7 @@ The HTTP REST API supports the complete 
[FileSystem](../../api/org/apache/hadoop
 *   HTTP POST
 * [`APPEND`](#Append_to_a_File) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).append)
 * [`CONCAT`](#Concat_Files) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).concat)
-* [`TRUNCATE`](#Truncate_a_File) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).concat)
+* [`TRUNCATE`](#Truncate_a_File) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).truncate)
 *   HTTP DELETE
 * [`DELETE`](#Delete_a_FileDirectory) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).delete)
 * [`DELETESNAPSHOT`](#Delete_Snapshot) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).deleteSnapshot)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[41/50] [abbrv] hadoop git commit: YARN-4849. Addendum patch to fix javadocs. (Sunil G via wangda)

2016-10-04 Thread sunilg
 YARN-4849. Addendum patch to fix javadocs. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c439ddf4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c439ddf4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c439ddf4

Branch: refs/heads/YARN-3368
Commit: c439ddf4a07574d164e8693b64236fd8f6f192d5
Parents: 88ca7e4
Author: Wangda Tan 
Authored: Fri Sep 9 10:54:37 2016 -0700
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 .../hadoop/yarn/server/resourcemanager/ResourceManager.java| 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c439ddf4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index d32f649..f739e31 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -916,6 +916,12 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
* Return a HttpServer.Builder that the journalnode / namenode / secondary
* namenode can use to initialize their HTTP / HTTPS server.
*
+   * @param conf configuration object
+   * @param httpAddr HTTP address
+   * @param httpsAddr HTTPS address
+   * @param name  Name of the server
+   * @throws IOException from Builder
+   * @return builder object
*/
   public static HttpServer2.Builder httpServerTemplateForRM(Configuration conf,
   final InetSocketAddress httpAddr, final InetSocketAddress httpsAddr,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[37/50] [abbrv] hadoop git commit: YARN-5183. [YARN-3368] Support for responsive navbar when window is resized. (Kai Sasaki via Sunil G)

2016-10-04 Thread sunilg
YARN-5183. [YARN-3368] Support for responsive navbar when window is resized. 
(Kai Sasaki via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57045f98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57045f98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57045f98

Branch: refs/heads/YARN-3368
Commit: 57045f980625c3d30daaae8e8eeacfb71353a687
Parents: 4955056
Author: Sunil 
Authored: Fri Jun 10 10:33:41 2016 +0530
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57045f98/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
index bce18ce..d21cc3e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
@@ -32,6 +32,9 @@ module.exports = function(defaults) {
   app.import("bower_components/select2/dist/js/select2.min.js");
   app.import('bower_components/jquery-ui/jquery-ui.js');
   app.import('bower_components/more-js/dist/more.js');
+  app.import('bower_components/bootstrap/dist/css/bootstrap.css');
+  app.import('bower_components/bootstrap/dist/css/bootstrap-theme.css');
+  app.import('bower_components/bootstrap/dist/js/bootstrap.min.js');
 
   // Use `app.import` to add additional libraries to the generated
   // output files.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[42/50] [abbrv] hadoop git commit: YARN-5509. Build error due to preparing 3.0.0-alpha2 deployment. (Kai Sasaki via wangda)

2016-10-04 Thread sunilg
YARN-5509. Build error due to preparing 3.0.0-alpha2 deployment. (Kai Sasaki 
via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23ac516b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23ac516b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23ac516b

Branch: refs/heads/YARN-3368
Commit: 23ac516b1975e84a82be218c8ef8a22799aa
Parents: 61aa125
Author: Wangda Tan 
Authored: Thu Aug 11 14:59:14 2016 -0700
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23ac516b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 6d46fda..2933a76 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -20,12 +20,12 @@
   
 hadoop-yarn
 org.apache.hadoop
-3.0.0-alpha1-SNAPSHOT
+3.0.0-alpha2-SNAPSHOT
   
   4.0.0
   org.apache.hadoop
   hadoop-yarn-ui
-  3.0.0-alpha1-SNAPSHOT
+  3.0.0-alpha2-SNAPSHOT
   Apache Hadoop YARN UI
   ${packaging.type}
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[44/50] [abbrv] hadoop git commit: YARN-5161. [YARN-3368] Add Apache Hadoop logo in YarnUI home page. (Kai Sasaki via Sunil G)

2016-10-04 Thread sunilg
YARN-5161. [YARN-3368] Add Apache Hadoop logo in YarnUI home page. (Kai Sasaki 
via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/597b8a7f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/597b8a7f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/597b8a7f

Branch: refs/heads/YARN-3368
Commit: 597b8a7ff700d8225cee93192397c49491a44124
Parents: 57045f9
Author: Sunil 
Authored: Mon Jul 11 14:31:25 2016 +0530
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 .../src/main/webapp/app/styles/app.css |  11 +++
 .../src/main/webapp/app/templates/application.hbs  |  12 +++-
 .../webapp/public/assets/images/hadoop_logo.png| Bin 0 -> 26495 bytes
 3 files changed, 18 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/597b8a7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
index bcb6aab..e2d09dc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
@@ -157,3 +157,14 @@ table.dataTable thead .sorting_desc_disabled {
   stroke: #ccc;  
   stroke-width: 2px;
 }
+
+.hadoop-brand-image {
+  margin-top: -10px;
+  width: auto;
+  height: 45px;
+}
+
+li a.navigation-link.ember-view {
+  color: #2196f3;
+  font-weight: bold;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/597b8a7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
index b45ec6b..03b2c4a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
@@ -20,35 +20,37 @@
   
 
 
+  
+
+  
   
 Toggle navigation
 
 
 
   
-  Apache Hadoop YARN
 
 
 
 
   
 {{#link-to 'yarn-queue' 'root' tagName="li"}}
-  {{#link-to 'yarn-queue' 'root'}}Queues
+  {{#link-to 'yarn-queue' 'root' class="navigation-link"}}Queues
 (current)
   {{/link-to}}
 {{/link-to}}
 {{#link-to 'yarn-apps' tagName="li"}}
-  {{#link-to 'yarn-apps'}}Applications
+  {{#link-to 'yarn-apps' class="navigation-link"}}Applications
 (current)
   {{/link-to}}
 {{/link-to}}
 {{#link-to 'cluster-overview' tagName="li"}}
-  {{#link-to 'cluster-overview'}}Cluster Overview
+  {{#link-to 'cluster-overview' class="navigation-link"}}Cluster 
Overview
 (current)
   {{/link-to}}
 {{/link-to}}
 {{#link-to 'yarn-nodes' tagName="li"}}
-  {{#link-to 'yarn-nodes'}}Nodes
+  {{#link-to 'yarn-nodes' class="navigation-link"}}Nodes
 (current)
   {{/link-to}}
 {{/link-to}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/597b8a7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
new file mode 100644
index 000..275d39e
Binary files /dev/null and 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
 differ


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[50/50] [abbrv] hadoop git commit: YARN-4517. Add nodes page and fix bunch of license issues. (Varun Saxena via wangda)

2016-10-04 Thread sunilg
YARN-4517. Add nodes page and fix bunch of license issues. (Varun Saxena via 
wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ade61259
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ade61259
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ade61259

Branch: refs/heads/YARN-3368
Commit: ade61259f1f76b53663910f1699d7a5f7ce7c91e
Parents: 316a1a0
Author: Wangda Tan 
Authored: Mon Mar 21 13:13:02 2016 -0700
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 .../hadoop-yarn-ui/app/adapters/cluster-info.js |   5 +-
 .../app/adapters/cluster-metric.js  |   5 +-
 .../app/adapters/yarn-app-attempt.js|   3 +-
 .../hadoop-yarn-ui/app/adapters/yarn-app.js |   3 +-
 .../app/adapters/yarn-container-log.js  |  74 +
 .../app/adapters/yarn-container.js  |   5 +-
 .../app/adapters/yarn-node-app.js   |  63 
 .../app/adapters/yarn-node-container.js |  64 
 .../hadoop-yarn-ui/app/adapters/yarn-node.js|  40 +
 .../hadoop-yarn-ui/app/adapters/yarn-queue.js   |   3 +-
 .../hadoop-yarn-ui/app/adapters/yarn-rm-node.js |  45 ++
 .../app/components/simple-table.js  |  38 -
 .../hadoop-yarn/hadoop-yarn-ui/app/config.js|  27 
 .../hadoop-yarn/hadoop-yarn-ui/app/constants.js |  24 +++
 .../app/controllers/application.js  |  55 +++
 .../hadoop-yarn-ui/app/helpers/divide.js|  31 
 .../app/helpers/log-files-comma.js  |  48 ++
 .../hadoop-yarn-ui/app/helpers/node-link.js |  37 +
 .../hadoop-yarn-ui/app/helpers/node-menu.js |  66 
 .../hadoop-yarn-ui/app/models/yarn-app.js   |  14 +-
 .../app/models/yarn-container-log.js|  25 +++
 .../hadoop-yarn-ui/app/models/yarn-node-app.js  |  44 ++
 .../app/models/yarn-node-container.js   |  57 +++
 .../hadoop-yarn-ui/app/models/yarn-node.js  |  33 
 .../hadoop-yarn-ui/app/models/yarn-rm-node.js   |  92 +++
 .../hadoop-yarn/hadoop-yarn-ui/app/router.js|  13 ++
 .../hadoop-yarn-ui/app/routes/application.js|  38 +
 .../hadoop-yarn-ui/app/routes/index.js  |  29 
 .../hadoop-yarn-ui/app/routes/yarn-apps.js  |   4 +-
 .../app/routes/yarn-container-log.js|  55 +++
 .../hadoop-yarn-ui/app/routes/yarn-node-app.js  |  29 
 .../hadoop-yarn-ui/app/routes/yarn-node-apps.js |  29 
 .../app/routes/yarn-node-container.js   |  30 
 .../app/routes/yarn-node-containers.js  |  28 
 .../hadoop-yarn-ui/app/routes/yarn-node.js  |  29 
 .../hadoop-yarn-ui/app/routes/yarn-nodes.js |  25 +++
 .../app/serializers/yarn-container-log.js   |  39 +
 .../app/serializers/yarn-node-app.js|  86 +++
 .../app/serializers/yarn-node-container.js  |  74 +
 .../hadoop-yarn-ui/app/serializers/yarn-node.js |  56 +++
 .../app/serializers/yarn-rm-node.js |  77 ++
 .../app/templates/application.hbs   |   4 +-
 .../hadoop-yarn-ui/app/templates/error.hbs  |  19 +++
 .../hadoop-yarn-ui/app/templates/notfound.hbs   |  20 +++
 .../hadoop-yarn-ui/app/templates/yarn-apps.hbs  |   4 +-
 .../app/templates/yarn-container-log.hbs|  36 +
 .../app/templates/yarn-node-app.hbs |  60 
 .../app/templates/yarn-node-apps.hbs|  51 +++
 .../app/templates/yarn-node-container.hbs   |  70 +
 .../app/templates/yarn-node-containers.hbs  |  58 +++
 .../hadoop-yarn-ui/app/templates/yarn-node.hbs  |  94 
 .../hadoop-yarn-ui/app/templates/yarn-nodes.hbs |  65 
 .../hadoop-yarn-ui/app/utils/converter.js   |  21 ++-
 .../hadoop-yarn-ui/app/utils/sorter.js  |  42 -
 .../hadoop-yarn/hadoop-yarn-ui/bower.json   |   2 +-
 .../hadoop-yarn-ui/config/environment.js|   1 -
 .../unit/adapters/yarn-container-log-test.js|  73 +
 .../tests/unit/adapters/yarn-node-app-test.js   |  93 +++
 .../unit/adapters/yarn-node-container-test.js   |  93 +++
 .../tests/unit/adapters/yarn-node-test.js   |  42 +
 .../tests/unit/adapters/yarn-rm-node-test.js|  44 ++
 .../unit/models/yarn-container-log-test.js  |  48 ++
 .../tests/unit/models/yarn-node-app-test.js |  65 
 .../unit/models/yarn-node-container-test.js |  78 ++
 .../tests/unit/models/yarn-node-test.js |  58 +++
 .../tests/unit/models/yarn-rm-node-test.js  |  95 
 .../unit/routes/yarn-container-log-test.js  | 120 +++
 .../tests/unit/routes/yarn-node-app-test.js |  56 +++
 .../tests/unit/routes/yarn-node-apps-test.js|  60 
 .../unit/routes/yarn-node-container-test.js |  61 
 .../unit/routes/yarn-node-conta

[33/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-04 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6804e642/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
deleted file mode 100644
index c5394d0..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
+++ /dev/null
@@ -1,49 +0,0 @@
-import DS from 'ember-data';
-import Converter from 'yarn-ui/utils/converter';
-
-export default DS.JSONAPISerializer.extend({
-internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  
-  if (payload.appAttempt) {
-payload = payload.appAttempt;  
-  }
-  
-  var fixedPayload = {
-id: payload.appAttemptId,
-type: primaryModelClass.modelName, // yarn-app
-attributes: {
-  startTime: Converter.timeStampToDate(payload.startTime),
-  finishedTime: Converter.timeStampToDate(payload.finishedTime),
-  containerId: payload.containerId,
-  nodeHttpAddress: payload.nodeHttpAddress,
-  nodeId: payload.nodeId,
-  state: payload.nodeId,
-  logsLink: payload.logsLink
-}
-  };
-
-  return fixedPayload;
-},
-
-normalizeSingleResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  var p = this.internalNormalizeSingleResponse(store, 
-primaryModelClass, payload, id, requestType);
-  return { data: p };
-},
-
-normalizeArrayResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  // return expected is { data: [ {}, {} ] }
-  var normalizedArrayResponse = {};
-
-  // payload has apps : { app: [ {},{},{} ]  }
-  // need some error handling for ex apps or app may not be defined.
-  normalizedArrayResponse.data = 
payload.appAttempts.appAttempt.map(singleApp => {
-return this.internalNormalizeSingleResponse(store, primaryModelClass,
-  singleApp, singleApp.id, requestType);
-  }, this);
-  return normalizedArrayResponse;
-}
-});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6804e642/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
deleted file mode 100644
index a038fff..000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
+++ /dev/null
@@ -1,66 +0,0 @@
-import DS from 'ember-data';
-import Converter from 'yarn-ui/utils/converter';
-
-export default DS.JSONAPISerializer.extend({
-internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  if (payload.app) {
-payload = payload.app;  
-  }
-  
-  var fixedPayload = {
-id: id,
-type: primaryModelClass.modelName, // yarn-app
-attributes: {
-  appName: payload.name,
-  user: payload.user,
-  queue: payload.queue,
-  state: payload.state,
-  startTime: Converter.timeStampToDate(payload.startedTime),
-  elapsedTime: Converter.msToElapsedTime(payload.elapsedTime),
-  finishedTime: Converter.timeStampToDate(payload.finishedTime),
-  finalStatus: payload.finalStatus,
-  progress: payload.progress,
-  diagnostics: payload.diagnostics,
-  amContainerLogs: payload.amContainerLogs,
-  amHostHttpAddress: payload.amHostHttpAddress,
-  logAggregationStatus: payload.logAggregationStatus,
-  unmanagedApplication: payload.unmanagedApplication,
-  amNodeLabelExpression: payload.amNodeLabelExpression,
-  priority: payload.priority,
-  allocatedMB: payload.allocatedMB,
-  allocatedVCores: payload.allocatedVCores,
-  runningContainers: payload.runningContainers,
-  memorySeconds: payload.memorySeconds,
-  vcoreSeconds: payload.vcoreSeconds,
-  preemptedResourceMB: payload.preemptedResourceMB,
-  preemptedResourceVCores: payload.preemptedResourceVCores,
-  numNonAMContainerPreempted: payload.numNonAMContainerPreempted,
-  numAMContainerPreempted: payload.numAMContainerPreempted
-}
-  };
-
-  return fixedPayload;
-},
-
-normalizeSingleResponse(store, primaryModelClass, payload, id,
-  requestType) {
-  var p = this.internalNormalizeSingleResponse(store, 
-primaryModelClass, payload, id, requestType);
-  return { data: p };
-},
-
-normalizeArrayResponse(store, primaryModelClass, payload, id,
-  requestType) {

[40/50] [abbrv] hadoop git commit: YARN-5504. [YARN-3368] Fix YARN UI build pom.xml (Sreenath Somarajapuram via Sunil G)

2016-10-04 Thread sunilg
YARN-5504. [YARN-3368] Fix YARN UI build pom.xml (Sreenath Somarajapuram via 
Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f30f89a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f30f89a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f30f89a5

Branch: refs/heads/YARN-3368
Commit: f30f89a501870c95c17aaf15a21f6376fb10bfed
Parents: 151d67e
Author: sunilg 
Authored: Thu Aug 25 23:21:29 2016 +0530
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  | 59 +---
 .../src/main/webapp/ember-cli-build.js  |  2 +-
 .../hadoop-yarn-ui/src/main/webapp/package.json |  3 +-
 3 files changed, 17 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f30f89a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 2933a76..fca8d30 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -35,7 +35,7 @@
 node
 v0.12.2
 2.10.0
-false
+false
   
 
   
@@ -60,19 +60,20 @@
   
 
   
- maven-clean-plugin
- 3.0.0
- 
-false
-
-   
-  
${basedir}/src/main/webapp/bower_components
-   
-   
-  
${basedir}/src/main/webapp/node_modules
-   
-
- 
+maven-clean-plugin
+3.0.0
+
+  ${keep-ui-build-cache}
+  false
+  
+
+  
${basedir}/src/main/webapp/bower_components
+
+
+  ${basedir}/src/main/webapp/node_modules
+
+  
+
   
 
   
@@ -126,21 +127,6 @@
 
   
   
-generate-sources
-bower --allow-root install
-
-  exec
-
-
-  ${webappDir}
-  bower
-  
---allow-root
-install
-  
-
-  
-  
 ember build
 generate-sources
 
@@ -158,21 +144,6 @@
 
   
   
-ember test
-generate-resources
-
-  exec
-
-
-  ${skipTests}
-  ${webappDir}
-  ember
-  
-test
-  
-
-  
-  
 cleanup tmp
 generate-sources
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f30f89a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
index d21cc3e..7736c75 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
@@ -22,7 +22,7 @@ var EmberApp = require('ember-cli/lib/broccoli/ember-app');
 
 module.exports = function(defaults) {
   var app = new EmberApp(defaults, {
-// Add options here
+hinting: false
   });
 
   
app.import("bower_components/datatables/media/css/jquery.dataTables.min.css");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f30f89a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
index baa473a..6a4eb16 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
@@ -9,8 +9,7 @@
   },
   "scripts": {
 "build": "ember build",
-"start": "ember server",
-"test": "ember test"
+"start": "ember server"
   },
   "repository": "",
   "engines": {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

[14/50] [abbrv] hadoop git commit: HDFS-10810. setReplication removing block from under construction temporarily. Contributed by Brahma Reddy Battula

2016-10-04 Thread sunilg
HDFS-10810. setReplication removing block from under construction temporarily. 
Contributed by Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8078a5ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8078a5ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8078a5ef

Branch: refs/heads/YARN-3368
Commit: 8078a5efd0fe26b82c3768e06ccd2faddc619a7f
Parents: 853d65a
Author: Mingliang Liu 
Authored: Mon Oct 3 21:50:16 2016 -0400
Committer: Mingliang Liu 
Committed: Mon Oct 3 21:50:16 2016 -0400

--
 .../server/blockmanagement/BlockManager.java|  8 ++-
 .../apache/hadoop/hdfs/TestFileCorruption.java  | 61 
 2 files changed, 66 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8078a5ef/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 9b426bb..fa051b6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4013,13 +4013,15 @@ public class BlockManager implements BlockStatsMXBean {
 return;
   }
   NumberReplicas repl = countNodes(block);
+  int pendingNum = pendingReconstruction.getNumReplicas(block);
   int curExpectedReplicas = getRedundancy(block);
-  if (isNeededReconstruction(block, repl.liveReplicas())) {
-neededReconstruction.update(block, repl.liveReplicas(),
+  if (!hasEnoughEffectiveReplicas(block, repl, pendingNum,
+  curExpectedReplicas)) {
+neededReconstruction.update(block, repl.liveReplicas() + pendingNum,
 repl.readOnlyReplicas(), repl.decommissionedAndDecommissioning(),
 curExpectedReplicas, curReplicasDelta, expectedReplicasDelta);
   } else {
-int oldReplicas = repl.liveReplicas()-curReplicasDelta;
+int oldReplicas = repl.liveReplicas() + pendingNum - curReplicasDelta;
 int oldExpectedReplicas = curExpectedReplicas-expectedReplicasDelta;
 neededReconstruction.remove(block, oldReplicas, 
repl.readOnlyReplicas(),
 repl.decommissionedAndDecommissioning(), oldExpectedReplicas);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8078a5ef/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index 2437e38..5477700 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs;
 
+import com.google.common.base.Supplier;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import static org.junit.Assert.assertEquals;
@@ -38,6 +39,7 @@ import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
@@ -230,6 +232,65 @@ public class TestFileCorruption {
 
   }
 
+  @Test
+  public void testSetReplicationWhenBatchIBR() throws Exception {
+Configuration conf = new HdfsConfiguration();
+conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 100);
+conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INCREMENTAL_INTERVAL_MSEC_KEY,
+3);
+conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
+
conf.setInt(DFSConfigKeys.DFS_NAMENODE_FILE_CLOSE_NUM_COMMITTED_ALLOWED_KEY,
+1);
+DistributedFileSystem dfs;
+try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+.numDataNodes(3).build()) {
+  final int bufferSize = 1024; // 1024 Bytes each time
+  byte[] outBuffer = new byte[bufferSize];
+  dfs = cluster.getFileSystem();
+  String fileName = "/testSetRep1";
+  Path filePath = new Path(fileName

[02/50] [abbrv] hadoop git commit: YARN-5384. Expose priority in ReservationSystem submission APIs. (Sean Po via Subru).

2016-10-04 Thread sunilg
YARN-5384. Expose priority in ReservationSystem submission APIs. (Sean Po via 
Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a3697de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a3697de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a3697de

Branch: refs/heads/YARN-3368
Commit: 3a3697deab3e339708deb66fb613d86ff9ae
Parents: 89bd6d2
Author: Subru Krishnan 
Authored: Fri Sep 30 19:41:43 2016 -0700
Committer: Subru Krishnan 
Committed: Fri Sep 30 19:41:43 2016 -0700

--
 .../yarn/api/records/ReservationDefinition.java | 44 
 .../src/main/proto/yarn_protos.proto|  1 +
 .../impl/pb/ReservationDefinitionPBImpl.java| 31 ++
 .../webapp/dao/ReservationDefinitionInfo.java   | 11 +
 .../reservation/ReservationSystemTestUtil.java  | 10 -
 .../src/site/markdown/ResourceManagerRest.md|  3 ++
 6 files changed, 91 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a3697de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
index 8ef881b..bb9bca2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.api.records;
 
 import org.apache.hadoop.classification.InterfaceAudience.Public;
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.util.Records;
 
@@ -38,7 +37,7 @@ public abstract class ReservationDefinition {
   @Unstable
   public static ReservationDefinition newInstance(long arrival, long deadline,
   ReservationRequests reservationRequests, String name,
-  String recurrenceExpression) {
+  String recurrenceExpression, Priority priority) {
 ReservationDefinition rDefinition =
 Records.newRecord(ReservationDefinition.class);
 rDefinition.setArrival(arrival);
@@ -46,6 +45,7 @@ public abstract class ReservationDefinition {
 rDefinition.setReservationRequests(reservationRequests);
 rDefinition.setReservationName(name);
 rDefinition.setRecurrenceExpression(recurrenceExpression);
+rDefinition.setPriority(priority);
 return rDefinition;
   }
 
@@ -53,8 +53,8 @@ public abstract class ReservationDefinition {
   @Unstable
   public static ReservationDefinition newInstance(long arrival, long deadline,
   ReservationRequests reservationRequests, String name) {
-ReservationDefinition rDefinition =
-newInstance(arrival, deadline, reservationRequests, name, "0");
+ReservationDefinition rDefinition = newInstance(arrival, deadline,
+reservationRequests, name, "0", Priority.UNDEFINED);
 return rDefinition;
   }
 
@@ -130,7 +130,7 @@ public abstract class ReservationDefinition {
* allocation in the scheduler
*/
   @Public
-  @Evolving
+  @Unstable
   public abstract String getReservationName();
 
   /**
@@ -142,7 +142,7 @@ public abstract class ReservationDefinition {
*  allocation in the scheduler
*/
   @Public
-  @Evolving
+  @Unstable
   public abstract void setReservationName(String name);
 
   /**
@@ -160,7 +160,7 @@ public abstract class ReservationDefinition {
* @return recurrence of this reservation
*/
   @Public
-  @Evolving
+  @Unstable
   public abstract String getRecurrenceExpression();
 
   /**
@@ -178,7 +178,35 @@ public abstract class ReservationDefinition {
* @param recurrenceExpression recurrence interval of this reservation
*/
   @Public
-  @Evolving
+  @Unstable
   public abstract void setRecurrenceExpression(String recurrenceExpression);
 
+  /**
+   * Get the priority for this reservation. A lower number for priority
+   * indicates a higher priority reservation. Recurring reservations are
+   * always higher priority than non-recurring reservations. Priority for
+   * non-recurring reservations are only compared with non-recurring
+   * reservations. Likewise for recurring reservations.
+   *
+   * @return int representing the priority of the reserved resource
+   * allocation in the scheduler
+   */
+  @Public
+  @Unstable
+  public abstract Priority getPriority();
+

[38/50] [abbrv] hadoop git commit: YARN-5503. [YARN-3368] Add missing hidden files in webapp folder for deployment (Sreenath Somarajapuram via Sunil G)

2016-10-04 Thread sunilg
YARN-5503. [YARN-3368] Add missing hidden files in webapp folder for deployment 
(Sreenath Somarajapuram via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6195e4a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6195e4a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6195e4a

Branch: refs/heads/YARN-3368
Commit: f6195e4a8ca9229a8fd31d9a8bb92bcb06624472
Parents: d0e293b
Author: sunilg 
Authored: Tue Aug 30 20:58:35 2016 +0530
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  | 19 ++-
 .../hadoop-yarn-ui/src/main/webapp/.bowerrc |  4 +++
 .../src/main/webapp/.editorconfig   | 34 
 .../hadoop-yarn-ui/src/main/webapp/.ember-cli   |  9 ++
 .../hadoop-yarn-ui/src/main/webapp/.jshintrc| 32 ++
 .../src/main/webapp/.watchmanconfig |  3 ++
 6 files changed, 100 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6195e4a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index fca8d30..b750a73 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -30,7 +30,7 @@
   ${packaging.type}
 
   
-jar
+war
 src/main/webapp
 node
 v0.12.2
@@ -52,9 +52,26 @@
 src/main/webapp/bower.json
 src/main/webapp/package.json
 src/main/webapp/testem.json
+
+src/main/webapp/dist/**/*
+src/main/webapp/tmp/**/*
 src/main/webapp/public/assets/images/**/*
+src/main/webapp/public/assets/images/*
 src/main/webapp/public/robots.txt
+
+public/assets/images/**/*
 public/crossdomain.xml
+
+src/main/webapp/.tmp/**/*
+src/main/webapp/.bowerrc
+src/main/webapp/.editorconfig
+src/main/webapp/.ember-cli
+src/main/webapp/.gitignore
+src/main/webapp/.jshintrc
+src/main/webapp/.travis.yml
+src/main/webapp/.watchmanconfig
+src/main/webapp/tests/.jshintrc
+src/main/webapp/blueprints/.jshintrc
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6195e4a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
new file mode 100644
index 000..959e169
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
@@ -0,0 +1,4 @@
+{
+  "directory": "bower_components",
+  "analytics": false
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6195e4a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig
new file mode 100644
index 000..47c5438
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig
@@ -0,0 +1,34 @@
+# EditorConfig helps developers define and maintain consistent
+# coding styles between different editors and IDEs
+# editorconfig.org
+
+root = true
+
+
+[*]
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+
+[*.js]
+indent_style = space
+indent_size = 2
+
+[*.hbs]
+insert_final_newline = false
+indent_style = space
+indent_size = 2
+
+[*.css]
+indent_style = space
+indent_size = 2
+
+[*.html]
+indent_style = space
+indent_size = 2
+
+[*.{diff,md}]
+trim_trailing_whitespace = false

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6195e4a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.ember-cli
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.ember-cli 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.ember-cli
new file mode 100644
index 000..ee64cfe
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.ember-cli
@@ -0,0 +1,9 @@
+{
+  /**
+Ember CLI sends analytics information by default. The data is completely
+anonymous, but there are tim

[32/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-04 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6804e642/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
new file mode 100644
index 000..66bf54a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -0,0 +1,207 @@
+
+
+http://maven.apache.org/POM/4.0.0"; 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";
+xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/maven-v4_0_0.xsd";>
+  
+hadoop-yarn
+org.apache.hadoop
+3.0.0-SNAPSHOT
+  
+  4.0.0
+  org.apache.hadoop
+  hadoop-yarn-ui
+  3.0.0-SNAPSHOT
+  Apache Hadoop YARN UI
+  ${packaging.type}
+
+  
+jar
+src/main/webapp
+node
+v0.12.2
+2.10.0
+false
+  
+
+  
+
+  
+  
+org.apache.rat
+apache-rat-plugin
+
+  
+src/main/webapp/node_modules/**/*
+src/main/webapp/bower_components/**/*
+src/main/webapp/jsconfig.json
+src/main/webapp/bower.json
+src/main/webapp/package.json
+src/main/webapp/testem.json
+src/main/webapp/public/assets/images/**/*
+src/main/webapp/public/robots.txt
+public/crossdomain.xml
+  
+
+  
+
+  
+ maven-clean-plugin
+ 3.0.0
+ 
+false
+
+   
+  
${basedir}/src/main/webapp/bower_components
+   
+   
+  
${basedir}/src/main/webapp/node_modules
+   
+
+ 
+  
+
+  
+
+  
+
+  yarn-ui
+
+  
+false
+  
+
+  
+war
+  
+
+  
+
+  
+  
+exec-maven-plugin
+org.codehaus.mojo
+
+  
+generate-sources
+npm install
+
+  exec
+
+
+  ${webappDir}
+  npm
+  
+install
+  
+
+  
+  
+generate-sources
+bower install
+
+  exec
+
+
+  ${webappDir}
+  bower
+  
+--allow-root
+install
+  
+
+  
+  
+generate-sources
+bower --allow-root install
+
+  exec
+
+
+  ${webappDir}
+  bower
+  
+--allow-root
+install
+  
+
+  
+  
+ember build
+generate-sources
+
+  exec
+
+
+  ${webappDir}
+  ember
+  
+build
+-prod
+--output-path
+${basedir}/target/dist
+  
+
+  
+  
+ember test
+generate-resources
+
+  exec
+
+
+  ${skipTests}
+  ${webappDir}
+  ember
+  
+test
+  
+
+  
+  
+cleanup tmp
+generate-sources
+
+  exec
+
+
+  ${webappDir}
+  rm
+  
+-rf
+tmp
+  
+
+  
+
+  
+
+  
+  
+org.apache.maven.plugins
+maven-war-plugin
+
+  ${basedir}/src/main/webapp/WEB-INF/web.xml
+  ${basedir}/target/dist
+
+  
+
+
+  
+
+  
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6804e642/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/robots.txt
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/robots.txt 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/robots.txt
deleted file mode 100644
index f591645..000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/robots.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-# http://www.robotstxt.org
-User-agent: *
-Disallow:

http://git-wip-us.apache.org/repo

[36/50] [abbrv] hadoop git commit: YARN-4849. Addendum patch to fix document. (Wangda Tan via Sunil G)

2016-10-04 Thread sunilg
YARN-4849. Addendum patch to fix document. (Wangda Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44e7b5b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44e7b5b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44e7b5b5

Branch: refs/heads/YARN-3368
Commit: 44e7b5b5f0906f3a5687bcc0f4a9c54e739f0251
Parents: 2303e14
Author: sunilg 
Authored: Wed Aug 24 16:10:19 2016 +0530
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 BUILDING.txt|  2 +-
 .../src/site/markdown/YarnUI2.md| 36 +++-
 2 files changed, 21 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44e7b5b5/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 4424579..908c366 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -130,7 +130,7 @@ Maven build goals:
   * Use -Psrc to create a project source TAR.GZ
   * Use -Dtar to create a TAR with the distribution (using -Pdist)
   * Use -Preleasedocs to include the changelog and release docs (requires 
Internet connectivity)
-  * Use -Pyarn-ui to build YARN UI v2. (Requires Internet connectivity, and it 
is for dev use only)
+  * Use -Pyarn-ui to build YARN UI v2. (Requires Internet connectivity)
 
  Snappy build options:
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44e7b5b5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
index 575ebc7..ff48183 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
@@ -17,27 +17,31 @@
 
 Hadoop: YARN-UI V2
 =
-*This is a WIP project, nobody should use it in production.*
 
 Prerequisites
 -
 
-You will need the following things properly installed on your computer.
If you run RM locally in your computer for test purposes, you need the 
following things properly installed.
 
-* Install Node.js with NPM: https://nodejs.org/download/
-* After Node.js installed, install bower: `npm install -g bower`.
-* Install Ember-cli: `npm install -g ember-cli`
+- Install Node.js with NPM: https://nodejs.org/download
+- After Node.js installed, install `corsproxy`: `npm install -g corsproxy`.
 
-BUILD
-
-* Please refer to BUILDING.txt in the top directory and pass -Pyarn-ui to 
build UI-related code
-* Execute `mvn test -Pyarn-ui` to run unit tests
 
-Try it
---
+Configurations
+-
+
+*In yarn-site.xml*
+
+| Configuration Property | Description |
+|: |: |
+| `yarn.resourcemanager.webapp.ui2.enable` | In the server side it indicates 
whether the new YARN-UI v2 is enabled or not. Defaults to `false`. |
+| `yarn.resourcemanager.webapp.ui2.address` | Specify the address of 
ResourceManager and port which host YARN-UI v2, defaults to `localhost:8288`. |
 
-* Packaging and deploying Hadoop in this branch
-* In 
`hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/config.js`, 
change `timelineWebUrl` and `rmWebUrl` to your YARN RM/Timeline server web 
address. 
-* If you are running YARN RM in your localhost, you should update 
`localBaseUrl` to `localhost:1337/`, install `npm install -g corsproxy` and run 
`corsproxy` to avoid CORS errors. More details: 
`https://www.npmjs.com/package/corsproxy`. 
-* Run `ember serve` under 
`hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/`
-* Visit your app at [http://localhost:4200](http://localhost:4200).
+*In $HADOOP_PREFIX/share/hadoop/yarn/webapps/rm/config/configs.env*
+
+- Update timelineWebAddress and rmWebAddress to the actual addresses run 
resource manager and timeline server
- If you run RM locally in your computer just for test purposes, you need to 
keep `corsproxy` running. Otherwise, you need to set `localBaseAddress` to 
empty.
+
+Use it
+-
+Open your browser, go to `rm-address:8288` and try it!


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[43/50] [abbrv] hadoop git commit: YARN-5488. [YARN-3368] Applications table overflows beyond the page boundary(Harish Jaiprakash via Sunil G)

2016-10-04 Thread sunilg
YARN-5488. [YARN-3368] Applications table overflows beyond the page 
boundary(Harish Jaiprakash via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2303e14b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2303e14b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2303e14b

Branch: refs/heads/YARN-3368
Commit: 2303e14bfc05e497199b3961bc5e50d60f056330
Parents: 23ac516
Author: sunilg 
Authored: Fri Aug 12 14:51:03 2016 +0530
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 .../src/main/webapp/app/styles/app.css  |  4 +
 .../src/main/webapp/app/templates/yarn-app.hbs  | 98 ++--
 2 files changed, 54 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2303e14b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
index a68a0ac..da5b4bf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
@@ -273,3 +273,7 @@ li a.navigation-link.ember-view {
   right: 20px;
   top: 3px;
 }
+
+.x-scroll {
+  overflow-x: scroll;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2303e14b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
index 49c4bfd..9e92fc1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
@@ -49,55 +49,57 @@
 
   
 Basic Info
-
-  
-
-  Application ID
-  Name
-  User
-  Queue
-  State
-  Final Status
-  Start Time
-  Elapsed Time
-  Finished Time
-  Priority
-  Progress
-  Is Unmanaged AM
-
-  
+
+  
+
+  
+Application ID
+Name
+User
+Queue
+State
+Final Status
+Start Time
+Elapsed Time
+Finished Time
+Priority
+Progress
+Is Unmanaged AM
+  
+
 
-  
-
-  {{model.app.id}}
-  {{model.app.appName}}
-  {{model.app.user}}
-  {{model.app.queue}}
-  {{model.app.state}}
-  
-
-  {{model.app.finalStatus}}
-
-  
-  {{model.app.startTime}}
-  {{model.app.elapsedTime}}
-  {{model.app.validatedFinishedTs}}
-  {{model.app.priority}}
-  
-
-  
-{{model.app.progress}}%
+
+  
+{{model.app.id}}
+{{model.app.appName}}
+{{model.app.user}}
+{{model.app.queue}}
+{{model.app.state}}
+
+  
+{{model.app.finalStatus}}
+  
+
+{{model.app.startTime}}
+{{model.app.elapsedTime}}
+{{model.app.validatedFinishedTs}}
+{{model.app.priority}}
+
+  
+
+  {{model.app.progress}}%
+
   
-
-  
-  {{model.app.unmanagedApplication}}
-
-  
-
+
+{{model.app.unmanagedApplication}}
+  
+
+  
+
   
 
   
@@ -2

[29/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-04 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6804e642/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-containers.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-containers.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-containers.hbs
new file mode 100644
index 000..ca80ccd
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-containers.hbs
@@ -0,0 +1,58 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}
+
+
+  
+{{node-menu path="yarnNodeContainers" nodeAddr=model.nodeInfo.addr 
nodeId=model.nodeInfo.id}}
+
+  
+
+  
+Container ID
+Container State
+User
+Logs
+  
+
+
+  {{#if model.containers}}
+{{#each model.containers as |container|}}
+  {{#if container.isDummyContainer}}
+No containers found on this 
node
+  {{else}}
+
+  {{container.containerId}}
+  {{container.state}}
+  {{container.user}}
+  
+{{log-files-comma nodeId=model.nodeInfo.id
+nodeAddr=model.nodeInfo.addr
+containerId=container.containerId
+logFiles=container.containerLogFiles}}
+  
+
+  {{/if}}
+{{/each}}
+  {{/if}}
+
+  
+  {{simple-table table-id="node-containers-table" bFilter=true 
colsOrder="0,desc" colTypes="natural" colTargets="0"}}
+
+  
+
+{{outlet}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6804e642/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
new file mode 100644
index 000..a036076
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
@@ -0,0 +1,94 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}
+
+
+  
+{{node-menu path="yarnNode" nodeId=model.rmNode.id nodeAddr=model.node.id}}
+
+  
+Node Information
+  
+
+  
+Total Vmem allocated for Containers
+{{divide num=model.node.totalVmemAllocatedContainersMB 
den=1024}} GB
+  
+  
+Vmem enforcement enabled
+{{model.node.vmemCheckEnabled}}
+  
+  
+Total Pmem allocated for Containers
+{{divide num=model.node.totalPmemAllocatedContainersMB 
den=1024}} GB
+  
+  
+Pmem enforcement enabled
+{{model.node.pmemCheckEnabled}}
+  
+  
+Total VCores allocated for Containers
+{{model.node.totalVCoresAllocatedContainers}}
+  
+  
+Node Healthy Status
+{{model.node.nodeHealthy}}

[13/50] [abbrv] hadoop git commit: HDFS-10918. Add a tool to get FileEncryptionInfo from CLI. Contributed by Xiao Chen.

2016-10-04 Thread sunilg
HDFS-10918. Add a tool to get FileEncryptionInfo from CLI. Contributed by Xiao 
Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/853d65a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/853d65a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/853d65a1

Branch: refs/heads/YARN-3368
Commit: 853d65a157362661ccab10379c2d82e780382f83
Parents: 736d33c
Author: Xiao Chen 
Authored: Mon Oct 3 16:01:54 2016 -0700
Committer: Xiao Chen 
Committed: Mon Oct 3 16:01:54 2016 -0700

--
 .../apache/hadoop/fs/FileEncryptionInfo.java| 21 +
 .../hadoop/hdfs/DistributedFileSystem.java  | 30 +++
 .../apache/hadoop/hdfs/client/HdfsAdmin.java| 14 +++
 .../apache/hadoop/hdfs/tools/CryptoAdmin.java   | 51 ++-
 .../src/site/markdown/TransparentEncryption.md  | 16 
 .../src/test/resources/testCryptoConf.xml   | 90 
 6 files changed, 221 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/853d65a1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java
index 00ddfe8..1129e07 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java
@@ -121,4 +121,25 @@ public class FileEncryptionInfo {
 builder.append("}");
 return builder.toString();
   }
+
+  /**
+   * A frozen version of {@link #toString()} to be backward compatible.
+   * When backward compatibility is not needed, use {@link #toString()}, which
+   * provides more info and is supposed to evolve.
+   * Don't change this method except for major revisions.
+   *
+   * NOTE:
+   * Currently this method is used by CLI for backward compatibility.
+   */
+  public String toStringStable() {
+StringBuilder builder = new StringBuilder("{");
+builder.append("cipherSuite: " + cipherSuite);
+builder.append(", cryptoProtocolVersion: " + version);
+builder.append(", edek: " + Hex.encodeHexString(edek));
+builder.append(", iv: " + Hex.encodeHexString(iv));
+builder.append(", keyName: " + keyName);
+builder.append(", ezKeyVersionName: " + ezKeyVersionName);
+builder.append("}");
+return builder.toString();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/853d65a1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 24ffb40..548815f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FSLinkResolver;
 import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemLinkResolver;
@@ -2204,6 +2205,35 @@ public class DistributedFileSystem extends FileSystem {
 return dfs.listEncryptionZones();
   }
 
+  /* HDFS only */
+  public FileEncryptionInfo getFileEncryptionInfo(final Path path)
+  throws IOException {
+Path absF = fixRelativePart(path);
+return new FileSystemLinkResolver() {
+  @Override
+  public FileEncryptionInfo doCall(final Path p) throws IOException {
+final HdfsFileStatus fi = dfs.getFileInfo(getPathName(p));
+if (fi == null) {
+  throw new FileNotFoundException("File does not exist: " + p);
+}
+return fi.getFileEncryptionInfo();
+  }
+
+  @Override
+  public FileEncryptionInfo next(final FileSystem fs, final Path p)
+  throws IOException {
+if (fs instanceof DistributedFileSystem) {
+  DistributedFileSystem myDfs = (DistributedFileSystem)fs;
+  return myDfs.getFileEncryptionInfo(p);
+}
+throw new UnsupportedOperationException(
+"Cannot call getFileEncryptionInfo"
++ " on a symlink to a non-DistributedFileS

[20/50] [abbrv] hadoop git commit: YARN-5321. [YARN-3368] Add resource usage for application by node managers (Wangda Tan via Sunil G) YARN-5320. [YARN-3368] Add resource usage by applications and que

2016-10-04 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/53874603/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
index 8ce4ffa..aae4177 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
@@ -16,55 +16,95 @@
  * limitations under the License.
 }}
 
-
-  {{queue-navigator model=model.queues selected=model.selected}}
+
+  {{em-breadcrumbs items=breadcrumbs}}
 
 
-
-  
-{{queue-configuration-table queue=model.selectedQueue}}
-  
+
+  
 
-  
-{{bar-chart data=model.selectedQueue.capacitiesBarChartData 
-title="Queue Capacities" 
-parentId="capacity-bar-chart"
-textWidth=150
-ratio=0.5
-maxHeight=350}}
-  
+
+  
+
+  Application
+
+
+  
+
+  {{#link-to 'yarn-queue' tagName="li"}}
+{{#link-to 'yarn-queue' model.selected}}Information
+{{/link-to}}
+  {{/link-to}}
+  {{#link-to 'yarn-queue-apps' tagName="li"}}
+{{#link-to 'yarn-queue-apps' model.selected}}Applications List
+{{/link-to}}
+  {{/link-to}}
+
+  
+
+  
+
 
-{{#if model.selectedQueue.hasUserUsages}}
-  
-{{donut-chart data=model.selectedQueue.userUsagesDonutChartData 
-title="User Usages" 
-showLabels=true
-parentId="userusage-donut-chart"
-maxHeight=350}}
-  
-{{/if}}
+
+  
+  
 
-  
-{{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData 
-title="Running Apps" 
-showLabels=true
-parentId="numapplications-donut-chart"
-ratio=0.5
-maxHeight=350}}
-  
-
+
+  
+
+  Queue Information
+
+{{queue-configuration-table queue=model.selectedQueue}}
+  
+
 
-
+
+  
+
+  Queue Capacities
+
+
+  
+  {{bar-chart data=model.selectedQueue.capacitiesBarChartData
+  title=""
+  parentId="capacity-bar-chart"
+  textWidth=170
+  ratio=0.55
+  maxHeight=350}}
+
+  
+
+
+{{#if model.selectedQueue.hasUserUsages}}
+  
+{{donut-chart data=model.selectedQueue.userUsagesDonutChartData
+title="User Usages"
+showLabels=true
+parentId="userusage-donut-chart"
+type="memory"
+ratio=0.6
+maxHeight=350}}
+  
+{{/if}}
+
+
+  
+
+  Running Apps
+
+
+  {{donut-chart 
data=model.selectedQueue.numOfApplicationsDonutChartData
+  showLabels=true
+  parentId="numapplications-donut-chart"
+  ratio=0.6
+  maxHeight=350}}
+
+  
+
+
+  
+
 
-
-  
-{{#if model.apps}}
-  {{app-table table-id="apps-table" arr=model.apps}}
-  {{simple-table table-id="apps-table" bFilter=true 
colTypes="elapsed-time" colTargets="7"}}
-{{else}}
-  Could not find any applications from this 
cluster
-{{/if}}
   
 
-
-{{outlet}}
\ No newline at end of file
+{{outlet}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53874603/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
new file mode 100644
index 000..e27341b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
@@ -0,0 +1,72 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on 

[09/50] [abbrv] hadoop git commit: HDFS-10940. Reduce performance penalty of block caching when not used. Contributed by Daryn Sharp.

2016-10-04 Thread sunilg
HDFS-10940. Reduce performance penalty of block caching when not used. 
Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74420843
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74420843
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74420843

Branch: refs/heads/YARN-3368
Commit: 744208431f7365bf054e6b773b86af2583001e1d
Parents: 9002062
Author: Kihwal Lee 
Authored: Mon Oct 3 11:27:23 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Oct 3 11:27:23 2016 -0500

--
 .../hdfs/server/blockmanagement/BlockManager.java  | 10 +-
 .../hadoop/hdfs/server/namenode/CacheManager.java  | 12 +++-
 .../server/namenode/FSDirStatAndListingOp.java | 17 +
 .../hdfs/server/namenode/TestCacheDirectives.java  | 10 ++
 4 files changed, 31 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74420843/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 886984a..9b426bb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -103,6 +103,7 @@ import 
org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.FoldedTreeSet;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.server.namenode.CacheManager;
 
 import static 
org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
 
@@ -1145,9 +1146,16 @@ public class BlockManager implements BlockStatsMXBean {
 fileSizeExcludeBlocksUnderConstruction, mode);
 isComplete = true;
   }
-  return new LocatedBlocks(fileSizeExcludeBlocksUnderConstruction,
+  LocatedBlocks locations = new LocatedBlocks(
+  fileSizeExcludeBlocksUnderConstruction,
   isFileUnderConstruction, locatedblocks, lastlb, isComplete, feInfo,
   ecPolicy);
+  // Set caching information for the located blocks.
+  CacheManager cm = namesystem.getCacheManager();
+  if (cm != null) {
+cm.setCachedLocations(locations);
+  }
+  return locations;
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74420843/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
index 366dd9b..24bf751 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -902,7 +903,16 @@ public final class CacheManager {
 return new BatchedListEntries(results, false);
   }
 
-  public void setCachedLocations(LocatedBlock block) {
+  public void setCachedLocations(LocatedBlocks locations) {
+// don't attempt lookups if there are no cached blocks
+if (cachedBlocks.size() > 0) {
+  for (LocatedBlock lb : locations.getLocatedBlocks()) {
+setCachedLocations(lb);
+  }
+}
+  }
+
+  private void setCachedLocations(LocatedBlock block) {
 CachedBlock cachedBlock =
 new CachedBlock(block.getBlock().getBlockId(),
 (short)0, false);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74420843/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
--

[06/50] [abbrv] hadoop git commit: YARN-4855. Should check if node exists when replace nodelabels. Contributed by Tao Jie

2016-10-04 Thread sunilg
YARN-4855. Should check if node exists when replace nodelabels. Contributed by 
Tao Jie


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e130c30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e130c30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e130c30

Branch: refs/heads/YARN-3368
Commit: 6e130c308cf1b97e8386b6a43c26d72d2850119c
Parents: 8285703
Author: Naganarasimha 
Authored: Mon Oct 3 02:02:26 2016 -0400
Committer: Naganarasimha 
Committed: Mon Oct 3 02:02:26 2016 -0400

--
 .../ReplaceLabelsOnNodeRequest.java |   8 ++
 ..._server_resourcemanager_service_protos.proto |   2 +-
 .../hadoop/yarn/client/cli/RMAdminCLI.java  |  39 ---
 .../hadoop/yarn/client/cli/TestRMAdminCLI.java  |   3 +-
 .../pb/ReplaceLabelsOnNodeRequestPBImpl.java|  14 ++-
 .../server/resourcemanager/AdminService.java|  46 +
 .../resourcemanager/TestRMAdminService.java | 103 ++-
 7 files changed, 197 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e130c30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeRequest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeRequest.java
index 28e261a..1b8e687 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeRequest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeRequest.java
@@ -44,4 +44,12 @@ public abstract class ReplaceLabelsOnNodeRequest {
   @Public
   @Evolving
   public abstract Map> getNodeToLabels();
+
+  @Public
+  @Evolving
+  public abstract void setFailOnUnknownNodes(boolean failOnUnknownNodes);
+
+  @Public
+  @Evolving
+  public abstract boolean getFailOnUnknownNodes();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e130c30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
index b9f30db..16d8097 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
@@ -99,10 +99,10 @@ message RemoveFromClusterNodeLabelsResponseProto {
 
 message ReplaceLabelsOnNodeRequestProto {
   repeated NodeIdToLabelsNameProto nodeToLabels = 1;
+  optional bool failOnUnknownNodes = 2;
 }
 
 message ReplaceLabelsOnNodeResponseProto {
-  
 }
 
 message UpdateNodeLabelsResponseProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e130c30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index 7a898a1..640f8e3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -130,11 +130,13 @@ public class RMAdminCLI extends HAAdmin {
   new UsageInfo(" (label splitted by \",\")",
   "remove from cluster node labels"))
   .put("-replaceLabelsOnNode",
-  new UsageInfo(
+  new UsageInfo("[-failOnUnknownNodes] " +
   "<\"node1[:port]=label1,label2 
node2[:port]=label1,label2\">",
-  "replace labels on nodes"
-  + " (please note that we do not support specifying 
multiple"
-  + " labels on a single host for now.)"))
+  "replace labels on nodes"
+  + " (please note that we do not support speci

[23/50] [abbrv] hadoop git commit: YARN-5583. [YARN-3368] Fix wrong paths in .gitignore (Sreenath Somarajapuram via Sunil G)

2016-10-04 Thread sunilg
YARN-5583. [YARN-3368] Fix wrong paths in .gitignore (Sreenath Somarajapuram 
via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0e293b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0e293b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0e293b0

Branch: refs/heads/YARN-3368
Commit: d0e293b02fce31c26d4b3047531f8b5d93e458a9
Parents: f30f89a
Author: sunilg 
Authored: Tue Aug 30 20:27:59 2016 +0530
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 .gitignore | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0e293b0/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 29b018f..848b256 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,8 +35,8 @@ 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.sass-cache
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/connect.lock
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/coverage/*
 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/libpeerconnection.log
-hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webappnpm-debug.log
-hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapptestem.log
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/npm-debug.log
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/testem.log
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/dist
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tmp
 yarnregistry.pdf


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[24/50] [abbrv] hadoop git commit: YARN-4733. [YARN-3368] Initial commit of new YARN web UI. (wangda)

2016-10-04 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/316a1a0c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
new file mode 100644
index 000..d39885e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
@@ -0,0 +1,29 @@
+import DS from 'ember-data';
+
+export default DS.JSONAPISerializer.extend({
+normalizeSingleResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  var fixedPayload = {
+id: id,
+type: primaryModelClass.modelName,
+attributes: payload
+  };
+
+  return this._super(store, primaryModelClass, fixedPayload, id,
+requestType);
+},
+
+normalizeArrayResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  // return expected is { data: [ {}, {} ] }
+  var normalizedArrayResponse = {};
+
+  // payload has apps : { app: [ {},{},{} ]  }
+  // need some error handling for ex apps or app may not be defined.
+  normalizedArrayResponse.data = [
+this.normalizeSingleResponse(store, primaryModelClass,
+  payload.clusterMetrics, 1, requestType)
+  ];
+  return normalizedArrayResponse;
+}
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/316a1a0c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
new file mode 100644
index 000..c5394d0
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
@@ -0,0 +1,49 @@
+import DS from 'ember-data';
+import Converter from 'yarn-ui/utils/converter';
+
+export default DS.JSONAPISerializer.extend({
+internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  
+  if (payload.appAttempt) {
+payload = payload.appAttempt;  
+  }
+  
+  var fixedPayload = {
+id: payload.appAttemptId,
+type: primaryModelClass.modelName, // yarn-app
+attributes: {
+  startTime: Converter.timeStampToDate(payload.startTime),
+  finishedTime: Converter.timeStampToDate(payload.finishedTime),
+  containerId: payload.containerId,
+  nodeHttpAddress: payload.nodeHttpAddress,
+  nodeId: payload.nodeId,
+  state: payload.nodeId,
+  logsLink: payload.logsLink
+}
+  };
+
+  return fixedPayload;
+},
+
+normalizeSingleResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  var p = this.internalNormalizeSingleResponse(store, 
+primaryModelClass, payload, id, requestType);
+  return { data: p };
+},
+
+normalizeArrayResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  // return expected is { data: [ {}, {} ] }
+  var normalizedArrayResponse = {};
+
+  // payload has apps : { app: [ {},{},{} ]  }
+  // need some error handling for ex apps or app may not be defined.
+  normalizedArrayResponse.data = 
payload.appAttempts.appAttempt.map(singleApp => {
+return this.internalNormalizeSingleResponse(store, primaryModelClass,
+  singleApp, singleApp.id, requestType);
+  }, this);
+  return normalizedArrayResponse;
+}
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/316a1a0c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
new file mode 100644
index 000..a038fff
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
@@ -0,0 +1,66 @@
+import DS from 'ember-data';
+import Converter from 'yarn-ui/utils/converter';
+
+export default DS.JSONAPISerializer.extend({
+internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  if (payload.app) {
+payload = payload.app;  
+  }
+  
+  var fixedPayload = {
+id: id,
+type: primaryModelClass.modelName, // yarn-app
+attributes: {
+  appName: payload.name,
+  user: payload.user,
+  queue: payload.queue,
+  state: payload.state,
+  startTime: Converter.timeStampToDate(payload.startedTime),
+  elapsedTime: Conver

[34/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-04 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6804e642/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/simple-table.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/simple-table.js 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/simple-table.js
deleted file mode 100644
index 447533e..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/simple-table.js
+++ /dev/null
@@ -1,58 +0,0 @@
-import Ember from 'ember';
-
-export default Ember.Component.extend({
-  didInsertElement: function() {
-var paging = this.get("paging") ? true : this.get("paging");
-var ordering = this.get("ordering") ? true : this.get("ordering");
-var info = this.get("info") ? true : this.get("info");
-var bFilter = this.get("bFilter") ? true : this.get("bFilter");
-
-// Defines sorter for the columns if not default.
-// Can also specify a custom sorter.
-var i;
-var colDefs = [];
-if (this.get("colTypes")) {
-  var typesArr = this.get("colTypes").split(' ');
-  var targetsArr = this.get("colTargets").split(' ');
-  for (i = 0; i < typesArr.length; i++) {
-console.log(typesArr[i] + " " + targetsArr[i]);
-colDefs.push({
-  type: typesArr[i],
-  targets: parseInt(targetsArr[i])
-});
-  }
-}
-// Defines initial column and sort order.
-var orderArr = [];
-if (this.get("colsOrder")) {
-  var cols = this.get("colsOrder").split(' ');
-  for (i = 0; i < cols.length; i++) {
-var col = cols[i].split(',');
-if (col.length != 2) {
-  continue;
-}
-var order = col[1].trim();
-if (order != 'asc' && order != 'desc') {
-  continue;
-}
-var colOrder = [];
-colOrder.push(parseInt(col[0]));
-colOrder.push(order);
-orderArr.push(colOrder);
-  }
-}
-if (orderArr.length == 0) {
-  var defaultOrder = [0, 'asc'];
-  orderArr.push(defaultOrder);
-}
-console.log(orderArr[0]);
-Ember.$('#' + this.get('table-id')).DataTable({
-  "paging":   paging,
-  "ordering": ordering, 
-  "info": info,
-  "bFilter": bFilter,
-  "order": orderArr,
-  "columnDefs": colDefs
-});
-  }
-});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6804e642/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/timeline-view.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/timeline-view.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/timeline-view.js
deleted file mode 100644
index fe402bb..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/components/timeline-view.js
+++ /dev/null
@@ -1,250 +0,0 @@
-import Ember from 'ember';
-import Converter from 'yarn-ui/utils/converter';
-
-export default Ember.Component.extend({
-  canvas: {
-svg: undefined,
-h: 0,
-w: 0,
-tooltip: undefined
-  },
-
-  clusterMetrics: undefined,
-  modelArr: [],
-  colors: d3.scale.category10().range(),
-  _selected: undefined,
-
-  selected: function() {
-return this._selected;
-  }.property(),
-
-  tableComponentName: function() {
-return "app-attempt-table";
-  }.property(),
-
-  setSelected: function(d) {
-if (this._selected == d) {
-  return;
-}
-
-// restore color
-if (this._selected) {
-  var dom = d3.select("#timeline-bar-" + this._selected.get("id"));
-  dom.attr("fill", this.colors[0]);
-}
-
-this._selected = d;
-this.set("selected", d);
-dom = d3.select("#timeline-bar-" + d.get("id"));
-dom.attr("fill", this.colors[1]);
-  },
-
-  getPerItemHeight: function() {
-var arrSize = this.modelArr.length;
-
-if (arrSize < 20) {
-  return 30;
-} else if (arrSize < 100) {
-  return 10;
-} else {
-  return 2;
-}
-  },
-
-  getPerItemGap: function() {
-var arrSize = this.modelArr.length;
-
-if (arrSize < 20) {
-  return 5;
-} else if (arrSize < 100) {
-  return 1;
-} else {
-  return 1;
-}
-  },
-
-  getCanvasHeight: function() {
-return (this.getPerItemHeight() + this.getPerItemGap()) * 
this.modelArr.length + 200;
-  },
-
-  draw: function(start, end) {
-// get w/h of the svg
-var bbox = d3.select("#" + this.get("parent-id"))
-  .node()
-  .getBoundingClientRect();
-this.canvas.w = bbox.width;
-this.canvas.h = this.getCanvasHeight();
-
-this.canvas.svg = d3.select("#" + this.get("parent-id"))
-  .append("svg")
-  .attr("width", this.canvas.w)
-  .attr("height", this.canvas.h)
-  .attr("id", this.get("my-id"));
-this.renderTimeline(start, end);
-  },
-
-  renderTimeline: function(start, end) {
-var border = 30;
-var singleBarHeight = this.getPerItemHeight();

[01/50] [abbrv] hadoop git commit: HADOOP-13317. Add logs to KMS server-side to improve supportability. Contributed by Suraj Acharya. [Forced Update!]

2016-10-04 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3368 9ef829197 -> c439ddf4a (forced update)


HADOOP-13317. Add logs to KMS server-side to improve supportability. 
Contributed by Suraj Acharya.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89bd6d29
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89bd6d29
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89bd6d29

Branch: refs/heads/YARN-3368
Commit: 89bd6d29a62afd7ed8ff87bcc29d17b1cb53dcb6
Parents: 2549ee9
Author: Xiao Chen 
Authored: Fri Sep 30 17:51:39 2016 -0700
Committer: Xiao Chen 
Committed: Fri Sep 30 17:51:39 2016 -0700

--
 .../hadoop/crypto/key/kms/server/KMS.java   | 76 +---
 1 file changed, 66 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89bd6d29/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
index f069fca..371f3f5 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import 
org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 import javax.ws.rs.Consumes;
@@ -68,6 +70,8 @@ public class KMS {
   private KeyProviderCryptoExtension provider;
   private KMSAudit kmsAudit;
 
+  private static final Logger LOG = LoggerFactory.getLogger(KMS.class);
+
   public KMS() throws Exception {
 provider = KMSWebApp.getKeyProvider();
 kmsAudit= KMSWebApp.getKMSAudit();
@@ -77,7 +81,7 @@ public class KMS {
   KMSOp operation) throws AccessControlException {
 KMSWebApp.getACLs().assertAccess(aclType, ugi, operation, null);
   }
-  
+
   private void assertAccess(KMSACLs.Type aclType, UserGroupInformation ugi,
   KMSOp operation, String key) throws AccessControlException {
 KMSWebApp.getACLs().assertAccess(aclType, ugi, operation, key);
@@ -100,6 +104,7 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   @SuppressWarnings("unchecked")
   public Response createKey(Map jsonKey) throws Exception {
+LOG.trace("Entering createKey Method.");
 KMSWebApp.getAdminCallsMeter().mark();
 UserGroupInformation user = HttpUserGroupInformation.get();
 final String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
@@ -111,6 +116,9 @@ public class KMS {
  ? (Integer) jsonKey.get(KMSRESTConstants.LENGTH_FIELD) : 0;
 String description = (String)
 jsonKey.get(KMSRESTConstants.DESCRIPTION_FIELD);
+LOG.debug("Creating key with name {}, cipher being used{}, " +
+"length of key {}, description of key {}", name, cipher,
+ length, description);
 Map attributes = (Map)
 jsonKey.get(KMSRESTConstants.ATTRIBUTES_FIELD);
 if (material != null) {
@@ -151,6 +159,7 @@ public class KMS {
 String requestURL = KMSMDCFilter.getURL();
 int idx = requestURL.lastIndexOf(KMSRESTConstants.KEYS_RESOURCE);
 requestURL = requestURL.substring(0, idx);
+LOG.trace("Exiting createKey Method.");
 return Response.created(getKeyURI(KMSRESTConstants.SERVICE_VERSION, name))
 .type(MediaType.APPLICATION_JSON)
 .header("Location", getKeyURI(requestURL, name)).entity(json).build();
@@ -160,11 +169,12 @@ public class KMS {
   @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
   public Response deleteKey(@PathParam("name") final String name)
   throws Exception {
+LOG.trace("Entering deleteKey method.");
 KMSWebApp.getAdminCallsMeter().mark();
 UserGroupInformation user = HttpUserGroupInformation.get();
 assertAccess(KMSACLs.Type.DELETE, user, KMSOp.DELETE_KEY, name);
 KMSClientProvider.checkNotEmpty(name, "name");
-
+LOG.debug("Deleting key with name {}.", name);
 user.doAs(new PrivilegedExceptionAction() {
   @Override
   public Void run() throws Exception {
@@ -175,7 +185,7 @@ public class KMS {
 });
 
 kmsAudit.ok(user, KMSOp.DELETE_KEY, name, "");
-
+LOG.trace("Exiting deleteKey method.");
 return Response.ok().build();
   }
 
@@ -185,10 +195,12 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response rolloverKey(@PathParam

[31/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-04 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6804e642/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
new file mode 100644
index 000..f7ec020
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
@@ -0,0 +1,275 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Component.extend({
+  // Map: 
+  map : undefined,
+
+  // Normalized data for d3
+  treeData: undefined,
+
+  // folded queues, folded[] == true means  is folded
+  foldedQueues: { },
+
+  // maxDepth
+  maxDepth: 0,
+
+  // num of leaf queue, folded queue is treated as leaf queue
+  numOfLeafQueue: 0,
+
+  // mainSvg
+  mainSvg: undefined,
+
+  // Init data
+  initData: function() {
+this.map = { };
+this.treeData = { };
+this.maxDepth = 0;
+this.numOfLeafQueue = 0;
+
+this.get("model")
+  .forEach(function(o) {
+this.map[o.id] = o;
+  }.bind(this));
+
+var selected = this.get("selected");
+
+this.initQueue("root", 1, this.treeData);
+  },
+
+  // get Children array of given queue
+  getChildrenNamesArray: function(q) {
+var namesArr = [];
+
+// Folded queue's children is empty
+if (this.foldedQueues[q.get("name")]) {
+  return namesArr;
+}
+
+var names = q.get("children");
+if (names) {
+  names.forEach(function(name) {
+namesArr.push(name);
+  });
+}
+
+return namesArr;
+  },
+
+  // Init queues
+  initQueue: function(queueName, depth, node) {
+if ((!queueName) || (!this.map[queueName])) {
+  // Queue is not existed
+  return;
+}
+
+if (depth > this.maxDepth) {
+  this.maxDepth = this.maxDepth + 1;
+}
+
+var queue = this.map[queueName];
+
+var names = this.getChildrenNamesArray(queue);
+
+node.name = queueName;
+node.parent = queue.get("parent");
+node.queueData = queue;
+
+if (names.length > 0) {
+  node.children = [];
+
+  names.forEach(function(name) {
+var childQueueData = {};
+node.children.push(childQueueData);
+this.initQueue(name, depth + 1, childQueueData);
+  }.bind(this));
+} else {
+  this.numOfLeafQueue = this.numOfLeafQueue + 1;
+}
+  },
+
+  update: function(source, root, tree, diagonal) {
+var duration = 300;
+var i = 0;
+
+// Compute the new tree layout.
+var nodes = tree.nodes(root).reverse();
+var links = tree.links(nodes);
+
+// Normalize for fixed-depth.
+nodes.forEach(function(d) { d.y = d.depth * 200; });
+
+// Update the nodes…
+var node = this.mainSvg.selectAll("g.node")
+  .data(nodes, function(d) { return d.id || (d.id = ++i); });
+
+// Enter any new nodes at the parent's previous position.
+var nodeEnter = node.enter().append("g")
+  .attr("class", "node")
+  .attr("transform", function(d) { return "translate(" + source.y0 + "," + 
source.x0 + ")"; })
+  .on("click", function(d,i){
+if (d.queueData.get("name") != this.get("selected")) {
+document.location.href = "yarnQueue/" + d.queueData.get("name");
+}
+  }.bind(this));
+  // .on("click", click);
+
+nodeEnter.append("circle")
+  .attr("r", 1e-6)
+  .style("fill", function(d) {
+var usedCap = d.queueData.get("usedCapacity");
+if (usedCap <= 60.0) {
+  return "LimeGreen";
+} else if (usedCap <= 100.0) {
+  return "DarkOrange";
+} else {
+  return "LightCoral";
+}
+  });
+
+// append percentage
+nodeEnter.append("text")
+  .attr("x", function(d) { return 0; })
+  .attr("dy", ".35em")
+  .attr("text-anchor", function(d) { return "middle"; })
+  .text(function(d) {
+var usedCap = d.queueData.get("usedCapacity");
+if (usedCap >= 100.0) {
+  return usedCa

[22/50] [abbrv] hadoop git commit: YARN-5321. [YARN-3368] Add resource usage for application by node managers (Wangda Tan via Sunil G) YARN-5320. [YARN-3368] Add resource usage by applications and que

2016-10-04 Thread sunilg
YARN-5321. [YARN-3368] Add resource usage for application by node managers 
(Wangda Tan via Sunil G)
YARN-5320. [YARN-3368] Add resource usage by applications and queues to cluster 
overview page  (Wangda Tan via Sunil G)
YARN-5322. [YARN-3368] Add a node heat chart map (Wangda Tan via Sunil G)
YARN-5347. [YARN-3368] Applications page improvements (Sreenath Somarajapuram 
via Sunil G)
YARN-5348. [YARN-3368] Node details page improvements (Sreenath Somarajapuram 
via Sunil G)
YARN-5346. [YARN-3368] Queues page improvements (Sreenath Somarajapuram via 
Sunil G)
YARN-5345. [YARN-3368] Cluster overview page improvements (Sreenath 
Somarajapuram via Sunil G)
YARN-5344. [YARN-3368] Generic UI improvements (Sreenath Somarajapuram via 
Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53874603
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53874603
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53874603

Branch: refs/heads/YARN-3368
Commit: 5387460330265c1d81a91e7ea789b10a42f2d292
Parents: 597b8a7
Author: Sunil 
Authored: Fri Jul 15 21:16:06 2016 +0530
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 .../src/main/webapp/app/adapters/yarn-app.js|  14 +
 .../app/components/app-usage-donut-chart.js |  67 
 .../src/main/webapp/app/components/bar-chart.js |   5 +
 .../app/components/base-chart-component.js  |  55 ++-
 .../app/components/base-usage-donut-chart.js|  43 +++
 .../main/webapp/app/components/donut-chart.js   |  55 ++-
 .../main/webapp/app/components/nodes-heatmap.js | 209 +++
 ...er-app-memusage-by-nodes-stacked-barchart.js |  88 +
 ...app-ncontainers-by-nodes-stacked-barchart.js |  67 
 .../app/components/queue-usage-donut-chart.js   |  69 
 .../main/webapp/app/components/queue-view.js|   3 +-
 .../main/webapp/app/components/simple-table.js  |   9 +-
 .../webapp/app/components/stacked-barchart.js   | 198 +++
 .../main/webapp/app/components/timeline-view.js |   2 +-
 .../main/webapp/app/components/tree-selector.js |  43 ++-
 .../webapp/app/controllers/cluster-overview.js  |   9 +
 .../webapp/app/controllers/yarn-app-attempt.js  |  40 +++
 .../webapp/app/controllers/yarn-app-attempts.js |  40 +++
 .../src/main/webapp/app/controllers/yarn-app.js |  38 ++
 .../main/webapp/app/controllers/yarn-apps.js|   9 +
 .../webapp/app/controllers/yarn-node-apps.js|  39 +++
 .../app/controllers/yarn-node-containers.js |  39 +++
 .../main/webapp/app/controllers/yarn-node.js|  37 ++
 .../app/controllers/yarn-nodes-heatmap.js   |  36 ++
 .../main/webapp/app/controllers/yarn-nodes.js   |  33 ++
 .../webapp/app/controllers/yarn-queue-apps.js   |  46 +++
 .../main/webapp/app/controllers/yarn-queue.js   |  20 ++
 .../main/webapp/app/controllers/yarn-queues.js  |  34 ++
 .../webapp/app/controllers/yarn-services.js |  34 ++
 .../main/webapp/app/models/cluster-metric.js|   2 +-
 .../main/webapp/app/models/yarn-app-attempt.js  |  11 +
 .../src/main/webapp/app/models/yarn-app.js  |   4 +
 .../src/main/webapp/app/models/yarn-rm-node.js  |   7 +
 .../src/main/webapp/app/router.js   |  15 +-
 .../src/main/webapp/app/routes/application.js   |   2 +
 .../main/webapp/app/routes/cluster-overview.js  |   9 +-
 .../main/webapp/app/routes/yarn-app-attempts.js |  30 ++
 .../src/main/webapp/app/routes/yarn-app.js  |  17 +-
 .../src/main/webapp/app/routes/yarn-apps.js |   6 +-
 .../main/webapp/app/routes/yarn-apps/apps.js|  22 ++
 .../webapp/app/routes/yarn-apps/services.js |  22 ++
 .../src/main/webapp/app/routes/yarn-node.js |   1 +
 .../src/main/webapp/app/routes/yarn-nodes.js|   5 +-
 .../webapp/app/routes/yarn-nodes/heatmap.js |  22 ++
 .../main/webapp/app/routes/yarn-nodes/table.js  |  22 ++
 .../main/webapp/app/routes/yarn-queue-apps.js   |  36 ++
 .../src/main/webapp/app/routes/yarn-queues.js   |  38 ++
 .../webapp/app/serializers/yarn-app-attempt.js  |  19 +-
 .../src/main/webapp/app/serializers/yarn-app.js |   8 +-
 .../webapp/app/serializers/yarn-container.js|  20 +-
 .../src/main/webapp/app/styles/app.css  | 139 ++--
 .../main/webapp/app/templates/application.hbs   |  99 --
 .../webapp/app/templates/cluster-overview.hbs   | 168 ++---
 .../app/templates/components/app-table.hbs  |  10 +-
 .../templates/components/node-menu-panel.hbs|   2 +-
 .../app/templates/components/nodes-heatmap.hbs  |  27 ++
 .../components/queue-configuration-table.hbs|   4 -
 .../templates/components/queue-navigator.hbs|  14 +-
 .../app/templates/components/timeline-view.hbs  |   3 +-
 .../webapp/app/templates/yarn-app-attempt.hbs   |  13 +-
 .../webapp/app/templates/yarn-app-attempts.hbs  |  57 +++
 .../src/main/webapp/app/templates/yarn-app.hbs  | 346 ---
 .../src/main/webapp/app/templates/yarn-apps.hb

[27/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-04 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6804e642/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-node-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-node-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-node-test.js
deleted file mode 100644
index 5877589..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-node-test.js
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import { moduleForModel, test } from 'ember-qunit';
-
-moduleForModel('yarn-node', 'Unit | Model | Node', {
-  // Specify the other units that are required for this test.
-  needs: []
-});
-
-test('Basic creation test', function(assert) {
-  let model = this.subject();
-
-  assert.ok(model);
-  assert.ok(model._notifyProperties);
-  assert.ok(model.didLoad);
-  assert.ok(model.totalVmemAllocatedContainersMB);
-  assert.ok(model.vmemCheckEnabled);
-  assert.ok(model.pmemCheckEnabled);
-  assert.ok(model.nodeHealthy);
-  assert.ok(model.lastNodeUpdateTime);
-  assert.ok(model.healthReport);
-  assert.ok(model.nmStartupTime);
-  assert.ok(model.nodeManagerBuildVersion);
-  assert.ok(model.hadoopBuildVersion);
-});
-
-test('test fields', function(assert) {
-  let model = this.subject();
-
-  assert.expect(4);
-  Ember.run(function () {
-model.set("totalVmemAllocatedContainersMB", 4096);
-model.set("totalPmemAllocatedContainersMB", 2048);
-model.set("totalVCoresAllocatedContainers", 4);
-model.set("hadoopBuildVersion", "3.0.0-SNAPSHOT");
-assert.equal(model.get("totalVmemAllocatedContainersMB"), 4096);
-assert.equal(model.get("totalPmemAllocatedContainersMB"), 2048);
-assert.equal(model.get("totalVCoresAllocatedContainers"), 4);
-assert.equal(model.get("hadoopBuildVersion"), "3.0.0-SNAPSHOT");
-  });
-});
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6804e642/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-rm-node-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-rm-node-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-rm-node-test.js
deleted file mode 100644
index 4fd2517..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-rm-node-test.js
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import { moduleForModel, test } from 'ember-qunit';
-
-moduleForModel('yarn-rm-node', 'Unit | Model | RMNode', {
-  // Specify the other units that are required for this test.
-  needs: []
-});
-
-test('Basic creation test', function(assert) {
-  let model = this.subject();
-
-  assert.ok(model);
-  assert.ok(model._notifyProperties);
-  assert.ok(model.didLoad);
-  assert.ok(model.rack);
-  assert.ok(model.state);
-  assert.ok(model.nodeHostName);
-  assert.ok(model.nodeHTTPAddress);
-  assert.ok(model.lastHealthUpdate);
-  assert.ok(model.healthReport);
-  assert.ok(model.numContainers);
-  assert.ok(model.usedMemoryMB);
-  assert.ok(model.availMemoryMB);
-  assert.ok(model.usedVirtualCores);
-  assert.ok(model.availableVirtualCores);
-  assert.ok(model.version);
-  assert.ok(model.nodeLabels);
-  assert.ok(model.nodeLabelsAsStr

[18/50] [abbrv] hadoop git commit: YARN-4515. [YARN-3368] Support hosting web UI framework inside YARN RM. (Sunil G via wangda) YARN-5000. [YARN-3368] App attempt page is not loading when timeline ser

2016-10-04 Thread sunilg
YARN-4515. [YARN-3368] Support hosting web UI framework inside YARN RM. (Sunil 
G via wangda)
YARN-5000. [YARN-3368] App attempt page is not loading when timeline server is 
not started (Sunil G via wangda)
YARN-5038. [YARN-3368] Application and Container pages shows wrong values when 
RM is stopped. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4955056f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4955056f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4955056f

Branch: refs/heads/YARN-3368
Commit: 4955056f7ffb99afffbe4e9bbe5b311bf341ebd5
Parents: 9481cb9
Author: Wangda Tan 
Authored: Tue May 17 22:28:24 2016 -0700
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 LICENSE.txt |  2 +
 .../resources/assemblies/hadoop-yarn-dist.xml   |  7 ++
 .../hadoop/yarn/conf/YarnConfiguration.java | 23 ++
 .../src/main/resources/yarn-default.xml | 26 +++
 .../server/resourcemanager/ResourceManager.java | 76 +---
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  |  4 +-
 .../webapp/app/adapters/yarn-app-attempt.js |  4 +-
 .../webapp/app/adapters/yarn-container-log.js   |  2 +-
 .../main/webapp/app/adapters/yarn-node-app.js   | 10 ++-
 .../webapp/app/adapters/yarn-node-container.js  | 10 ++-
 .../src/main/webapp/app/adapters/yarn-node.js   |  5 +-
 .../main/webapp/app/components/timeline-view.js | 17 +++--
 .../main/webapp/app/components/tree-selector.js |  4 +-
 .../main/webapp/app/helpers/log-files-comma.js  |  2 +-
 .../src/main/webapp/app/helpers/node-link.js|  2 +-
 .../src/main/webapp/app/helpers/node-menu.js|  6 +-
 .../src/main/webapp/app/helpers/node-name.js| 46 
 .../main/webapp/app/models/yarn-app-attempt.js  | 72 ++-
 .../src/main/webapp/app/models/yarn-app.js  | 14 
 .../main/webapp/app/models/yarn-container.js|  7 ++
 .../main/webapp/app/routes/yarn-app-attempt.js  |  6 +-
 .../webapp/app/serializers/yarn-app-attempt.js  |  5 +-
 .../src/main/webapp/app/serializers/yarn-app.js | 11 ++-
 .../webapp/app/serializers/yarn-container.js|  3 +-
 .../webapp/app/serializers/yarn-node-app.js |  5 +-
 .../app/serializers/yarn-node-container.js  |  5 +-
 .../main/webapp/app/serializers/yarn-rm-node.js |  5 +-
 .../main/webapp/app/templates/application.hbs   | 21 +-
 .../templates/components/app-attempt-table.hbs  | 22 +-
 .../app/templates/components/app-table.hbs  |  8 +--
 .../templates/components/container-table.hbs|  4 +-
 .../templates/components/node-menu-panel.hbs| 44 
 .../app/templates/components/timeline-view.hbs  |  2 +-
 .../src/main/webapp/app/templates/error.hbs |  2 +-
 .../webapp/app/templates/yarn-app-attempt.hbs   |  4 ++
 .../src/main/webapp/app/templates/yarn-app.hbs  |  2 +-
 .../src/main/webapp/app/templates/yarn-apps.hbs |  9 ++-
 .../main/webapp/app/templates/yarn-node-app.hbs |  4 +-
 .../webapp/app/templates/yarn-node-apps.hbs | 12 ++--
 .../app/templates/yarn-node-container.hbs   |  2 +-
 .../app/templates/yarn-node-containers.hbs  | 12 ++--
 .../src/main/webapp/app/templates/yarn-node.hbs |  2 +-
 .../main/webapp/app/templates/yarn-nodes.hbs| 10 ++-
 .../main/webapp/app/templates/yarn-queue.hbs|  8 ++-
 .../src/main/webapp/config/environment.js   |  2 +-
 .../hadoop-yarn-ui/src/main/webapp/package.json |  2 +
 .../webapp/tests/unit/helpers/node-name-test.js | 28 
 47 files changed, 486 insertions(+), 93 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4955056f/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 45b6cdf..5efbd14 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1882,6 +1882,7 @@ The Apache Hadoop YARN Web UI component bundles the 
following files under the MI
  - datatables v1.10.8 (https://datatables.net/)
  - moment v2.10.6 (http://momentjs.com/) - Copyright (c) 2011-2015 Tim Wood, 
Iskren Chernev, Moment.js contributors
  - em-helpers v0.5.8 (https://github.com/sreenaths/em-helpers)
+ - ember-array-contains-helper v1.0.2 
(https://github.com/bmeurant/ember-array-contains-helper)
  - ember-cli-app-version v0.5.8 
(https://github.com/EmberSherpa/ember-cli-app-version) - Authored by Taras 
Mankovski 
  - ember-cli-babel v5.1.6 (https://github.com/babel/ember-cli-babel) - 
Authored by Stefan Penner 
  - ember-cli-content-security-policy v0.4.0 
(https://github.com/rwjblue/ember-cli-content-security-policy)
@@ -1895,6 +1896,7 @@ The Apache Hadoop YARN Web UI component bundles the 
following files under the MI
  - ember-cli-sri v1.2.1 (https://github.com/jonathanKingston/ember-cli-sri) - 
Authored by Jonathan Kingston

[19/50] [abbrv] hadoop git commit: YARN-4849. Addendum patch to fix license. (Wangda Tan via Sunil G)

2016-10-04 Thread sunilg
YARN-4849. Addendum patch to fix license. (Wangda Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/151d67e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/151d67e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/151d67e8

Branch: refs/heads/YARN-3368
Commit: 151d67e81c6700f6cda346d00a9fe700b709be93
Parents: 44e7b5b
Author: sunilg 
Authored: Wed Aug 24 16:28:34 2016 +0530
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 LICENSE.txt | 84 ++--
 1 file changed, 51 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/151d67e8/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 5efbd14..05743fe 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1869,35 +1869,53 @@ be bound by any additional provisions that may appear 
in any communication from
 You. This License may not be modified without the mutual written agreement of
 the Licensor and You.
 
-For Apache Hadoop YARN Web UI component: 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/
--
-The Apache Hadoop YARN Web UI component bundles the following files under the 
MIT License:
-
- - ember v2.2.0 (http://emberjs.com/) - Copyright (c) 2014 Yehuda Katz, Tom 
Dale and Ember.js contributors
- - ember-data v2.1.0 (https://github.com/emberjs/data) - Copyright (C) 
2011-2014 Tilde, Inc. and contributors, Portions Copyright (C) 2011 
LivingSocial Inc.
- - ember-resolver v2.0.3 (https://github.com/ember-cli/ember-resolver) - 
Copyright (c) 2013 Stefan Penner and Ember App Kit Contributors
- - bootstrap v3.3.6 (http://getbootstrap.com) - Copyright (c) 2011-2014 
Twitter, Inc
- - jquery v2.1.4 (http://jquery.org) - Copyright 2005, 2014 jQuery Foundation 
and other contributors
- - jquery-ui v1.11.4 (http://jqueryui.com/) - Copyright 2014 jQuery Foundation 
and other contributors
- - datatables v1.10.8 (https://datatables.net/)
- - moment v2.10.6 (http://momentjs.com/) - Copyright (c) 2011-2015 Tim Wood, 
Iskren Chernev, Moment.js contributors
- - em-helpers v0.5.8 (https://github.com/sreenaths/em-helpers)
- - ember-array-contains-helper v1.0.2 
(https://github.com/bmeurant/ember-array-contains-helper)
- - ember-cli-app-version v0.5.8 
(https://github.com/EmberSherpa/ember-cli-app-version) - Authored by Taras 
Mankovski 
- - ember-cli-babel v5.1.6 (https://github.com/babel/ember-cli-babel) - 
Authored by Stefan Penner 
- - ember-cli-content-security-policy v0.4.0 
(https://github.com/rwjblue/ember-cli-content-security-policy)
- - ember-cli-dependency-checker v1.2.0 
(https://github.com/quaertym/ember-cli-dependency-checker) - Authored by Emre 
Unal
- - ember-cli-htmlbars v1.0.2 (https://github.com/ember-cli/ember-cli-htmlbars) 
- Authored by Robert Jackson 
- - ember-cli-htmlbars-inline-precompile v0.3.1 
(https://github.com/pangratz/ember-cli-htmlbars-inline-precompile) - Authored 
by Clemens Müller 
- - ember-cli-ic-ajax v0.2.1 (https://github.com/rwjblue/ember-cli-ic-ajax) - 
Authored by Robert Jackson 
- - ember-cli-inject-live-reload v1.4.0 
(https://github.com/rwjblue/ember-cli-inject-live-reload) - Authored by Robert 
Jackson 
- - ember-cli-qunit v1.2.1 (https://github.com/ember-cli/ember-cli-qunit) - 
Authored by Robert Jackson 
- - ember-cli-release v0.2.8 (https://github.com/lytics/ember-cli-release) - 
Authored by Robert Jackson 
- - ember-cli-sri v1.2.1 (https://github.com/jonathanKingston/ember-cli-sri) - 
Authored by Jonathan Kingston
- - ember-cli-uglify v1.2.0 (github.com/ember-cli/ember-cli-uglify) - Authored 
by Robert Jackson 
- - ember-d3 v0.1.0 (https://github.com/brzpegasus/ember-d3) - Authored by 
Estelle DeBlois
- - ember-truth-helpers v1.2.0 
(https://github.com/jmurphyau/ember-truth-helpers)
- - select2 v4.0.0 (https://select2.github.io/)
+The binary distribution of this product bundles these dependencies under the
+following license:
+bootstrap v3.3.6
+broccoli-asset-rev v2.4.2
+broccoli-funnel v1.0.1
+datatables v1.10.8
+em-helpers v0.5.13
+em-table v0.1.6
+ember v2.2.0
+ember-array-contains-helper v1.0.2
+ember-bootstrap v0.5.1
+ember-cli v1.13.13
+ember-cli-app-version v1.0.0
+ember-cli-babel v5.1.6
+ember-cli-content-security-policy v0.4.0
+ember-cli-dependency-checker v1.2.0
+ember-cli-htmlbars v1.0.2
+ember-cli-htmlbars-inline-precompile v0.3.1
+ember-cli-ic-ajax v0.2.1
+ember-cli-inject-live-reload v1.4.0
+ember-cli-jquery-ui v0.0.20
+ember-cli-qunit v1.2.1
+ember-cli-release v0.2.8
+ember-cli-shims v0.0.6
+ember-cli-sri v1.2.1
+ember-cli-test-loader v0.2.1
+ember-cli-uglify v1.2.0
+ember-d3 v0.1.0
+ember-data v2.1.0
+ember-disable-proxy-controllers v1.0.1
+em

[21/50] [abbrv] hadoop git commit: YARN-5321. [YARN-3368] Add resource usage for application by node managers (Wangda Tan via Sunil G) YARN-5320. [YARN-3368] Add resource usage by applications and que

2016-10-04 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/53874603/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
index ff49403..b945451 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
@@ -20,7 +20,9 @@ import Ember from 'ember';
 
 export default Ember.Route.extend({
   model() {
-var apps = this.store.findAll('yarn-app');
-return apps;
+return Ember.RSVP.hash({
+  apps: this.store.findAll('yarn-app'),
+  clusterMetrics: this.store.findAll('ClusterMetric'),
+});
   }
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53874603/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/apps.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/apps.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/apps.js
new file mode 100644
index 000..8719170
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/apps.js
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Route.extend({
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53874603/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
new file mode 100644
index 000..8719170
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Route.extend({
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53874603/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
index 6e57388..64a1b3e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
@@ -22,6 +22,7 @@ export default Ember.Route.extend({
   model(param) {
 // Fetches data from both NM and RM. RM is queried to get node usage info.
 return Ember.RSVP.hash({
+  nodeInfo: { id: param.node_id, addr: param.node_addr },
   node: this.store.findRecord('yarn-node', param.node_addr),
   rmNode: this.store.findRecord('yarn-rm-node', param.node_id)
 });

http://git-wip-us.apache

[11/50] [abbrv] hadoop git commit: HDFS-10690. Optimize insertion/removal of replica in ShortCircuitCache. Contributed by Fenghua Hu.

2016-10-04 Thread sunilg
HDFS-10690. Optimize insertion/removal of replica in ShortCircuitCache. 
Contributed by Fenghua Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/607705c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/607705c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/607705c4

Branch: refs/heads/YARN-3368
Commit: 607705c488fa5263d851cee578a2d319e6e52ecd
Parents: de7a0a9
Author: Xiaoyu Yao 
Authored: Mon Oct 3 10:53:21 2016 -0700
Committer: Xiaoyu Yao 
Committed: Mon Oct 3 10:53:21 2016 -0700

--
 .../hdfs/shortcircuit/ShortCircuitCache.java| 88 
 .../hadoop/fs/TestEnhancedByteBufferAccess.java | 17 ++--
 .../shortcircuit/TestShortCircuitCache.java |  9 +-
 3 files changed, 69 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/607705c4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
index 62ade70..bd02a97 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
@@ -26,13 +26,14 @@ import java.nio.MappedByteBuffer;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.TreeMap;
+import java.util.NoSuchElementException;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
 
+import org.apache.commons.collections.map.LinkedMap;
 import org.apache.commons.lang.mutable.MutableBoolean;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
@@ -107,16 +108,20 @@ public class ShortCircuitCache implements Closeable {
 
 int numDemoted = demoteOldEvictableMmaped(curMs);
 int numPurged = 0;
-Long evictionTimeNs = (long) 0;
+Long evictionTimeNs;
 while (true) {
-  Entry entry =
-  evictable.ceilingEntry(evictionTimeNs);
-  if (entry == null) break;
-  evictionTimeNs = entry.getKey();
+  Object eldestKey;
+  try {
+eldestKey = evictable.firstKey();
+  } catch (NoSuchElementException e) {
+break;
+  }
+  evictionTimeNs = (Long)eldestKey;
   long evictionTimeMs =
   TimeUnit.MILLISECONDS.convert(evictionTimeNs, 
TimeUnit.NANOSECONDS);
   if (evictionTimeMs + maxNonMmappedEvictableLifespanMs >= curMs) 
break;
-  ShortCircuitReplica replica = entry.getValue();
+  ShortCircuitReplica replica = (ShortCircuitReplica)evictable.get(
+  eldestKey);
   if (LOG.isTraceEnabled()) {
 LOG.trace("CacheCleaner: purging " + replica + ": " +
 StringUtils.getStackTrace(Thread.currentThread()));
@@ -263,11 +268,11 @@ public class ShortCircuitCache implements Closeable {
   private CacheCleaner cacheCleaner;
 
   /**
-   * Tree of evictable elements.
+   * LinkedMap of evictable elements.
*
* Maps (unique) insertion time in nanoseconds to the element.
*/
-  private final TreeMap evictable = new TreeMap<>();
+  private final LinkedMap evictable = new LinkedMap();
 
   /**
* Maximum total size of the cache, including both mmapped and
@@ -281,12 +286,11 @@ public class ShortCircuitCache implements Closeable {
   private long maxNonMmappedEvictableLifespanMs;
 
   /**
-   * Tree of mmaped evictable elements.
+   * LinkedMap of mmaped evictable elements.
*
* Maps (unique) insertion time in nanoseconds to the element.
*/
-  private final TreeMap evictableMmapped =
-  new TreeMap<>();
+  private final LinkedMap evictableMmapped = new LinkedMap();
 
   /**
* Maximum number of mmaped evictable elements.
@@ -482,13 +486,16 @@ public class ShortCircuitCache implements Closeable {
   private int demoteOldEvictableMmaped(long now) {
 int numDemoted = 0;
 boolean needMoreSpace = false;
-Long evictionTimeNs = (long) 0;
+Long evictionTimeNs;
 
 while (true) {
-  Entry entry =
-  evictableMmapped.ceilingEntry(evictionTimeNs);
-  if (entry == null) break;
-  evictionTimeNs = entry.getKey();
+  Object eldestKey;
+  try {
+eldestKey

[04/50] [abbrv] hadoop git commit: Revert "HDFS-10923. Make InstrumentedLock require ReentrantLock."

2016-10-04 Thread sunilg
Revert "HDFS-10923. Make InstrumentedLock require ReentrantLock."

This reverts commit c7ce6fdc20fe053f0bb3bcf900ffc0e1db6feee5.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe9ebe20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe9ebe20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe9ebe20

Branch: refs/heads/YARN-3368
Commit: fe9ebe20ab113567f0777c11cb48ce0d3ce587a8
Parents: c7ce6fd
Author: Arpit Agarwal 
Authored: Fri Sep 30 23:11:51 2016 -0700
Committer: Arpit Agarwal 
Committed: Fri Sep 30 23:11:51 2016 -0700

--
 .../apache/hadoop/hdfs/InstrumentedLock.java| 185 ++
 .../hadoop/hdfs/InstrumentedReentrantLock.java  | 195 ---
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   4 +-
 .../hadoop/hdfs/TestInstrumentedLock.java   | 166 
 .../hdfs/TestInstrumentedReentrantLock.java | 177 -
 5 files changed, 353 insertions(+), 374 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe9ebe20/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedLock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedLock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedLock.java
new file mode 100644
index 000..6279e95
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedLock.java
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Timer;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * This is a debugging class that can be used by callers to track
+ * whether a specifc lock is being held for too long and periodically
+ * log a warning and stack trace, if so.
+ *
+ * The logged warnings are throttled so that logs are not spammed.
+ *
+ * A new instance of InstrumentedLock can be created for each object
+ * that needs to be instrumented.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class InstrumentedLock implements Lock {
+
+  private final Lock lock;
+  private final Log logger;
+  private final String name;
+  private final Timer clock;
+
+  /** Minimum gap between two lock warnings. */
+  private final long minLoggingGap;
+  /** Threshold for detecting long lock held time. */
+  private final long lockWarningThreshold;
+
+  // Tracking counters for lock statistics.
+  private volatile long lockAcquireTimestamp;
+  private final AtomicLong lastLogTimestamp;
+  private final AtomicLong warningsSuppressed = new AtomicLong(0);
+
+  /**
+   * Create a instrumented lock instance which logs a warning message
+   * when lock held time is above given threshold.
+   *
+   * @param name the identifier of the lock object
+   * @param logger this class does not have its own logger, will log to the
+   *   given logger instead
+   * @param minLoggingGapMs  the minimum time gap between two log messages,
+   * this is to avoid spamming to many logs
+   * @param lockWarningThresholdMs the time threshold to view lock held
+   *   time as being "too long"
+   */
+  public InstrumentedLock(String name, Log logger, long minLoggingGapMs,
+  long lockWarningThresholdMs) {
+this(name, logger, new ReentrantLock(),
+minLoggingGapMs, lockWarningThresholdMs);
+  }
+
+  public InstrumentedLock(String name, Lo

[12/50] [abbrv] hadoop git commit: YARN-4767. Network issues can cause persistent RM UI outage. (Daniel Templeton via kasha)

2016-10-04 Thread sunilg
YARN-4767. Network issues can cause persistent RM UI outage. (Daniel Templeton 
via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/736d33cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/736d33cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/736d33cd

Branch: refs/heads/YARN-3368
Commit: 736d33cddd88a0cec925a451940b2523999a9c51
Parents: 607705c
Author: Karthik Kambatla 
Authored: Mon Oct 3 14:35:57 2016 -0700
Committer: Karthik Kambatla 
Committed: Mon Oct 3 14:35:57 2016 -0700

--
 .../hadoop/yarn/webapp/YarnWebParams.java   |   1 +
 .../resourcemanager/webapp/ErrorBlock.java  |  39 +++
 .../server/resourcemanager/webapp/RMWebApp.java |   1 +
 .../webapp/RedirectionErrorPage.java|  47 
 .../resourcemanager/webapp/RmController.java|   4 +
 .../webapp/TestRedirectionErrorPage.java|  68 +
 .../server/webproxy/WebAppProxyServlet.java | 274 ++-
 .../server/webproxy/amfilter/AmIpFilter.java|  64 +++--
 .../server/webproxy/TestWebAppProxyServlet.java |  24 +-
 .../server/webproxy/amfilter/TestAmFilter.java  |  29 +-
 10 files changed, 454 insertions(+), 97 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/736d33cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
index a34273c..ee9100f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
@@ -41,4 +41,5 @@ public interface YarnWebParams {
   String NODE_LABEL = "node.label";
   String WEB_UI_TYPE = "web.ui.type";
   String NEXT_REFRESH_INTERVAL = "next.refresh.interval";
+  String ERROR_MESSAGE = "error.message";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/736d33cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ErrorBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ErrorBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ErrorBlock.java
new file mode 100644
index 000..963e53f
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ErrorBlock.java
@@ -0,0 +1,39 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+
+import com.google.inject.Inject;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.ERROR_MESSAGE;
+
+/**
+ * This class is used to display an error message to the user in the UI.
+ */
+public class ErrorBlock extends HtmlBlock {
+  @Inject
+  ErrorBlock(ViewContext ctx) {
+super(ctx);
+  }
+
+  @Override
+  protected void render(Block html) {
+html.p()._($(ERROR_MESSAGE))._();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/736d33cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop

[10/50] [abbrv] hadoop git commit: MAPREDUCE-6638. Do not attempt to recover progress from previous job attempts if spill encryption is enabled. (Haibo Chen via kasha)

2016-10-04 Thread sunilg
MAPREDUCE-6638. Do not attempt to recover progress from previous job attempts 
if spill encryption is enabled. (Haibo Chen via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de7a0a92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de7a0a92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de7a0a92

Branch: refs/heads/YARN-3368
Commit: de7a0a92ca1983b35ca4beb7ab712fd700a9e6e0
Parents: 7442084
Author: Karthik Kambatla 
Authored: Mon Oct 3 10:30:22 2016 -0700
Committer: Karthik Kambatla 
Committed: Mon Oct 3 10:30:22 2016 -0700

--
 .../hadoop/mapreduce/v2/app/MRAppMaster.java| 90 ++--
 .../hadoop/mapreduce/v2/app/TestRecovery.java   | 66 ++
 2 files changed, 129 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de7a0a92/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index d94f8a5..4a8a90e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -149,7 +149,6 @@ import 
org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
 import org.apache.hadoop.yarn.util.Clock;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.log4j.LogManager;
 
@@ -1303,44 +1302,77 @@ public class MRAppMaster extends CompositeService {
   }
 
   private void processRecovery() throws IOException{
-if (appAttemptID.getAttemptId() == 1) {
-  return;  // no need to recover on the first attempt
+boolean attemptRecovery = shouldAttemptRecovery();
+boolean recoverySucceeded = true;
+if (attemptRecovery) {
+  LOG.info("Attempting to recover.");
+  try {
+parsePreviousJobHistory();
+  } catch (IOException e) {
+LOG.warn("Unable to parse prior job history, aborting recovery", e);
+recoverySucceeded = false;
+  }
+}
+
+if (!isFirstAttempt() && (!attemptRecovery || !recoverySucceeded)) {
+  amInfos.addAll(readJustAMInfos());
+}
+  }
+
+  private boolean isFirstAttempt() {
+return appAttemptID.getAttemptId() == 1;
+  }
+
+  /**
+   * Check if the current job attempt should try to recover from previous
+   * job attempts if any.
+   */
+  private boolean shouldAttemptRecovery() throws IOException {
+if (isFirstAttempt()) {
+  return false;  // no need to recover on the first attempt
 }
 
 boolean recoveryEnabled = getConfig().getBoolean(
 MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,
 MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE_DEFAULT);
+if (!recoveryEnabled) {
+  LOG.info("Not attempting to recover. Recovery disabled. To enable " +
+  "recovery, set " + MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE);
+  return false;
+}
 
 boolean recoverySupportedByCommitter = isRecoverySupported();
+if (!recoverySupportedByCommitter) {
+  LOG.info("Not attempting to recover. Recovery is not supported by " +
+  committer.getClass() + ". Use an OutputCommitter that supports" +
+  " recovery.");
+  return false;
+}
 
-// If a shuffle secret was not provided by the job client then this app
-// attempt will generate one.  However that disables recovery if there
-// are reducers as the shuffle secret would be app attempt specific.
-int numReduceTasks = getConfig().getInt(MRJobConfig.NUM_REDUCES, 0);
+int reducerCount = getConfig().getInt(MRJobConfig.NUM_REDUCES, 0);
+
+// If a shuffle secret was not provided by the job client, one will be
+// generated in this job attempt. However, that disables recovery if
+// there are reducers as the shuffle secret would be job attempt specific.
 boolean shuffleKeyValidForRecovery =
 TokenCache.getShuffleSecretKey(jobCredentials) != null;
+if (reducerCount > 0 && !shuffleKeyValidForRecovery) {
+  LOG.info("Not attempting to recover. The shuffle key is invalid for " +
+  "recovery.");
+  return false;
+  

[26/50] [abbrv] hadoop git commit: YARN-5019. [YARN-3368] Change urls in new YARN ui from camel casing to hyphens. (Sunil G via wangda)

2016-10-04 Thread sunilg
YARN-5019. [YARN-3368] Change urls in new YARN ui from camel casing to hyphens. 
(Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9481cb9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9481cb9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9481cb9c

Branch: refs/heads/YARN-3368
Commit: 9481cb9cdfa220f9cc3948078ece6c45c0d5b6bc
Parents: 506416a
Author: Wangda Tan 
Authored: Mon May 9 11:29:59 2016 -0700
Committer: sunilg 
Committed: Tue Oct 4 20:48:08 2016 +0530

--
 .../main/webapp/app/components/tree-selector.js |  4 +--
 .../main/webapp/app/controllers/application.js  | 16 +-
 .../main/webapp/app/helpers/log-files-comma.js  |  2 +-
 .../src/main/webapp/app/helpers/node-link.js|  2 +-
 .../src/main/webapp/app/helpers/node-menu.js| 12 
 .../main/webapp/app/models/yarn-app-attempt.js  |  2 +-
 .../src/main/webapp/app/router.js   | 32 ++--
 .../src/main/webapp/app/routes/index.js |  2 +-
 .../main/webapp/app/routes/yarn-app-attempt.js  |  6 ++--
 .../src/main/webapp/app/routes/yarn-app.js  |  4 +--
 .../src/main/webapp/app/routes/yarn-apps.js |  2 +-
 .../webapp/app/routes/yarn-container-log.js |  2 +-
 .../src/main/webapp/app/routes/yarn-node-app.js |  2 +-
 .../main/webapp/app/routes/yarn-node-apps.js|  2 +-
 .../webapp/app/routes/yarn-node-container.js|  2 +-
 .../webapp/app/routes/yarn-node-containers.js   |  2 +-
 .../src/main/webapp/app/routes/yarn-node.js |  4 +--
 .../src/main/webapp/app/routes/yarn-nodes.js|  2 +-
 .../src/main/webapp/app/routes/yarn-queue.js|  6 ++--
 .../main/webapp/app/routes/yarn-queues/index.js |  2 +-
 .../app/routes/yarn-queues/queues-selector.js   |  2 +-
 .../app/templates/components/app-table.hbs  |  4 +--
 .../webapp/app/templates/yarn-container-log.hbs |  2 +-
 .../main/webapp/app/templates/yarn-node-app.hbs |  4 +--
 .../webapp/app/templates/yarn-node-apps.hbs |  4 +--
 .../app/templates/yarn-node-container.hbs   |  2 +-
 .../app/templates/yarn-node-containers.hbs  |  4 +--
 .../src/main/webapp/app/templates/yarn-node.hbs |  2 +-
 28 files changed, 66 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9481cb9c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
index f7ec020..698c253 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
@@ -126,7 +126,7 @@ export default Ember.Component.extend({
   .attr("transform", function(d) { return "translate(" + source.y0 + "," + 
source.x0 + ")"; })
   .on("click", function(d,i){
 if (d.queueData.get("name") != this.get("selected")) {
-document.location.href = "yarnQueue/" + d.queueData.get("name");
+document.location.href = "yarn-queue/" + d.queueData.get("name");
 }
   }.bind(this));
   // .on("click", click);
@@ -176,7 +176,7 @@ export default Ember.Component.extend({
   .attr("r", 20)
   .attr("href", 
 function(d) {
-  return "yarnQueues/" + d.queueData.get("name");
+  return "yarn-queues/" + d.queueData.get("name");
 })
   .style("stroke", function(d) {
 if (d.queueData.get("name") == this.get("selected")) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9481cb9c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
index 3c68365..2effb13 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
@@ -29,25 +29,25 @@ export default Ember.Controller.extend({
   outputMainMenu: function(){
 var path = this.get('currentPath');
 var html = 'Queues' +
+html = html + '>Queues' +
 '(current)Applications' +
+html = html + '>Applications' +
 '(current)Cluster OverviewCluster Overview(current)Nodes' +
+html = html + '>Nodes' +
 '(current)';
 return Ember.Strin

[05/50] [abbrv] hadoop git commit: YARN-5678. Log demand as demand in FSLeafQueue and FSParentQueue. (Yufei Gu via kasha)

2016-10-04 Thread sunilg
YARN-5678. Log demand as demand in FSLeafQueue and FSParentQueue. (Yufei Gu via 
kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82857037
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82857037
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82857037

Branch: refs/heads/YARN-3368
Commit: 82857037b6e960dccdaf9e6b1b238411498a0dfe
Parents: fe9ebe2
Author: Karthik Kambatla 
Authored: Sun Oct 2 22:09:43 2016 -0700
Committer: Karthik Kambatla 
Committed: Sun Oct 2 22:09:43 2016 -0700

--
 .../server/resourcemanager/scheduler/fair/FSLeafQueue.java | 2 +-
 .../server/resourcemanager/scheduler/fair/FSParentQueue.java   | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82857037/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index a6adb47..9d5bbe5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -295,7 +295,7 @@ public class FSLeafQueue extends FSQueue {
 Resource toAdd = sched.getDemand();
 if (LOG.isDebugEnabled()) {
   LOG.debug("Counting resource from " + sched.getName() + " " + toAdd
-  + "; Total resource consumption for " + getName() + " now "
+  + "; Total resource demand for " + getName() + " now "
   + demand);
 }
 demand = Resources.add(demand, toAdd);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82857037/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
index e58c3f1..d05390b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
@@ -158,13 +158,13 @@ public class FSParentQueue extends FSQueue {
   for (FSQueue childQueue : childQueues) {
 childQueue.updateDemand();
 Resource toAdd = childQueue.getDemand();
+demand = Resources.add(demand, toAdd);
+demand = Resources.componentwiseMin(demand, maxShare);
 if (LOG.isDebugEnabled()) {
   LOG.debug("Counting resource from " + childQueue.getName() + " " +
-  toAdd + "; Total resource consumption for " + getName() +
+  toAdd + "; Total resource demand for " + getName() +
   " now " + demand);
 }
-demand = Resources.add(demand, toAdd);
-demand = Resources.componentwiseMin(demand, maxShare);
 if (Resources.equals(demand, maxShare)) {
   break;
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[15/50] [abbrv] hadoop git commit: HDFS-10934. TestDFSShell#testStat fails intermittently. Contributed by Eric Badger

2016-10-04 Thread sunilg
HDFS-10934. TestDFSShell#testStat fails intermittently. Contributed by Eric 
Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f61e3d13
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f61e3d13
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f61e3d13

Branch: refs/heads/YARN-3368
Commit: f61e3d13e9610cbd09886359553f27d6480f6735
Parents: 8078a5e
Author: Mingliang Liu 
Authored: Mon Oct 3 22:34:41 2016 -0400
Committer: Mingliang Liu 
Committed: Mon Oct 3 22:34:41 2016 -0400

--
 .../src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f61e3d13/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index fc90db5..558bcda 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -2127,11 +2127,11 @@ public class TestDFSShell {
   fmt.setTimeZone(TimeZone.getTimeZone("UTC"));
   final Path testDir1 = new Path("testStat", "dir1");
   dfs.mkdirs(testDir1);
-  final FileStatus status1 = dfs.getFileStatus(testDir1);
-  final String mtime1 = fmt.format(new 
Date(status1.getModificationTime()));
   final Path testFile2 = new Path(testDir1, "file2");
   DFSTestUtil.createFile(dfs, testFile2, 2 * blockSize, (short) 3, 0);
-  final FileStatus status2 = dfs.getFileStatus(testDir1);
+  final FileStatus status1 = dfs.getFileStatus(testDir1);
+  final String mtime1 = fmt.format(new 
Date(status1.getModificationTime()));
+  final FileStatus status2 = dfs.getFileStatus(testFile2);
   final String mtime2 = fmt.format(new 
Date(status2.getModificationTime()));
 
   final ByteArrayOutputStream out = new ByteArrayOutputStream();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[16/50] [abbrv] hadoop git commit: HDFS-10944. Correct the javadoc of dfsadmin#disallowSnapshot. Contributed by Jagadesh Kiran N

2016-10-04 Thread sunilg
HDFS-10944. Correct the javadoc of dfsadmin#disallowSnapshot. Contributed by 
Jagadesh Kiran N


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ea0210c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ea0210c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ea0210c

Branch: refs/heads/YARN-3368
Commit: 5ea0210c7fea1324973e7f69256bc53f31ee4cf7
Parents: f61e3d1
Author: Brahma Reddy Battula 
Authored: Tue Oct 4 18:33:22 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Oct 4 18:33:22 2016 +0530

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ea0210c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index e6e0fbb..32401dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -716,7 +716,7 @@ public class DFSAdmin extends FsShell {
   }
   
   /**
-   * Allow snapshot on a directory.
+   * Disallow snapshot on a directory.
* Usage: hdfs dfsadmin -disallowSnapshot snapshotDir
* @param argv List of of command line parameters.
* @exception IOException


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[08/50] [abbrv] hadoop git commit: HDFS-10619. Cache path in InodesInPath. Contributed by Daryn Sharp.

2016-10-04 Thread sunilg
HDFS-10619. Cache path in InodesInPath. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90020624
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90020624
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90020624

Branch: refs/heads/YARN-3368
Commit: 90020624b05230ad4a7fbd666d0177ecb107a4d6
Parents: 0da54e8
Author: Kihwal Lee 
Authored: Mon Oct 3 09:13:04 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Oct 3 09:13:04 2016 -0500

--
 .../org/apache/hadoop/hdfs/server/namenode/INodesInPath.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90020624/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 04d3bda..f05fa37 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -278,6 +278,8 @@ public class INodesInPath {
   }
 
   private final byte[][] path;
+  private final String pathname;
+
   /**
* Array with the specified number of INodes resolved for a given path.
*/
@@ -306,6 +308,7 @@ public class INodesInPath {
 Preconditions.checkArgument(inodes != null && path != null);
 this.inodes = inodes;
 this.path = path;
+this.pathname = DFSUtil.byteArray2PathString(path);
 this.isRaw = isRaw;
 this.isSnapshot = isSnapshot;
 this.snapshotId = snapshotId;
@@ -366,7 +369,7 @@ public class INodesInPath {
 
   /** @return the full path in string form */
   public String getPath() {
-return DFSUtil.byteArray2PathString(path);
+return pathname;
   }
 
   public String getParentPath() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[03/50] [abbrv] hadoop git commit: HDFS-10923. Make InstrumentedLock require ReentrantLock.

2016-10-04 Thread sunilg
HDFS-10923. Make InstrumentedLock require ReentrantLock.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7ce6fdc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7ce6fdc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7ce6fdc

Branch: refs/heads/YARN-3368
Commit: c7ce6fdc20fe053f0bb3bcf900ffc0e1db6feee5
Parents: 3a3697d
Author: Arpit Agarwal 
Authored: Fri Sep 30 23:00:06 2016 -0700
Committer: Arpit Agarwal 
Committed: Fri Sep 30 23:00:06 2016 -0700

--
 .../apache/hadoop/hdfs/InstrumentedLock.java| 185 --
 .../hadoop/hdfs/InstrumentedReentrantLock.java  | 195 +++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   4 +-
 .../hadoop/hdfs/TestInstrumentedLock.java   | 166 
 .../hdfs/TestInstrumentedReentrantLock.java | 177 +
 5 files changed, 374 insertions(+), 353 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ce6fdc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedLock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedLock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedLock.java
deleted file mode 100644
index 6279e95..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedLock.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Timer;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * This is a debugging class that can be used by callers to track
- * whether a specifc lock is being held for too long and periodically
- * log a warning and stack trace, if so.
- *
- * The logged warnings are throttled so that logs are not spammed.
- *
- * A new instance of InstrumentedLock can be created for each object
- * that needs to be instrumented.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class InstrumentedLock implements Lock {
-
-  private final Lock lock;
-  private final Log logger;
-  private final String name;
-  private final Timer clock;
-
-  /** Minimum gap between two lock warnings. */
-  private final long minLoggingGap;
-  /** Threshold for detecting long lock held time. */
-  private final long lockWarningThreshold;
-
-  // Tracking counters for lock statistics.
-  private volatile long lockAcquireTimestamp;
-  private final AtomicLong lastLogTimestamp;
-  private final AtomicLong warningsSuppressed = new AtomicLong(0);
-
-  /**
-   * Create a instrumented lock instance which logs a warning message
-   * when lock held time is above given threshold.
-   *
-   * @param name the identifier of the lock object
-   * @param logger this class does not have its own logger, will log to the
-   *   given logger instead
-   * @param minLoggingGapMs  the minimum time gap between two log messages,
-   * this is to avoid spamming to many logs
-   * @param lockWarningThresholdMs the time threshold to view lock held
-   *   time as being "too long"
-   */
-  public InstrumentedLock(String name, Log logger, long minLoggingGapMs,
-  long lockWarningThresholdMs) {
-this(name, logger, new ReentrantLock(),
-minLoggingGapMs, lockWarningThresholdMs);
-  }
-
-  public InstrumentedLock(String name, Log logger, Lock lock,
-  long minLoggingGapMs, long lockWarningThresholdMs) {

[07/50] [abbrv] hadoop git commit: YARN-5672. FairScheduler: Wrong queue name in log when adding application. (Wilfred Spiegelenburg via kasha)

2016-10-04 Thread sunilg
YARN-5672. FairScheduler: Wrong queue name in log when adding application. 
(Wilfred Spiegelenburg via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0da54e88
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0da54e88
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0da54e88

Branch: refs/heads/YARN-3368
Commit: 0da54e8848764c71a31473516d23ada582013f8c
Parents: 6e130c3
Author: Karthik Kambatla 
Authored: Mon Oct 3 06:03:46 2016 -0700
Committer: Karthik Kambatla 
Committed: Mon Oct 3 06:03:46 2016 -0700

--
 .../server/resourcemanager/scheduler/fair/FairScheduler.java  | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0da54e88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 310f2f9..920052f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -668,11 +668,12 @@ public class FairScheduler extends
 queue.getMetrics().submitApp(user);
 
 LOG.info("Accepted application " + applicationId + " from user: " + user
-+ ", in queue: " + queueName + ", currently num of applications: "
-+ applications.size());
++ ", in queue: " + queue.getName()
++ ", currently num of applications: " + applications.size());
 if (isAppRecovering) {
   if (LOG.isDebugEnabled()) {
-LOG.debug(applicationId + " is recovering. Skip notifying 
APP_ACCEPTED");
+LOG.debug(applicationId
++ " is recovering. Skip notifying APP_ACCEPTED");
   }
 } else {
   rmContext.getDispatcher().getEventHandler()


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/3] hadoop git commit: HDFS-10947. Correct the API name for truncate in webhdfs document. Contributed by Surendra Singh Lilhore

2016-10-04 Thread brahma
HDFS-10947. Correct the API name for truncate in webhdfs document. Contributed 
by Surendra Singh Lilhore

(cherry picked from commit ef7f06f7d1561db13bd3b07a5f62815ec29e1cdf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/560e524f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/560e524f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/560e524f

Branch: refs/heads/branch-2
Commit: 560e524f93ff5ad4cbefc583dc04974e08bbf6d6
Parents: c9df3c5
Author: Brahma Reddy Battula 
Authored: Tue Oct 4 18:46:42 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Oct 4 18:51:40 2016 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/560e524f/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index ddfa461..fdb8d2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -172,7 +172,7 @@ The HTTP REST API supports the complete 
[FileSystem](../../api/org/apache/hadoop
 *   HTTP POST
 * [`APPEND`](#Append_to_a_File) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).append)
 * [`CONCAT`](#Concat_Files) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).concat)
-* [`TRUNCATE`](#Truncate_a_File) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).concat)
+* [`TRUNCATE`](#Truncate_a_File) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).truncate)
 *   HTTP DELETE
 * [`DELETE`](#Delete_a_FileDirectory) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).delete)
 * [`DELETESNAPSHOT`](#Delete_Snapshot) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).deleteSnapshot)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[3/3] hadoop git commit: HDFS-10947. Correct the API name for truncate in webhdfs document. Contributed by Surendra Singh Lilhore

2016-10-04 Thread brahma
HDFS-10947. Correct the API name for truncate in webhdfs document. Contributed 
by Surendra Singh Lilhore

(cherry picked from commit ef7f06f7d1561db13bd3b07a5f62815ec29e1cdf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/253e2442
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/253e2442
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/253e2442

Branch: refs/heads/branch-2.8
Commit: 253e2442e4164ae603f9b49b8750326c762d7a34
Parents: 1ae35b6
Author: Brahma Reddy Battula 
Authored: Tue Oct 4 18:46:42 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Oct 4 18:53:12 2016 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/253e2442/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 1257cf8..c8b23fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -169,7 +169,7 @@ The HTTP REST API supports the complete 
[FileSystem](../../api/org/apache/hadoop
 *   HTTP POST
 * [`APPEND`](#Append_to_a_File) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).append)
 * [`CONCAT`](#Concat_Files) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).concat)
-* [`TRUNCATE`](#Truncate_a_File) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).concat)
+* [`TRUNCATE`](#Truncate_a_File) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).truncate)
 *   HTTP DELETE
 * [`DELETE`](#Delete_a_FileDirectory) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).delete)
 * [`DELETESNAPSHOT`](#Delete_Snapshot) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).deleteSnapshot)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/3] hadoop git commit: HDFS-10947. Correct the API name for truncate in webhdfs document. Contributed by Surendra Singh Lilhore

2016-10-04 Thread brahma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c9df3c5f2 -> 560e524f9
  refs/heads/branch-2.8 1ae35b6ff -> 253e2442e
  refs/heads/trunk 5ea0210c7 -> ef7f06f7d


HDFS-10947. Correct the API name for truncate in webhdfs document. Contributed 
by Surendra Singh Lilhore


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef7f06f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef7f06f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef7f06f7

Branch: refs/heads/trunk
Commit: ef7f06f7d1561db13bd3b07a5f62815ec29e1cdf
Parents: 5ea0210
Author: Brahma Reddy Battula 
Authored: Tue Oct 4 18:46:42 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Oct 4 18:46:42 2016 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef7f06f7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 546f99e..f904bda 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -172,7 +172,7 @@ The HTTP REST API supports the complete 
[FileSystem](../../api/org/apache/hadoop
 *   HTTP POST
 * [`APPEND`](#Append_to_a_File) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).append)
 * [`CONCAT`](#Concat_Files) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).concat)
-* [`TRUNCATE`](#Truncate_a_File) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).concat)
+* [`TRUNCATE`](#Truncate_a_File) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).truncate)
 *   HTTP DELETE
 * [`DELETE`](#Delete_a_FileDirectory) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).delete)
 * [`DELETESNAPSHOT`](#Delete_Snapshot) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).deleteSnapshot)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/3] hadoop git commit: HDFS-10944. Correct the javadoc of dfsadmin#disallowSnapshot. Contributed by Jagadesh Kiran N

2016-10-04 Thread brahma
HDFS-10944. Correct the javadoc of dfsadmin#disallowSnapshot. Contributed by 
Jagadesh Kiran N

(cherry picked from commit 5ea0210c7fea1324973e7f69256bc53f31ee4cf7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9df3c5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9df3c5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9df3c5f

Branch: refs/heads/branch-2
Commit: c9df3c5f217b0607156e37fc098dc9a07b824af6
Parents: 612aa0c
Author: Brahma Reddy Battula 
Authored: Tue Oct 4 18:33:22 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Oct 4 18:37:43 2016 +0530

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9df3c5f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 4b624a5..bd3ed15 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -716,7 +716,7 @@ public class DFSAdmin extends FsShell {
   }
   
   /**
-   * Allow snapshot on a directory.
+   * Disallow snapshot on a directory.
* Usage: hdfs dfsadmin -disallowSnapshot snapshotDir
* @param argv List of of command line parameters.
* @exception IOException


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/3] hadoop git commit: HDFS-10944. Correct the javadoc of dfsadmin#disallowSnapshot. Contributed by Jagadesh Kiran N

2016-10-04 Thread brahma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 612aa0cc0 -> c9df3c5f2
  refs/heads/branch-2.8 438402a71 -> 1ae35b6ff
  refs/heads/trunk f61e3d13e -> 5ea0210c7


HDFS-10944. Correct the javadoc of dfsadmin#disallowSnapshot. Contributed by 
Jagadesh Kiran N


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ea0210c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ea0210c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ea0210c

Branch: refs/heads/trunk
Commit: 5ea0210c7fea1324973e7f69256bc53f31ee4cf7
Parents: f61e3d1
Author: Brahma Reddy Battula 
Authored: Tue Oct 4 18:33:22 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Oct 4 18:33:22 2016 +0530

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ea0210c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index e6e0fbb..32401dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -716,7 +716,7 @@ public class DFSAdmin extends FsShell {
   }
   
   /**
-   * Allow snapshot on a directory.
+   * Disallow snapshot on a directory.
* Usage: hdfs dfsadmin -disallowSnapshot snapshotDir
* @param argv List of of command line parameters.
* @exception IOException


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[3/3] hadoop git commit: HDFS-10944. Correct the javadoc of dfsadmin#disallowSnapshot. Contributed by Jagadesh Kiran N

2016-10-04 Thread brahma
HDFS-10944. Correct the javadoc of dfsadmin#disallowSnapshot. Contributed by 
Jagadesh Kiran N

(cherry picked from commit 5ea0210c7fea1324973e7f69256bc53f31ee4cf7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ae35b6f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ae35b6f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ae35b6f

Branch: refs/heads/branch-2.8
Commit: 1ae35b6fff78c66c8dd25a71b7788e65d78f
Parents: 438402a
Author: Brahma Reddy Battula 
Authored: Tue Oct 4 18:33:22 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Oct 4 18:39:31 2016 +0530

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ae35b6f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 6256092..af28ec2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -713,7 +713,7 @@ public class DFSAdmin extends FsShell {
   }
   
   /**
-   * Allow snapshot on a directory.
+   * Disallow snapshot on a directory.
* Usage: hdfs dfsadmin -disallowSnapshot snapshotDir
* @param argv List of of command line parameters.
* @exception IOException


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org