[44/50] [abbrv] hadoop git commit: MAPREDUCE-6370. Made the timeline service v2 test driver write event ID. Contributed by Li Lu.

2015-08-25 Thread sjlee
MAPREDUCE-6370. Made the timeline service v2 test driver write event ID. 
Contributed by Li Lu.

(cherry picked from commit 827633ee9fee26e3e15343cbccb0b7905ae02170)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40d9d469
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40d9d469
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40d9d469

Branch: refs/heads/YARN-2928
Commit: 40d9d4690568476fb43c006983a91304434aa066
Parents: 09a8b7b
Author: Zhijie Shen zjs...@apache.org
Authored: Fri May 22 00:00:05 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Tue Aug 25 10:47:16 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../test/java/org/apache/hadoop/mapred/SimpleEntityWriter.java| 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40d9d469/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 9c66a5e..e6348b9 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -21,6 +21,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 
   BUG FIXES
 
+MAPREDUCE-6370. Made the timeline service v2 test driver write event ID.
+(Li Lu via zjshen)
+
 Trunk (Unreleased)
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40d9d469/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/SimpleEntityWriter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/SimpleEntityWriter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/SimpleEntityWriter.java
index 4ef0a14..625c32a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/SimpleEntityWriter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/SimpleEntityWriter.java
@@ -97,6 +97,7 @@ class SimpleEntityWriter extends EntityWriter {
 entity.addInfo(PERF_TEST, payLoad);
 // add an event
 TimelineEvent event = new TimelineEvent();
+event.setId("foo_event_id");
 event.setTimestamp(System.currentTimeMillis());
event.addInfo("foo_event", "test");
 entity.addEvent(event);



[14/50] [abbrv] hadoop git commit: MAPREDUCE-6335. Created MR job based performance test driver for the timeline service v2. Contributed by Sangjin Lee.

2015-08-25 Thread sjlee
MAPREDUCE-6335. Created MR job based performance test driver for the timeline 
service v2. Contributed by Sangjin Lee.

(cherry picked from commit b689f5d43d3f5434a30fe52f1a7e12e1fc5c71f4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f0b1cae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f0b1cae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f0b1cae

Branch: refs/heads/YARN-2928
Commit: 8f0b1cae5e1fd467efa6cc773ff744b27f05a2b3
Parents: 8e58f94
Author: Zhijie Shen zjs...@apache.org
Authored: Tue Apr 28 19:46:01 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Tue Aug 25 10:47:10 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../mapred/TimelineServicePerformanceV2.java| 298 +++
 .../apache/hadoop/test/MapredTestDriver.java|   3 +
 3 files changed, 304 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f0b1cae/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 5ac0d3b..2805780 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -9,6 +9,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 MAPREDUCE-6327. Made MR AM use timeline service v2 API to write history
 events and counters. (Junping Du via zjshen)
 
+MAPREDUCE-6335. Created MR job based performance test driver for the
+timeline service v2. (Sangjin Lee via zjshen)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f0b1cae/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
new file mode 100644
index 000..de46617
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
@@ -0,0 +1,298 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.util.Date;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.SleepJob.SleepInputFormat;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.timelineservice.collector.AppLevelTimelineCollector;
+import 
org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
+
+public class TimelineServicePerformanceV2 extends Configured 

[18/50] [abbrv] hadoop git commit: YARN-3634. TestMRTimelineEventHandling and TestApplication are broken. Contributed by Sangjin Lee.

2015-08-25 Thread sjlee
YARN-3634. TestMRTimelineEventHandling and TestApplication are broken. 
Contributed by Sangjin Lee.

(cherry picked from commit b059dd4882fd759e4762cc11c019be4b68fb74c1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e93fa602
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e93fa602
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e93fa602

Branch: refs/heads/YARN-2928
Commit: e93fa6023bf6c154aa4817a45020d385fefed22a
Parents: 7a68cda
Author: Junping Du junping...@apache.org
Authored: Wed May 13 11:54:24 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Tue Aug 25 10:47:11 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../collectormanager/NMCollectorService.java|  5 +++
 .../containermanager/ContainerManagerImpl.java  |  2 +-
 .../application/TestApplication.java|  3 +-
 .../collector/NodeTimelineCollectorManager.java | 46 ++--
 5 files changed, 34 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e93fa602/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0b06502..ec9abc9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -70,6 +70,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3529. Added mini HBase cluster and Phoenix support to timeline service
 v2 unit tests. (Li Lu via zjshen)
 
+YARN-3634. TestMRTimelineEventHandling and TestApplication are broken. (
+Sangjin Lee via junping_du)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e93fa602/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
index dc5601f..db79ee5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
@@ -81,6 +81,11 @@ public class NMCollectorService extends CompositeService 
implements
 YarnConfiguration.DEFAULT_NM_COLLECTOR_SERVICE_THREAD_COUNT));
 
 server.start();
+collectorServerAddress = conf.updateConnectAddr(
+YarnConfiguration.NM_BIND_HOST,
+YarnConfiguration.NM_COLLECTOR_SERVICE_ADDRESS,
+YarnConfiguration.DEFAULT_NM_COLLECTOR_SERVICE_ADDRESS,
+server.getListenerAddress());
 // start remaining services
 super.serviceStart();
LOG.info("NMCollectorService started at " + collectorServerAddress);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e93fa602/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 4dd9fa6..aa9b102 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -878,7 +878,7 @@ public class ContainerManagerImpl extends CompositeService 
implements
 TimelineUtils.FLOW_RUN_ID_TAG_PREFIX);
 long flowRunId = 0L;
if (flowRunIdStr != null && !flowRunIdStr.isEmpty()) {
-  flowRunId = Long.valueOf(flowRunIdStr);
+  flowRunId = Long.parseLong(flowRunIdStr);
   

[09/50] [abbrv] hadoop git commit: YARN-3374. Collector's web server should randomly bind an available port. Contributed by Zhijie Shen

2015-08-25 Thread sjlee
YARN-3374. Collector's web server should randomly bind an available port. 
Contributed by Zhijie Shen

(cherry picked from commit 3aa898e734a1e4368ddf1d0bbd31f9b4de53ceba)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9c81ac5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9c81ac5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9c81ac5

Branch: refs/heads/YARN-2928
Commit: a9c81ac5729e3e2cb39210d4679a8da9ca5c4e4e
Parents: e8b5ab6
Author: Junping Du junping...@apache.org
Authored: Thu Apr 2 11:59:59 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Tue Aug 25 10:38:44 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../hadoop/yarn/conf/YarnConfiguration.java |  1 +
 .../collector/TimelineCollectorManager.java | 20 ++--
 .../collector/TestTimelineCollectorManager.java | 12 
 4 files changed, 26 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9c81ac5/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ed5dc88..76fa0a8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -44,6 +44,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3040. Make putEntities operation be aware of the app's context. 
(Zhijie Shen 
 via junping_du)
 
+YARN-3374. Collector's web server should randomly bind an available port. (
+Zhijie Shen via junping_du)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9c81ac5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 51f2b2d..1ba7f36 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1515,6 +1515,7 @@ public class YarnConfiguration extends Configuration {
   /** The listening endpoint for the timeline service application.*/
   public static final String TIMELINE_SERVICE_BIND_HOST =
TIMELINE_SERVICE_PREFIX + "bind-host";
+  public static final String DEFAULT_TIMELINE_SERVICE_BIND_HOST = "0.0.0.0";
 
   /** The number of threads to handle client RPC API requests. */
   public static final String TIMELINE_SERVICE_HANDLER_THREAD_COUNT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9c81ac5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
index 909027e..5f23c25 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
@@ -210,22 +210,17 @@ public class TimelineCollectorManager extends 
CompositeService {
*/
   private void startWebApp() {
 Configuration conf = getConfig();
-// use the same ports as the old ATS for now; we could create new 
properties
-// for the new timeline service if needed
-String bindAddress = WebAppUtils.getWebAppBindURL(conf,
-YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
-WebAppUtils.getAHSWebAppURLWithoutScheme(conf));
-this.timelineRestServerBindAddress = WebAppUtils.getResolvedAddress(
-NetUtils.createSocketAddr(bindAddress));
-LOG.info(Instantiating the per-node collector webapp at  +
-timelineRestServerBindAddress);
+String bindAddress = conf.get(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
+

[08/50] [abbrv] hadoop git commit: YARN-3333. Rename TimelineAggregator etc. to TimelineCollector. Contributed by Sangjin Lee

2015-08-25 Thread sjlee
YARN-3333. Rename TimelineAggregator etc. to TimelineCollector. Contributed by 
Sangjin Lee

(cherry picked from commit dda84085cabd8fdf143b380e54e1730802fd9912)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63c7210c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63c7210c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63c7210c

Branch: refs/heads/YARN-2928
Commit: 63c7210c248be9a8e65b249b0593f1ace6003db5
Parents: 32acd9b
Author: Junping Du junping...@apache.org
Authored: Thu Mar 19 11:49:07 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Tue Aug 25 10:38:43 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   5 +-
 .../hadoop-yarn/hadoop-yarn-api/pom.xml |   4 +
 .../api/protocolrecords/AllocateResponse.java   |  20 +-
 .../timelineservice/TimelineWriteResponse.java  |  20 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |  20 +-
 .../src/main/proto/yarn_service_protos.proto|   2 +-
 .../pom.xml |  10 +
 .../distributedshell/ApplicationMaster.java |  54 ++--
 .../applications/distributedshell/Client.java   |   8 +-
 .../distributedshell/TestDistributedShell.java  |  10 +-
 .../hadoop/yarn/client/api/AMRMClient.java  |   6 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |   4 +-
 .../api/async/impl/AMRMClientAsyncImpl.java |  20 +-
 .../impl/pb/AllocateResponsePBImpl.java |  16 +-
 .../hadoop/yarn/client/api/TimelineClient.java  |   2 +-
 .../client/api/impl/TimelineClientImpl.java |  32 +--
 .../src/main/resources/yarn-default.xml |  14 +-
 .../hadoop/yarn/TestContainerLaunchRPC.java |   2 +-
 .../hadoop/yarn/api/TestAllocateResponse.java   |  12 +-
 .../hadoop-yarn-server-common/pom.xml   |   2 +-
 .../api/AggregatorNodemanagerProtocol.java  |  56 
 .../api/AggregatorNodemanagerProtocolPB.java|  33 ---
 .../api/CollectorNodemanagerProtocol.java   |  57 
 .../api/CollectorNodemanagerProtocolPB.java |  33 +++
 ...gregatorNodemanagerProtocolPBClientImpl.java |  94 ---
 ...ollectorNodemanagerProtocolPBClientImpl.java |  94 +++
 ...regatorNodemanagerProtocolPBServiceImpl.java |  61 
 ...llectorNodemanagerProtocolPBServiceImpl.java |  59 
 .../protocolrecords/NodeHeartbeatRequest.java   |  13 +-
 .../protocolrecords/NodeHeartbeatResponse.java  |   8 +-
 .../ReportNewAggregatorsInfoRequest.java|  53 
 .../ReportNewAggregatorsInfoResponse.java   |  32 ---
 .../ReportNewCollectorInfoRequest.java  |  53 
 .../ReportNewCollectorInfoResponse.java |  32 +++
 .../impl/pb/NodeHeartbeatRequestPBImpl.java |  58 ++--
 .../impl/pb/NodeHeartbeatResponsePBImpl.java|  60 ++--
 .../ReportNewAggregatorsInfoRequestPBImpl.java  | 142 --
 .../ReportNewAggregatorsInfoResponsePBImpl.java |  74 -
 .../pb/ReportNewCollectorInfoRequestPBImpl.java | 142 ++
 .../ReportNewCollectorInfoResponsePBImpl.java   |  74 +
 .../server/api/records/AppAggregatorsMap.java   |  33 ---
 .../server/api/records/AppCollectorsMap.java|  46 +++
 .../impl/pb/AppAggregatorsMapPBImpl.java| 151 --
 .../records/impl/pb/AppCollectorsMapPBImpl.java | 151 ++
 .../proto/aggregatornodemanager_protocol.proto  |  29 --
 .../proto/collectornodemanager_protocol.proto   |  29 ++
 .../yarn_server_common_service_protos.proto |  18 +-
 .../java/org/apache/hadoop/yarn/TestRPC.java| 116 
 .../hadoop/yarn/TestYarnServerApiClasses.java   |  24 +-
 .../hadoop/yarn/server/nodemanager/Context.java |  14 +-
 .../yarn/server/nodemanager/NodeManager.java|  56 ++--
 .../nodemanager/NodeStatusUpdaterImpl.java  |  11 +-
 .../aggregatormanager/NMAggregatorService.java  | 113 
 .../collectormanager/NMCollectorService.java| 110 
 .../application/ApplicationImpl.java|   9 +-
 .../ApplicationMasterService.java   |  12 +-
 .../resourcemanager/ResourceTrackerService.java |  72 ++---
 .../server/resourcemanager/rmapp/RMApp.java |  22 +-
 .../rmapp/RMAppAggregatorUpdateEvent.java   |  36 ---
 .../rmapp/RMAppCollectorUpdateEvent.java|  37 +++
 .../resourcemanager/rmapp/RMAppEventType.java   |   4 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  60 ++--
 .../applicationsmanager/MockAsm.java|   6 +-
 .../server/resourcemanager/rmapp/MockRMApp.java |   8 +-
 .../hadoop-yarn-server-tests/pom.xml|   5 +
 .../TestTimelineServiceClientIntegration.java   |  52 +++-
 .../hadoop-yarn-server-timelineservice/pom.xml  |  10 +
 .../aggregator/AppLevelTimelineAggregator.java  |  57 
 .../PerNodeTimelineAggregatorsAuxService.java   | 211 --
 .../aggregator/TimelineAggregator.java  | 122 
 .../TimelineAggregatorWebService.java   | 180 

[33/50] [abbrv] hadoop git commit: YARN-3836. add equals and hashCode to TimelineEntity and other classes in the data model (Li Lu via sjlee)

2015-08-25 Thread sjlee
YARN-3836. add equals and hashCode to TimelineEntity and other classes in the 
data model (Li Lu via sjlee)

(cherry picked from commit 2d4a8f4563c06339717ca9410b2794754603fba3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cced5944
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cced5944
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cced5944

Branch: refs/heads/YARN-2928
Commit: cced59444b880ed60bf5f288ef692ca6fca198ad
Parents: e281987
Author: Sangjin Lee sj...@apache.org
Authored: Thu Jul 9 20:50:48 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Tue Aug 25 10:47:14 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../records/timelineservice/TimelineEntity.java | 89 +++-
 .../records/timelineservice/TimelineEvent.java  | 41 -
 .../records/timelineservice/TimelineMetric.java | 30 +++
 .../TestTimelineServiceRecords.java | 36 +++-
 5 files changed, 195 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cced5944/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d77ad59..6e10926 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -87,6 +87,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3706. Generalize native HBase writer for additional tables (Joep
 Rottinghuis via sjlee)
 
+YARN-3836. add equals and hashCode to TimelineEntity and other classes in
+the data model (Li Lu via sjlee)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cced5944/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index 60fba85..9ef2d90 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -31,11 +31,25 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
+/**
+ * The basic timeline entity data structure for timeline service v2. Timeline
+ * entity objects are not thread safe and should not be accessed concurrently.
+ * All collection members will be initialized into empty collections. Two
+ * timeline entities are equal iff. their type and id are identical.
+ *
+ * All non-primitive type, non-collection members will be initialized into 
null.
+ * User should set the type and id of a timeline entity to make it valid (can 
be
+ * checked by using the {@link #isValid()} method). Callers to the getters
+ * should perform null checks for non-primitive type, non-collection members.
+ *
+ * Callers are recommended not to alter the returned collection objects from 
the
+ * getters.
+ */
@XmlRootElement(name = "entity")
@XmlAccessorType(XmlAccessType.NONE)
@InterfaceAudience.Public
@InterfaceStability.Unstable
-public class TimelineEntity {
+public class TimelineEntity implements Comparable<TimelineEntity> {
  protected final static String SYSTEM_INFO_KEY_PREFIX = "SYSTEM_INFO_";

  @XmlRootElement(name = "identifier")
@@ -77,6 +91,41 @@ public class TimelineEntity {
  "type='" + type + '\'' +
  ", id='" + id + '\'' + "]";
 }
+
+@Override
+public int hashCode() {
+  final int prime = 31;
+  int result = 1;
+  result = prime * result + ((id == null) ? 0 : id.hashCode());
+  result =
+prime * result + ((type == null) ? 0 : type.hashCode());
+  return result;
+}
+
+@Override
+public boolean equals(Object obj) {
+  if (this == obj)
+return true;
+  if (!(obj instanceof Identifier)) {
+return false;
+  }
+  Identifier other = (Identifier) obj;
+  if (id == null) {
+if (other.getId() != null) {
+  return false;
+}
+  } else if (!id.equals(other.getId())) {
+return false;
+  }
+  if (type == null) {
+if (other.getType() != null) {
+  return false;
+}
+  } else if (!type.equals(other.getType())) {
+return false;
+  }
+  return true;
+}
   }
 
   private TimelineEntity real;
@@ -471,6 +520,44

[46/50] [abbrv] hadoop git commit: YARN-3045. Implement NM writing container lifecycle events to Timeline Service v2. Contributed by Naganarasimha G R.

2015-08-25 Thread sjlee
YARN-3045. Implement NM writing container lifecycle events to Timeline Service 
v2. Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/702a2142
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/702a2142
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/702a2142

Branch: refs/heads/YARN-2928
Commit: 702a2142b15c45f12aa79c28c5148984e0452ed3
Parents: 97f211b
Author: Junping Du junping...@apache.org
Authored: Tue Aug 18 04:31:45 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Tue Aug 25 10:52:44 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../dev-support/findbugs-exclude.xml|  16 +-
 .../distributedshell/TestDistributedShell.java  |  28 +-
 .../hadoop/yarn/server/nodemanager/Context.java |   5 +
 .../yarn/server/nodemanager/NodeManager.java|  13 +
 .../containermanager/ContainerManagerImpl.java  |  48 ++-
 .../ApplicationContainerFinishedEvent.java  |  17 +-
 .../containermanager/container/Container.java   |   3 +
 .../container/ContainerImpl.java|  29 +-
 .../monitor/ContainersMonitorImpl.java  | 108 +-
 .../timelineservice/NMTimelineEvent.java|  31 ++
 .../timelineservice/NMTimelineEventType.java|  24 ++
 .../timelineservice/NMTimelinePublisher.java| 376 +++
 .../nodemanager/TestNodeStatusUpdater.java  |  24 +-
 .../containermanager/TestAuxServices.java   |   4 +-
 .../TestContainerManagerRecovery.java   |   8 +
 .../application/TestApplication.java|   6 +-
 .../container/TestContainer.java|   2 +-
 .../nodemanager/webapp/MockContainer.java   |   6 +
 .../nodemanager/webapp/TestNMWebServer.java |   3 +-
 .../PerNodeTimelineCollectorsAuxService.java|  16 +-
 ...TestPerNodeTimelineCollectorsAuxService.java |   9 +
 22 files changed, 636 insertions(+), 143 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/702a2142/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3e1f212..492a098 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -91,6 +91,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3904. Refactor timelineservice.storage to add support to online and
 offline aggregation writers (Li Lu via sjlee)
 
+YARN-3045. Implement NM writing container lifecycle events to Timeline
+Service v2. (Naganarasimha G R via junping_du)
+
   IMPROVEMENTS
 
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/702a2142/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 691170e..62e60a5 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -110,6 +110,16 @@
 Bug pattern=BC_UNCONFIRMED_CAST /
   /Match
 
+  !-- Object cast is based on the event type --
+  Match
+Class 
name=org.apache.hadoop.yarn.server.resourcemanager.metrics.AbstractTimelineServicePublisher
 /
+ Bug pattern=BC_UNCONFIRMED_CAST /
+  /Match
+
+  Match
+Class 
name=org.apache.hadoop.yarn.server.nodemanager.timelineservice.NMTimelinePublisher$ApplicationEventHandler
 /
+ Bug pattern=BC_UNCONFIRMED_CAST /
+  /Match
 
   !-- Ignore intentional switch fallthroughs --
   Match
@@ -505,10 +515,4 @@
 /Or
 Bug pattern=URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD /
   /Match
-
-  !-- Object cast is based on the event type --
-  Match
-Class 
name=org.apache.hadoop.yarn.server.resourcemanager.metrics.AbstractTimelineServicePublisher
 /
- Bug pattern=BC_UNCONFIRMED_CAST /
-  /Match
 /FindBugsFilter

http://git-wip-us.apache.org/repos/asf/hadoop/blob/702a2142/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell

Git Push Summary

2015-08-25 Thread sjlee
Repository: hadoop
Updated Tags:  refs/tags/feature_YARN-2928_2015-08-24 [created] a73d4b204


hadoop git commit: YARN-3058. Miscellaneous issues in NodeManager project (Naganarasimha G R via sjlee)

2015-08-24 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 9d1494733 -> 3c36922d7


YARN-3058. Miscellaneous issues in NodeManager project (Naganarasimha G R via 
sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c36922d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c36922d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c36922d

Branch: refs/heads/YARN-2928
Commit: 3c36922d70987b7459c36bf4a61db768dade170f
Parents: 9d14947
Author: Sangjin Lee sj...@apache.org
Authored: Mon Aug 24 17:36:31 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Mon Aug 24 17:36:31 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../containermanager/ContainerManagerImpl.java  | 33 +++-
 .../metrics/TimelineServiceV2Publisher.java |  2 --
 .../TestSystemMetricsPublisherForV2.java|  8 -
 4 files changed, 28 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c36922d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a3c89e3..a974fff 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -144,6 +144,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 
 YARN-4064. build is broken at TestHBaseTimelineWriterImpl.java (sjlee)
 
+YARN-3058. Miscellaneous issues in NodeManager project (Naganarasimha G R
+via sjlee)
+
 Trunk - Unreleased
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c36922d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 2ea2ec1..50f2dfc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -896,21 +896,24 @@ public class ContainerManagerImpl extends 
CompositeService implements
     if (flowRunIdStr != null && !flowRunIdStr.isEmpty()) {
   flowRunId = Long.parseLong(flowRunIdStr);
 }
-Application application = new ApplicationImpl(dispatcher, user,
-flowName, flowVersion, flowRunId, applicationID, credentials, 
context);
-if (null == context.getApplications().putIfAbsent(applicationID,
-  application)) {
-  LOG.info(Creating a new application reference for app  + 
applicationID);
-  LogAggregationContext logAggregationContext =
-  containerTokenIdentifier.getLogAggregationContext();
-  MapApplicationAccessType, String appAcls =
-  container.getLaunchContext().getApplicationACLs();
-  context.getNMStateStore().storeApplication(applicationID,
-  buildAppProto(applicationID, user, credentials, appAcls,
-logAggregationContext));
-  dispatcher.getEventHandler().handle(
-new ApplicationInitEvent(applicationID, appAcls,
-  logAggregationContext));
+if (!context.getApplications().containsKey(applicationID)) {
+  Application application =
+  new ApplicationImpl(dispatcher, user, flowName, flowVersion,
+  flowRunId, applicationID, credentials, context);
+  if (context.getApplications().putIfAbsent(applicationID,
+  application) == null) {
+LOG.info(Creating a new application reference for app 
++ applicationID);
+LogAggregationContext logAggregationContext =
+containerTokenIdentifier.getLogAggregationContext();
+        Map<ApplicationAccessType, String> appAcls =
+            container.getLaunchContext().getApplicationACLs();
+context.getNMStateStore().storeApplication(applicationID,
+buildAppProto(applicationID, user, credentials, appAcls,
+logAggregationContext));
+dispatcher.getEventHandler().handle(new

hadoop git commit: YARN-3814. REST API implementation for getting raw entities in TimelineReader (Naganarasimha G R via sjlee)

2015-08-21 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 b4bc51002 -> 9d1494733


YARN-3814. REST API implementation for getting raw entities in TimelineReader 
(Naganarasimha G R via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d149473
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d149473
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d149473

Branch: refs/heads/YARN-2928
Commit: 9d1494733e29ca07e97aff7f85f6693663802cf7
Parents: b4bc510
Author: Sangjin Lee sj...@apache.org
Authored: Fri Aug 21 19:10:23 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Fri Aug 21 19:10:23 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../reader/TimelineReaderManager.java   |  41 ++
 .../reader/TimelineReaderServer.java|   2 +-
 .../reader/TimelineReaderWebServices.java   | 245 +-
 .../storage/FileSystemTimelineReaderImpl.java   |   5 +
 .../reader/TestTimelineReaderWebServices.java   | 456 ++-
 6 files changed, 741 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d149473/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d274b24..a3c89e3 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -97,6 +97,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-4025. Deal with byte representations of Longs in writer code.
 (Sangjin Lee and Vrushali C via junping_du)
 
+YARN-3814. REST API implementation for getting raw entities in
+TimelineReader (Naganarasimha G R via sjlee)
+
   IMPROVEMENTS
 
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d149473/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
index 5573185..7fafd82 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
@@ -18,10 +18,18 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.reader;
 
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
 
 @Private
 @Unstable
@@ -33,4 +41,37 @@ public class TimelineReaderManager extends AbstractService {
 super(TimelineReaderManager.class.getName());
 this.reader = timelineReader;
   }
+
+  /**
+   * Get a set of entities matching given predicates. The meaning of each
+   * argument has been documented with {@link TimelineReader#getEntities}.
+   *
+   * @see TimelineReader#getEntities
+   */
+  Set<TimelineEntity> getEntities(String userId, String clusterId,
+      String flowId, Long flowRunId, String appId, String entityType,
+      Long limit, Long createdTimeBegin, Long createdTimeEnd,
+      Long modifiedTimeBegin, Long modifiedTimeEnd,
+      Map<String, Set<String>> relatesTo, Map<String, Set<String>> isRelatedTo,
+      Map<String, Object> infoFilters, Map<String, String> configFilters,
+      Set<String> metricFilters, Set<String> eventFilters,
+      EnumSet<Field> fieldsToRetrieve) throws IOException {
+return reader.getEntities(userId, clusterId, flowId, flowRunId, appId,
+entityType, limit, createdTimeBegin, createdTimeEnd, modifiedTimeBegin,
+modifiedTimeEnd, relatesTo, isRelatedTo, infoFilters, configFilters,
+metricFilters

hadoop git commit: YARN-3904. Refactor timelineservice.storage to add support to online and offline aggregation writers (Li Lu via sjlee)

2015-08-17 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 f40c73548 -> a029ce10c


YARN-3904. Refactor timelineservice.storage to add support to online and 
offline aggregation writers (Li Lu via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a029ce10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a029ce10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a029ce10

Branch: refs/heads/YARN-2928
Commit: a029ce10c9908f576df70dc489b6d11275de83ed
Parents: f40c735
Author: Sangjin Lee sj...@apache.org
Authored: Mon Aug 17 16:48:58 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Mon Aug 17 16:48:58 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../dev-support/findbugs-exclude.xml|   7 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |  10 +
 .../storage/OfflineAggregationWriter.java   |  66 +++
 .../PhoenixOfflineAggregationWriterImpl.java| 356 +
 .../storage/PhoenixTimelineWriterImpl.java  | 530 ---
 .../storage/TimelineSchemaCreator.java  |  45 +-
 .../storage/common/OfflineAggregationInfo.java  | 110 
 ...TestPhoenixOfflineAggregationWriterImpl.java | 162 ++
 .../storage/TestPhoenixTimelineWriterImpl.java  | 152 --
 .../storage/TestTimelineWriterImpl.java |  74 ---
 11 files changed, 754 insertions(+), 761 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a029ce10/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6df4ac5..ba9fc8f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -88,6 +88,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3906. Split the application table from the entity table. (Sangjin Lee 
 via junping_du)
 
+YARN-3904. Refactor timelineservice.storage to add support to online and
+offline aggregation writers (Li Lu via sjlee)
+
   IMPROVEMENTS
 
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a029ce10/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 5583cd6..691170e 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -488,13 +488,12 @@
   !-- Ignore SQL_PREPARED_STATEMENT_GENERATED_FROM_NONCONSTANT_STRING 
warnings for Timeline Phoenix storage. --
   !-- Since we're using dynamic columns, we have to generate SQL statements 
dynamically --
   Match
-Class 
name=org.apache.hadoop.yarn.server.timelineservice.storage.PhoenixTimelineWriterImpl
 /
+Class 
name=org.apache.hadoop.yarn.server.timelineservice.storage.PhoenixOfflineAggregationWriterImpl
 /
 Or
   Method name=storeEntityVariableLengthFields /
-  Method name=storeEvents /
-  Method name=storeMetrics /
-  Method name=write /
+  Method name=writeAggregatedEntity /
 /Or
+Bug pattern=SQL_PREPARED_STATEMENT_GENERATED_FROM_NONCONSTANT_STRING /
   /Match
   
   !-- Following fields are used in ErrorsAndWarningsBlock, which is not a 
part of analysis of findbugs --

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a029ce10/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index cec2760..5b33319 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1722,6 +1722,16 @@ public class YarnConfiguration extends Configuration {
   public static final longDEFAULT_TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME =
   7*24*60*60*1000; // 7 days
 
+  // Timeline service v2 offlien aggregation related keys
+  public static final String TIMELINE_OFFLINE_AGGREGATION_PREFIX =
+  YarnConfiguration.TIMELINE_SERVICE_PREFIX + aggregation.offline.;
+  public static final String PHOENIX_OFFLINE_STORAGE_CONN_STR
+  = TIMELINE_OFFLINE_AGGREGATION_PREFIX

[20/43] hadoop git commit: HDFS-7885. Datanode should not trust the generation stamp provided by client. Contributed by Tsz Wo Nicholas Sze.

2015-08-14 Thread sjlee
HDFS-7885. Datanode should not trust the generation stamp provided by client. 
Contributed by Tsz Wo Nicholas Sze.

(cherry picked from commit 24db0812be64e83a48ade01fc1eaaeaedad4dec0)
(cherry picked from commit 994dadb9ba0a3b87b6548e6e0801eadd26554d55)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bc5c649
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bc5c649
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bc5c649

Branch: refs/heads/sjlee/hdfs-merge
Commit: 0bc5c6495a7feb4365af0ce5fe48fc87b7e1749f
Parents: e1af1ac
Author: Jing Zhao ji...@apache.org
Authored: Fri Mar 6 10:55:56 2015 -0800
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 23:32:45 2015 -0700

--
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 15 +
 .../hadoop/hdfs/TestBlockReaderLocalLegacy.java | 63 
 2 files changed, 78 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bc5c649/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 0d9f096..0c2337e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2276,6 +2276,21 @@ class FsDatasetImpl implements 
FsDatasetSpiFsVolumeImpl {
   @Override // FsDatasetSpi
   public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
   throws IOException {
+    synchronized(this) {
+      final Replica replica = volumeMap.get(block.getBlockPoolId(),
+          block.getBlockId());
+      if (replica == null) {
+        throw new ReplicaNotFoundException(block);
+      }
+      if (replica.getGenerationStamp() < block.getGenerationStamp()) {
+        throw new IOException(
+            "Replica generation stamp < block generation stamp, block="
+            + block + ", replica=" + replica);
+      } else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
+        block.setGenerationStamp(replica.getGenerationStamp());
+      }
+    }
+
 File datafile = getBlockFile(block);
 File metafile = FsDatasetUtil.getMetaFile(datafile, 
block.getGenerationStamp());
 BlockLocalPathInfo info = new BlockLocalPathInfo(block,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bc5c649/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
index cb50539..1c4134f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
@@ -30,11 +30,16 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.BeforeClass;
@@ -153,4 +158,62 @@ public class TestBlockReaderLocalLegacy {
 Arrays.equals(orig, buf);
 cluster.shutdown();
   }
+
+  @Test(timeout=2)
+  public void testBlockReaderLocalLegacyWithAppend() throws Exception {
+final short REPL_FACTOR = 1;
+final HdfsConfiguration conf = getConfiguration(null);
+conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, 
true);
+
+final MiniDFSCluster cluster =
+new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+cluster.waitActive();
+
+final DistributedFileSystem dfs

[03/43] hadoop git commit: HDFS-7263. Snapshot read can reveal future bytes for appended files. Contributed by Tao Luo. (cherry picked from commit 8bfef590295372a48bd447b1462048008810ee17)

2015-08-14 Thread sjlee
HDFS-7263. Snapshot read can reveal future bytes for appended files. 
Contributed by Tao Luo.
(cherry picked from commit 8bfef590295372a48bd447b1462048008810ee17)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3827a1ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3827a1ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3827a1ac

Branch: refs/heads/sjlee/hdfs-merge
Commit: 3827a1acdbc4f9fec3179dcafa614734b5fa31bc
Parents: 1aa9e34
Author: Tao Luo tao@wandisco.com
Authored: Wed Oct 29 20:20:11 2014 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 21:25:12 2015 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  3 +-
 .../snapshot/TestSnapshotFileLength.java| 42 +++-
 2 files changed, 34 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3827a1ac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index ff65ebc..db06d3b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -801,7 +801,8 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   }
   int realLen = (int) Math.min(len, (blockEnd - pos + 1L));
   if (locatedBlocks.isLastBlockComplete()) {
-realLen = (int) Math.min(realLen, locatedBlocks.getFileLength());
+realLen = (int) Math.min(realLen,
+locatedBlocks.getFileLength() - pos);
   }
   int result = readBuffer(strategy, off, realLen, corruptedBlockMap);
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3827a1ac/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
index 32534f0..98aafc1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
@@ -21,7 +21,10 @@ import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
 
 
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hdfs.AppendTestUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -55,6 +58,8 @@ public class TestSnapshotFileLength {
 
   @Before
   public void setUp() throws Exception {
+conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCKSIZE);
+conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCKSIZE);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
   .build();
 cluster.waitActive();
@@ -81,40 +86,57 @@ public class TestSnapshotFileLength {
 
 int bytesRead;
 byte[] buffer = new byte[BLOCKSIZE * 8];
+int origLen = BLOCKSIZE + 1;
+int toAppend = BLOCKSIZE;
 FSDataInputStream fis = null;
 FileStatus fileStatus = null;
 
 // Create and write a file.
 Path file1 = new Path(sub, file1Name);
-DFSTestUtil.createFile(hdfs, file1, 0, REPLICATION, SEED);
-DFSTestUtil.appendFile(hdfs, file1, BLOCKSIZE);
+DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, 0, BLOCKSIZE, REPLICATION, 
SEED);
+DFSTestUtil.appendFile(hdfs, file1, origLen);
 
 // Create a snapshot on the parent directory.
 hdfs.allowSnapshot(sub);
 hdfs.createSnapshot(sub, snapshot1);
 
-// Write more data to the file.
-DFSTestUtil.appendFile(hdfs, file1, BLOCKSIZE);
+Path file1snap1
+= SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
+
+// Append to the file.
+FSDataOutputStream out = hdfs.append(file1);
+try {
+  AppendTestUtil.write(out, 0, toAppend);
+  // Test reading from snapshot of file that is open for append
+  byte[] dataFromSnapshot = DFSTestUtil.readFileBuffer(hdfs, file1snap1);
+  assertThat(Wrong data size in snapshot

[21/43] hadoop git commit: HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via Colin P. McCabe)

2015-08-14 Thread sjlee
HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via Colin 
P. McCabe)

(cherry picked from commit a17584936cc5141e3f5612ac3ecf35e27968e439)
(cherry picked from commit 7779f38e68ca4e0f7ac08eb7e5f4801b89979d02)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65ae3e2f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65ae3e2f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65ae3e2f

Branch: refs/heads/sjlee/hdfs-merge
Commit: 65ae3e2ff16ce1114a0115ff916837b0173b77f1
Parents: 0bc5c64
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Tue Jan 20 20:11:09 2015 -0800
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 23:59:56 2015 -0700

--
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 16 +
 .../datanode/fsdataset/impl/FsVolumeList.java   |  8 +++--
 .../fsdataset/impl/TestFsDatasetImpl.java   | 37 ++--
 3 files changed, 49 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65ae3e2f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 0c2337e..cbcf6b8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -336,7 +336,7 @@ class FsDatasetImpl implements FsDatasetSpiFsVolumeImpl {
 
 StorageType storageType = location.getStorageType();
 final FsVolumeImpl fsVolume = new FsVolumeImpl(
-this, sd.getStorageUuid(), dir, this.conf, storageType);
+this, sd.getStorageUuid(), sd.getCurrentDir(), this.conf, storageType);
 final ReplicaMap tempVolumeMap = new ReplicaMap(fsVolume);
 ArrayListIOException exceptions = Lists.newArrayList();
 
@@ -379,19 +379,19 @@ class FsDatasetImpl implements FsDatasetSpiFsVolumeImpl 
{
*/
   @Override
   public synchronized void removeVolumes(Collection<StorageLocation> volumes) {
-    Set<File> volumeSet = new HashSet<File>();
+    Set<String> volumeSet = new HashSet<String>();
     for (StorageLocation sl : volumes) {
-      volumeSet.add(sl.getFile());
+      volumeSet.add(sl.getFile().getAbsolutePath());
     }
     for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
       Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
-      if (volumeSet.contains(sd.getRoot())) {
-        String volume = sd.getRoot().toString();
+      String volume = sd.getRoot().getAbsolutePath();
+      if (volumeSet.contains(volume)) {
 LOG.info(Removing  + volume +  from FsDataset.);
 
 // Disable the volume from the service.
 asyncDiskService.removeVolume(sd.getCurrentDir());
-this.volumes.removeVolume(volume);
+this.volumes.removeVolume(sd.getRoot());
 
 // Removed all replica information for the blocks on the volume. Unlike
 // updating the volumeMap in addVolume(), this operation does not scan
@@ -401,7 +401,9 @@ class FsDatasetImpl implements FsDatasetSpiFsVolumeImpl {
   for (IteratorReplicaInfo it = volumeMap.replicas(bpid).iterator();
   it.hasNext(); ) {
 ReplicaInfo block = it.next();
-if (block.getVolume().getBasePath().equals(volume)) {
+String absBasePath =
+  new File(block.getVolume().getBasePath()).getAbsolutePath();
+if (absBasePath.equals(volume)) {
   invalidate(bpid, block);
   blocks.add(block);
   it.remove();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65ae3e2f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 9483444..b17b90b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs

[13/43] hadoop git commit: HDFS-7575. Upgrade should generate a unique storage ID for each volume. (Contributed by Arpit Agarwal)

2015-08-14 Thread sjlee
HDFS-7575. Upgrade should generate a unique storage ID for each volume. 
(Contributed by Arpit Agarwal)

(cherry picked from commit 1d9d166c0beb56aa45e65f779044905acff25d88)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca8e1b07
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca8e1b07
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca8e1b07

Branch: refs/heads/sjlee/hdfs-merge
Commit: ca8e1b0739b6653833f9bc8990ab126420703f66
Parents: e9a2825
Author: Arpit Agarwal a...@apache.org
Authored: Thu Jan 22 14:08:20 2015 -0800
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 22:23:17 2015 -0700

--
 .../hdfs/server/datanode/DataStorage.java   |  35 +++--
 .../hdfs/server/protocol/DatanodeStorage.java   |  19 ++-
 .../hadoop/hdfs/TestDFSUpgradeFromImage.java|  19 ++-
 .../hadoop/hdfs/TestDatanodeLayoutUpgrade.java  |   2 +-
 ...estDatanodeStartupFixesLegacyStorageIDs.java | 139 +++
 .../apache/hadoop/hdfs/UpgradeUtilities.java|   2 +-
 .../server/datanode/SimulatedFSDataset.java |   2 +-
 .../fsdataset/impl/TestFsDatasetImpl.java   |   2 +-
 .../testUpgradeFrom22FixesStorageIDs.tgz| Bin 0 - 3260 bytes
 .../testUpgradeFrom22FixesStorageIDs.txt|  25 
 .../testUpgradeFrom22via26FixesStorageIDs.tgz   | Bin 0 - 3635 bytes
 .../testUpgradeFrom22via26FixesStorageIDs.txt   |  25 
 .../testUpgradeFrom26PreservesStorageIDs.tgz| Bin 0 - 3852 bytes
 .../testUpgradeFrom26PreservesStorageIDs.txt|  25 
 14 files changed, 274 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca8e1b07/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 8863724..fc4a682 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ -142,11 +143,20 @@ public class DataStorage extends Storage {
 this.datanodeUuid = newDatanodeUuid;
   }
 
-  /** Create an ID for this storage. */
-  public synchronized void createStorageID(StorageDirectory sd) {
-if (sd.getStorageUuid() == null) {
+  /** Create an ID for this storage.
+   * @return true if a new storage ID was generated.
+   * */
+  public synchronized boolean createStorageID(
+  StorageDirectory sd, boolean regenerateStorageIds) {
+final String oldStorageID = sd.getStorageUuid();
+if (oldStorageID == null || regenerateStorageIds) {
   sd.setStorageUuid(DatanodeStorage.generateUuid());
+  LOG.info(Generated new storageID  + sd.getStorageUuid() +
+   for directory  + sd.getRoot() +
+  (oldStorageID == null ?  : ( to replace  + oldStorageID)));
+  return true;
 }
+return false;
   }
 
   /**
@@ -677,20 +687,25 @@ public class DataStorage extends Storage {
   + sd.getRoot().getCanonicalPath() + : namenode clusterID = 
   + nsInfo.getClusterID() + ; datanode clusterID =  + 
getClusterID());
 }
-
-// After addition of the federation feature, ctime check is only 
-// meaningful at BlockPoolSliceStorage level. 
 
-// regular start up. 
+// Clusters previously upgraded from layout versions earlier than
+// ADD_DATANODE_AND_STORAGE_UUIDS failed to correctly generate a
+// new storage ID. We check for that and fix it now.
+boolean haveValidStorageId =
+DataNodeLayoutVersion.supports(
+LayoutVersion.Feature.ADD_DATANODE_AND_STORAGE_UUIDS, 
layoutVersion) 
+DatanodeStorage.isValidStorageId(sd.getStorageUuid());
+
+// regular start up.
 if (this.layoutVersion == HdfsConstants.DATANODE_LAYOUT_VERSION) {
-  createStorageID(sd);
+  createStorageID(sd, !haveValidStorageId);
   return; // regular startup
 }
-
+
 // do upgrade
 if (this.layoutVersion

[18/43] hadoop git commit: HDFS-7763. fix zkfc hung issue due to not catching exception in a corner case. Contributed by Liang Xie.

2015-08-14 Thread sjlee
HDFS-7763. fix zkfc hung issue due to not catching exception in a corner case. 
Contributed by Liang Xie.

(cherry picked from commit 7105ebaa9f370db04962a1e19a67073dc080433b)
(cherry picked from commit efb7e287f45c6502f293456034a37d9209a917be)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd70e4db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd70e4db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd70e4db

Branch: refs/heads/sjlee/hdfs-merge
Commit: fd70e4db105e140fc3d60042abb3f598c9afd13f
Parents: d5ddc34
Author: Andrew Wang w...@apache.org
Authored: Tue Feb 24 15:31:13 2015 -0800
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 23:25:12 2015 -0700

--
 .../apache/hadoop/hdfs/tools/DFSZKFailoverController.java   | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd70e4db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
index a42b1e3..85f77f1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
@@ -176,8 +176,13 @@ public class DFSZKFailoverController extends 
ZKFailoverController {
 new HdfsConfiguration(), args);
 DFSZKFailoverController zkfc = DFSZKFailoverController.create(
 parser.getConfiguration());
-
-System.exit(zkfc.run(parser.getRemainingArgs()));
+int retCode = 0;
+try {
+  retCode = zkfc.run(parser.getRemainingArgs());
+} catch (Throwable t) {
+  LOG.fatal("Got a fatal error, exiting now", t);
+}
+System.exit(retCode);
   }
 
   @Override



[23/43] hadoop git commit: HDFS-7587. Edit log corruption can happen if append fails with a quota violation. Contributed by Jing Zhao.

2015-08-14 Thread sjlee
HDFS-7587. Edit log corruption can happen if append fails with a quota 
violation. Contributed by Jing Zhao.

Committed Ming Ma's 2.6 patch.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f0bb5d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f0bb5d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f0bb5d3

Branch: refs/heads/sjlee/hdfs-merge
Commit: 7f0bb5d3fe0db2e6b9354c8d8a1b603f2390184f
Parents: c723f3b
Author: Jing Zhao ji...@apache.org
Authored: Wed Mar 18 18:51:14 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 09:02:46 2015 -0700

--
 .../hdfs/server/namenode/FSDirectory.java   |  8 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  2 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 86 +++-
 .../hdfs/server/namenode/INodesInPath.java  |  4 +
 .../namenode/TestDiskspaceQuotaUpdate.java  | 42 ++
 5 files changed, 119 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f0bb5d3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 9ca50c4..95877ab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -267,6 +267,10 @@ public class FSDirectory implements Closeable {
 }
   }
 
+  boolean shouldSkipQuotaChecks() {
+return skipQuotaCheck;
+  }
+
   /** Enable quota verification */
   void enableQuotaChecks() {
 skipQuotaCheck = false;
@@ -1738,7 +1742,7 @@ public class FSDirectory implements Closeable {
* update quota of each inode and check to see if quota is exceeded. 
* See {@link #updateCount(INodesInPath, long, long, boolean)}
*/ 
-  private void updateCountNoQuotaCheck(INodesInPath inodesInPath,
+  void updateCountNoQuotaCheck(INodesInPath inodesInPath,
   int numOfINodes, long nsDelta, long dsDelta) {
 assert hasWriteLock();
 try {
@@ -1877,7 +1881,7 @@ public class FSDirectory implements Closeable {
*  Pass null if a node is not being moved.
* @throws QuotaExceededException if quota limit is exceeded.
*/
-  private static void verifyQuota(INode[] inodes, int pos, long nsDelta,
+  static void verifyQuota(INode[] inodes, int pos, long nsDelta,
   long dsDelta, INode commonAncestor) throws QuotaExceededException {
 if (nsDelta >= 0 && dsDelta >= 0) {
   // if quota is being freed or not being consumed

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f0bb5d3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 7dfe688..cb5afbb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -387,7 +387,7 @@ public class FSEditLogLoader {
 for append);
   }
   LocatedBlock lb = fsNamesys.prepareFileForWrite(path,
-  oldFile, addCloseOp.clientName, addCloseOp.clientMachine, false, 
iip.getLatestSnapshotId(), false);
+  iip, addCloseOp.clientName, addCloseOp.clientMachine, false, 
false);
   newFile = INodeFile.valueOf(fsDir.getINode(path),
   path, true);
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f0bb5d3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 5541637..c92b431 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2872,8 +2872,8 @@ public class

[15/43] hadoop git commit: HDFS-7714. Simultaneous restart of HA NameNodes and DataNode can cause DataNode to register successfully with only one NameNode.(Contributed by Vinayakumar B)

2015-08-14 Thread sjlee
HDFS-7714. Simultaneous restart of HA NameNodes and DataNode can cause DataNode 
to register successfully with only one NameNode.(Contributed by Vinayakumar B)

(cherry picked from commit 3d15728ff5301296801e541d9b23bd1687c4adad)
(cherry picked from commit a1bf7aecf7d018c5305fa3bd7a9e3ef9af3155c1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1e65de5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1e65de5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1e65de5

Branch: refs/heads/sjlee/hdfs-merge
Commit: c1e65de57e8ef760586e28cd37397ea9a7ac7944
Parents: 21d8b22
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Feb 10 10:43:08 2015 +0530
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 22:58:34 2015 -0700

--
 .../org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java  | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1e65de5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 6bdb68a..62ba1ab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.util.Time.now;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
@@ -802,6 +803,10 @@ class BPServiceActor implements Runnable {
 // Use returned registration from namenode with updated fields
 bpRegistration = bpNamenode.registerDatanode(bpRegistration);
 break;
+  } catch(EOFException e) {  // namenode might have just restarted
+LOG.info("Problem connecting to server: " + nnAddr + " :"
++ e.getLocalizedMessage());
+sleepAndLogInterrupts(1000, "connecting to server");
   } catch(SocketTimeoutException e) {  // namenode is busy
 LOG.info("Problem connecting to server: " + nnAddr);
 sleepAndLogInterrupts(1000, connecting to server);



[02/43] hadoop git commit: HDFS-7235. DataNode#transferBlock should report blocks that don't exist using reportBadBlock (yzhang via cmccabe) (cherry picked from commit ac9ab037e9a9b03e4fa9bd471d3ab994

2015-08-14 Thread sjlee
HDFS-7235. DataNode#transferBlock should report blocks that don't exist using 
reportBadBlock (yzhang via cmccabe)
(cherry picked from commit ac9ab037e9a9b03e4fa9bd471d3ab9940beb53fb)

(cherry picked from commit 842a54a5f66e76eb79321b66cc3b8820fe66c5cd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1aa9e34c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1aa9e34c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1aa9e34c

Branch: refs/heads/sjlee/hdfs-merge
Commit: 1aa9e34c5106c496ffd390f6b2c822d387fb1908
Parents: f94aa4d
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Tue Oct 28 16:41:22 2014 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 21:21:17 2015 -0700

--
 .../hadoop/hdfs/server/datanode/DataNode.java   | 59 +++-
 .../UnexpectedReplicaStateException.java| 45 +++
 .../server/datanode/fsdataset/FsDatasetSpi.java | 28 ++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 54 --
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  | 46 ---
 .../org/apache/hadoop/hdfs/TestReplication.java | 32 ---
 .../server/datanode/SimulatedFSDataset.java | 43 --
 7 files changed, 267 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1aa9e34c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index fe57bc3..badb845 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -56,7 +56,9 @@ import java.io.BufferedOutputStream;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
+import java.io.EOFException;
 import java.io.FileInputStream;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -1773,30 +1775,59 @@ public class DataNode extends ReconfigurableBase
   int getXmitsInProgress() {
 return xmitsInProgress.get();
   }
-
+
+  private void reportBadBlock(final BPOfferService bpos,
+  final ExtendedBlock block, final String msg) {
+FsVolumeSpi volume = getFSDataset().getVolume(block);
+bpos.reportBadBlocks(
+block, volume.getStorageID(), volume.getStorageType());
+LOG.warn(msg);
+  }
+
   private void transferBlock(ExtendedBlock block, DatanodeInfo[] xferTargets,
   StorageType[] xferTargetStorageTypes) throws IOException {
 BPOfferService bpos = getBPOSForBlock(block);
 DatanodeRegistration bpReg = 
getDNRegistrationForBP(block.getBlockPoolId());
-
-if (!data.isValidBlock(block)) {
-  // block does not exist or is under-construction
+
+boolean replicaNotExist = false;
+boolean replicaStateNotFinalized = false;
+boolean blockFileNotExist = false;
+boolean lengthTooShort = false;
+
+try {
+  data.checkBlock(block, block.getNumBytes(), ReplicaState.FINALIZED);
+} catch (ReplicaNotFoundException e) {
+  replicaNotExist = true;
+} catch (UnexpectedReplicaStateException e) {
+  replicaStateNotFinalized = true;
+} catch (FileNotFoundException e) {
+  blockFileNotExist = true;
+} catch (EOFException e) {
+  lengthTooShort = true;
+} catch (IOException e) {
+  // The IOException indicates not being able to access block file,
+  // treat it the same here as blockFileNotExist, to trigger 
+  // reporting it as a bad block
+  blockFileNotExist = true;  
+}
+
+if (replicaNotExist || replicaStateNotFinalized) {
   String errStr = "Can't send invalid block " + block;
   LOG.info(errStr);
-  
   bpos.trySendErrorReport(DatanodeProtocol.INVALID_BLOCK, errStr);
   return;
 }
-
-// Check if NN recorded length matches on-disk length 
-long onDiskLength = data.getLength(block);
-if (block.getNumBytes() > onDiskLength) {
-  FsVolumeSpi volume = getFSDataset().getVolume(block);
+if (blockFileNotExist) {
+  // Report back to NN bad block caused by non-existent block file.
+  reportBadBlock(bpos, block, "Can't replicate block " + block
+  + " because the block file doesn't exist, or is not accessible");
+  return;
+}
+if (lengthTooShort) {
+  // Check if NN recorded length matches on-disk length 
   // Shorter on-disk len

[11/43] hadoop git commit: HDFS-7596. NameNode should prune dead storages from storageMap. Contributed by Arpit Agarwal.

2015-08-14 Thread sjlee
HDFS-7596. NameNode should prune dead storages from storageMap. Contributed by 
Arpit Agarwal.

(cherry picked from commit ef3c3a832c2f0c1e5ccdda2ff8ef84902912955f)
(cherry picked from commit 75e4e55e12b2faa521af7c23fddcba06a9ce661d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc637d6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc637d6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc637d6e

Branch: refs/heads/sjlee/hdfs-merge
Commit: cc637d6ece64dfeb89e78c7e9766836149e098be
Parents: 96f0813
Author: cnauroth cnaur...@apache.org
Authored: Sat Jan 10 09:18:33 2015 -0800
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 22:21:37 2015 -0700

--
 .../blockmanagement/DatanodeDescriptor.java |  42 ++-
 .../blockmanagement/TestBlockManager.java   |   6 +-
 .../TestNameNodePrunesMissingStorages.java  | 121 +++
 3 files changed, 165 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc637d6e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index cdaab64..a407fe8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -418,6 +418,46 @@ public class DatanodeDescriptor extends DatanodeInfo {
 if (checkFailedStorages) {
   updateFailedStorage(failedStorageInfos);
 }
+
+if (storageMap.size() != reports.length) {
+  pruneStorageMap(reports);
+}
+  }
+
+  /**
+   * Remove stale storages from storageMap. We must not remove any storages
+   * as long as they have associated block replicas.
+   */
+  private void pruneStorageMap(final StorageReport[] reports) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Number of storages reported in heartbeat=" + reports.length +
+"; Number of storages in storageMap=" + storageMap.size());
+}
+
+HashMap<String, DatanodeStorageInfo> excessStorages;
+
+synchronized (storageMap) {
+  // Init excessStorages with all known storages.
+  excessStorages = new HashMap<String, DatanodeStorageInfo>(storageMap);
+
+  // Remove storages that the DN reported in the heartbeat.
+  for (final StorageReport report : reports) {
+excessStorages.remove(report.getStorage().getStorageID());
+  }
+
+  // For each remaining storage, remove it if there are no associated
+  // blocks.
+  for (final DatanodeStorageInfo storageInfo : excessStorages.values()) {
+if (storageInfo.numBlocks() == 0) {
+  storageMap.remove(storageInfo.getStorageID());
+  LOG.info("Removed storage " + storageInfo + " from DataNode" + this);
+} else if (LOG.isDebugEnabled()) {
+  // This can occur until all block reports are received.
+  LOG.debug("Deferring removal of stale storage " + storageInfo +
+" with " + storageInfo.numBlocks() + " blocks");
+}
+  }
+}
   }
 
   private void updateFailedStorage(
@@ -749,8 +789,6 @@ public class DatanodeDescriptor extends DatanodeInfo {
 // For backwards compatibility, make sure that the type and
 // state are updated. Some reports from older datanodes do
 // not include these fields so we may have assumed defaults.
-// This check can be removed in the next major release after
-// 2.4.
 storage.updateFromStorage(s);
 storageMap.put(storage.getStorageID(), storage);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc637d6e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index b444ccc..5beb811 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -573,11 +573,13 @@ public class TestBlockManager {
 reset

[04/43] hadoop git commit: HDFS-7035. Make adding a new data directory to the DataNode an atomic operation and improve error handling (Lei Xu via Colin P. McCabe) (cherry picked from commit a9331fe9b0

2015-08-14 Thread sjlee
HDFS-7035. Make adding a new data directory to the DataNode an atomic operation 
and improve error handling (Lei Xu via Colin P. McCabe)
(cherry picked from commit a9331fe9b071fdcdae0c6c747d7b6b306142e671)

(cherry picked from commit ec2621e907742aad0264c5f533783f0f18565880)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d79a5849
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d79a5849
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d79a5849

Branch: refs/heads/sjlee/hdfs-merge
Commit: d79a584cdb0bc315938b80ed71b4f2dcb720
Parents: 3827a1a
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Thu Oct 30 17:31:23 2014 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 21:31:34 2015 -0700

--
 .../hadoop/hdfs/server/common/Storage.java  |  15 +
 .../hadoop/hdfs/server/common/StorageInfo.java  |   4 +
 .../server/datanode/BlockPoolSliceStorage.java  | 168 +---
 .../hadoop/hdfs/server/datanode/DataNode.java   | 109 --
 .../hdfs/server/datanode/DataStorage.java   | 382 ++-
 .../server/datanode/fsdataset/FsDatasetSpi.java |   6 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 161 +++-
 .../server/datanode/SimulatedFSDataset.java |   7 +-
 .../datanode/TestDataNodeHotSwapVolumes.java| 108 +-
 .../hdfs/server/datanode/TestDataStorage.java   |  26 +-
 .../fsdataset/impl/TestFsDatasetImpl.java   |  27 +-
 11 files changed, 575 insertions(+), 438 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d79a5849/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 735e0c1..14b52ce 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -821,6 +821,21 @@ public abstract class Storage extends StorageInfo {
   }
 
   /**
+   * Returns true if the storage directory on the given directory is already
+   * loaded.
+   * @param root the root directory of a {@link StorageDirectory}
+   * @throws IOException if failed to get canonical path.
+   */
+  protected boolean containsStorageDir(File root) throws IOException {
+for (StorageDirectory sd : storageDirs) {
+  if (sd.getRoot().getCanonicalPath().equals(root.getCanonicalPath())) {
+return true;
+  }
+}
+return false;
+  }
+
+  /**
* Return true if the layout of the given storage directory is from a version
* of Hadoop prior to the introduction of the current and previous
* directories which allow upgrade and rollback.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d79a5849/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
index 50c8044..a3f82ff 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
@@ -192,6 +192,10 @@ public class StorageInfo {
 namespaceID = nsId;
   }
 
+  public void setServiceLayoutVersion(int lv) {
+this.layoutVersion = lv;
+  }
+
   public int getServiceLayoutVersion() {
 return storageType == NodeType.DATA_NODE ? 
HdfsConstants.DATANODE_LAYOUT_VERSION
 : HdfsConstants.NAMENODE_LAYOUT_VERSION;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d79a5849/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index 8333bb4..8c819a7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org

[01/43] hadoop git commit: HDFS-7213. processIncrementalBlockReport performance degradation. Contributed by Eric Payne. (cherry picked from commit e226b5b40d716b6d363c43a8783766b72734e347)

2015-08-14 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/sjlee/hdfs-merge [created] fb1bf424b


HDFS-7213. processIncrementalBlockReport performance degradation.
Contributed by Eric Payne.
(cherry picked from commit e226b5b40d716b6d363c43a8783766b72734e347)

(cherry picked from commit 946463efefec9031cacb21d5a5367acd150ef904)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f94aa4d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f94aa4d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f94aa4d2

Branch: refs/heads/sjlee/hdfs-merge
Commit: f94aa4d25c2f96faf5164e807c2c3eb031e9a1fe
Parents: 4239513
Author: Kihwal Lee kih...@apache.org
Authored: Tue Oct 28 14:55:16 2014 -0500
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 21:21:09 2015 -0700

--
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f94aa4d2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 37df223..17112bf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3093,9 +3093,11 @@ public class BlockManager {
 + " is received from " + nodeID);
   }
 }
-blockLog.debug("*BLOCK* NameNode.processIncrementalBlockReport: " + "from "
+if (blockLog.isDebugEnabled()) {
+  blockLog.debug("*BLOCK* NameNode.processIncrementalBlockReport: " +
"from "
 + nodeID + " receiving: " + receiving + ", " + " received: " + received
 + ", " + " deleted: " + deleted);
+}
   }
 
   /**



[24/43] hadoop git commit: HDFS-7929. inotify unable fetch pre-upgrade edit log segments once upgrade starts (Zhe Zhang via Colin P. McCabe)

2015-08-14 Thread sjlee
HDFS-7929. inotify unable fetch pre-upgrade edit log segments once upgrade 
starts (Zhe Zhang via Colin P. McCabe)

(cherry picked from commit 43b41f22411439c5e23629197fb2fde45dcf0f0f)
(cherry picked from commit 219eb22c1571f76df32967a930049d983cbf5024)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03798416
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03798416
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03798416

Branch: refs/heads/sjlee/hdfs-merge
Commit: 03798416bfe27383c52e4d9f632fe9fa168c6e95
Parents: 7f0bb5d
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed Mar 18 18:48:54 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 09:47:33 2015 -0700

--
 .../hadoop/hdfs/server/namenode/FSImage.java|  2 +-
 .../server/namenode/FileJournalManager.java |  2 +-
 .../hdfs/server/namenode/NNUpgradeUtil.java | 44 --
 .../org/apache/hadoop/hdfs/TestDFSUpgrade.java  | 48 +++-
 4 files changed, 90 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03798416/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 9b72421..51efb51 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -393,7 +393,7 @@ public class FSImage implements Closeable {
 for (Iterator<StorageDirectory> it = storage.dirIterator(false); 
it.hasNext();) {
   StorageDirectory sd = it.next();
   try {
-NNUpgradeUtil.doPreUpgrade(sd);
+NNUpgradeUtil.doPreUpgrade(conf, sd);
   } catch (Exception e) {
 LOG.error("Failed to move aside pre-upgrade storage " +
 " in image directory " + sd.getRoot(), e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03798416/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index 101c42c..2df052b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -585,7 +585,7 @@ public class FileJournalManager implements JournalManager {
   public void doPreUpgrade() throws IOException {
 LOG.info("Starting upgrade of edits directory " + sd.getRoot());
 try {
- NNUpgradeUtil.doPreUpgrade(sd);
+ NNUpgradeUtil.doPreUpgrade(conf, sd);
 } catch (IOException ioe) {
  LOG.error("Failed to move aside pre-upgrade storage " +
  " in image directory " + sd.getRoot(), ioe);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03798416/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
index 546480d..c63da20 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
@@ -18,10 +18,13 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.File;
+import java.io.FilenameFilter;
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -99,15

[29/43] hadoop git commit: HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl lock for a very long time (sinago via cmccabe)

2015-08-14 Thread sjlee
HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl lock 
for a very long time (sinago via cmccabe)

(cherry picked from commit 28bebc81db8bb6d1bc2574de7564fe4c595cfe09)
(cherry picked from commit a827089905524e10638c783ba908a895d621911d)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c3a3092c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c3a3092c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c3a3092c

Branch: refs/heads/sjlee/hdfs-merge
Commit: c3a3092c37926eca75ea149c4c061742f6599b40
Parents: c6b68a8
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Mon Apr 6 08:54:46 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 11:17:20 2015 -0700

--
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 67 +---
 1 file changed, 44 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3a3092c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index f24d644..e352ea3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1180,30 +1180,51 @@ class FsDatasetImpl implements 
FsDatasetSpi<FsVolumeImpl> {
   }
 
   @Override // FsDatasetSpi
-  public synchronized ReplicaInPipeline createTemporary(StorageType 
storageType,
-  ExtendedBlock b) throws IOException {
-ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
b.getBlockId());
-if (replicaInfo != null) {
-  if (replicaInfo.getGenerationStamp() < b.getGenerationStamp()
-   && replicaInfo instanceof ReplicaInPipeline) {
-// Stop the previous writer
-((ReplicaInPipeline)replicaInfo)
-  
.stopWriter(datanode.getDnConf().getXceiverStopTimeout());
-invalidate(b.getBlockPoolId(), new Block[]{replicaInfo});
-  } else {
-throw new ReplicaAlreadyExistsException("Block " + b +
-" already exists in state " + replicaInfo.getState() +
-" and thus cannot be created.");
+  public ReplicaInPipeline createTemporary(
+  StorageType storageType, ExtendedBlock b) throws IOException {
+long startTimeMs = Time.monotonicNow();
+long writerStopTimeoutMs = datanode.getDnConf().getXceiverStopTimeout();
+ReplicaInfo lastFoundReplicaInfo = null;
+do {
+  synchronized (this) {
+ReplicaInfo currentReplicaInfo =
+volumeMap.get(b.getBlockPoolId(), b.getBlockId());
+if (currentReplicaInfo == lastFoundReplicaInfo) {
+  if (lastFoundReplicaInfo != null) {
+invalidate(b.getBlockPoolId(), new Block[] { lastFoundReplicaInfo 
});
+  }
+  FsVolumeImpl v = volumes.getNextVolume(storageType, b.getNumBytes());
+  // create a temporary file to hold block in the designated volume
+  File f = v.createTmpFile(b.getBlockPoolId(), b.getLocalBlock());
+  ReplicaInPipeline newReplicaInfo =
+  new ReplicaInPipeline(b.getBlockId(), b.getGenerationStamp(), v,
+  f.getParentFile(), 0);
+  volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
+  return newReplicaInfo;
+} else {
+  if (!(currentReplicaInfo.getGenerationStamp() < b
+  .getGenerationStamp() && currentReplicaInfo instanceof 
ReplicaInPipeline)) {
+throw new ReplicaAlreadyExistsException("Block " + b
++ " already exists in state " + currentReplicaInfo.getState()
++ " and thus cannot be created.");
+  }
+  lastFoundReplicaInfo = currentReplicaInfo;
+}
   }
-}
-
-FsVolumeImpl v = volumes.getNextVolume(storageType, b.getNumBytes());
-// create a temporary file to hold block in the designated volume
-File f = v.createTmpFile(b.getBlockPoolId(), b.getLocalBlock());
-ReplicaInPipeline newReplicaInfo = new ReplicaInPipeline(b.getBlockId(), 
-b.getGenerationStamp(), v, f.getParentFile(), 0);
-volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
-return newReplicaInfo;
+
+  // Hang too long, just bail out. This is not supposed

[36/43] hadoop git commit: HDFS-7278. Add a command that allows sysadmins to manually trigger full block reports from a DN (cmccabe) (cherry picked from commit baf794dc404ac54f4e8332654eadfac1bebacb8f

2015-08-14 Thread sjlee
HDFS-7278. Add a command that allows sysadmins to manually trigger full block 
reports from a DN (cmccabe)
(cherry picked from commit baf794dc404ac54f4e8332654eadfac1bebacb8f)

(cherry picked from commit 5f3d967aaefa0b20ef1586b4048b8fa5345d2618)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a776ef5a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a776ef5a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a776ef5a

Branch: refs/heads/sjlee/hdfs-merge
Commit: a776ef5ad2876b9acf6cf89824c306783f7759f1
Parents: 995382c
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Mon Oct 27 09:53:16 2014 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 18:15:50 2015 -0700

--
 .../hadoop/hdfs/client/BlockReportOptions.java  |  59 
 .../hdfs/protocol/ClientDatanodeProtocol.java   |   7 +
 ...tDatanodeProtocolServerSideTranslatorPB.java |  18 +++
 .../ClientDatanodeProtocolTranslatorPB.java |  16 +++
 .../hdfs/server/datanode/BPServiceActor.java|  17 +++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  14 ++
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  53 
 .../src/main/proto/ClientDatanodeProtocol.proto |  10 ++
 .../src/site/apt/HDFSCommands.apt.vm|   6 +
 .../server/datanode/TestTriggerBlockReport.java | 134 +++
 10 files changed, 334 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a776ef5a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/BlockReportOptions.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/BlockReportOptions.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/BlockReportOptions.java
new file mode 100644
index 000..07f4836
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/BlockReportOptions.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Options that can be specified when manually triggering a block report.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class BlockReportOptions {
+  private final boolean incremental;
+
+  private BlockReportOptions(boolean incremental) {
+this.incremental = incremental;
+  }
+
+  public boolean isIncremental() {
+return incremental;
+  }
+
+  public static class Factory {
+private boolean incremental = false;
+
+public Factory() {
+}
+
+public Factory setIncremental(boolean incremental) {
+  this.incremental = incremental;
+  return this;
+}
+
+public BlockReportOptions build() {
+  return new BlockReportOptions(incremental);
+}
+  }
+
+  @Override
+  public String toString() {
return "BlockReportOptions{incremental=" + incremental + "}";
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a776ef5a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index 9cd5ccd..1dcc196 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import

[31/43] hadoop git commit: HDFS-8046. Allow better control of getContentSummary. Contributed by Kihwal Lee. (cherry picked from commit 285b31e75e51ec8e3a796c2cb0208739368ca9b8)

2015-08-14 Thread sjlee
HDFS-8046. Allow better control of getContentSummary. Contributed by Kihwal Lee.
(cherry picked from commit 285b31e75e51ec8e3a796c2cb0208739368ca9b8)

(cherry picked from commit 7e622076d41a85fc9a8600fb270564a085f5cd83)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ef5e0b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ef5e0b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ef5e0b1

Branch: refs/heads/sjlee/hdfs-merge
Commit: 1ef5e0b18066ca949adcf4c55a41f186c47e7264
Parents: de21de7
Author: Kihwal Lee kih...@apache.org
Authored: Wed Apr 8 15:39:25 2015 -0500
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 15:30:45 2015 -0700

--
 .../main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 +++-
 .../server/namenode/ContentSummaryComputationContext.java | 10 +++---
 .../apache/hadoop/hdfs/server/namenode/FSDirectory.java   | 10 +-
 3 files changed, 19 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ef5e0b1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index fd313bb..85b740e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -272,7 +272,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int DFS_LIST_LIMIT_DEFAULT = 1000;
   public static final String  DFS_CONTENT_SUMMARY_LIMIT_KEY = 
"dfs.content-summary.limit";
-  public static final int DFS_CONTENT_SUMMARY_LIMIT_DEFAULT = 0;
+  public static final int DFS_CONTENT_SUMMARY_LIMIT_DEFAULT = 5000;
+  public static final String  DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_KEY = 
"dfs.content-summary.sleep-microsec";
+  public static final long DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_DEFAULT = 500;
   public static final String  DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY = 
"dfs.datanode.failed.volumes.tolerated";
   public static final int DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT = 
0;
   public static final String  DFS_DATANODE_SYNCONCLOSE_KEY = 
"dfs.datanode.synconclose";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ef5e0b1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index dab64ec..17e16ab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -29,6 +29,8 @@ public class ContentSummaryComputationContext {
   private long nextCountLimit = 0;
   private long limitPerRun = 0;
   private long yieldCount = 0;
+  private long sleepMilliSec = 0;
+  private int sleepNanoSec = 0;
 
   /**
* Constructor
@@ -40,17 +42,19 @@ public class ContentSummaryComputationContext {
*no limit (i.e. no yielding)
*/
   public ContentSummaryComputationContext(FSDirectory dir,
-  FSNamesystem fsn, long limitPerRun) {
+  FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
 this.dir = dir;
 this.fsn = fsn;
 this.limitPerRun = limitPerRun;
 this.nextCountLimit = limitPerRun;
 this.counts = Content.Counts.newInstance();
+this.sleepMilliSec = sleepMicroSec/1000;
+this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
   }
 
   /** Constructor for blocking computation. */
   public ContentSummaryComputationContext() {
-this(null, null, 0);
+this(null, null, 0, 1000);
   }
 
   /** Return current yield count */
@@ -101,7 +105,7 @@ public class ContentSummaryComputationContext

[38/43] hadoop git commit: HDFS-8404. Pending block replication can get stuck using older genstamp. Contributed by Nathan Roberts. (cherry picked from commit 8860e352c394372e4eb3ebdf82ea899567f34e4e)

2015-08-14 Thread sjlee
HDFS-8404. Pending block replication can get stuck using older genstamp. 
Contributed by Nathan Roberts.
(cherry picked from commit 8860e352c394372e4eb3ebdf82ea899567f34e4e)

(cherry picked from commit 536b9ee6d6e5b8430fda23cbdcfd859c299fa8ad)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d5e60fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d5e60fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d5e60fa

Branch: refs/heads/sjlee/hdfs-merge
Commit: 2d5e60fa12a62463cd54f1b6b0fcb2ccdbd82c42
Parents: 470019e
Author: Kihwal Lee kih...@apache.org
Authored: Tue May 19 13:06:48 2015 -0500
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 18:37:38 2015 -0700

--
 .../server/blockmanagement/BlockManager.java| 17 ++--
 .../blockmanagement/TestPendingReplication.java | 98 +++-
 2 files changed, 105 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d5e60fa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index bb54402..bcf50b5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1695,13 +1695,18 @@ public class BlockManager {
   namesystem.writeLock();
   try {
 for (int i = 0; i  timedOutItems.length; i++) {
+  /*
+   * Use the blockinfo from the blocksmap to be certain we're working
+   * with the most up-to-date block information (e.g. genstamp).
+   */
+  BlockInfo bi = blocksMap.getStoredBlock(timedOutItems[i]);
+  if (bi == null) {
+continue;
+  }
   NumberReplicas num = countNodes(timedOutItems[i]);
-  if (isNeededReplication(timedOutItems[i], 
getReplication(timedOutItems[i]),
- num.liveReplicas())) {
-neededReplications.add(timedOutItems[i],
-   num.liveReplicas(),
-   num.decommissionedReplicas(),
-   getReplication(timedOutItems[i]));
+  if (isNeededReplication(bi, getReplication(bi), num.liveReplicas())) 
{
+neededReplications.add(bi, num.liveReplicas(),
+num.decommissionedReplicas(), getReplication(bi));
   }
 }
   } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d5e60fa/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
index c63badc..085d5de 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
@@ -42,6 +42,7 @@ import 
org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import 
org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 /**
  * This class tests the internals of PendingReplicationBlocks.java,
@@ -52,13 +53,11 @@ public class TestPendingReplication {
   private static final int DFS_REPLICATION_INTERVAL = 1;
   // Number of datanodes in the cluster
   private static final int DATANODE_COUNT = 5;
-
   @Test
   public void testPendingReplication() {
 PendingReplicationBlocks pendingReplications;
 pendingReplications = new PendingReplicationBlocks(TIMEOUT * 1000);
 pendingReplications.start();
-
 //
 // Add 10 blocks to pendingReplications.
 //
@@ -140,8 +139,7 @@ public class TestPendingReplication

[41/43] hadoop git commit: HDFS-8270. create() always retried with hardcoded timeout when file already exists with open lease (Contributed by J.Andreina)

2015-08-14 Thread sjlee
HDFS-8270. create() always retried with hardcoded timeout when file already 
exists with open lease (Contributed by J.Andreina)

(cherry picked from commit 54f83d9bd917e8641e902c5f0695e65ded472f9a)
(cherry picked from commit 066e45bcb667bb0c37ef70fd297b24e4f26383eb)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db40aecd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db40aecd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db40aecd

Branch: refs/heads/sjlee/hdfs-merge
Commit: db40aecd8b0acf0ff054541dabf5113b542041e5
Parents: fad2a06
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Jun 3 12:11:46 2015 +0530
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 23:52:01 2015 -0700

--
 .../org/apache/hadoop/hdfs/NameNodeProxies.java | 16 
 .../org/apache/hadoop/hdfs/TestFileCreation.java|  3 +--
 2 files changed, 1 insertion(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db40aecd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index b261220..8da00b8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -42,7 +42,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient.Conf;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
@@ -68,7 +67,6 @@ import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.io.retry.RetryUtils;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.SecurityUtil;
@@ -425,22 +423,8 @@ public class NameNodeProxies {
 
 if (withRetries) { // create the proxy with retries
 
-  RetryPolicy createPolicy = RetryPolicies
-  .retryUpToMaximumCountWithFixedSleep(5,
-  HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
-
-  Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap 
- = new HashMap<Class<? extends Exception>, RetryPolicy>();
-  remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
-  createPolicy);
-
-  RetryPolicy methodPolicy = RetryPolicies.retryByRemoteException(
-  defaultPolicy, remoteExceptionToPolicyMap);
   Map<String, RetryPolicy> methodNameToPolicyMap 
  = new HashMap<String, RetryPolicy>();
-
-  methodNameToPolicyMap.put("create", methodPolicy);
-
   ClientProtocol translatorProxy =
 new ClientNamenodeProtocolTranslatorPB(proxy);
   return (ClientProtocol) RetryProxy.create(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db40aecd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index 3a399f3..8e88b62 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -408,9 +408,8 @@ public class TestFileCreation {
 GenericTestUtils.assertExceptionContains("already being created by",
 abce);
   }
-  // NameNodeProxies' createNNProxyWithClientProtocol has 5 retries.
   assertCounter("AlreadyBeingCreatedExceptionNumOps",
-  6L, getMetrics(metricsName));
+  1L, getMetrics(metricsName));
   FSDataOutputStream stm2 = fs2.create(p, true);
   stm2.write(2);
   stm2.close();



[42/43] hadoop git commit: HDFS-8480. Fix performance and timeout issues in HDFS-7929 by using hard-links to preserve old edit logs, instead of copying them. (Zhe Zhang via Colin P. McCabe)

2015-08-14 Thread sjlee
HDFS-8480. Fix performance and timeout issues in HDFS-7929 by using hard-links 
to preserve old edit logs, instead of copying them. (Zhe Zhang via Colin P. 
McCabe)

(cherry picked from commit 7b424f938c3c306795d574792b086d84e4f06425)
(cherry picked from commit cbd11681ce8a51d187d91748b67a708681e599de)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1b4e69b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1b4e69b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1b4e69b

Branch: refs/heads/sjlee/hdfs-merge
Commit: e1b4e69bf23022af3125e1c6dc4ac05c89e1418f
Parents: db40aec
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Mon Jun 22 14:37:10 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 23:59:46 2015 -0700

--
 .../hdfs/server/namenode/NNUpgradeUtil.java   | 18 ++
 1 file changed, 2 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1b4e69b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
index c01b11d..a4d9580 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.File;
 import java.io.FilenameFilter;
 import java.io.IOException;
+import java.nio.file.Files;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -130,23 +131,8 @@ public abstract class NNUpgradeUtil {
 
 for (String s : fileNameList) {
   File prevFile = new File(tmpDir, s);
-  Preconditions.checkState(prevFile.canRead(),
-  "Edits log file " + s + " is not readable.");
   File newFile = new File(curDir, prevFile.getName());
-  Preconditions.checkState(newFile.createNewFile(),
-  "Cannot create new edits log file in " + curDir);
-  EditLogFileInputStream in = new EditLogFileInputStream(prevFile);
-  EditLogFileOutputStream out =
-  new EditLogFileOutputStream(conf, newFile, 512*1024);
-  FSEditLogOp logOp = in.nextValidOp();
-  while (logOp != null) {
-out.write(logOp);
-logOp = in.nextOp();
-  }
-  out.setReadyToFlush();
-  out.flushAndSync(true);
-  out.close();
-  in.close();
+  Files.createLink(newFile.toPath(), prevFile.toPath());
 }
   }
 



[27/43] hadoop git commit: HDFS-7960. The full block report should prune zombie storages even if they're not empty. Contributed by Colin McCabe and Eddy Xu.

2015-08-14 Thread sjlee
HDFS-7960. The full block report should prune zombie storages even if they're 
not empty. Contributed by Colin McCabe and Eddy Xu.

(cherry picked from commit 50ee8f4e67a66aa77c5359182f61f3e951844db6)
(cherry picked from commit 2f46ee50bd4efc82ba3d30bd36f7637ea9d9714e)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03d4af39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03d4af39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03d4af39

Branch: refs/heads/sjlee/hdfs-merge
Commit: 03d4af39e794dc03d764122077b434d658b6405e
Parents: 4c64877
Author: Andrew Wang w...@apache.org
Authored: Mon Mar 23 22:00:34 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 10:54:26 2015 -0700

--
 .../DatanodeProtocolClientSideTranslatorPB.java |   5 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   4 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  15 +++
 .../server/blockmanagement/BlockManager.java|  55 +++-
 .../blockmanagement/DatanodeDescriptor.java |  51 ++-
 .../blockmanagement/DatanodeStorageInfo.java|  13 +-
 .../hdfs/server/datanode/BPServiceActor.java|  34 +++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  12 +-
 .../server/protocol/BlockReportContext.java |  52 +++
 .../hdfs/server/protocol/DatanodeProtocol.java  |  10 +-
 .../src/main/proto/DatanodeProtocol.proto   |  14 ++
 .../blockmanagement/TestBlockManager.java   |   8 +-
 .../TestNameNodePrunesMissingStorages.java  | 135 ++-
 .../server/datanode/BlockReportTestBase.java|   4 +-
 .../server/datanode/TestBPOfferService.java |  10 +-
 .../TestBlockHasMultipleReplicasOnSameDN.java   |   4 +-
 .../datanode/TestDataNodeVolumeFailure.java |   3 +-
 .../TestDatanodeProtocolRetryPolicy.java|   4 +-
 ...TestDnRespectsBlockReportSplitThreshold.java |   7 +-
 .../TestNNHandlesBlockReportPerStorage.java |   7 +-
 .../TestNNHandlesCombinedBlockReport.java   |   4 +-
 .../server/namenode/NNThroughputBenchmark.java  |   9 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   4 +-
 .../hdfs/server/namenode/ha/TestDNFencing.java  |   4 +-
 24 files changed, 422 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03d4af39/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index 46023ec..e169d0e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -46,6 +46,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlo
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -156,7 +157,8 @@ public class DatanodeProtocolClientSideTranslatorPB 
implements
 
   @Override
   public DatanodeCommand blockReport

[12/43] hadoop git commit: HDFS-7533. Datanode sometimes does not shutdown on receiving upgrade shutdown command. Contributed by Eric Payne. (cherry picked from commit 6bbf9fdd041d2413dd78e2bce51abae1

2015-08-14 Thread sjlee
HDFS-7533. Datanode sometimes does not shutdown on receiving upgrade shutdown 
command. Contributed by Eric Payne.
(cherry picked from commit 6bbf9fdd041d2413dd78e2bce51abae15f3334c2)

(cherry picked from commit 33534a0c9aef5024aa6f340e7ee24930c8fa8ed5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9a28251
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9a28251
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9a28251

Branch: refs/heads/sjlee/hdfs-merge
Commit: e9a28251ee46e64e1b99b2dd54b0432bdc0b9578
Parents: cc637d6
Author: Kihwal Lee kih...@apache.org
Authored: Mon Jan 12 15:38:17 2015 -0600
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 22:22:58 2015 -0700

--
 .../hadoop/hdfs/server/datanode/DataNode.java   | 10 +++---
 .../hdfs/server/datanode/TestDataNodeExit.java  | 16 
 2 files changed, 23 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9a28251/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 3dc0c3b..3ecc4a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1627,9 +1627,13 @@ public class DataNode extends ReconfigurableBase
 // in order to avoid any further acceptance of requests, but the peers
 // for block writes are not closed until the clients are notified.
 if (dataXceiverServer != null) {
-  xserver.sendOOBToPeers();
-  ((DataXceiverServer) this.dataXceiverServer.getRunnable()).kill();
-  this.dataXceiverServer.interrupt();
+  try {
+xserver.sendOOBToPeers();
+((DataXceiverServer) this.dataXceiverServer.getRunnable()).kill();
+this.dataXceiverServer.interrupt();
+  } catch (Throwable e) {
+// Ignore, since the out of band messaging is advisory.
+  }
 }
 
 // Interrupt the checkDiskErrorThread and terminate it.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9a28251/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java
index 9d59496..c067b07 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.datanode;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 
@@ -32,6 +33,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 /** 
  * Tests if DataNode process exits if all Block Pool services exit. 
@@ -88,4 +90,18 @@ public class TestDataNodeExit {
 stopBPServiceThreads(2, dn);
assertFalse("DataNode should exit", dn.isDatanodeUp());
   }
+
+  @Test
+  public void testSendOOBToPeers() throws Exception {
+DataNode dn = cluster.getDataNodes().get(0);
+DataXceiverServer spyXserver = Mockito.spy(dn.getXferServer());
+NullPointerException e = new NullPointerException();
+Mockito.doThrow(e).when(spyXserver).sendOOBToPeers();
+dn.xserver = spyXserver;
+try {
+  dn.shutdown();
+} catch (Throwable t) {
+  fail("DataNode shutdown should not have thrown exception " + t);
+}
+  }
 }



[33/43] hadoop git commit: HDFS-8219. setStoragePolicy with folder behavior is different after cluster restart. (surendra singh lilhore via Xiaoyu Yao)

2015-08-14 Thread sjlee
HDFS-8219. setStoragePolicy with folder behavior is different after cluster 
restart. (surendra singh lilhore via Xiaoyu Yao)

(cherry picked from commit 0100b155019496d077f958904de7d385697d65d9)
(cherry picked from commit e68e8b3b5cff85bfd8bb5b00b9033f63577856d6)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b054cb68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b054cb68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b054cb68

Branch: refs/heads/sjlee/hdfs-merge
Commit: b054cb68fa0fc6d1e9e77ac84575731e7d1ec0c7
Parents: b4e227e
Author: Xiaoyu Yao x...@apache.org
Authored: Tue May 5 13:41:14 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 16:05:28 2015 -0700

--
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  2 +-
 .../hadoop/hdfs/TestBlockStoragePolicy.java | 43 
 2 files changed, 44 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b054cb68/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 20aaf07..0154ed9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -721,7 +721,7 @@ public class FSEditLog implements LogsPurgeable {
   .setClientMachine(
   newNode.getFileUnderConstructionFeature().getClientMachine())
   .setOverwrite(overwrite)
-  .setStoragePolicyId(newNode.getStoragePolicyID());
+  .setStoragePolicyId(newNode.getLocalStoragePolicyID());
 
 AclFeature f = newNode.getAclFeature();
 if (f != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b054cb68/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index d053a79..8ac25db 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -26,6 +26,7 @@ import java.util.*;
 
 import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.*;
@@ -1173,4 +1174,46 @@ public class TestBlockStoragePolicy {
   cluster.shutdown();
 }
   }
+
+  @Test
+  public void testGetFileStoragePolicyAfterRestartNN() throws Exception {
+//HDFS8219
+final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+.numDataNodes(REPLICATION)
+.storageTypes(
+new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
+.build();
+cluster.waitActive();
+final DistributedFileSystem fs = cluster.getFileSystem();
+try {
+  final String file = "/testScheduleWithinSameNode/file";
+  Path dir = new Path("/testScheduleWithinSameNode");
+  fs.mkdirs(dir);
+  // 2. Set Dir policy
+  fs.setStoragePolicy(dir, "COLD");
+  // 3. Create file
+  final FSDataOutputStream out = fs.create(new Path(file));
+  out.writeChars("testScheduleWithinSameNode");
+  out.close();
+  // 4. Set Dir policy
+  fs.setStoragePolicy(dir, "HOT");
+  HdfsFileStatus status = fs.getClient().getFileInfo(file);
+  // 5. get file policy, it should be parent policy.
+  Assert
+  .assertTrue(
+  "File storage policy should be HOT",
+  status.getStoragePolicy() == HOT);
+  // 6. restart NameNode for reloading edits logs.
+  cluster.restartNameNode(true);
+  // 7. get file policy, it should be parent policy.
+  status = fs.getClient().getFileInfo(file);
+  Assert
+  .assertTrue(
+  "File storage policy should be HOT",
+  status.getStoragePolicy() == HOT);
+
+} finally {
+  cluster.shutdown();
+}
+  }
 }



[30/43] hadoop git commit: HDFS-8072. Reserved RBW space is not released if client terminates while writing block. (Arpit Agarwal)

2015-08-14 Thread sjlee
HDFS-8072. Reserved RBW space is not released if client terminates while 
writing block. (Arpit Agarwal)

(cherry picked from commit f0324738c9db4f45d2b1ec5cfb46c5f2b7669571)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalReplicaInPipeline.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de21de7e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de21de7e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de21de7e

Branch: refs/heads/sjlee/hdfs-merge
Commit: de21de7e2243ef8a89082121d838b88e3c10f05b
Parents: c3a3092
Author: Arpit Agarwal a...@apache.org
Authored: Wed Apr 8 11:38:21 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 11:21:29 2015 -0700

--
 .../hdfs/server/datanode/BlockReceiver.java |  1 +
 .../hdfs/server/datanode/ReplicaInPipeline.java |  6 ++
 .../datanode/ReplicaInPipelineInterface.java|  5 ++
 .../server/datanode/SimulatedFSDataset.java |  4 ++
 .../fsdataset/impl/TestRbwSpaceReservation.java | 67 +---
 5 files changed, 74 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de21de7e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 75f1c36..2a6b46a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -808,6 +808,7 @@ class BlockReceiver implements Closeable {
   }
 
 } catch (IOException ioe) {
+  replicaInfo.releaseAllBytesReserved();
   if (datanode.isRestarting()) {
 // Do not throw if shutting down for restart. Otherwise, it will cause
 // premature termination of responder.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de21de7e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
index 6a26640..cc55f85 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
@@ -148,6 +148,12 @@ public class ReplicaInPipeline extends ReplicaInfo
 return bytesReserved;
   }
   
+  @Override
+  public void releaseAllBytesReserved() {  // ReplicaInPipelineInterface
+getVolume().releaseReservedSpace(bytesReserved);
+bytesReserved = 0;
+  }
+
   @Override // ReplicaInPipelineInterface
   public synchronized void setLastChecksumAndDataLen(long dataLength, byte[] 
lastChecksum) {
 this.bytesOnDisk = dataLength;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de21de7e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
index 7f08b81..0263d0f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
@@ -45,6 +45,11 @@ public interface ReplicaInPipelineInterface extends Replica {
   void setBytesAcked(long bytesAcked);
   
   /**
+   * Release any disk space reserved for this replica.
+   */
+  public void releaseAllBytesReserved();
+
+  /**
* store the checksum for the last chunk along with the data length
* @param dataLength number of bytes on disk
* @param lastChecksum - checksum bytes for the last chunk

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de21de7e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java

[37/43] hadoop git commit: HDFS-8245. Standby namenode doesn't process DELETED_BLOCK if the addblock request is in edit log. Contributed by Rushabh S Shah. (cherry picked from commit 2d4ae3d18bc530fa9

2015-08-14 Thread sjlee
HDFS-8245. Standby namenode doesn't process DELETED_BLOCK if the addblock 
request is in edit log. Contributed by Rushabh S Shah.
(cherry picked from commit 2d4ae3d18bc530fa9f81ee616db8af3395705fb9)

(cherry picked from commit f264a5aeede7e144af11f5357c7f901993de8e12)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/470019e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/470019e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/470019e9

Branch: refs/heads/sjlee/hdfs-merge
Commit: 470019e9b88e0fcede926442b91d102b595c7ace
Parents: a776ef5
Author: Kihwal Lee kih...@apache.org
Authored: Fri May 8 16:37:26 2015 -0500
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 18:21:24 2015 -0700

--
 .../server/blockmanagement/BlockManager.java| 24 -
 .../server/datanode/TestBlockReplacement.java   | 97 
 .../hdfs/server/namenode/ha/TestDNFencing.java  |  4 -
 3 files changed, 118 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/470019e9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e271d55..bb54402 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2287,8 +2287,15 @@ public class BlockManager {
   if (LOG.isDebugEnabled()) {
LOG.debug("Processing previouly queued message " + rbi);
   }
-  processAndHandleReportedBlock(rbi.getStorageInfo(), 
-  rbi.getBlock(), rbi.getReportedState(), null);
+  if (rbi.getReportedState() == null) {
+// This is a DELETE_BLOCK request
+DatanodeStorageInfo storageInfo = rbi.getStorageInfo();
+removeStoredBlock(rbi.getBlock(),
+storageInfo.getDatanodeDescriptor());
+  } else {
+processAndHandleReportedBlock(rbi.getStorageInfo(),
+rbi.getBlock(), rbi.getReportedState(), null);
+  }
 }
   }
   
@@ -2984,6 +2991,17 @@ public class BlockManager {
 }
   }
 
+  private void removeStoredBlock(DatanodeStorageInfo storageInfo, Block block,
+  DatanodeDescriptor node) {
+if (shouldPostponeBlocksFromFuture &&
+namesystem.isGenStampInFuture(block)) {
+  queueReportedBlock(storageInfo, block, null,
+  QUEUE_REASON_FUTURE_GENSTAMP);
+  return;
+}
+removeStoredBlock(block, node);
+  }
+
   /**
* Modify (block--datanode) map. Possibly generate replication tasks, if the
* removed block is still valid.
@@ -3171,7 +3189,7 @@ public class BlockManager {
 for (ReceivedDeletedBlockInfo rdbi : srdb.getBlocks()) {
   switch (rdbi.getStatus()) {
   case DELETED_BLOCK:
-removeStoredBlock(rdbi.getBlock(), node);
+removeStoredBlock(storageInfo, rdbi.getBlock(), node);
 deleted++;
 break;
   case RECEIVED_BLOCK:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470019e9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
index e0d7964..86b77d1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
@@ -42,7 +42,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.client.BlockReportOptions;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -51,8 +53,11 @@ import

[22/43] hadoop git commit: HDFS-7830. DataNode does not release the volume lock when adding a volume fails. (Lei Xu via Colin P. McCabe)

2015-08-14 Thread sjlee
HDFS-7830. DataNode does not release the volume lock when adding a volume 
fails. (Lei Xu via Colin P. McCabe)

(cherry picked from commit 5c1036d598051cf6af595740f1ab82092b0b6554)
(cherry picked from commit eefca23e8c5e474de1e25bf2ec8a5b266bbe8cfe)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c723f3b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c723f3b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c723f3b1

Branch: refs/heads/sjlee/hdfs-merge
Commit: c723f3b1bd9eab261ab5edca33c4dae5ce3d0d30
Parents: 65ae3e2
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Tue Mar 10 18:20:25 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 00:06:16 2015 -0700

--
 .../hadoop/hdfs/server/common/Storage.java  |  2 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 16 ++-
 .../datanode/TestDataNodeHotSwapVolumes.java| 34 ++
 .../fsdataset/impl/FsDatasetTestUtil.java   | 49 
 .../fsdataset/impl/TestFsDatasetImpl.java   | 41 
 5 files changed, 109 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c723f3b1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 14b52ce..8d0129a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -672,7 +672,7 @@ public abstract class Storage extends StorageInfo {
  */
 public void lock() throws IOException {
   if (isShared()) {
-LOG.info("Locking is disabled");
+LOG.info("Locking is disabled for " + this.root);
 return;
   }
   FileLock newLock = tryLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c723f3b1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index cbcf6b8..f24d644 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -46,6 +46,7 @@ import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
@@ -322,6 +323,12 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
   }
 
+  @VisibleForTesting
+  public FsVolumeImpl createFsVolume(String storageUuid, File currentDir,
+  StorageType storageType) throws IOException {
+return new FsVolumeImpl(this, storageUuid, currentDir, conf, storageType);
+  }
+
   @Override
   public void addVolume(final StorageLocation location,
final List<NamespaceInfo> nsInfos)
@@ -335,8 +342,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 final Storage.StorageDirectory sd = builder.getStorageDirectory();
 
 StorageType storageType = location.getStorageType();
-final FsVolumeImpl fsVolume = new FsVolumeImpl(
-this, sd.getStorageUuid(), sd.getCurrentDir(), this.conf, storageType);
+final FsVolumeImpl fsVolume =
+createFsVolume(sd.getStorageUuid(), sd.getCurrentDir(), storageType);
 final ReplicaMap tempVolumeMap = new ReplicaMap(fsVolume);
ArrayList<IOException> exceptions = Lists.newArrayList();
 
@@ -352,6 +359,11 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl>

[39/43] hadoop git commit: HDFS-8431. hdfs crypto class not found in Windows. Contributed by Anu Engineer.

2015-08-14 Thread sjlee
HDFS-8431. hdfs crypto class not found in Windows. Contributed by Anu Engineer.

(cherry picked from commit 50eeea13000f0c82e0567410f0f8b611248f8c1b)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd

(cherry picked from commit 25db34127811fbadb9a698fa3a76e24d426fb0f6)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77a10e76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77a10e76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77a10e76

Branch: refs/heads/sjlee/hdfs-merge
Commit: 77a10e76e99c14cd26ebb3664304f6ed9cc7bf65
Parents: 2d5e60f
Author: cnauroth cnaur...@apache.org
Authored: Wed May 27 22:54:00 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 18:41:50 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77a10e76/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index 69424ed..453a023 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -47,7 +47,7 @@ if "%1" == "--config" (
   goto print_usage
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode 
dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups 
snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode 
dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups 
snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies crypto
   for %%i in ( %hdfscommands% ) do (
 if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -159,6 +159,10 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.GetStoragePolicies
   goto :eof
 
+:crypto
+  set CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
  if "%1" == "--config" (
@@ -207,6 +211,7 @@ goto :eof
   @echo   lsSnapshottableDir   list all snapshottable dirs owned by the 
current user
   @echoUse -help to see options
   @echo   cacheadmin   configure the HDFS cache
+  @echo   crypto   configure HDFS encryption zones
   @echo   moverrun a utility to move block replicas across 
storage types
   @echo   storagepolicies  get all the existing block storage policies
   @echo.



[43/43] hadoop git commit: HDFS-7314. When the DFSClient lease cannot be renewed, abort open-for-write files rather than the entire DFSClient. (mingma)

2015-08-14 Thread sjlee
HDFS-7314. When the DFSClient lease cannot be renewed, abort open-for-write 
files rather than the entire DFSClient. (mingma)

(cherry picked from commit fbd88f1062f3c4b208724d208e3f501eb196dfab)
(cherry picked from commit 516bbf1c20547dc513126df0d9f0934bb65c10c7)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb1bf424
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb1bf424
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb1bf424

Branch: refs/heads/sjlee/hdfs-merge
Commit: fb1bf424bdad20fff7ab390ce75c4bec558e7e6d
Parents: e1b4e69
Author: Ming Ma min...@apache.org
Authored: Thu Jul 16 12:33:57 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Fri Aug 14 00:06:13 2015 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 16 +
 .../org/apache/hadoop/hdfs/LeaseRenewer.java| 12 +++-
 .../hadoop/hdfs/TestDFSClientRetries.java   | 66 +++-
 3 files changed, 76 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb1bf424/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index ad24a0d..20f9d00 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -903,23 +903,9 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   void closeConnectionToNamenode() {
 RPC.stopProxy(namenode);
   }
-  
-  /** Abort and release resources held.  Ignore all errors. */
-  void abort() {
-clientRunning = false;
-closeAllFilesBeingWritten(true);
-try {
-  // remove reference to this client and stop the renewer,
-  // if there is no more clients under the renewer.
-  getLeaseRenewer().closeClient(this);
-} catch (IOException ioe) {
-   LOG.info("Exception occurred while aborting the client " + ioe);
-}
-closeConnectionToNamenode();
-  }
 
   /** Close/abort all files being written. */
-  private void closeAllFilesBeingWritten(final boolean abort) {
+  public void closeAllFilesBeingWritten(final boolean abort) {
 for(;;) {
   final long inodeId;
   final DFSOutputStream out;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb1bf424/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
index f8f337c..855b539 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
@@ -211,6 +211,12 @@ class LeaseRenewer {
 return renewal;
   }
 
+  /** Used for testing only. */
+  @VisibleForTesting
+  public synchronized void setRenewalTime(final long renewal) {
+this.renewal = renewal;
+  }
+
   /** Add a client. */
   private synchronized void addClient(final DFSClient dfsc) {
 for(DFSClient c : dfsclients) {
@@ -450,8 +456,12 @@ class LeaseRenewer {
+ (elapsed/1000) + " seconds.  Aborting ...", ie);
   synchronized (this) {
 while (!dfsclients.isEmpty()) {
-  dfsclients.get(0).abort();
+  DFSClient dfsClient = dfsclients.get(0);
+  dfsClient.closeAllFilesBeingWritten(true);
+  closeClient(dfsClient);
 }
+//Expire the current LeaseRenewer thread.
+emptyTime = 0;
   }
   break;
 } catch (IOException ie) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb1bf424/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 382ad48..0a39cb5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ 
b

[34/43] hadoop git commit: HDFS-7980. Incremental BlockReport will dramatically slow down namenode startup. Contributed by Walter Su

2015-08-14 Thread sjlee
HDFS-7980. Incremental BlockReport will dramatically slow down namenode 
startup.  Contributed by Walter Su

(cherry picked from commit 4e1f2eb3955a97a70cf127dc97ae49201a90f5e0)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a28c6a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a28c6a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a28c6a3

Branch: refs/heads/sjlee/hdfs-merge
Commit: 5a28c6a37cab5f1061b6ed9536341da537d51b5a
Parents: b054cb6
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Thu May 7 11:36:35 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 16:32:37 2015 -0700

--
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a28c6a3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e5d97d1..e271d55 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1815,7 +1815,7 @@ public class BlockManager {
 return !node.hasStaleStorages();
   }
 
-  if (storageInfo.numBlocks() == 0) {
+  if (storageInfo.getBlockReportCount() == 0) {
 // The first block report can be processed a lot more efficiently than
 // ordinary block reports.  This shortens restart times.
 processFirstBlockReport(storageInfo, newReport);
@@ -2038,7 +2038,7 @@ public class BlockManager {
   final BlockListAsLongs report) throws IOException {
 if (report == null) return;
 assert (namesystem.hasWriteLock());
-assert (storageInfo.numBlocks() == 0);
+assert (storageInfo.getBlockReportCount() == 0);
 BlockReportIterator itBR = report.getBlockReportIterator();
 
 while(itBR.hasNext()) {
@@ -2451,14 +2451,14 @@ public class BlockManager {
 }
 
 // just add it
-storageInfo.addBlock(storedBlock);
+boolean added = storageInfo.addBlock(storedBlock);
 
 // Now check for completion of blocks and safe block count
 int numCurrentReplica = countLiveNodes(storedBlock);
 if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
 && numCurrentReplica >= minReplication) {
   completeBlock(storedBlock.getBlockCollection(), storedBlock, false);
-} else if (storedBlock.isComplete()) {
+} else if (storedBlock.isComplete() && added) {
   // check whether safe replication is reached for the block
   // only complete blocks are counted towards that.
   // In the case that the block just became complete above, completeBlock()



[28/43] hadoop git commit: HDFS-7742. Favoring decommissioning node for replication can cause a block to stay underreplicated for long periods. Contributed by Nathan Roberts. (cherry picked from commi

2015-08-14 Thread sjlee
HDFS-7742. Favoring decommissioning node for replication can cause a block to 
stay
underreplicated for long periods. Contributed by Nathan Roberts.
(cherry picked from commit 04ee18ed48ceef34598f954ff40940abc9fde1d2)

(cherry picked from commit c4cedfc1d601127430c70ca8ca4d4e2ee2d1003d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6b68a82
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6b68a82
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6b68a82

Branch: refs/heads/sjlee/hdfs-merge
Commit: c6b68a82adea8de488b255594d35db8e01f5fc8f
Parents: 03d4af3
Author: Kihwal Lee kih...@apache.org
Authored: Mon Mar 30 10:11:25 2015 -0500
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 10:58:04 2015 -0700

--
 .../server/blockmanagement/BlockManager.java| 10 ++---
 .../blockmanagement/TestBlockManager.java   | 42 
 2 files changed, 47 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6b68a82/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 69f3e46..e5d97d1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1652,7 +1652,8 @@ public class BlockManager {
   // If so, do not select the node as src node
   if ((nodesCorrupt != null) && nodesCorrupt.contains(node))
 continue;
-  if(priority != UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY
+  if(priority != UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY &&
+      !node.isDecommissionInProgress() &&
       node.getNumberOfBlocksToBeReplicated() >= maxReplicationStreams)
   {
 continue; // already reached replication limit
@@ -1667,13 +1668,12 @@ public class BlockManager {
   // never use already decommissioned nodes
   if(node.isDecommissioned())
 continue;
-  // we prefer nodes that are in DECOMMISSION_INPROGRESS state
-  if(node.isDecommissionInProgress() || srcNode == null) {
+
+  // We got this far, current node is a reasonable choice
+  if (srcNode == null) {
 srcNode = node;
 continue;
   }
-  if(srcNode.isDecommissionInProgress())
-continue;
   // switch to a different node randomly
   // this to prevent from deterministically selecting the same node even
   // if the node failed to replicate the block on previous iterations

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6b68a82/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index ddb6143..7eec52d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -536,6 +536,48 @@ public class TestBlockManager {
   }
 
   @Test
+  public void testFavorDecomUntilHardLimit() throws Exception {
+bm.maxReplicationStreams = 0;
+bm.replicationStreamsHardLimit = 1;
+
+long blockId = 42; // arbitrary
+Block aBlock = new Block(blockId, 0, 0);
+    List<DatanodeDescriptor> origNodes = getNodes(0, 1);
+// Add the block to the first node.
+addBlockOnNodes(blockId,origNodes.subList(0,1));
+origNodes.get(0).startDecommission();
+
+    List<DatanodeDescriptor> cntNodes = new LinkedList<DatanodeDescriptor>();
+    List<DatanodeStorageInfo> liveNodes = new LinkedList<DatanodeStorageInfo>();
+
+    assertNotNull("Chooses decommissioning source node for a normal replication"
+        + " if all available source nodes have reached their replication"
+        + " limits below the hard limit.",
+        bm.chooseSourceDatanode(
+            aBlock,
+            cntNodes,
+            liveNodes,
+            new NumberReplicas(),
+            UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED));
+
+
+// Increase the replication count to test replication count  hard limit

[25/43] hadoop git commit: HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)

2015-08-14 Thread sjlee
HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)

(cherry picked from commit 90164ffd84f6ef56e9f8f99dcc7424a8d115dbae)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c9a7461
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c9a7461
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c9a7461

Branch: refs/heads/sjlee/hdfs-merge
Commit: 2c9a7461ec2ceba5885e95bc79f8dcbfd198df60
Parents: 0379841
Author: yliu y...@apache.org
Authored: Thu Mar 19 23:24:55 2015 +0800
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 09:58:07 2015 -0700

--
 .../server/blockmanagement/BlockManager.java| 41 
 .../hdfs/server/namenode/FSNamesystem.java  |  8 +++-
 2 files changed, 47 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c9a7461/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index d26cc52..5a38351 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1931,6 +1931,47 @@ public class BlockManager {
   }
 
   /**
+   * Mark block replicas as corrupt except those on the storages in 
+   * newStorages list.
+   */
+  public void markBlockReplicasAsCorrupt(BlockInfo block, 
+  long oldGenerationStamp, long oldNumBytes, 
+  DatanodeStorageInfo[] newStorages) throws IOException {
+assert namesystem.hasWriteLock();
+BlockToMarkCorrupt b = null;
+if (block.getGenerationStamp() != oldGenerationStamp) {
+      b = new BlockToMarkCorrupt(block, oldGenerationStamp,
+          "genstamp does not match " + oldGenerationStamp
+          + " : " + block.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
+    } else if (block.getNumBytes() != oldNumBytes) {
+      b = new BlockToMarkCorrupt(block,
+          "length does not match " + oldNumBytes
+          + " : " + block.getNumBytes(), Reason.SIZE_MISMATCH);
+} else {
+  return;
+}
+
+for (DatanodeStorageInfo storage : getStorages(block)) {
+  boolean isCorrupt = true;
+  if (newStorages != null) {
+for (DatanodeStorageInfo newStorage : newStorages) {
+          if (newStorage != null && storage.equals(newStorage)) {
+isCorrupt = false;
+break;
+  }
+}
+  }
+  if (isCorrupt) {
+        blockLog.info("BLOCK* markBlockReplicasAsCorrupt: mark block replica" +
+            b + " on " + storage.getDatanodeDescriptor() +
+            " as corrupt because the dn is not in the new committed " +
+            "storage list.");
+markBlockAsCorrupt(b, storage, storage.getDatanodeDescriptor());
+  }
+}
+  }
+
+  /**
* processFirstBlockReport is intended only for processing initial block
* reports, the first block report received from a DN after it registers.
* It just adds all the valid replicas to the datanode, without calculating 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c9a7461/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index c92b431..fa52981 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4791,6 +4791,8 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
   throw new IOException("Block (=" + lastblock + ") not found");
 }
   }
+  final long oldGenerationStamp = storedBlock.getGenerationStamp();
+  final long oldNumBytes = storedBlock.getNumBytes

[35/43] hadoop git commit: HDFS-7894. Rolling upgrade readiness is not updated in jmx until query command is issued. Contributed by Brahma Reddy Battula. (cherry picked from commit 6f622672b62aa8d7190

2015-08-14 Thread sjlee
HDFS-7894. Rolling upgrade readiness is not updated in jmx until query command 
is issued. Contributed by Brahma Reddy Battula.
(cherry picked from commit 6f622672b62aa8d719060063ef0e47480cdc8655)

(cherry picked from commit 802a5775f3522c57c60ae29ecb9533dbbfecfe76)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/995382c5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/995382c5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/995382c5

Branch: refs/heads/sjlee/hdfs-merge
Commit: 995382c5234ad6c07f327e5d1f2a1c7e391a0b60
Parents: 5a28c6a
Author: Kihwal Lee kih...@apache.org
Authored: Fri May 8 09:32:07 2015 -0500
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 16:35:27 2015 -0700

--
 .../hdfs/server/namenode/FSNamesystem.java  | 23 ++--
 1 file changed, 21 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/995382c5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 5f396f7..2c6a65d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -8417,11 +8417,30 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
 
   @Override  // NameNodeMXBean
   public RollingUpgradeInfo.Bean getRollingUpgradeStatus() {
+if (!isRollingUpgrade()) {
+  return null;
+}
 RollingUpgradeInfo upgradeInfo = getRollingUpgradeInfo();
-if (upgradeInfo != null) {
+if (upgradeInfo.createdRollbackImages()) {
   return new RollingUpgradeInfo.Bean(upgradeInfo);
 }
-return null;
+readLock();
+try {
+  // check again after acquiring the read lock.
+  upgradeInfo = getRollingUpgradeInfo();
+  if (upgradeInfo == null) {
+return null;
+  }
+  if (!upgradeInfo.createdRollbackImages()) {
+boolean hasRollbackImage = this.getFSImage().hasRollbackFSImage();
+upgradeInfo.setCreatedRollbackImages(hasRollbackImage);
+  }
+} catch (IOException ioe) {
+      LOG.warn("Encountered exception setting Rollback Image", ioe);
+} finally {
+  readUnlock();
+}
+return new RollingUpgradeInfo.Bean(upgradeInfo);
   }
 
   /** Is rolling upgrade in progress? */



[40/43] hadoop git commit: HDFS-7609. Avoid retry cache collision when Standby NameNode loading edits. Contributed by Ming Ma.

2015-08-14 Thread sjlee
HDFS-7609. Avoid retry cache collision when Standby NameNode loading edits. 
Contributed by Ming Ma.

(cherry picked from commit 7817674a3a4d097b647dd77f1345787dd376d5ea)
(cherry picked from commit 17fb442a4c4e43105374c97fccd68dd966729a19)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fad2a062
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fad2a062
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fad2a062

Branch: refs/heads/sjlee/hdfs-merge
Commit: fad2a062ddbb955a42dd5a90d64781617287f8df
Parents: 77a10e7
Author: Jing Zhao ji...@apache.org
Authored: Fri May 29 11:05:13 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Aug 13 23:33:31 2015 -0700

--
 .../hdfs/server/namenode/FSNamesystem.java  | 18 --
 .../hdfs/server/namenode/NameNodeRpcServer.java | 20 +++
 .../namenode/ha/TestRetryCacheWithHA.java   | 37 ++--
 3 files changed, 55 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fad2a062/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 2c6a65d..19edbb5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2003,7 +2003,6 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
 
 HdfsFileStatus resultingStat = null;
 FSPermissionChecker pc = getPermissionChecker();
-checkOperation(OperationCategory.WRITE);
 waitForLoadingFSImage();
 writeLock();
 try {
@@ -2563,7 +2562,6 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
 boolean skipSync = false;
 HdfsFileStatus stat = null;
 FSPermissionChecker pc = getPermissionChecker();
-checkOperation(OperationCategory.WRITE);
 if (blockSize < minBlockSize) {
   throw new IOException("Specified block size is less than configured" +
       " minimum value (" + DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY
@@ -3137,7 +3135,6 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
 
 LocatedBlock lb = null;
 FSPermissionChecker pc = getPermissionChecker();
-checkOperation(OperationCategory.WRITE);
 byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
 writeLock();
 try {
@@ -3806,7 +3803,6 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
   throw new IOException(Invalid name:  + dst);
 }
 FSPermissionChecker pc = getPermissionChecker();
-checkOperation(OperationCategory.WRITE);
 byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src);
 byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst);
 boolean status = false;
@@ -3879,7 +3875,6 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
 }
 final FSPermissionChecker pc = getPermissionChecker();
 
-checkOperation(OperationCategory.WRITE);
 CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
 if (cacheEntry != null  cacheEntry.isSuccess()) {
   return; // Return previous response
@@ -4003,7 +3998,6 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
 BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
 ListINode removedINodes = new ChunkedArrayListINode();
 FSPermissionChecker pc = getPermissionChecker();
-checkOperation(OperationCategory.WRITE);
 byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
 boolean ret = false;
 
@@ -7048,7 +7042,6 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
   void updatePipeline(String clientName, ExtendedBlock oldBlock, 
   ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs)
   throws IOException {
-checkOperation(OperationCategory.WRITE);
 CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
 if (cacheEntry != null  cacheEntry.isSuccess()) {
   return; // Return previous response
@@ -8141,7 +8134,6 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats

[17/43] hadoop git commit: HDFS-7009. Active NN and standby NN have different live nodes. Contributed by Ming Ma.

2015-08-14 Thread sjlee
HDFS-7009. Active NN and standby NN have different live nodes. Contributed by 
Ming Ma.

(cherry picked from commit 769507bd7a501929d9a2fd56c72c3f50673488a4)
(cherry picked from commit 657a6e389b3f6eae43efb11deb6253c3b1255a51)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5ddc345
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5ddc345
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5ddc345

Branch: refs/heads/sjlee/hdfs-merge
Commit: d5ddc3450f2f49ea411de590ff3de15b5ec4e17c
Parents: 1faa44d
Author: cnauroth cnaur...@apache.org
Authored: Mon Feb 23 15:12:27 2015 -0800
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 23:19:33 2015 -0700

--
 .../main/java/org/apache/hadoop/ipc/Client.java |   3 +-
 .../TestDatanodeProtocolRetryPolicy.java| 231 +++
 2 files changed, 233 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5ddc345/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 96da01c..8a98eb0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -25,6 +25,7 @@ import java.io.BufferedOutputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
+import java.io.EOFException;
 import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -279,7 +280,7 @@ public class Client {
   /** Check the rpc response header. */
   void checkResponse(RpcResponseHeaderProto header) throws IOException {
 if (header == null) {
-      throw new IOException("Response is null.");
+      throw new EOFException("Response is null.");
 }
 if (header.hasClientId()) {
   // check client IDs

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5ddc345/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
new file mode 100644
index 000..c7ed5b9
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
@@ -0,0 +1,231 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.EOFException;
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+
+import com.google.common.base.Supplier;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import

[07/43] hadoop git commit: HDFS-7531. Improve the concurrent access on FsVolumeList (Lei Xu via Colin P. McCabe) (cherry picked from commit 3b173d95171d01ab55042b1162569d1cf14a8d43)

2015-08-14 Thread sjlee
HDFS-7531. Improve the concurrent access on FsVolumeList (Lei Xu via Colin P. 
McCabe)
(cherry picked from commit 3b173d95171d01ab55042b1162569d1cf14a8d43)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

(cherry picked from commit dda1fc169db2e69964cca746be4ff8965eb8b56f)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba28192f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba28192f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba28192f

Branch: refs/heads/sjlee/hdfs-merge
Commit: ba28192f9d5a8385283bd717bca494e6981d378f
Parents: 418bd16
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed Dec 17 16:41:59 2014 -0800
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 22:11:55 2015 -0700

--
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  28 ++--
 .../datanode/fsdataset/impl/FsVolumeList.java   | 138 +--
 .../fsdataset/impl/TestFsDatasetImpl.java   |  70 +-
 3 files changed, 174 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba28192f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index e7fa6d7..0d9f096 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -127,7 +127,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
   @Override // FsDatasetSpi
   public List<FsVolumeImpl> getVolumes() {
-return volumes.volumes;
+return volumes.getVolumes();
   }
 
   @Override
@@ -140,9 +140,10 @@ class FsDatasetImpl implements FsDatasetSpiFsVolumeImpl {
   throws IOException {
 StorageReport[] reports;
 synchronized (statsLock) {
-  reports = new StorageReport[volumes.volumes.size()];
+      List<FsVolumeImpl> curVolumes = getVolumes();
+  reports = new StorageReport[curVolumes.size()];
   int i = 0;
-  for (FsVolumeImpl volume : volumes.volumes) {
+  for (FsVolumeImpl volume : curVolumes) {
 reports[i++] = new StorageReport(volume.toDatanodeStorage(),
  false,
  volume.getCapacity(),
@@ -1322,7 +1323,8 @@ class FsDatasetImpl implements FsDatasetSpiFsVolumeImpl 
{
     Map<String, ArrayList<ReplicaInfo>> uc =
         new HashMap<String, ArrayList<ReplicaInfo>>();
 
-for (FsVolumeSpi v : volumes.volumes) {
+ListFsVolumeImpl curVolumes = getVolumes();
+for (FsVolumeSpi v : curVolumes) {
   finalized.put(v.getStorageID(), new ArrayListReplicaInfo());
   uc.put(v.getStorageID(), new ArrayListReplicaInfo());
 }
@@ -1349,7 +1351,7 @@ class FsDatasetImpl implements FsDatasetSpiFsVolumeImpl 
{
   }
 }
 
-for (FsVolumeSpi v : volumes.volumes) {
+for (FsVolumeImpl v : curVolumes) {
   ArrayListReplicaInfo finalizedList = finalized.get(v.getStorageID());
   ArrayListReplicaInfo ucList = uc.get(v.getStorageID());
   blockReportsMap.put(((FsVolumeImpl) v).toDatanodeStorage(),
@@ -,7 +2224,7 @@ class FsDatasetImpl implements FsDatasetSpiFsVolumeImpl 
{
 
   private CollectionVolumeInfo getVolumeInfo() {
 CollectionVolumeInfo info = new ArrayListVolumeInfo();
-for (FsVolumeImpl volume : volumes.volumes) {
+for (FsVolumeImpl volume : getVolumes()) {
   long used = 0;
   long free = 0;
   try {
@@ -2256,8 +2258,9 @@ class FsDatasetImpl implements FsDatasetSpiFsVolumeImpl 
{
   @Override //FsDatasetSpi
   public synchronized void deleteBlockPool(String bpid, boolean force)
   throws IOException {
+ListFsVolumeImpl curVolumes = getVolumes();
 if (!force) {
-  for (FsVolumeImpl volume : volumes.volumes) {
+  for (FsVolumeImpl volume : curVolumes) {
 if (!volume.isBPDirEmpty(bpid)) {
   LOG.warn(bpid

[09/43] hadoop git commit: reverted CHANGES.txt for HDFS-7225.

2015-08-14 Thread sjlee
reverted CHANGES.txt for HDFS-7225.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/084674aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/084674aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/084674aa

Branch: refs/heads/sjlee/hdfs-merge
Commit: 084674aa28e841a68d97cec98289d1ad137ece6c
Parents: 33fb7b4
Author: Sangjin Lee sj...@apache.org
Authored: Wed Aug 12 22:16:27 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 22:16:27 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ---
 1 file changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/084674aa/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cc4d2ab..47ec910 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -35,9 +35,6 @@ Release 2.6.1 - UNRELEASED
 
 HDFS-8486. DN startup may cause severe data loss. (daryn via cmccabe)
 
-HDFS-7225. Remove stale block invalidation work when DN re-registers with
-different UUID. (Zhe Zhang and Andrew Wang)
-
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES



[16/43] hadoop git commit: HDFS-7788. Post-2.6 namenode may not start up with an image containing inodes created with an old release. Contributed by Rushabh Shah. (cherry picked from commit 7ae5255a16

2015-08-14 Thread sjlee
HDFS-7788. Post-2.6 namenode may not start up with an image containing inodes 
created with an old release. Contributed by Rushabh Shah.
(cherry picked from commit 7ae5255a1613ccfb43646f33eabacf1062c86e93)

(cherry picked from commit b9157f92fc3e008e4f3029f8feeaf6acb52eb76f)

Conflicts:
  
hadoop-hdfs-project/hadoop-hdfs/src/site/resources/image-with-zero-block-size.tar.gz
  
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1faa44d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1faa44d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1faa44d8

Branch: refs/heads/sjlee/hdfs-merge
Commit: 1faa44d8f4d7b944e99dd0470ea2638c7653a131
Parents: c1e65de
Author: Kihwal Lee kih...@apache.org
Authored: Fri Feb 20 09:09:56 2015 -0600
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 23:15:07 2015 -0700

--
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   3 ++
 .../apache/hadoop/hdfs/util/LongBitFormat.java  |   4 ++
 .../resources/image-with-zero-block-size.tar.gz | Bin 0 - 1378 bytes
 .../hdfs/server/namenode/TestFSImage.java   |  48 +++
 4 files changed, 55 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1faa44d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 5136f8b..1dd6da3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -103,6 +103,9 @@ public class INodeFile extends INodeWithAdditionalFields
 static long toLong(long preferredBlockSize, short replication,
 byte storagePolicyID) {
   long h = 0;
+  if (preferredBlockSize == 0) {
+preferredBlockSize = PREFERRED_BLOCK_SIZE.BITS.getMin();
+  }
   h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
   h = REPLICATION.BITS.combine(replication, h);
   h = STORAGE_POLICY_ID.BITS.combine(storagePolicyID, h);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1faa44d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index 863d9f7..9399d84 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -64,4 +64,8 @@ public class LongBitFormat implements Serializable {
 }
 return (record  ~MASK) | (value  OFFSET);
   }
+  
+  public long getMin() {
+return MIN;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1faa44d8/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/image-with-zero-block-size.tar.gz
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/image-with-zero-block-size.tar.gz
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/image-with-zero-block-size.tar.gz
new file mode 100644
index 000..41f3105
Binary files /dev/null and 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/image-with-zero-block-size.tar.gz
 differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1faa44d8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index f21834e..d19980c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -28,10 +28,13 @@ import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import

[06/43] hadoop git commit: HDFS-7446. HDFS inotify should have the ability to determine what txid it has read up to (cmccabe) (cherry picked from commit 75a326aaff8c92349701d9b3473c3070b8c2be44)

2015-08-14 Thread sjlee
HDFS-7446. HDFS inotify should have the ability to determine what txid it has 
read up to (cmccabe)
(cherry picked from commit 75a326aaff8c92349701d9b3473c3070b8c2be44)

(cherry picked from commit 06552a15d5172a2b0ad3d61aa7f9a849857385aa)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/418bd16e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/418bd16e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/418bd16e

Branch: refs/heads/sjlee/hdfs-merge
Commit: 418bd16eaea26e647318db74fd2f42c0d5758a3c
Parents: 014d07d
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Tue Nov 25 17:44:34 2014 -0800
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 21:46:01 2015 -0700

--
 .../hadoop/hdfs/DFSInotifyEventInputStream.java |  65 ++--
 .../apache/hadoop/hdfs/inotify/EventBatch.java  |  41 +++
 .../hadoop/hdfs/inotify/EventBatchList.java |  63 
 .../apache/hadoop/hdfs/inotify/EventsList.java  |  63 
 .../hadoop/hdfs/protocol/ClientProtocol.java|   8 +-
 .../ClientNamenodeProtocolTranslatorPB.java |   4 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 341 ++-
 .../namenode/InotifyFSEditLogOpTranslator.java  |  74 ++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  23 +-
 .../hadoop-hdfs/src/main/proto/inotify.proto|  10 +-
 .../hdfs/TestDFSInotifyEventInputStream.java| 209 +++-
 11 files changed, 513 insertions(+), 388 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/418bd16e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInotifyEventInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInotifyEventInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInotifyEventInputStream.java
index 73c5f55..83b92b9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInotifyEventInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInotifyEventInputStream.java
@@ -19,11 +19,10 @@
 package org.apache.hadoop.hdfs;
 
 import com.google.common.collect.Iterators;
-import com.google.common.util.concurrent.UncheckedExecutionException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.inotify.Event;
-import org.apache.hadoop.hdfs.inotify.EventsList;
+import org.apache.hadoop.hdfs.inotify.EventBatch;
+import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.inotify.MissingEventsException;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.util.Time;
@@ -33,13 +32,7 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.util.Iterator;
 import java.util.Random;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
 
 /**
  * Stream for reading inotify events. DFSInotifyEventInputStreams should not
@@ -52,7 +45,7 @@ public class DFSInotifyEventInputStream {
   .class);
 
   private final ClientProtocol namenode;
-  private Iterator<Event> it;
+  private Iterator<EventBatch> it;
   private long lastReadTxid;
   /**
* The most recent txid the NameNode told us it has sync'ed -- helps us
@@ -78,22 +71,22 @@ public class DFSInotifyEventInputStream {
   }
 
   /**
-   * Returns the next event in the stream or null if no new events are 
currently
-   * available.
+   * Returns the next batch of events in the stream or null if no new
+   * batches are currently available.
*
* @throws IOException because of network error or edit log
* corruption. Also possible if JournalNodes are unresponsive in the
* QJM setting (even one unresponsive JournalNode is enough in rare cases),
* so catching this exception and retrying at least a few times is
* recommended.
-   * @throws MissingEventsException if we cannot return the next event in the
-   * stream because the data for the event (and possibly some subsequent 
events)
-   * has been deleted (generally because this stream is a very large number of
-   * events behind the current state of the NameNode). It is safe to continue
-   * reading from the stream after this exception is thrown -- the next
-   * available event will be returned.
+   * @throws

[05/43] hadoop git commit: HDFS-7225. Remove stale block invalidation work when DN re-registers with different UUID. (Zhe Zhang and Andrew Wang)

2015-08-14 Thread sjlee
HDFS-7225. Remove stale block invalidation work when DN re-registers with 
different UUID. (Zhe Zhang and Andrew Wang)

(cherry picked from commit 406c09ad1150c4971c2b7675fcb0263d40517fbf)
(cherry picked from commit 2e15754a92c6589308ccbbb646166353cc2f2456)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/014d07de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/014d07de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/014d07de

Branch: refs/heads/sjlee/hdfs-merge
Commit: 014d07de2e9b39be4b6793f0e09fcf8548570ad5
Parents: d79a584
Author: Andrew Wang w...@apache.org
Authored: Tue Nov 18 22:14:04 2014 -0800
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 21:32:30 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../server/blockmanagement/BlockManager.java|  21 ++-
 .../server/blockmanagement/DatanodeManager.java |   2 +
 .../TestComputeInvalidateWork.java  | 167 +++
 4 files changed, 156 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/014d07de/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 47ec910..cc4d2ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -35,6 +35,9 @@ Release 2.6.1 - UNRELEASED
 
 HDFS-8486. DN startup may cause severe data loss. (daryn via cmccabe)
 
+HDFS-7225. Remove stale block invalidation work when DN re-registers with
+different UUID. (Zhe Zhang and Andrew Wang)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/014d07de/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 17112bf..d26cc52 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1112,6 +1112,18 @@ public class BlockManager {
   }
 
   /**
+   * Remove all block invalidation tasks under this datanode UUID;
+   * used when a datanode registers with a new UUID and the old one
+   * is wiped.
+   */
+  void removeFromInvalidates(final DatanodeInfo datanode) {
+if (!namesystem.isPopulatingReplQueues()) {
+  return;
+}
+invalidateBlocks.remove(datanode);
+  }
+
+  /**
* Mark the block belonging to datanode as corrupt
* @param blk Block to be marked as corrupt
* @param dn Datanode which holds the corrupt replica
@@ -3395,7 +3407,14 @@ public class BlockManager {
 return 0;
   }
   try {
-toInvalidate = 
invalidateBlocks.invalidateWork(datanodeManager.getDatanode(dn));
+DatanodeDescriptor dnDescriptor = datanodeManager.getDatanode(dn);
+if (dnDescriptor == null) {
+  LOG.warn("DataNode " + dn + " cannot be found with UUID " +
+  dn.getDatanodeUuid() + ", removing block invalidation work.");
+  invalidateBlocks.remove(dn);
+  return 0;
+}
+toInvalidate = invalidateBlocks.invalidateWork(dnDescriptor);
 
 if (toInvalidate == null) {
   return 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/014d07de/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 6a52349..80965b9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -593,6 +593,8 @@ public class DatanodeManager {
 synchronized (datanodeMap) {
   host2DatanodeMap.remove(datanodeMap.remove(key));
 }
+// Also remove all block invalidation tasks under this node
+blockManager.removeFromInvalidates(new DatanodeInfo(node));
 if (LOG.isDebugEnabled()) {
   LOG.debug

[19/43] hadoop git commit: HDFS-7871. NameNodeEditLogRoller can keep printing 'Swallowing exception' message. Contributed by Jing Zhao.

2015-08-14 Thread sjlee
HDFS-7871. NameNodeEditLogRoller can keep printing 'Swallowing exception' 
message. Contributed by Jing Zhao.

(cherry picked from commit b442aeec95abfa1c6f835a116dfe6e186b0d841d)
(cherry picked from commit 6090f51725e2b44d794433ed72a1901fae2ba7e3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1af1ac4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1af1ac4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1af1ac4

Branch: refs/heads/sjlee/hdfs-merge
Commit: e1af1ac4e91d36b21df18ce5627e1f69f27f0776
Parents: fd70e4d
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 2 20:22:04 2015 -0800
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 23:30:57 2015 -0700

--
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1af1ac4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 8e5a2db..5541637 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5203,14 +5203,16 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
 + rollThreshold);
 rollEditLog();
   }
+} catch (Exception e) {
+  FSNamesystem.LOG.error("Swallowing exception in "
+  + NameNodeEditLogRoller.class.getSimpleName() + ":", e);
+}
+try {
  Thread.sleep(sleepIntervalMs);
} catch (InterruptedException e) {
  FSNamesystem.LOG.info(NameNodeEditLogRoller.class.getSimpleName()
  + " was interrupted, exiting");
  break;
-} catch (Exception e) {
-  FSNamesystem.LOG.error("Swallowing exception in "
-  + NameNodeEditLogRoller.class.getSimpleName() + ":", e);
 }
   }
 }



[10/43] hadoop git commit: HDFS-7182. JMX metrics aren't accessible when NN is busy. Contributed by Ming Ma.

2015-08-14 Thread sjlee
HDFS-7182. JMX metrics aren't accessible when NN is busy. Contributed by Ming 
Ma.

(cherry picked from commit 4b589e7cfa27bd042e228bbbcf1c3b75b2aeaa57)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96f0813c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96f0813c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96f0813c

Branch: refs/heads/sjlee/hdfs-merge
Commit: 96f0813c5d6140aabe7b2837f30971936276e689
Parents: 084674a
Author: Jing Zhao ji...@apache.org
Authored: Fri Jan 9 17:35:57 2015 -0800
Committer: Sangjin Lee sj...@apache.org
Committed: Wed Aug 12 22:19:28 2015 -0700

--
 .../hdfs/server/namenode/FSNamesystem.java  | 15 ++---
 .../server/namenode/TestFSNamesystemMBean.java  | 69 +---
 2 files changed, 23 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96f0813c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9e38195..7077b68 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -421,7 +421,7 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
 
   private String nameserviceId;
 
-  private RollingUpgradeInfo rollingUpgradeInfo = null;
+  private volatile RollingUpgradeInfo rollingUpgradeInfo = null;
   /**
* A flag that indicates whether the checkpointer should checkpoint a 
rollback
* fsimage. The edit log tailer sets this flag. The checkpoint will create a
@@ -8355,16 +8355,11 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
 
   @Override  // NameNodeMXBean
   public RollingUpgradeInfo.Bean getRollingUpgradeStatus() {
-readLock();
-try {
-  RollingUpgradeInfo upgradeInfo = getRollingUpgradeInfo();
-  if (upgradeInfo != null) {
-return new RollingUpgradeInfo.Bean(upgradeInfo);
-  }
-  return null;
-} finally {
-  readUnlock();
+RollingUpgradeInfo upgradeInfo = getRollingUpgradeInfo();
+if (upgradeInfo != null) {
+  return new RollingUpgradeInfo.Bean(upgradeInfo);
 }
+return null;
   }
 
   /** Is rolling upgrade in progress? */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96f0813c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
index 39e1165..c044fb0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
@@ -17,11 +17,16 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNotNull;
 
 import java.lang.management.ManagementFactory;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 
+import javax.management.MBeanAttributeInfo;
+import javax.management.MBeanInfo;
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 
@@ -51,66 +56,28 @@ public class TestFSNamesystemMBean {
 // come from hadoop metrics framework for the class FSNamesystem.
ObjectName mxbeanNamefsn = new ObjectName(
"Hadoop:service=NameNode,name=FSNamesystem");
-Integer blockCapacity = (Integer) (mbs.getAttribute(mxbeanNamefsn,
-"BlockCapacity"));
 
 // Metrics that belong to FSNamesystemState.
 // These are metrics that FSNamesystem registers directly with 
MBeanServer.
ObjectName mxbeanNameFsns = new ObjectName(
"Hadoop:service=NameNode,name=FSNamesystemState");
-String FSState = (String) (mbs.getAttribute(mxbeanNameFsns,
-"FSState"));
-Long blocksTotal = (Long) (mbs.getAttribute(mxbeanNameFsns,
-"BlocksTotal"));
-Long capacityTotal = (Long

[2/2] hadoop git commit: YARN-3049. [Storage Implementation] Implement storage reader interface to fetch raw data from HBase backend (Zhijie Shen via sjlee)

2015-08-07 Thread sjlee
YARN-3049. [Storage Implementation] Implement storage reader interface to fetch 
raw data from HBase backend (Zhijie Shen via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07433c2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07433c2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07433c2a

Branch: refs/heads/YARN-2928
Commit: 07433c2ad52df9e844dbd90020c277d3df844dcd
Parents: 895ccfa
Author: Sangjin Lee sj...@apache.org
Authored: Fri Aug 7 10:00:22 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Fri Aug 7 10:00:22 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../dev-support/findbugs-exclude.xml|   6 +
 .../records/timelineservice/TimelineEntity.java |   9 +-
 .../storage/FileSystemTimelineReaderImpl.java   | 164 +++
 .../storage/HBaseTimelineReaderImpl.java| 424 +++
 .../storage/HBaseTimelineWriterImpl.java|  43 +-
 .../storage/TimelineSchemaCreator.java  |  12 +
 .../storage/apptoflow/AppToFlowColumn.java  | 126 ++
 .../apptoflow/AppToFlowColumnFamily.java|  51 +++
 .../storage/apptoflow/AppToFlowRowKey.java  |  39 ++
 .../storage/apptoflow/AppToFlowTable.java   | 110 +
 .../storage/apptoflow/package-info.java |  23 +
 .../storage/common/BaseTable.java   |  16 +
 .../storage/common/ColumnPrefix.java|   2 +-
 .../common/TimelineEntitySchemaConstants.java   |  68 ---
 .../common/TimelineHBaseSchemaConstants.java|  68 +++
 .../storage/common/TimelineReaderUtils.java | 112 +
 .../storage/entity/EntityColumn.java|   2 +-
 .../storage/entity/EntityColumnFamily.java  |   2 +-
 .../storage/entity/EntityColumnPrefix.java  |   2 +-
 .../storage/entity/EntityRowKey.java|  36 +-
 .../storage/entity/EntityTable.java |   8 +-
 .../storage/TestHBaseTimelineWriterImpl.java|  82 +++-
 23 files changed, 1198 insertions(+), 210 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07433c2a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 15ca8f5..b5af67a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -82,6 +82,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3949. Ensure timely flush of timeline writes. (Sangjin Lee via
 junping_du)
 
+YARN-3049. [Storage Implementation] Implement storage reader interface to
+fetch raw data from HBase backend (Zhijie Shen via sjlee)
+
   IMPROVEMENTS
 
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/07433c2a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index d25d1d9..5583cd6 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -506,4 +506,10 @@
</Or>
<Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD" />
  </Match>
+
+  <!-- Object cast is based on the event type -->
+  <Match>
+    <Class 
name="org.apache.hadoop.yarn.server.resourcemanager.metrics.AbstractTimelineServicePublisher"
 />
+    <Bug pattern="BC_UNCONFIRMED_CAST" />
+  </Match>
 </FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/07433c2a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index 9ef2d90..0701001 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -29,7 +29,9 @@ import javax.xml.bind.annotation.XmlRootElement;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
+import java.util.NavigableSet;
 import java.util.Set;
+import java.util.TreeSet;
 
 /**
  * The basic timeline entity data structure for timeline service v2. Timeline

[1/2] hadoop git commit: YARN-3049. [Storage Implementation] Implement storage reader interface to fetch raw data from HBase backend (Zhijie Shen via sjlee)

2015-08-07 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 895ccfa1a -> 07433c2ad


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07433c2a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
index fd5643d..ab02779 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -38,11 +39,15 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
+import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineWriterUtils;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
@@ -71,6 +76,8 @@ public class TestHBaseTimelineWriterImpl {
   private static void createSchema() throws IOException {
 new EntityTable()
 .createTable(util.getHBaseAdmin(), util.getConfiguration());
+new AppToFlowTable()
+.createTable(util.getHBaseAdmin(), util.getConfiguration());
   }
 
   @Test
@@ -138,10 +145,15 @@ public class TestHBaseTimelineWriterImpl {
 te.addEntity(entity);
 
 HBaseTimelineWriterImpl hbi = null;
+HBaseTimelineReaderImpl hbr = null;
 try {
   Configuration c1 = util.getConfiguration();
   hbi = new HBaseTimelineWriterImpl(c1);
   hbi.init(c1);
+  hbi.start();
+  hbr = new HBaseTimelineReaderImpl();
+  hbr.init(c1);
+  hbr.start();
   String cluster = "cluster1";
   String user = "user1";
   String flow = "some_flow_name";
@@ -255,9 +267,22 @@ public class TestHBaseTimelineWriterImpl {
   assertEquals(1, rowCount);
   assertEquals(17, colCount);
 
+  TimelineEntity e1 = hbr.getEntity(user, cluster, flow, runid, appName,
+  entity.getType(), entity.getId(), 
EnumSet.of(TimelineReader.Field.ALL));
+  Set<TimelineEntity> es1 = hbr.getEntities(user, cluster, flow, runid,
+  appName, entity.getType(), null, null, null, null, null, null, null,
+  null, null, null, null, EnumSet.of(TimelineReader.Field.ALL));
+  assertNotNull(e1);
+  assertEquals(1, es1.size());
 } finally {
-  hbi.stop();
-  hbi.close();
+  if (hbi != null) {
+hbi.stop();
+hbi.close();
+  }
+  if (hbr != null) {
+hbr.stop();
+hbr.close();
+  }
 }
 
 // Somewhat of a hack, not a separate test in order not to have to deal 
with
@@ -283,7 +308,7 @@ public class TestHBaseTimelineWriterImpl {
 
   private void testAdditionalEntity() throws IOException {
 TimelineEvent event = new TimelineEvent();
-String eventId = "foo_event_id";
+String eventId = ApplicationMetricsConstants.CREATED_EVENT_TYPE;
 event.setId(eventId);
 Long expTs = 1436512802000L;
 event.setTimestamp(expTs);
@@ -291,19 +316,23 @@ public class TestHBaseTimelineWriterImpl {
Object expVal = "test";
 event.addInfo(expKey, expVal);
 
-final TimelineEntity entity = new TimelineEntity();
-entity.setId("attempt_1329348432655_0001_m_08_18");
-entity.setType("FOO_ATTEMPT");
+final 

hadoop git commit: YARN-3836. add equals and hashCode to TimelineEntity and other classes in the data model (Li Lu via sjlee)

2015-07-09 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 4c5f88fb0 -> 2d4a8f456


YARN-3836. add equals and hashCode to TimelineEntity and other classes in the 
data model (Li Lu via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d4a8f45
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d4a8f45
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d4a8f45

Branch: refs/heads/YARN-2928
Commit: 2d4a8f4563c06339717ca9410b2794754603fba3
Parents: 4c5f88f
Author: Sangjin Lee sj...@apache.org
Authored: Thu Jul 9 20:50:48 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Jul 9 20:50:48 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../records/timelineservice/TimelineEntity.java | 89 +++-
 .../records/timelineservice/TimelineEvent.java  | 41 -
 .../records/timelineservice/TimelineMetric.java | 30 +++
 .../TestTimelineServiceRecords.java | 36 +++-
 5 files changed, 195 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d4a8f45/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c8c95c4..e9124dd 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -87,6 +87,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3706. Generalize native HBase writer for additional tables (Joep
 Rottinghuis via sjlee)
 
+YARN-3836. add equals and hashCode to TimelineEntity and other classes in
+the data model (Li Lu via sjlee)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d4a8f45/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index 60fba85..9ef2d90 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -31,11 +31,25 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
+/**
+ * The basic timeline entity data structure for timeline service v2. Timeline
+ * entity objects are not thread safe and should not be accessed concurrently.
+ * All collection members will be initialized into empty collections. Two
+ * timeline entities are equal iff. their type and id are identical.
+ *
+ * All non-primitive type, non-collection members will be initialized into 
null.
+ * User should set the type and id of a timeline entity to make it valid (can 
be
+ * checked by using the {@link #isValid()} method). Callers to the getters
+ * should perform null checks for non-primitive type, non-collection members.
+ *
+ * Callers are recommended not to alter the returned collection objects from 
the
+ * getters.
+ */
@XmlRootElement(name = "entity")
 @XmlAccessorType(XmlAccessType.NONE)
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
-public class TimelineEntity {
+public class TimelineEntity implements ComparableTimelineEntity {
   protected final static String SYSTEM_INFO_KEY_PREFIX = SYSTEM_INFO_;
 
   @XmlRootElement(name = "identifier")
@@ -77,6 +91,41 @@ public class TimelineEntity {
   type=' + type + '\'' +
   , id=' + id + '\'' + ];
 }
+
+@Override
+public int hashCode() {
+  final int prime = 31;
+  int result = 1;
+  result = prime * result + ((id == null) ? 0 : id.hashCode());
+  result =
+prime * result + ((type == null) ? 0 : type.hashCode());
+  return result;
+}
+
+@Override
+public boolean equals(Object obj) {
+  if (this == obj)
+return true;
+  if (!(obj instanceof Identifier)) {
+return false;
+  }
+  Identifier other = (Identifier) obj;
+  if (id == null) {
+if (other.getId() != null) {
+  return false;
+}
+  } else if (!id.equals(other.getId())) {
+return false;
+  }
+  if (type == null) {
+if (other.getType() != null) {
+  return false;
+}
+  } else if (!type.equals(other.getType())) {
+return false;
+  }
+  return true;
+}
   }
 
   private TimelineEntity real

hadoop git commit: YARN-3792. Test case failures in TestDistributedShell and some issue fixes related to ATSV2 (Naganarasimha G R via sjlee)

2015-06-22 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 8c036a14e -> 84f37f1c7


YARN-3792. Test case failures in TestDistributedShell and some issue fixes 
related to ATSV2 (Naganarasimha G R via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84f37f1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84f37f1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84f37f1c

Branch: refs/heads/YARN-2928
Commit: 84f37f1c7eefec6d139cbf091c50d6c06f734323
Parents: 8c036a1
Author: Sangjin Lee sj...@apache.org
Authored: Mon Jun 22 20:47:56 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Mon Jun 22 20:47:56 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 33 
 .../applications/distributedshell/Client.java   |  2 +-
 .../distributedshell/TestDistributedShell.java  | 89 +---
 .../TestDistributedShellWithNodeLabels.java |  9 +-
 .../client/api/impl/TimelineClientImpl.java |  8 ++
 .../application/ApplicationImpl.java|  4 +-
 .../monitor/ContainersMonitorImpl.java  | 15 ++--
 .../RMTimelineCollectorManager.java |  2 +-
 .../collector/NodeTimelineCollectorManager.java | 14 ---
 .../PerNodeTimelineCollectorsAuxService.java|  3 +-
 .../collector/TimelineCollectorManager.java |  2 +-
 11 files changed, 106 insertions(+), 75 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84f37f1c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a4ffa03..69e66c7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -35,9 +35,6 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-. Rename TimelineAggregator etc. to TimelineCollector. (Sangjin 
Lee
 via junping_du)
 
-YARN-3377. Fixed test failure in TestTimelineServiceClientIntegration.
-(Sangjin Lee via zjshen)
-
 YARN-3034. Implement RM starting its timeline collector. (Naganarasimha G R
 via junping_du)
 
@@ -61,27 +58,15 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3551. Consolidate data model change according to the backend
 implementation (Zhijie Shen via sjlee)
 
-YARN-3562. unit tests failures and issues found from findbug from earlier
-ATS checkins (Naganarasimha G R via sjlee)
-
 YARN-3134. Implemented Phoenix timeline writer to access HBase backend. (Li
 Lu via zjshen)
 
 YARN-3529. Added mini HBase cluster and Phoenix support to timeline service
 v2 unit tests. (Li Lu via zjshen)
 
-YARN-3634. TestMRTimelineEventHandling and TestApplication are broken. (
-Sangjin Lee via junping_du)
-
 YARN-3411. [Storage implementation] explore the native HBase write schema
 for storage (Vrushali C via sjlee)
 
-YARN-3726. Fix TestHBaseTimelineWriterImpl unit test failure by fixing its
-test data (Vrushali C via sjlee)
-
-YARN-3721. build is broken on YARN-2928 branch due to possible dependency
-cycle (Li Lu via sjlee)
-
 YARN-3044. Made RM write app, attempt and optional container lifecycle
 events to timeline service v2. (Naganarasimha G R via zjshen)
 
@@ -100,6 +85,24 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 
   BUG FIXES
 
+YARN-3377. Fixed test failure in TestTimelineServiceClientIntegration.
+(Sangjin Lee via zjshen)
+
+YARN-3562. unit tests failures and issues found from findbug from earlier
+ATS checkins (Naganarasimha G R via sjlee)
+
+YARN-3634. TestMRTimelineEventHandling and TestApplication are broken. (
+Sangjin Lee via junping_du)
+
+YARN-3726. Fix TestHBaseTimelineWriterImpl unit test failure by fixing its
+test data (Vrushali C via sjlee)
+
+YARN-3721. build is broken on YARN-2928 branch due to possible dependency
+cycle (Li Lu via sjlee)
+
+YARN-3792. Test case failures in TestDistributedShell and some issue fixes
+related to ATSV2 (Naganarasimha G R via sjlee)
+
 Trunk - Unreleased
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84f37f1c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications

[2/2] hadoop git commit: YARN-3706. Generalize native HBase writer for additional tables (Joep Rottinghuis via sjlee)

2015-06-18 Thread sjlee
YARN-3706. Generalize native HBase writer for additional tables (Joep 
Rottinghuis via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9137aeae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9137aeae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9137aeae

Branch: refs/heads/YARN-2928
Commit: 9137aeae0dec83f9eff40d12cae712dfd508c0c5
Parents: a1bb913
Author: Sangjin Lee sj...@apache.org
Authored: Thu Jun 18 10:49:20 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Jun 18 10:49:20 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../storage/EntityColumnDetails.java| 110 --
 .../storage/EntityColumnFamily.java |  95 -
 .../storage/HBaseTimelineWriterImpl.java| 114 +++---
 .../server/timelineservice/storage/Range.java   |  59 
 .../storage/TimelineEntitySchemaConstants.java  |  71 
 .../storage/TimelineSchemaCreator.java  | 134 +---
 .../timelineservice/storage/TimelineWriter.java |   3 +-
 .../storage/TimelineWriterUtils.java| 344 ---
 .../storage/common/BaseTable.java   | 118 +++
 .../common/BufferedMutatorDelegator.java|  73 
 .../timelineservice/storage/common/Column.java  |  59 
 .../storage/common/ColumnFamily.java|  34 ++
 .../storage/common/ColumnHelper.java| 247 +
 .../storage/common/ColumnPrefix.java|  83 +
 .../timelineservice/storage/common/Range.java   |  59 
 .../storage/common/Separator.java   | 303 
 .../common/TimelineEntitySchemaConstants.java   |  68 
 .../storage/common/TimelineWriterUtils.java | 127 +++
 .../storage/common/TypedBufferedMutator.java|  28 ++
 .../storage/common/package-info.java|  24 ++
 .../storage/entity/EntityColumn.java| 141 
 .../storage/entity/EntityColumnFamily.java  |  65 
 .../storage/entity/EntityColumnPrefix.java  | 212 
 .../storage/entity/EntityRowKey.java|  93 +
 .../storage/entity/EntityTable.java | 161 +
 .../storage/entity/package-info.java|  25 ++
 .../storage/TestHBaseTimelineWriterImpl.java| 252 --
 .../storage/common/TestSeparator.java   | 129 +++
 .../storage/common/TestTimelineWriterUtils.java |  29 ++
 30 files changed, 2301 insertions(+), 962 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9137aeae/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 040afea..197a154 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -93,6 +93,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via
 zjshen)
 
+YARN-3706. Generalize native HBase writer for additional tables (Joep
+Rottinghuis via sjlee)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9137aeae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
deleted file mode 100644
index 2894c41..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * License); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an AS IS BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied

[1/2] hadoop git commit: YARN-3706. Generalize native HBase writer for additional tables (Joep Rottinghuis via sjlee)

2015-06-18 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 a1bb9137a - 9137aeae0


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9137aeae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
new file mode 100644
index 000..ee57890
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
@@ -0,0 +1,303 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Used to separate row qualifiers, column qualifiers and compound fields.
+ */
+public enum Separator {
+
+  /**
+   * separator in key or column qualifier fields
+   */
+  QUALIFIERS(!, %0$),
+
+  /**
+   * separator in values, and/or compound key/column qualifier fields.
+   */
+  VALUES(?, %1$),
+
+  /**
+   * separator in values, often used to avoid having these in qualifiers and
+   * names. Note that if we use HTML form encoding through URLEncoder, we end 
up
+   * getting a + for a space, which may already occur in strings, so we don't
+   * want that.
+   */
+  SPACE( , %2$);
+
+  /**
+   * The string value of this separator.
+   */
+  private final String value;
+
+  /**
+   * The URLEncoded version of this separator
+   */
+  private final String encodedValue;
+
+  /**
+   * The byte representation of value.
+   */
+  private final byte[] bytes;
+
+  /**
+   * The value quoted so that it can be used as a safe regex
+   */
+  private final String quotedValue;
+
+  private static final byte[] EMPTY_BYTES = new byte[0];
+
+  /**
+   * @param value of the separator to use. Cannot be null or empty string.
+   * @param encodedValue choose something that isn't likely to occur in the 
data
+   *  itself. Cannot be null or empty string.
+   */
+  private Separator(String value, String encodedValue) {
+this.value = value;
+this.encodedValue = encodedValue;
+
+// validation
+if (value == null || value.length() == 0 || encodedValue == null
+|| encodedValue.length() == 0) {
+  throw new IllegalArgumentException(
+  Cannot create separator from null or empty string.);
+}
+
+this.bytes = Bytes.toBytes(value);
+this.quotedValue = Pattern.quote(value);
+  }
+
+  /**
+   * Used to make token safe to be used with this separator without collisions.
+   *
+   * @param token
+   * @return the token with any occurrences of this separator URLEncoded.
+   */
+  public String encode(String token) {
+if (token == null || token.length() == 0) {
+  // Nothing to replace
+  return token;
+}
+return token.replace(value, encodedValue);
+  }
+
+  /**
+   * @param token
+   * @return the token with any occurrences of the encoded separator replaced 
by
+   * the separator itself.
+   */
+  public String decode(String token) {
+if (token == null || token.length() == 0) {
+  // Nothing to replace
+  return token;
+}
+return token.replace(encodedValue, value);
+  }
+
+  /**
+   * Encode the given separators in the token with their encoding equivalent.
+   * This means that when encoding is already present in the token itself, this
+   * is not a reversible process. See also {@link #decode(String, 
Separator...)}
+   *
+   * @param token containing possible separators that need to be encoded.
+   * @param separators to be encoded in the token with their URLEncoding
+   *  

hadoop git commit: YARN-3721. build is broken on YARN-2928 branch due to possible dependency cycle (Li Lu via sjlee)

2015-05-28 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 4c0b6d739 - a9738ceb1


YARN-3721. build is broken on YARN-2928 branch due to possible dependency cycle 
(Li Lu via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9738ceb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9738ceb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9738ceb

Branch: refs/heads/YARN-2928
Commit: a9738ceb17b50cce8844fd42bb800c7f83f15caf
Parents: 4c0b6d7
Author: Sangjin Lee sj...@apache.org
Authored: Thu May 28 12:03:53 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu May 28 12:03:53 2015 -0700

--
 hadoop-project/pom.xml  | 97 ++--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../hadoop-yarn-server-timelineservice/pom.xml  |  1 -
 3 files changed, 53 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9738ceb/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 0889241..e2091ab 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -985,55 +985,58 @@
 /exclusions
   /dependency
 
-dependency
-  groupIdorg.apache.hbase/groupId
-  artifactIdhbase-client/artifactId
-  version${hbase.version}/version
-/dependency
-dependency
-  groupIdorg.apache.phoenix/groupId
-  artifactIdphoenix-core/artifactId
-  version${phoenix.version}/version
-  exclusions
-!-- Exclude jline from here --
-exclusion
-  artifactIdjline/artifactId
-  groupIdjline/groupId
-/exclusion
-  /exclusions
-/dependency
-dependency
-  groupIdorg.apache.phoenix/groupId
-  artifactIdphoenix-core/artifactId
-  typetest-jar/type
-  version${phoenix.version}/version
-  scopetest/scope
-/dependency
-dependency
-  groupIdorg.apache.hbase/groupId
-  artifactIdhbase-it/artifactId
-  version${hbase.version}/version
-  scopetest/scope
-  classifiertests/classifier
-/dependency
-dependency
-  groupIdorg.apache.hbase/groupId
-  artifactIdhbase-testing-util/artifactId
-  version${hbase.version}/version
-  scopetest/scope
-  optionaltrue/optional
-  exclusions
-exclusion
-  groupIdorg.jruby/groupId
-  artifactIdjruby-complete/artifactId
-/exclusion
-exclusion
+  dependency
+groupIdorg.apache.hbase/groupId
+artifactIdhbase-client/artifactId
+version${hbase.version}/version
+  /dependency
+  dependency
+groupIdorg.apache.phoenix/groupId
+artifactIdphoenix-core/artifactId
+version${phoenix.version}/version
+exclusions
+  !-- Exclude jline from here --
+  exclusion
+artifactIdjline/artifactId
+groupIdjline/groupId
+  /exclusion
+/exclusions
+  /dependency
+  dependency
+groupIdorg.apache.phoenix/groupId
+artifactIdphoenix-core/artifactId
+typetest-jar/type
+version${phoenix.version}/version
+scopetest/scope
+  /dependency
+  dependency
+groupIdorg.apache.hbase/groupId
+artifactIdhbase-it/artifactId
+version${hbase.version}/version
+scopetest/scope
+classifiertests/classifier
+  /dependency
+  dependency
+groupIdorg.apache.hbase/groupId
+artifactIdhbase-testing-util/artifactId
+version${hbase.version}/version
+scopetest/scope
+optionaltrue/optional
+exclusions
+  exclusion
+groupIdorg.jruby/groupId
+artifactIdjruby-complete/artifactId
+  /exclusion
+  exclusion
 groupIdorg.apache.hadoop/groupId
 artifactIdhadoop-hdfs/artifactId
-/exclusion
-  /exclusions
-/dependency
-
+  /exclusion
+  exclusion
+groupIdorg.apache.hadoop/groupId
+artifactIdhadoop-minicluster/artifactId
+  /exclusion
+/exclusions
+  /dependency
 /dependencies
   /dependencyManagement
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9738ceb/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index aa131a1..21de924 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -79,6 +79,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3726. Fix TestHBaseTimelineWriterImpl unit test failure by fixing its
 test data (Vrushali C via sjlee

hadoop git commit: YARN-3726. Fix TestHBaseTimelineWriterImpl unit test failure by fixing its test data (Vrushali C via sjlee)

2015-05-27 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 e19566a66 - 4c0b6d739


YARN-3726. Fix TestHBaseTimelineWriterImpl unit test failure by fixing its test 
data (Vrushali C via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c0b6d73
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c0b6d73
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c0b6d73

Branch: refs/heads/YARN-2928
Commit: 4c0b6d73914f2e249795deb292f508177ea54884
Parents: e19566a
Author: Sangjin Lee sj...@apache.org
Authored: Wed May 27 20:28:04 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Wed May 27 20:28:04 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  |  4 
 .../storage/TestHBaseTimelineWriterImpl.java | 15 ---
 2 files changed, 12 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c0b6d73/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 9ec9618..aa131a1 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -75,6 +75,10 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 
 YARN-3411. [Storage implementation] explore the native HBase write schema
 for storage (Vrushali C via sjlee)
+
+YARN-3726. Fix TestHBaseTimelineWriterImpl unit test failure by fixing its
+test data (Vrushali C via sjlee)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c0b6d73/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
index 48bacd6..f999b4d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineWriterImpl.java
@@ -121,12 +121,13 @@ public class TestHBaseTimelineWriterImpl {
 TimelineMetric m1 = new TimelineMetric();
 m1.setId(MAP_SLOT_MILLIS);
 MapLong, Number metricValues = new HashMapLong, Number();
-metricValues.put(1429741609000L, 1);
-metricValues.put(1429742609000L, 2);
-metricValues.put(1429743609000L, 3);
-metricValues.put(1429744609000L, 4);
-metricValues.put(1429745609000L, 500L);
-metricValues.put(1429746609000L, 600L);
+long ts = System.currentTimeMillis();
+metricValues.put(ts - 12, 1);
+metricValues.put(ts - 10, 2);
+metricValues.put(ts - 8, 3);
+metricValues.put(ts - 6, 4);
+metricValues.put(ts - 4, 500L);
+metricValues.put(ts - 2, 600L);
 m1.setType(Type.TIME_SERIES);
 m1.setValues(metricValues);
 metrics.add(m1);
@@ -216,7 +217,7 @@ public class TestHBaseTimelineWriterImpl {
   private void checkMetricsTimeseries(ListCell metricCells,
   TimelineMetric m1) throws IOException {
 MapLong, Number timeseries = m1.getValues();
-assertEquals(metricCells.size(), timeseries.size());
+assertEquals(timeseries.size(), metricCells.size());
 for (Cell c1 : metricCells) {
   assertTrue(timeseries.containsKey(c1.getTimestamp()));
   assertEquals(GenericObjectMapper.read(CellUtil.cloneValue(c1)),



hadoop git commit: YARN-3411. [Storage implementation] explore the native HBase write schema for storage (Vrushali C via sjlee)

2015-05-21 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 463e070a8 - 7a3068854


YARN-3411. [Storage implementation] explore the native HBase write schema for 
storage (Vrushali C via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a306885
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a306885
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a306885

Branch: refs/heads/YARN-2928
Commit: 7a3068854d27eadae1c57545988f5b2029bf119a
Parents: 463e070
Author: Sangjin Lee sj...@apache.org
Authored: Thu May 21 14:11:01 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu May 21 14:11:01 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../collector/TimelineCollectorManager.java |  19 +
 .../storage/EntityColumnDetails.java| 110 ++
 .../storage/EntityColumnFamily.java |  95 +
 .../storage/HBaseTimelineWriterImpl.java| 225 
 .../server/timelineservice/storage/Range.java   |  59 
 .../storage/TimelineEntitySchemaConstants.java  |  71 
 .../storage/TimelineSchemaCreator.java  | 231 +
 .../storage/TimelineWriterUtils.java| 344 +++
 .../storage/TestHBaseTimelineWriterImpl.java| 292 
 10 files changed, 1448 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a306885/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 808fdf7..975e3c6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -73,6 +73,8 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3634. TestMRTimelineEventHandling and TestApplication are broken. (
 Sangjin Lee via junping_du)
 
+YARN-3411. [Storage implementation] explore the native HBase write schema
+for storage (Vrushali C via sjlee)
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a306885/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
index 61fa1d7..7e2d4e0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
@@ -59,6 +59,13 @@ public class TimelineCollectorManager extends 
AbstractService {
 super.serviceInit(conf);
   }
 
+  @Override
+  protected void serviceStart() throws Exception {
+super.serviceStart();
+if (writer != null) {
+  writer.start();
+}
+  }
 
   // access to this map is synchronized with the map itself
   private final MapApplicationId, TimelineCollector collectors =
@@ -151,4 +158,16 @@ public class TimelineCollectorManager extends 
AbstractService {
 return collectors.containsKey(appId);
   }
 
+  @Override
+  protected void serviceStop() throws Exception {
+if (collectors != null  collectors.size()  1) {
+  for (TimelineCollector c : collectors.values()) {
+c.serviceStop();
+  }
+}
+if (writer != null) {
+  writer.close();
+}
+super.serviceStop();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a306885/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
new file mode 100644
index 000..2894c41
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn

hadoop git commit: YARN-3562. unit tests failures and issues found from findbug from earlier ATS checkins (Naganarasimha G R via sjlee)

2015-05-06 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 557a3950b - d4a23625b


YARN-3562. unit tests failures and issues found from findbug from earlier ATS 
checkins (Naganarasimha G R via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4a23625
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4a23625
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4a23625

Branch: refs/heads/YARN-2928
Commit: d4a23625b1e9a2c4cefd5fa68c28549ba6c1bc2e
Parents: 557a395
Author: Sangjin Lee sj...@apache.org
Authored: Wed May 6 20:31:50 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Wed May 6 20:31:50 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../server/resourcemanager/ResourceTrackerService.java  |  4 ++--
 .../timelineservice/RMTimelineCollectorManager.java |  2 +-
 .../yarn/server/resourcemanager/TestAppManager.java |  5 +
 .../server/resourcemanager/TestClientRMService.java | 12 +++-
 .../TestRMAppLogAggregationStatus.java  |  4 
 .../metrics/TestSystemMetricsPublisher.java |  2 +-
 .../resourcemanager/rmapp/TestRMAppTransitions.java |  5 +
 .../org/apache/hadoop/yarn/server/MiniYARNCluster.java  |  2 ++
 .../storage/FileSystemTimelineWriterImpl.java   |  7 +--
 10 files changed, 39 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4a23625/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8cafca6..c7f310d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -61,6 +61,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3551. Consolidate data model change according to the backend
 implementation (Zhijie Shen via sjlee)
 
+YARN-3562. unit tests failures and issues found from findbug from earlier
+ATS checkins (Naganarasimha G R via sjlee)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4a23625/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index b094a9d..8018ee0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
@@ -531,8 +531,8 @@ public class ResourceTrackerService extends AbstractService 
implements
 appId +  is not found in RMContext!);
   } else {
 String previousCollectorAddr = rmApp.getCollectorAddr();
-if (previousCollectorAddr == null ||
-previousCollectorAddr != collectorAddr) {
+if (previousCollectorAddr == null
+|| !previousCollectorAddr.equals(collectorAddr)) {
   // sending collector update event.
   RMAppCollectorUpdateEvent event =
   new RMAppCollectorUpdateEvent(appId, collectorAddr);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4a23625/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager.java
index 25e0e0f..7d1b657 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn

hadoop git commit: YARN-3551. Consolidate data model change according to the backend implementation (Zhijie Shen via sjlee)

2015-05-04 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 0b1e184cc - 557a3950b


YARN-3551. Consolidate data model change according to the backend 
implementation (Zhijie Shen via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/557a3950
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/557a3950
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/557a3950

Branch: refs/heads/YARN-2928
Commit: 557a3950bddc837469244835f5577899080115d8
Parents: 0b1e184
Author: Sangjin Lee sj...@apache.org
Authored: Mon May 4 16:10:20 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Mon May 4 16:10:20 2015 -0700

--
 .../mapred/TimelineServicePerformanceV2.java|   2 +-
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../records/timelineservice/TimelineEntity.java |  16 +--
 .../records/timelineservice/TimelineMetric.java | 131 +--
 .../TestTimelineServiceRecords.java |  81 +---
 .../monitor/ContainersMonitorImpl.java  |   5 +-
 .../TestTimelineServiceClientIntegration.java   |   6 +
 7 files changed, 146 insertions(+), 98 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/557a3950/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
index de46617..1c2e28d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
@@ -261,7 +261,7 @@ public class TimelineServicePerformanceV2 extends 
Configured implements Tool {
   // add a metric
   TimelineMetric metric = new TimelineMetric();
   metric.setId(foo_metric);
-  metric.setSingleData(123456789L);
+  metric.addValue(System.currentTimeMillis(), 123456789L);
   entity.addMetric(metric);
   // add a config
   entity.addConfig(foo, bar);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/557a3950/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3957b24..8cafca6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -58,6 +58,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3431. Sub resources of timeline entity needs to be passed to a 
separate 
 endpoint. (Zhijie Shen via junping_du)
 
+YARN-3551. Consolidate data model change according to the backend
+implementation (Zhijie Shen via sjlee)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/557a3950/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index 6cab753..3be7f52 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -80,7 +80,7 @@ public class TimelineEntity {
   private TimelineEntity real;
   private Identifier identifier;
   private HashMap<String, Object> info = new HashMap<>();
-  private HashMap<String, Object> configs = new HashMap<>();
+  private HashMap<String, String> configs = new HashMap<>();
   private Set<TimelineMetric> metrics = new HashSet<>();
   private Set<TimelineEvent> events = new HashSet<>();
   private HashMap<String, Set<String>> isRelatedToEntities = new HashMap<>();
@@ -213,7 +213,7 @@ public class TimelineEntity {
   // required by JAXB
   @InterfaceAudience.Private
   @XmlElement(name = "configs")
-  public HashMap<String, Object> getConfigsJAXB() {
+  public HashMap<String, String>

hadoop git commit: YARN-3390. Reuse TimelineCollectorManager for RM (Zhijie Shen via sjlee)

2015-04-24 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 5eeb2b156 -> 582211888


YARN-3390. Reuse TimelineCollectorManager for RM (Zhijie Shen via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58221188
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58221188
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58221188

Branch: refs/heads/YARN-2928
Commit: 58221188811e0f61d842dac89e1f4ad4fd8aa182
Parents: 5eeb2b1
Author: Sangjin Lee sj...@apache.org
Authored: Fri Apr 24 16:56:23 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Fri Apr 24 16:56:23 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../resourcemanager/RMActiveServiceContext.java |  13 +-
 .../server/resourcemanager/RMAppManager.java|   3 +-
 .../yarn/server/resourcemanager/RMContext.java  |   7 +-
 .../server/resourcemanager/RMContextImpl.java   |  12 +-
 .../server/resourcemanager/ResourceManager.java |  14 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  15 ++
 .../timelineservice/RMTimelineCollector.java| 111 
 .../RMTimelineCollectorManager.java |  75 ++
 .../TestTimelineServiceClientIntegration.java   |  12 +-
 .../collector/AppLevelTimelineCollector.java|   2 +-
 .../collector/NodeTimelineCollectorManager.java | 223 
 .../PerNodeTimelineCollectorsAuxService.java|  15 +-
 .../collector/TimelineCollector.java|   2 +-
 .../collector/TimelineCollectorManager.java | 259 +++
 .../collector/TimelineCollectorWebService.java  |  23 +-
 .../TestNMTimelineCollectorManager.java | 160 
 ...TestPerNodeTimelineCollectorsAuxService.java |  24 +-
 .../collector/TestTimelineCollectorManager.java | 160 
 19 files changed, 578 insertions(+), 554 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58221188/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a3ca475..408b8e6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -53,6 +53,8 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3391. Clearly define flow ID/ flow run / flow version in API and 
storage.
 (Zhijie Shen via junping_du)
 
+YARN-3390. Reuse TimelineCollectorManager for RM (Zhijie Shen via sjlee)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58221188/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
index 1d95204..00768ed 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
@@ -47,7 +47,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRen
 import 
org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
 import 
org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager;
-import 
org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineCollector;
+import 
org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineCollectorManager;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.SystemClock;
 
@@ -95,7 +95,7 @@ public class RMActiveServiceContext {
   private ApplicationMasterService applicationMasterService;
   private RMApplicationHistoryWriter rmApplicationHistoryWriter;
   private SystemMetricsPublisher systemMetricsPublisher;
-  private RMTimelineCollector timelineCollector;
+  private RMTimelineCollectorManager timelineCollectorManager;
 
   private RMNodeLabelsManager nodeLabelManager;
   private long epoch;
@@ -379,14 +379,15 @@ public class RMActiveServiceContext {
 
   @Private
   @Unstable
-  public RMTimelineCollector

<    5   6   7   8   9   10