hadoop git commit: MAPREDUCE-6673. Add a test example job that grows in memory usage over time (Karthik Kambatla via Haibo Chen)

2017-04-14 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8515d35bd -> d4f553d42


MAPREDUCE-6673. Add a test example job that grows in memory usage over time 
(Karthik Kambatla via Haibo Chen)

Change-Id: Iccfc8c67c38c526cc61726d87bfcbcf69ac36fea
(cherry picked from commit 25ac44709b4bbed78b607ea48021237b64e01b9f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4f553d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4f553d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4f553d4

Branch: refs/heads/branch-2
Commit: d4f553d42f428eb9f05d7b8a2a3b2f9e7903d138
Parents: 8515d35
Author: Haibo Chen 
Authored: Fri Apr 14 17:33:04 2017 -0700
Committer: Haibo Chen 
Committed: Fri Apr 14 17:37:37 2017 -0700

--
 .../hadoop/mapreduce/GrowingSleepJob.java   | 68 
 .../apache/hadoop/test/MapredTestDriver.java|  3 +
 2 files changed, 71 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4f553d4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
new file mode 100644
index 000..55740f7
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.util.ToolRunner;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+/**
+ * A sleep job whose mappers create 1MB buffer for every record.
+ */
+public class GrowingSleepJob extends SleepJob {
+  private static final Log LOG = LogFactory.getLog(GrowingSleepJob.class);
+
+  public static class GrowingSleepMapper extends SleepMapper {
+    private final int MB = 1024 * 1024;
+    private ArrayList<byte[]> bytes = new ArrayList<>();
+
+    @Override
+    public void map(IntWritable key, IntWritable value, Context context)
+        throws IOException, InterruptedException {
+      super.map(key, value, context);
+      long free = Runtime.getRuntime().freeMemory();
+      if (free > 32 * MB) {
+        LOG.info("Free memory = " + free +
+            " bytes. Creating 1 MB on the heap.");
+        bytes.add(new byte[MB]);
+      }
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    int res = ToolRunner.run(new Configuration(), new GrowingSleepJob(), args);
+    System.exit(res);
+  }
+
+  @Override
+  public Job createJob(int numMapper, int numReducer,
+                       long mapSleepTime, int mapSleepCount,
+                       long reduceSleepTime, int reduceSleepCount)
+      throws IOException {
+    Job job = super.createJob(numMapper, numReducer, mapSleepTime,
+        mapSleepCount, reduceSleepTime, reduceSleepCount);
+    job.setMapperClass(GrowingSleepMapper.class);
+    job.setJobName("Growing sleep job");
+    return job;
+  }
+}
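
For context, a sketch of how the new job might be driven (not part of this patch; the launcher class and argument values below are illustrative). Because GrowingSleepJob reuses SleepJob's option parsing, the usual sleep-job switches (-m, -r, -mt, -rt, -recordt) apply, and since the mapper allocates 1 MB per record, a long map sleep time split into short record sleeps grows the heap steadily:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.GrowingSleepJob;
    import org.apache.hadoop.util.ToolRunner;

    public class GrowingSleepJobLauncher {
      public static void main(String[] args) throws Exception {
        // 4 map tasks, 1 reduce task; each map sleeps 60s in 100ms records,
        // i.e. ~600 records per map and up to ~600 MB allocated, until free
        // memory drops under the 32 MB guard in GrowingSleepMapper.
        String[] jobArgs = {"-m", "4", "-r", "1",
            "-mt", "60000", "-rt", "1000", "-recordt", "100"};
        System.exit(ToolRunner.run(new Configuration(), new GrowingSleepJob(), jobArgs));
      }
    }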

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4f553d4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/test/MapredTestDriver.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/test/MapredTestDriver.java
 

hadoop git commit: MAPREDUCE-6673. Add a test example job that grows in memory usage over time (Karthik Kambatla via Haibo Chen)

2017-04-14 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0ac17dc64 -> 25ac44709


MAPREDUCE-6673. Add a test example job that grows in memory usage over time 
(Karthik Kambatla via Haibo Chen)

Change-Id: Iccfc8c67c38c526cc61726d87bfcbcf69ac36fea


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25ac4470
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25ac4470
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25ac4470

Branch: refs/heads/trunk
Commit: 25ac44709b4bbed78b607ea48021237b64e01b9f
Parents: 0ac17dc
Author: Haibo Chen 
Authored: Fri Apr 14 17:33:04 2017 -0700
Committer: Haibo Chen 
Committed: Fri Apr 14 17:36:03 2017 -0700

--
 .../hadoop/mapreduce/GrowingSleepJob.java   | 68 
 .../apache/hadoop/test/MapredTestDriver.java|  3 +
 2 files changed, 71 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25ac4470/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
new file mode 100644
index 000..55740f7
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.util.ToolRunner;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+/**
+ * A sleep job whose mappers create 1MB buffer for every record.
+ */
+public class GrowingSleepJob extends SleepJob {
+  private static final Log LOG = LogFactory.getLog(GrowingSleepJob.class);
+
+  public static class GrowingSleepMapper extends SleepMapper {
+    private final int MB = 1024 * 1024;
+    private ArrayList<byte[]> bytes = new ArrayList<>();
+
+    @Override
+    public void map(IntWritable key, IntWritable value, Context context)
+        throws IOException, InterruptedException {
+      super.map(key, value, context);
+      long free = Runtime.getRuntime().freeMemory();
+      if (free > 32 * MB) {
+        LOG.info("Free memory = " + free +
+            " bytes. Creating 1 MB on the heap.");
+        bytes.add(new byte[MB]);
+      }
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    int res = ToolRunner.run(new Configuration(), new GrowingSleepJob(), args);
+    System.exit(res);
+  }
+
+  @Override
+  public Job createJob(int numMapper, int numReducer,
+                       long mapSleepTime, int mapSleepCount,
+                       long reduceSleepTime, int reduceSleepCount)
+      throws IOException {
+    Job job = super.createJob(numMapper, numReducer, mapSleepTime,
+        mapSleepCount, reduceSleepTime, reduceSleepCount);
+    job.setMapperClass(GrowingSleepMapper.class);
+    job.setJobName("Growing sleep job");
+    return job;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25ac4470/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/test/MapredTestDriver.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/test/MapredTestDriver.java
 

hadoop git commit: HADOOP-14311. Add python2.7-dev to Dockerfile (Allen Wittenauer via asuresh)

2017-04-14 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 a61ec7c37 -> 5970e8297


HADOOP-14311. Add python2.7-dev to Dockerfile (Allen Wittenauer via asuresh)

(cherry picked from commit 0ac17dc644c0429ff8a6f743bf9d3ecdd7458e58)
(cherry picked from commit 8515d35bd5424d0cf27e44d3618c3ea5ae7ea969)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5970e829
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5970e829
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5970e829

Branch: refs/heads/branch-2.8
Commit: 5970e8297d907f77ff982890dcac079f4281cef9
Parents: a61ec7c
Author: Arun Suresh 
Authored: Fri Apr 14 16:56:16 2017 -0700
Committer: Arun Suresh 
Committed: Fri Apr 14 16:59:25 2017 -0700

--
 dev-support/docker/Dockerfile | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5970e829/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index b7a6bce..39ace7c 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -59,6 +59,7 @@ RUN apt-get -q update && apt-get -q install 
--no-install-recommends -y \
 protobuf-c-compiler \
 python \
 python2.7 \
+python2.7-dev \
 python-pip \
 rsync \
 snappy \





hadoop git commit: HADOOP-14311. Add python2.7-dev to Dockerfile (Allen Wittenauer via asuresh)

2017-04-14 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1699b2052 -> 8515d35bd


HADOOP-14311. Add python2.7-dev to Dockerfile (Allen Wittenauer via asuresh)

(cherry picked from commit 0ac17dc644c0429ff8a6f743bf9d3ecdd7458e58)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8515d35b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8515d35b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8515d35b

Branch: refs/heads/branch-2
Commit: 8515d35bd5424d0cf27e44d3618c3ea5ae7ea969
Parents: 1699b20
Author: Arun Suresh 
Authored: Fri Apr 14 16:56:16 2017 -0700
Committer: Arun Suresh 
Committed: Fri Apr 14 16:58:00 2017 -0700

--
 dev-support/docker/Dockerfile | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8515d35b/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index b7a6bce..39ace7c 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -59,6 +59,7 @@ RUN apt-get -q update && apt-get -q install 
--no-install-recommends -y \
 protobuf-c-compiler \
 python \
 python2.7 \
+python2.7-dev \
 python-pip \
 rsync \
 snappy \





hadoop git commit: HADOOP-14311. Add python2.7-dev to Dockerfile (Allen Wittenauer via asuresh)

2017-04-14 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 416880550 -> 0ac17dc64


HADOOP-14311. Add python2.7-dev to Dockerfile (Allen Wittenauer via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ac17dc6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ac17dc6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ac17dc6

Branch: refs/heads/trunk
Commit: 0ac17dc644c0429ff8a6f743bf9d3ecdd7458e58
Parents: 4168805
Author: Arun Suresh 
Authored: Fri Apr 14 16:56:16 2017 -0700
Committer: Arun Suresh 
Committed: Fri Apr 14 16:56:16 2017 -0700

--
 dev-support/docker/Dockerfile | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ac17dc6/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index a135c61..f939b1d 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -58,6 +58,7 @@ RUN apt-get -q update && apt-get -q install 
--no-install-recommends -y \
 protobuf-c-compiler \
 python \
 python2.7 \
+python2.7-dev \
 python-pip \
 rsync \
 snappy \





hadoop git commit: YARN-6480. Timeout is too aggressive for TestAMRestart.testPreemptedAMRestartOnRMRestart. Contributed by Eric Badger

2017-04-14 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.1 95391fc08 -> 33295531f


YARN-6480. Timeout is too aggressive for 
TestAMRestart.testPreemptedAMRestartOnRMRestart. Contributed by Eric Badger

(cherry picked from commit 416880550214e58f2284a045ad0c96ba4aa78ea8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33295531
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33295531
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33295531

Branch: refs/heads/branch-2.8.1
Commit: 33295531f7cfee62698afde29d272d21b2ca776d
Parents: 95391fc
Author: Jason Lowe 
Authored: Fri Apr 14 17:15:48 2017 -0500
Committer: Jason Lowe 
Committed: Fri Apr 14 17:18:37 2017 -0500

--
 .../server/resourcemanager/applicationsmanager/TestAMRestart.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33295531/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
index 6980379..fd478f6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
@@ -480,7 +480,7 @@ public class TestAMRestart {
   // Test RM restarts after AM container is preempted, new RM should not count
   // AM preemption failure towards the max-retry-account and should be able to
   // re-launch the AM.
-  @Test(timeout = 20000)
+  @Test(timeout = 60000)
   public void testPreemptedAMRestartOnRMRestart() throws Exception {
 YarnConfiguration conf = new YarnConfiguration();
 conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,





hadoop git commit: YARN-6480. Timeout is too aggressive for TestAMRestart.testPreemptedAMRestartOnRMRestart. Contributed by Eric Badger

2017-04-14 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 d27f74d91 -> a61ec7c37


YARN-6480. Timeout is too aggressive for 
TestAMRestart.testPreemptedAMRestartOnRMRestart. Contributed by Eric Badger

(cherry picked from commit 416880550214e58f2284a045ad0c96ba4aa78ea8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a61ec7c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a61ec7c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a61ec7c3

Branch: refs/heads/branch-2.8
Commit: a61ec7c37475ffd82f5099967ec4eedde3657734
Parents: d27f74d
Author: Jason Lowe 
Authored: Fri Apr 14 17:15:48 2017 -0500
Committer: Jason Lowe 
Committed: Fri Apr 14 17:18:08 2017 -0500

--
 .../server/resourcemanager/applicationsmanager/TestAMRestart.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a61ec7c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
index 6980379..fd478f6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
@@ -480,7 +480,7 @@ public class TestAMRestart {
   // Test RM restarts after AM container is preempted, new RM should not count
   // AM preemption failure towards the max-retry-account and should be able to
   // re-launch the AM.
-  @Test(timeout = 20000)
+  @Test(timeout = 60000)
   public void testPreemptedAMRestartOnRMRestart() throws Exception {
 YarnConfiguration conf = new YarnConfiguration();
 conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,





hadoop git commit: YARN-6480. Timeout is too aggressive for TestAMRestart.testPreemptedAMRestartOnRMRestart. Contributed by Eric Badger

2017-04-14 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 53da65ef8 -> 1699b2052


YARN-6480. Timeout is too aggressive for 
TestAMRestart.testPreemptedAMRestartOnRMRestart. Contributed by Eric Badger

(cherry picked from commit 416880550214e58f2284a045ad0c96ba4aa78ea8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1699b205
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1699b205
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1699b205

Branch: refs/heads/branch-2
Commit: 1699b20526e58b25f906ab083dc1919025fa35c2
Parents: 53da65e
Author: Jason Lowe 
Authored: Fri Apr 14 17:15:48 2017 -0500
Committer: Jason Lowe 
Committed: Fri Apr 14 17:17:46 2017 -0500

--
 .../server/resourcemanager/applicationsmanager/TestAMRestart.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1699b205/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
index 4fa8287..f5da5b5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
@@ -490,7 +490,7 @@ public class TestAMRestart {
   // Test RM restarts after AM container is preempted, new RM should not count
   // AM preemption failure towards the max-retry-account and should be able to
   // re-launch the AM.
-  @Test(timeout = 20000)
+  @Test(timeout = 60000)
   public void testPreemptedAMRestartOnRMRestart() throws Exception {
 YarnConfiguration conf = new YarnConfiguration();
 conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,





hadoop git commit: YARN-6480. Timeout is too aggressive for TestAMRestart.testPreemptedAMRestartOnRMRestart. Contributed by Eric Badger

2017-04-14 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8a1d7480f -> 416880550


YARN-6480. Timeout is too aggressive for 
TestAMRestart.testPreemptedAMRestartOnRMRestart. Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41688055
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41688055
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41688055

Branch: refs/heads/trunk
Commit: 416880550214e58f2284a045ad0c96ba4aa78ea8
Parents: 8a1d748
Author: Jason Lowe 
Authored: Fri Apr 14 17:15:48 2017 -0500
Committer: Jason Lowe 
Committed: Fri Apr 14 17:15:48 2017 -0500

--
 .../server/resourcemanager/applicationsmanager/TestAMRestart.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41688055/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
index 4fa8287..f5da5b5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
@@ -490,7 +490,7 @@ public class TestAMRestart {
   // Test RM restarts after AM container is preempted, new RM should not count
   // AM preemption failure towards the max-retry-account and should be able to
   // re-launch the AM.
-  @Test(timeout = 20000)
+  @Test(timeout = 60000)
   public void testPreemptedAMRestartOnRMRestart() throws Exception {
 YarnConfiguration conf = new YarnConfiguration();
 conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,





hadoop git commit: YARN-6433. Only accessible cgroup mount directories should be selected for a controller. (Miklos Szegedi via kasha)

2017-04-14 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c411adac3 -> 53da65ef8


YARN-6433. Only accessible cgroup mount directories should be selected for a 
controller. (Miklos Szegedi via kasha)

(cherry picked from commit 8a1d7480f73906d8e0342690ec6c6b008d6de21b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53da65ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53da65ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53da65ef

Branch: refs/heads/branch-2
Commit: 53da65ef8b1d314e81b578ee1e09a6d29da1c994
Parents: c411ada
Author: Karthik Kambatla 
Authored: Fri Apr 14 15:07:14 2017 -0700
Committer: Karthik Kambatla 
Committed: Fri Apr 14 15:07:54 2017 -0700

--
 .../containermanager/linux/resources/CGroupsHandlerImpl.java  | 7 ++-
 .../linux/resources/TestCGroupsHandlerImpl.java   | 5 +
 2 files changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53da65ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index 0b29abc..d5295c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -236,7 +236,12 @@ class CGroupsHandlerImpl implements CGroupsHandler {
       Map<String, Set<String>> entries) {
     for (Map.Entry<String, Set<String>> e : entries.entrySet()) {
       if (e.getValue().contains(controller)) {
-        return e.getKey();
+        if (new File(e.getKey()).canRead()) {
+          return e.getKey();
+        } else {
+          LOG.warn(String.format(
+              "Skipping inaccessible cgroup mount point %s", e.getKey()));
+        }
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53da65ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
index 38dc34f..4c0829e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
@@ -252,6 +252,10 @@ public class TestCGroupsHandlerImpl {
 String cpuMtabContent =
 "none " + parentDir.getAbsolutePath()
 + "/cpu cgroup rw,relatime,cpu 0 0\n";
+// Mark an empty directory called 'cp' cgroup. It is processed before 'cpu'
+String cpuMtabContentMissing =
+"none " + parentDir.getAbsolutePath()
++ "/cp cgroup rw,relatime,cpu 0 0\n";
 String blkioMtabContent =
 "none " + parentDir.getAbsolutePath()
 + "/blkio cgroup rw,relatime,blkio 0 0\n";
@@ -264,6 +268,7 @@ public class TestCGroupsHandlerImpl {
   }
 }
 FileWriter mtabWriter = new FileWriter(mockMtab.getAbsoluteFile());
+mtabWriter.write(cpuMtabContentMissing);
 mtabWriter.write(cpuMtabContent);
 mtabWriter.write(blkioMtabContent);
 mtabWriter.close();
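
As a side note, a minimal standalone sketch of the selection rule this change introduces (illustrative only, not taken from the patch): among the candidate mount points found for a controller, pick the first one that is actually readable and skip the rest with a warning, which is why the test now lists an unreadable 'cp' entry ahead of the real 'cpu' mount.

    import java.io.File;
    import java.util.Arrays;
    import java.util.List;

    public class ReadableMountSelector {
      /** Returns the first readable path, or null if none is accessible. */
      static String selectReadableMount(List<String> candidateMounts) {
        for (String path : candidateMounts) {
          if (new File(path).canRead()) {
            return path;
          }
          System.err.println("Skipping inaccessible cgroup mount point " + path);
        }
        return null;
      }

      public static void main(String[] args) {
        // e.g. an unreadable "/cp" entry listed before the real "/cpu" mount,
        // mirroring the mock mtab layout used in TestCGroupsHandlerImpl
        System.out.println(selectReadableMount(
            Arrays.asList("/sys/fs/cgroup/cp", "/sys/fs/cgroup/cpu")));
      }
    }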



hadoop git commit: YARN-6433. Only accessible cgroup mount directories should be selected for a controller. (Miklos Szegedi via kasha)

2017-04-14 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk a41f8dd58 -> 8a1d7480f


YARN-6433. Only accessible cgroup mount directories should be selected for a 
controller. (Miklos Szegedi via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a1d7480
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a1d7480
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a1d7480

Branch: refs/heads/trunk
Commit: 8a1d7480f73906d8e0342690ec6c6b008d6de21b
Parents: a41f8dd
Author: Karthik Kambatla 
Authored: Fri Apr 14 15:07:14 2017 -0700
Committer: Karthik Kambatla 
Committed: Fri Apr 14 15:07:14 2017 -0700

--
 .../containermanager/linux/resources/CGroupsHandlerImpl.java  | 7 ++-
 .../linux/resources/TestCGroupsHandlerImpl.java   | 5 +
 2 files changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a1d7480/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index 0b29abc..d5295c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -236,7 +236,12 @@ class CGroupsHandlerImpl implements CGroupsHandler {
       Map<String, Set<String>> entries) {
     for (Map.Entry<String, Set<String>> e : entries.entrySet()) {
       if (e.getValue().contains(controller)) {
-        return e.getKey();
+        if (new File(e.getKey()).canRead()) {
+          return e.getKey();
+        } else {
+          LOG.warn(String.format(
+              "Skipping inaccessible cgroup mount point %s", e.getKey()));
+        }
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a1d7480/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
index 38dc34f..4c0829e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
@@ -252,6 +252,10 @@ public class TestCGroupsHandlerImpl {
 String cpuMtabContent =
 "none " + parentDir.getAbsolutePath()
 + "/cpu cgroup rw,relatime,cpu 0 0\n";
+// Mark an empty directory called 'cp' cgroup. It is processed before 'cpu'
+String cpuMtabContentMissing =
+"none " + parentDir.getAbsolutePath()
++ "/cp cgroup rw,relatime,cpu 0 0\n";
 String blkioMtabContent =
 "none " + parentDir.getAbsolutePath()
 + "/blkio cgroup rw,relatime,blkio 0 0\n";
@@ -264,6 +268,7 @@ public class TestCGroupsHandlerImpl {
   }
 }
 FileWriter mtabWriter = new FileWriter(mockMtab.getAbsoluteFile());
+mtabWriter.write(cpuMtabContentMissing);
 mtabWriter.write(cpuMtabContent);
 mtabWriter.write(blkioMtabContent);
 mtabWriter.close();





hadoop git commit: HADOOP-11794. Enable distcp to copy blocks in parallel. Contributed by Yongjun Zhang, Wei-Chiu Chuang, Xiao Chen, Rosie Li.

2017-04-14 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 157a9f310 -> c411adac3


HADOOP-11794. Enable distcp to copy blocks in parallel. Contributed by Yongjun 
Zhang, Wei-Chiu Chuang, Xiao Chen, Rosie Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c411adac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c411adac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c411adac

Branch: refs/heads/branch-2
Commit: c411adac32cda42433b15979253248336487d977
Parents: 157a9f3
Author: Yongjun Zhang 
Authored: Fri Apr 14 10:14:02 2017 -0700
Committer: Yongjun Zhang 
Committed: Fri Apr 14 10:14:02 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  22 +-
 .../org/apache/hadoop/tools/CopyListing.java|  37 +-
 .../hadoop/tools/CopyListingFileStatus.java |  87 -
 .../java/org/apache/hadoop/tools/DistCp.java|  52 +++
 .../apache/hadoop/tools/DistCpOptionSwitch.java |  10 +
 .../org/apache/hadoop/tools/DistCpOptions.java  |  22 +-
 .../org/apache/hadoop/tools/OptionsParser.java  |  36 +-
 .../apache/hadoop/tools/SimpleCopyListing.java  |  83 +++--
 .../hadoop/tools/mapred/CopyCommitter.java  | 174 -
 .../apache/hadoop/tools/mapred/CopyMapper.java  |  40 +-
 .../tools/mapred/RetriableFileCopyCommand.java  |  26 +-
 .../tools/mapred/UniformSizeInputFormat.java|   5 +-
 .../apache/hadoop/tools/util/DistCpUtils.java   | 111 +-
 .../src/site/markdown/DistCp.md.vm  |   1 +
 .../apache/hadoop/tools/TestDistCpSystem.java   | 368 +--
 .../apache/hadoop/tools/TestOptionsParser.java  |   2 +-
 .../hadoop/tools/mapred/TestCopyCommitter.java  |   5 +-
 17 files changed, 971 insertions(+), 110 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c411adac/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index a8fff44..5cb2195 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -841,7 +841,27 @@ public class DFSTestUtil {
   out.write(toAppend);
 }
   }
-  
+
+  /**
+   * Append specified length of bytes to a given file, starting with new block.
+   * @param fs The file system
+   * @param p Path of the file to append
+   * @param length Length of bytes to append to the file
+   * @throws IOException
+   */
+  public static void appendFileNewBlock(DistributedFileSystem fs,
+      Path p, int length) throws IOException {
+    assert fs.exists(p);
+    assert length >= 0;
+    byte[] toAppend = new byte[length];
+    Random random = new Random();
+    random.nextBytes(toAppend);
+    try (FSDataOutputStream out = fs.append(p,
+        EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null)) {
+      out.write(toAppend);
+    }
+  }
+
   /**
* @return url content as string (UTF-8 encoding assumed)
*/
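
For illustration only (not part of the change set): a rough sketch of how the new appendFileNewBlock helper can be used to produce a multi-block file, which is the kind of input the parallel block copy needs to split. The mini-cluster setup, path, and sizes below are assumptions made for the sketch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class AppendNewBlockExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
          cluster.waitActive();
          DistributedFileSystem fs = cluster.getFileSystem();
          Path file = new Path("/tmp/multiblock");
          DFSTestUtil.createFile(fs, file, 1024, (short) 1, 0L);  // first block
          DFSTestUtil.appendFileNewBlock(fs, file, 1024);         // starts a second block
          System.out.println("file length is now "
              + fs.getFileStatus(file).getLen() + " bytes across two blocks");
        } finally {
          cluster.shutdown();
        }
      }
    }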

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c411adac/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
index 481aa61..9ebf9d2 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
@@ -145,12 +145,22 @@ public abstract class CopyListing extends Configured {
 Configuration config = getConf();
 FileSystem fs = pathToListFile.getFileSystem(config);
 
-Path sortedList = DistCpUtils.sortListing(fs, config, pathToListFile);
+final boolean splitLargeFile = options.splitLargeFile();
+
+// When splitLargeFile is enabled, we don't randomize the copylist
+// earlier, so we don't do the sorting here. For a file that has
+// multiple entries due to split, we check here that their
+//  is continuous.
+//
+Path checkPath = splitLargeFile?
+pathToListFile : DistCpUtils.sortListing(fs, config, pathToListFile);
 
 SequenceFile.Reader reader = new SequenceFile.Reader(
-  config, SequenceFile.Reader.file(sortedList));
+  config, SequenceFile.Reader.file(checkPath));
 try {
   Text lastKey = new 

hadoop git commit: YARN-5994. TestCapacityScheduler.testAMLimitUsage fails intermittently. Contributed by Eric Badger

2017-04-14 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 ecaedca5c -> d27f74d91


YARN-5994. TestCapacityScheduler.testAMLimitUsage fails intermittently. 
Contributed by Eric Badger

(cherry picked from commit 157a9f310429f6be71d2d3e2bce42d0b36e9a1be)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d27f74d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d27f74d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d27f74d9

Branch: refs/heads/branch-2.8
Commit: d27f74d915072d297fbf69111a7db35470132087
Parents: ecaedca
Author: Eric Payne 
Authored: Fri Apr 14 10:53:09 2017 -0500
Committer: Eric Payne 
Committed: Fri Apr 14 11:54:30 2017 -0500

--
 .../scheduler/capacity/TestCapacityScheduler.java  | 17 ++---
 1 file changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d27f74d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 925e7f8..bd0691c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.LocalConfigurationProvider;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
@@ -150,6 +151,7 @@ import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
+import com.google.common.base.Supplier;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
@@ -3159,7 +3161,7 @@ public class TestCapacityScheduler {
 Assert.assertEquals(queueInfoB.getDefaultNodeLabelExpression(), "y");
   }
 
-  @Test(timeout = 30000)
+  @Test(timeout = 60000)
   public void testAMLimitUsage() throws Exception {
 
 CapacitySchedulerConfiguration config =
@@ -3287,11 +3289,12 @@ public class TestCapacityScheduler {
   private void verifyAMLimitForLeafQueue(CapacitySchedulerConfiguration config)
   throws Exception {
 MockRM rm = setUpMove(config);
-rm.registerNode("127.0.0.1:1234", 2 * GB);
+final int nodeMemory = 4 * GB;
+rm.registerNode("127.0.0.1:1234", nodeMemory);
 
 String queueName = "a1";
 String userName = "user_0";
-ResourceScheduler scheduler = rm.getRMContext().getScheduler();
+final ResourceScheduler scheduler = rm.getRMContext().getScheduler();
 LeafQueue queueA =
 (LeafQueue) ((CapacityScheduler) scheduler).getQueue(queueName);
 Resource amResourceLimit = queueA.getAMResourceLimit();
@@ -3303,6 +3306,14 @@ public class TestCapacityScheduler {
 Resource.newInstance(amResourceLimit.getMemorySize() + 2048,
 amResourceLimit.getVirtualCores() + 1);
 
+// Wait for the scheduler to be updated with new node capacity
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return scheduler.getMaximumResourceCapability().getMemorySize()
+            == nodeMemory;
+      }
+    }, 100, 60 * 1000);
+
 rm.submitApp(amResource1, "app-1", userName, null, queueName);
 
 rm.submitApp(amResource2, "app-2", userName, null, queueName);
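
For readers unfamiliar with the helper, GenericTestUtils.waitFor is essentially a poll-until-true loop. A self-contained sketch (illustrative, not the Hadoop implementation) follows; the condition here stands in for "the scheduler's maximum capability reflects the 4 GB node" that the test waits for before submitting the apps.

    import java.util.concurrent.TimeoutException;
    import java.util.function.BooleanSupplier;

    public class WaitFor {
      // Check a condition every checkEveryMillis; fail after waitForMillis.
      static void waitFor(BooleanSupplier check, long checkEveryMillis,
          long waitForMillis) throws InterruptedException, TimeoutException {
        long deadline = System.currentTimeMillis() + waitForMillis;
        while (!check.getAsBoolean()) {
          if (System.currentTimeMillis() > deadline) {
            throw new TimeoutException(
                "Condition not met within " + waitForMillis + " ms");
          }
          Thread.sleep(checkEveryMillis);
        }
      }

      public static void main(String[] args) throws Exception {
        long start = System.currentTimeMillis();
        // stand-in condition: becomes true after roughly half a second
        waitFor(() -> System.currentTimeMillis() - start > 500, 100, 60_000);
        System.out.println("condition became true; safe to proceed");
      }
    }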





hadoop git commit: YARN-5994. TestCapacityScheduler.testAMLimitUsage fails intermittently. Contributed by Eric Badger (cherry-picked from commit a41f8dd58e27d8835fbb64eeaba5d7416df0499d)

2017-04-14 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 575625628 -> 157a9f310


YARN-5994. TestCapacityScheduler.testAMLimitUsage fails intermittently. 
Contributed by Eric Badger
(cherry-picked from commit a41f8dd58e27d8835fbb64eeaba5d7416df0499d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/157a9f31
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/157a9f31
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/157a9f31

Branch: refs/heads/branch-2
Commit: 157a9f310429f6be71d2d3e2bce42d0b36e9a1be
Parents: 5756256
Author: Eric Payne 
Authored: Fri Apr 14 10:53:09 2017 -0500
Committer: Eric Payne 
Committed: Fri Apr 14 11:04:18 2017 -0500

--
 .../scheduler/capacity/TestCapacityScheduler.java  | 17 ++---
 1 file changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/157a9f31/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index c1255d8..48dddf9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
+import com.google.common.base.Supplier;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
@@ -29,6 +30,7 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.LocalConfigurationProvider;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
@@ -3465,7 +3467,7 @@ public class TestCapacityScheduler {
 Assert.assertEquals(queueInfoB.getDefaultNodeLabelExpression(), "y");
   }
 
-  @Test(timeout = 30000)
+  @Test(timeout = 60000)
   public void testAMLimitUsage() throws Exception {
 
 CapacitySchedulerConfiguration config =
@@ -3593,11 +3595,12 @@ public class TestCapacityScheduler {
   private void verifyAMLimitForLeafQueue(CapacitySchedulerConfiguration config)
   throws Exception {
 MockRM rm = setUpMove(config);
-rm.registerNode("127.0.0.1:1234", 2 * GB);
+final int nodeMemory = 4 * GB;
+rm.registerNode("127.0.0.1:1234", nodeMemory);
 
 String queueName = "a1";
 String userName = "user_0";
-ResourceScheduler scheduler = rm.getRMContext().getScheduler();
+final ResourceScheduler scheduler = rm.getRMContext().getScheduler();
 LeafQueue queueA =
 (LeafQueue) ((CapacityScheduler) scheduler).getQueue(queueName);
 Resource amResourceLimit = queueA.getAMResourceLimit();
@@ -3609,6 +3612,14 @@ public class TestCapacityScheduler {
 Resource.newInstance(amResourceLimit.getMemorySize() + 2048,
 amResourceLimit.getVirtualCores() + 1);
 
+// Wait for the scheduler to be updated with new node capacity
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return scheduler.getMaximumResourceCapability().getMemorySize()
+            == nodeMemory;
+      }
+    }, 100, 60 * 1000);
+
 rm.submitApp(amResource1, "app-1", userName, null, queueName);
 
 rm.submitApp(amResource2, "app-2", userName, null, queueName);





hadoop git commit: YARN-5994. TestCapacityScheduler.testAMLimitUsage fails intermittently. Contributed by Eric Badger

2017-04-14 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0cab57223 -> a41f8dd58


YARN-5994. TestCapacityScheduler.testAMLimitUsage fails intermittently. 
Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a41f8dd5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a41f8dd5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a41f8dd5

Branch: refs/heads/trunk
Commit: a41f8dd58e27d8835fbb64eeaba5d7416df0499d
Parents: 0cab572
Author: Eric Payne 
Authored: Fri Apr 14 10:53:09 2017 -0500
Committer: Eric Payne 
Committed: Fri Apr 14 10:53:09 2017 -0500

--
 .../scheduler/capacity/TestCapacityScheduler.java| 15 +--
 1 file changed, 13 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a41f8dd5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 447ee3d..bf1f6eb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -40,6 +40,7 @@ import java.util.Set;
 import java.util.concurrent.BrokenBarrierException;
 import java.util.concurrent.CyclicBarrier;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -48,6 +49,7 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.LocalConfigurationProvider;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
@@ -3626,7 +3628,7 @@ public class TestCapacityScheduler {
 Assert.assertEquals(queueInfoB.getDefaultNodeLabelExpression(), "y");
   }
 
-  @Test(timeout = 30000)
+  @Test(timeout = 60000)
   public void testAMLimitUsage() throws Exception {
 
 CapacitySchedulerConfiguration config =
@@ -3754,7 +3756,8 @@ public class TestCapacityScheduler {
   private void verifyAMLimitForLeafQueue(CapacitySchedulerConfiguration config)
   throws Exception {
 MockRM rm = setUpMove(config);
-rm.registerNode("127.0.0.1:1234", 2 * GB);
+int nodeMemory = 4 * GB;
+rm.registerNode("127.0.0.1:1234", nodeMemory);
 
 String queueName = "a1";
 String userName = "user_0";
@@ -3770,6 +3773,14 @@ public class TestCapacityScheduler {
 Resource.newInstance(amResourceLimit.getMemorySize() + 2048,
 amResourceLimit.getVirtualCores() + 1);
 
+// Wait for the scheduler to be updated with new node capacity
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return scheduler.getMaximumResourceCapability().getMemorySize()
+            == nodeMemory;
+      }
+    }, 100, 60 * 1000);
+
 rm.submitApp(amResource1, "app-1", userName, null, queueName);
 
 rm.submitApp(amResource2, "app-2", userName, null, queueName);





hadoop git commit: HDFS-10506. OIV's ReverseXML processor cannot reconstruct some snapshot details. Contributed by Akira Ajisaka.

2017-04-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.1 e5c61a1a9 -> 95391fc08


HDFS-10506. OIV's ReverseXML processor cannot reconstruct some snapshot 
details. Contributed by Akira Ajisaka.

(cherry picked from commit 14414705f79495eda11e302f38c792128fe0182b)
(cherry picked from commit ecaedca5c4ee4e2d8112b153f27d7938b5d4d012)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95391fc0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95391fc0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95391fc0

Branch: refs/heads/branch-2.8.1
Commit: 95391fc083b19369e03b967b204e12b8c7c9aa29
Parents: e5c61a1
Author: Wei-Chiu Chuang 
Authored: Fri Mar 24 08:43:14 2017 -0700
Committer: Akira Ajisaka 
Committed: Fri Apr 14 21:12:14 2017 +0900

--
 .../OfflineImageReconstructor.java  | 84 ++--
 .../offlineImageViewer/PBImageXmlWriter.java| 34 ++--
 .../TestOfflineImageViewer.java | 15 +++-
 3 files changed, 103 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95391fc0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
index 137ceff..1f629b2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
@@ -568,6 +568,13 @@ class OfflineImageReconstructor {
   private void processFileXml(Node node, INodeSection.INode.Builder inodeBld)
   throws IOException {
 inodeBld.setType(INodeSection.INode.Type.FILE);
+INodeSection.INodeFile.Builder bld = createINodeFileBuilder(node);
+inodeBld.setFile(bld);
+// Will check remaining keys and serialize in processINodeXml
+  }
+
+  private INodeSection.INodeFile.Builder createINodeFileBuilder(Node node)
+  throws IOException {
 INodeSection.INodeFile.Builder bld = INodeSection.INodeFile.newBuilder();
 Integer ival = node.removeChildInt(SECTION_REPLICATION);
 if (ival != null) {
@@ -596,24 +603,7 @@ class OfflineImageReconstructor {
 if (block == null) {
   break;
 }
-        HdfsProtos.BlockProto.Builder blockBld =
-            HdfsProtos.BlockProto.newBuilder();
-        Long id = block.removeChildLong(SECTION_ID);
-        if (id == null) {
-          throw new IOException("<block> found without <id>");
-        }
-        blockBld.setBlockId(id);
-        Long genstamp = block.removeChildLong(INODE_SECTION_GEMSTAMP);
-        if (genstamp == null) {
-          throw new IOException("<block> found without <genstamp>");
-        }
-        blockBld.setGenStamp(genstamp);
-        Long numBytes = block.removeChildLong(INODE_SECTION_NUM_BYTES);
-        if (numBytes == null) {
-          throw new IOException("<block> found without <numBytes>");
-        }
-        blockBld.setNumBytes(numBytes);
-        bld.addBlocks(blockBld);
+        bld.addBlocks(createBlockBuilder(block));
   }
 }
 Node fileUnderConstruction =
@@ -650,14 +640,44 @@ class OfflineImageReconstructor {
 if (ival != null) {
   bld.setStoragePolicyID(ival);
 }
-inodeBld.setFile(bld);
+return bld;
 // Will check remaining keys and serialize in processINodeXml
   }
 
+  private HdfsProtos.BlockProto.Builder createBlockBuilder(Node block)
+      throws IOException {
+    HdfsProtos.BlockProto.Builder blockBld =
+        HdfsProtos.BlockProto.newBuilder();
+    Long id = block.removeChildLong(SECTION_ID);
+    if (id == null) {
+      throw new IOException("<block> found without <id>");
+    }
+    blockBld.setBlockId(id);
+    Long genstamp = block.removeChildLong(INODE_SECTION_GENSTAMP);
+    if (genstamp == null) {
+      throw new IOException("<block> found without <genstamp>");
+    }
+    blockBld.setGenStamp(genstamp);
+    Long numBytes = block.removeChildLong(INODE_SECTION_NUM_BYTES);
+    if (numBytes == null) {
+      throw new IOException("<block> found without <numBytes>");
+    }
+    blockBld.setNumBytes(numBytes);
+    return blockBld;
+  }
+
   private void processDirectoryXml(Node node,
   INodeSection.INode.Builder inodeBld) throws IOException {
 inodeBld.setType(INodeSection.INode.Type.DIRECTORY);
 INodeSection.INodeDirectory.Builder bld =
+createINodeDirectoryBuilder(node);
+inodeBld.setDirectory(bld);

hadoop git commit: HDFS-10506. OIV's ReverseXML processor cannot reconstruct some snapshot details. Contributed by Akira Ajisaka.

2017-04-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 c43ee0058 -> ecaedca5c


HDFS-10506. OIV's ReverseXML processor cannot reconstruct some snapshot 
details. Contributed by Akira Ajisaka.

(cherry picked from commit 14414705f79495eda11e302f38c792128fe0182b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ecaedca5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ecaedca5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ecaedca5

Branch: refs/heads/branch-2.8
Commit: ecaedca5c4ee4e2d8112b153f27d7938b5d4d012
Parents: c43ee00
Author: Wei-Chiu Chuang 
Authored: Fri Mar 24 08:43:14 2017 -0700
Committer: Akira Ajisaka 
Committed: Fri Apr 14 21:11:31 2017 +0900

--
 .../OfflineImageReconstructor.java  | 84 ++--
 .../offlineImageViewer/PBImageXmlWriter.java| 34 ++--
 .../TestOfflineImageViewer.java | 15 +++-
 3 files changed, 103 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecaedca5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
index 137ceff..1f629b2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
@@ -568,6 +568,13 @@ class OfflineImageReconstructor {
   private void processFileXml(Node node, INodeSection.INode.Builder inodeBld)
   throws IOException {
 inodeBld.setType(INodeSection.INode.Type.FILE);
+INodeSection.INodeFile.Builder bld = createINodeFileBuilder(node);
+inodeBld.setFile(bld);
+// Will check remaining keys and serialize in processINodeXml
+  }
+
+  private INodeSection.INodeFile.Builder createINodeFileBuilder(Node node)
+  throws IOException {
 INodeSection.INodeFile.Builder bld = INodeSection.INodeFile.newBuilder();
 Integer ival = node.removeChildInt(SECTION_REPLICATION);
 if (ival != null) {
@@ -596,24 +603,7 @@ class OfflineImageReconstructor {
 if (block == null) {
   break;
 }
-        HdfsProtos.BlockProto.Builder blockBld =
-            HdfsProtos.BlockProto.newBuilder();
-        Long id = block.removeChildLong(SECTION_ID);
-        if (id == null) {
-          throw new IOException("<block> found without <id>");
-        }
-        blockBld.setBlockId(id);
-        Long genstamp = block.removeChildLong(INODE_SECTION_GEMSTAMP);
-        if (genstamp == null) {
-          throw new IOException("<block> found without <genstamp>");
-        }
-        blockBld.setGenStamp(genstamp);
-        Long numBytes = block.removeChildLong(INODE_SECTION_NUM_BYTES);
-        if (numBytes == null) {
-          throw new IOException("<block> found without <numBytes>");
-        }
-        blockBld.setNumBytes(numBytes);
-        bld.addBlocks(blockBld);
+        bld.addBlocks(createBlockBuilder(block));
   }
 }
 Node fileUnderConstruction =
@@ -650,14 +640,44 @@ class OfflineImageReconstructor {
 if (ival != null) {
   bld.setStoragePolicyID(ival);
 }
-inodeBld.setFile(bld);
+return bld;
 // Will check remaining keys and serialize in processINodeXml
   }
 
+  private HdfsProtos.BlockProto.Builder createBlockBuilder(Node block)
+      throws IOException {
+    HdfsProtos.BlockProto.Builder blockBld =
+        HdfsProtos.BlockProto.newBuilder();
+    Long id = block.removeChildLong(SECTION_ID);
+    if (id == null) {
+      throw new IOException("<block> found without <id>");
+    }
+    blockBld.setBlockId(id);
+    Long genstamp = block.removeChildLong(INODE_SECTION_GENSTAMP);
+    if (genstamp == null) {
+      throw new IOException("<block> found without <genstamp>");
+    }
+    blockBld.setGenStamp(genstamp);
+    Long numBytes = block.removeChildLong(INODE_SECTION_NUM_BYTES);
+    if (numBytes == null) {
+      throw new IOException("<block> found without <numBytes>");
+    }
+    blockBld.setNumBytes(numBytes);
+    return blockBld;
+  }
+
   private void processDirectoryXml(Node node,
   INodeSection.INode.Builder inodeBld) throws IOException {
 inodeBld.setType(INodeSection.INode.Type.DIRECTORY);
 INodeSection.INodeDirectory.Builder bld =
+createINodeDirectoryBuilder(node);
+inodeBld.setDirectory(bld);
+// Will check remaining keys and serialize in processINodeXml
+  }