hadoop git commit: HDFS-13728. Disk Balancer should not fail if volume usage is greater than capacity. Contributed by Stephen O'Donnell.

2018-08-07 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 a3675f382 -> 8e5081569


HDFS-13728. Disk Balancer should not fail if volume usage is greater than 
capacity. Contributed by Stephen O'Donnell.

(cherry picked from commit 6677717c689cc94a15f14c3466242e23652d473b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e508156
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e508156
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e508156

Branch: refs/heads/branch-3.0
Commit: 8e5081569f00cde23e58e234dc22a1dabb20323a
Parents: a3675f3
Author: Xiao Chen 
Authored: Tue Aug 7 22:04:41 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 7 22:05:59 2018 -0700

--
 .../diskbalancer/datamodel/DiskBalancerVolume.java | 17 ++++++++++++-----
 .../hdfs/server/diskbalancer/TestDataModels.java   | 16 ++++++++++++++++
 2 files changed, 28 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e508156/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
index a9fd7f0..e43b83e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
@@ -21,9 +21,10 @@ import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
-import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.hdfs.web.JsonUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 
@@ -35,6 +36,9 @@ public class DiskBalancerVolume {
   private static final ObjectReader READER =
       new ObjectMapper().readerFor(DiskBalancerVolume.class);
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DiskBalancerVolume.class);
+
   private String path;
   private long capacity;
   private String storageType;
@@ -269,10 +273,13 @@ public class DiskBalancerVolume {
    * @param dfsUsedSpace - dfsUsedSpace for this volume.
    */
   public void setUsed(long dfsUsedSpace) {
-    Preconditions.checkArgument(dfsUsedSpace < this.getCapacity(),
-        "DiskBalancerVolume.setUsed: dfsUsedSpace(%s) < capacity(%s)",
-        dfsUsedSpace, getCapacity());
-    this.used = dfsUsedSpace;
+    if (dfsUsedSpace > this.getCapacity()) {
+      LOG.warn("Volume usage (" + dfsUsedSpace + ") is greater than capacity ("
+          + this.getCapacity() + "). Setting volume usage to the capacity");
+      this.used = this.getCapacity();
+    } else {
+      this.used = dfsUsedSpace;
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e508156/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
index ace8212..12fbcf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
@@ -224,4 +224,20 @@ public class TestDataModels {
     Assert
         .assertEquals(cluster.getNodes().size(), newCluster.getNodes().size());
   }
+
+  @Test
+  public void testUsageLimitedToCapacity() throws Exception {
+    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
+
+    // If usage is greater than capacity, then it should be set to capacity
+    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.DISK);
+    v1.setCapacity(DiskBalancerTestUtil.GB);
+    v1.setUsed(2 * DiskBalancerTestUtil.GB);
+    Assert.assertEquals(v1.getUsed(), v1.getCapacity());
+    // If usage is less than capacity, usage should be set to the real usage
+    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.DISK);
+    v2.setCapacity(2 * DiskBalancerTestUtil.GB);
+    v2.setUsed(DiskBalancerTestUtil.GB);
+    Assert.assertEquals(v2.getUsed(), DiskBalancerTestUtil.GB);
+  }
 }
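
For illustration, a minimal sketch (not part of the patch) of how a caller
observes the new behaviour; DiskBalancerVolume is the real class from the
diff above, while the capacities and the no-arg construction are illustrative
assumptions:

    import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;

    public class SetUsedClampingDemo {
      public static void main(String[] args) {
        DiskBalancerVolume vol = new DiskBalancerVolume();
        vol.setCapacity(1024L);
        // Before the patch this threw IllegalArgumentException from
        // Preconditions.checkArgument; now it logs a WARN and clamps.
        vol.setUsed(2048L);
        // used is capped at the 1024-byte capacity
        assert vol.getUsed() == vol.getCapacity();
      }
    }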


---

hadoop git commit: HDFS-13728. Disk Balancer should not fail if volume usage is greater than capacity. Contributed by Stephen O'Donnell.

2018-08-07 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 f2768eaa3 -> bf03b25f4


HDFS-13728. Disk Balancer should not fail if volume usage is greater than 
capacity. Contributed by Stephen O'Donnell.

(cherry picked from commit 6677717c689cc94a15f14c3466242e23652d473b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf03b25f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf03b25f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf03b25f

Branch: refs/heads/branch-3.1
Commit: bf03b25f4b940d9ee8507795fb85b2b6f36e2cf7
Parents: f2768ea
Author: Xiao Chen 
Authored: Tue Aug 7 22:04:41 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 7 22:05:51 2018 -0700

--
 .../diskbalancer/datamodel/DiskBalancerVolume.java | 17 ++++++++++++-----
 .../hdfs/server/diskbalancer/TestDataModels.java   | 16 ++++++++++++++++
 2 files changed, 28 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf03b25f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
index a9fd7f0..e43b83e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
@@ -21,9 +21,10 @@ import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
-import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.hdfs.web.JsonUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 
@@ -35,6 +36,9 @@ public class DiskBalancerVolume {
   private static final ObjectReader READER =
       new ObjectMapper().readerFor(DiskBalancerVolume.class);
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DiskBalancerVolume.class);
+
   private String path;
   private long capacity;
   private String storageType;
@@ -269,10 +273,13 @@ public class DiskBalancerVolume {
    * @param dfsUsedSpace - dfsUsedSpace for this volume.
    */
   public void setUsed(long dfsUsedSpace) {
-    Preconditions.checkArgument(dfsUsedSpace < this.getCapacity(),
-        "DiskBalancerVolume.setUsed: dfsUsedSpace(%s) < capacity(%s)",
-        dfsUsedSpace, getCapacity());
-    this.used = dfsUsedSpace;
+    if (dfsUsedSpace > this.getCapacity()) {
+      LOG.warn("Volume usage (" + dfsUsedSpace + ") is greater than capacity ("
+          + this.getCapacity() + "). Setting volume usage to the capacity");
+      this.used = this.getCapacity();
+    } else {
+      this.used = dfsUsedSpace;
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf03b25f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
index ace8212..12fbcf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
@@ -224,4 +224,20 @@ public class TestDataModels {
     Assert
         .assertEquals(cluster.getNodes().size(), newCluster.getNodes().size());
   }
+
+  @Test
+  public void testUsageLimitedToCapacity() throws Exception {
+    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
+
+    // If usage is greater than capacity, then it should be set to capacity
+    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.DISK);
+    v1.setCapacity(DiskBalancerTestUtil.GB);
+    v1.setUsed(2 * DiskBalancerTestUtil.GB);
+    Assert.assertEquals(v1.getUsed(), v1.getCapacity());
+    // If usage is less than capacity, usage should be set to the real usage
+    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.DISK);
+    v2.setCapacity(2 * DiskBalancerTestUtil.GB);
+    v2.setUsed(DiskBalancerTestUtil.GB);
+    Assert.assertEquals(v2.getUsed(), DiskBalancerTestUtil.GB);
+  }
 }


---

hadoop git commit: HDFS-13728. Disk Balancer should not fail if volume usage is greater than capacity. Contributed by Stephen O'Donnell.

2018-08-07 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2b0f97724 -> 6677717c6


HDFS-13728. Disk Balancer should not fail if volume usage is greater than 
capacity. Contributed by Stephen O'Donnell.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6677717c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6677717c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6677717c

Branch: refs/heads/trunk
Commit: 6677717c689cc94a15f14c3466242e23652d473b
Parents: 2b0f977
Author: Xiao Chen 
Authored: Tue Aug 7 22:04:41 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 7 22:05:17 2018 -0700

--
 .../diskbalancer/datamodel/DiskBalancerVolume.java | 17 ++++++++++++-----
 .../hdfs/server/diskbalancer/TestDataModels.java   | 16 ++++++++++++++++
 2 files changed, 28 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6677717c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
index a9fd7f0..e43b83e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
@@ -21,9 +21,10 @@ import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
-import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.hdfs.web.JsonUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 
@@ -35,6 +36,9 @@ public class DiskBalancerVolume {
   private static final ObjectReader READER =
       new ObjectMapper().readerFor(DiskBalancerVolume.class);
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DiskBalancerVolume.class);
+
   private String path;
   private long capacity;
   private String storageType;
@@ -269,10 +273,13 @@ public class DiskBalancerVolume {
    * @param dfsUsedSpace - dfsUsedSpace for this volume.
    */
   public void setUsed(long dfsUsedSpace) {
-    Preconditions.checkArgument(dfsUsedSpace < this.getCapacity(),
-        "DiskBalancerVolume.setUsed: dfsUsedSpace(%s) < capacity(%s)",
-        dfsUsedSpace, getCapacity());
-    this.used = dfsUsedSpace;
+    if (dfsUsedSpace > this.getCapacity()) {
+      LOG.warn("Volume usage (" + dfsUsedSpace + ") is greater than capacity ("
+          + this.getCapacity() + "). Setting volume usage to the capacity");
+      this.used = this.getCapacity();
+    } else {
+      this.used = dfsUsedSpace;
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6677717c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
index ace8212..12fbcf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
@@ -224,4 +224,20 @@ public class TestDataModels {
     Assert
         .assertEquals(cluster.getNodes().size(), newCluster.getNodes().size());
   }
+
+  @Test
+  public void testUsageLimitedToCapacity() throws Exception {
+    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
+
+    // If usage is greater than capacity, then it should be set to capacity
+    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.DISK);
+    v1.setCapacity(DiskBalancerTestUtil.GB);
+    v1.setUsed(2 * DiskBalancerTestUtil.GB);
+    Assert.assertEquals(v1.getUsed(), v1.getCapacity());
+    // If usage is less than capacity, usage should be set to the real usage
+    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.DISK);
+    v2.setCapacity(2 * DiskBalancerTestUtil.GB);
+    v2.setUsed(DiskBalancerTestUtil.GB);
+    Assert.assertEquals(v2.getUsed(), DiskBalancerTestUtil.GB);
+  }
 }



hadoop git commit: HDFS-13786. EC: Display erasure coding policy for sub-directories is not working. Contributed by Ayush Saxena.

2018-08-07 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7862f1523 -> 2b0f97724


HDFS-13786. EC: Display erasure coding policy for sub-directories is not 
working. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b0f9772
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b0f9772
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b0f9772

Branch: refs/heads/trunk
Commit: 2b0f9772417d205e8df16bac6921c2bb8bdcf740
Parents: 7862f15
Author: Vinayakumar B 
Authored: Wed Aug 8 07:47:10 2018 +0530
Committer: Vinayakumar B 
Committed: Wed Aug 8 07:53:17 2018 +0530

--
 .../namenode/ContentSummaryComputationContext.java|  2 ++
 .../apache/hadoop/hdfs/TestErasureCodingPolicies.java | 14 ++++++++++++++
 2 files changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b0f9772/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index c81f82c..95f3fee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -191,6 +191,8 @@ public class ContentSummaryComputationContext {
               .getEnabledPolicyByName(ecPolicyName)
               .getName();
         }
+      } else if (inode.getParent() != null) {
+        return getErasureCodingPolicyName(inode.getParent());
       }
     } catch (IOException ioe) {
       LOG.warn("Encountered error getting ec policy for "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b0f9772/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 7d97cce..835d18f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -154,6 +155,19 @@ public class TestErasureCodingPolicies {
   }
 
   @Test
+  public void testContentSummaryOfECSubdir() throws IOException {
+    final Path testDir = new Path("/ec");
+    fs.mkdir(testDir, FsPermission.getDirDefault());
+    fs.setErasureCodingPolicy(testDir, ecPolicy.getName());
+    final Path fPath = new Path("ec/file");
+    fs.create(fPath).close();
+    final Path subdir = new Path("/ec/sub");
+    fs.mkdir(subdir, FsPermission.getDirDefault());
+    ContentSummary contentSummary = fs.getContentSummary(subdir);
+    assertEquals(ecPolicy.getName(), contentSummary.getErasureCodingPolicy());
+  }
+
+  @Test
   public void testBasicSetECPolicy()
       throws IOException, InterruptedException {
     final Path testDir = new Path("/ec");
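
The shape of the fix, as a self-contained sketch: when a node carries no
explicit EC policy, walk up to the nearest ancestor that does. Node here is
an illustrative stand-in for the HDFS INode, not the real class:

    final class Node {
      final Node parent;
      final String ecPolicy; // null when no policy is set on this node

      Node(Node parent, String ecPolicy) {
        this.parent = parent;
        this.ecPolicy = ecPolicy;
      }

      // Mirrors the new else-if branch: inherit from the parent chain.
      String effectiveEcPolicy() {
        if (ecPolicy != null) {
          return ecPolicy;
        }
        return parent != null ? parent.effectiveEcPolicy() : "";
      }
    }

    class EcInheritanceDemo {
      public static void main(String[] args) {
        Node ecDir = new Node(null, "RS-6-3-1024k");
        Node subdir = new Node(ecDir, null); // like /ec/sub in the new test
        System.out.println(subdir.effectiveEcPolicy()); // RS-6-3-1024k
      }
    }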





hadoop git commit: HADOOP-15400. Improve S3Guard documentation on Authoritative Mode implementation. (Contributed by Gabor Bota)

2018-08-07 Thread mackrorysd
Repository: hadoop
Updated Branches:
  refs/heads/trunk 38784f95f -> 7862f1523


HADOOP-15400. Improve S3Guard documentation on Authoritative Mode 
implementation. (Contributed by Gabor Bota)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7862f152
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7862f152
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7862f152

Branch: refs/heads/trunk
Commit: 7862f1523f0591a0b163bcecf07af842de4b3a8b
Parents: 38784f9
Author: Sean Mackrory 
Authored: Tue Aug 7 13:55:59 2018 -0600
Committer: Sean Mackrory 
Committed: Tue Aug 7 20:13:09 2018 -0600

--
 .../site/markdown/tools/hadoop-aws/s3guard.md   | 51 ++--
 1 file changed, 47 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7862f152/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md
--
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md
index 60d26e2..66ee11d 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md
@@ -100,7 +100,51 @@ More settings will may be added in the future.
 Currently the only Metadata Store-independent setting, besides the
 implementation class above, is the *allow authoritative* flag.
 
-It is recommended that you leave the default setting here:
+The _authoritative_ expression in S3Guard is present in two different layers, for
+two different reasons:
+
+* Authoritative S3Guard
+    * S3Guard can be set as authoritative, which means that an S3A client will
+    avoid round-trips to S3 when **getting directory listings** if there is a fully
+    cached version of the directory stored in metadata store.
+    * This mode can be set as a configuration property
+    `fs.s3a.metadatastore.authoritative`
+    * All interactions with the S3 bucket(s) must be through S3A clients sharing
+    the same metadata store.
+    * This is independent from which metadata store implementation is used.
+
+* Authoritative directory listings (isAuthoritative bit)
+    * Tells if the stored directory listing metadata is complete.
+    * This is set by the FileSystem client (e.g. s3a) via the `DirListingMetadata`
+    class (`org.apache.hadoop.fs.s3a.s3guard.DirListingMetadata`).
+    (The MetadataStore only knows what the FS client tells it.)
+    * If set to `TRUE`, we know that the directory listing
+    (`DirListingMetadata`) is full, and complete.
+    * If set to `FALSE` the listing may not be complete.
+    * The metadata store may persist the isAuthoritative bit.
+    * Currently only the `org.apache.hadoop.fs.s3a.s3guard.LocalMetadataStore`
+    implementation supports the authoritative bit.
+
+More on Authoritative S3Guard:
+
+* It is not treating the MetadataStore (e.g. dynamodb) as the source of truth
+in general.
+* It is the ability to short-circuit S3 list objects and serve listings from
+the MetadataStore in some circumstances.
+* For S3A to skip S3's list objects on some path, and serve it directly from
+the MetadataStore, the following things must all be true:
+    1. The MetadataStore implementation persists the bit
+    `DirListingMetadata.isAuthoritative` set when calling
+    `MetadataStore#put` (`DirListingMetadata`)
+    1. The S3A client is configured to allow the metadatastore to be the
+    authoritative source of a directory listing
+    (`fs.s3a.metadatastore.authoritative=true`).
+    1. The MetadataStore has a **full listing for path** stored in it. This only
+    happens if the FS client (s3a) explicitly has stored a full directory
+    listing with `DirListingMetadata.isAuthoritative=true` before the said
+    listing request happens.
+
+This configuration only enables authoritative mode in the client layer. It is
+recommended that you leave the default setting here:
 
 ```xml
 <property>
@@ -109,9 +153,8 @@ It is recommended that you leave the default setting here:
 </property>
 ```
 
-Setting this to `true` is currently an experimental feature.  When true, the
-S3A client will avoid round-trips to S3 when getting directory listings, if
-there is a fully-cached version of the directory stored in the Metadata Store.
+Note that a MetadataStore MAY persist this bit. (Not MUST).
+Setting this to `true` is currently an experimental feature.
 
 Note that if this is set to true, it may exacerbate or persist existing race
 conditions around multiple concurrent modifications and listings of a given
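
For completeness, the same setting can be applied from code; this is a
minimal sketch using the standard Hadoop Configuration API with the
fs.s3a.metadatastore.authoritative key named in the document above:

    import org.apache.hadoop.conf.Configuration;

    public class AuthoritativeModeConfig {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Experimental: lets S3A serve fully cached directory listings from
        // the MetadataStore without a round-trip to S3.
        conf.setBoolean("fs.s3a.metadatastore.authoritative", true);
        System.out.println(conf.get("fs.s3a.metadatastore.authoritative"));
      }
    }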



hadoop git commit: HDDS-124. Validate all required configs needed for ozone-site.xml and reflect the changes in ozone-default.xml Contributed by Dinesh Chitlangia.

2018-08-07 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0f8cb127c -> 38784f95f


HDDS-124. Validate all required configs needed for ozone-site.xml and reflect 
the changes in ozone-default.xml
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38784f95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38784f95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38784f95

Branch: refs/heads/trunk
Commit: 38784f95fecd02c2f94344c1967cccf0799ec074
Parents: 0f8cb12
Author: Anu Engineer 
Authored: Tue Aug 7 16:40:33 2018 -0700
Committer: Anu Engineer 
Committed: Tue Aug 7 16:40:33 2018 -0700

--
 hadoop-hdds/common/src/main/resources/ozone-default.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38784f95/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 5099bbe..568e38d 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -815,7 +815,7 @@
   <property>
     <name>ozone.scm.names</name>
     <value/>
-    <tag>OZONE</tag>
+    <tag>OZONE, REQUIRED</tag>
     <description>
       The value of this property is a set of DNS | DNS:PORT | IP
       Address | IP:PORT. Written as a comma separated string. e.g. scm1,
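
A hedged sketch of supplying the now-REQUIRED setting programmatically; the
key and the "DNS | DNS:PORT | IP | IP:PORT, comma separated" value format
come from the description above, while the host names are invented:

    import org.apache.hadoop.conf.Configuration;

    public class ScmNamesConfig {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Two SCM addresses, one with an explicit port.
        conf.set("ozone.scm.names", "scm1.example.com,scm2.example.com:9876");
        System.out.println(conf.get("ozone.scm.names"));
      }
    }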





hadoop git commit: HDFS-13799. TestEditLogTailer#testTriggersLogRollsForAllStandbyNN fails due to missing synchronization between rollEditsRpcExecutor and tailerThread shutdown. Contributed by Hrishik

2018-08-07 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk d838179d8 -> 0f8cb127c


HDFS-13799. TestEditLogTailer#testTriggersLogRollsForAllStandbyNN fails due to 
missing synchronization
between rollEditsRpcExecutor and tailerThread shutdown. Contributed 
by Hrishikesh Gadre.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f8cb127
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f8cb127
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f8cb127

Branch: refs/heads/trunk
Commit: 0f8cb127cd759cdc6422d19d8b28f21198ddfd61
Parents: d838179
Author: Xiao Chen 
Authored: Tue Aug 7 16:11:37 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 7 16:13:41 2018 -0700

--
 .../org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f8cb127/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index 2003f94..b306b8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -234,7 +234,6 @@ public class EditLogTailer {
   }
 
   public void stop() throws IOException {
-    rollEditsRpcExecutor.shutdown();
     tailerThread.setShouldRun(false);
     tailerThread.interrupt();
     try {
@@ -242,6 +241,8 @@ public class EditLogTailer {
     } catch (InterruptedException e) {
       LOG.warn("Edit log tailer thread exited with an exception");
       throw new IOException(e);
+    } finally {
+      rollEditsRpcExecutor.shutdown();
     }
   }
 
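
The ordering bug in miniature, as a self-contained sketch: the executor must
outlive the worker thread, so its shutdown moves into a finally block that
runs only after join(). Names are illustrative, not the EditLogTailer
internals:

    import java.io.IOException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    class TailerShutdownDemo {
      private final ExecutorService rpcExecutor =
          Executors.newSingleThreadExecutor();
      private final Thread worker = new Thread(() -> { /* tail edits */ });

      public void stop() throws IOException {
        worker.interrupt();
        try {
          // The worker may still use rpcExecutor while winding down, so the
          // executor is shut down only after the thread has been joined.
          worker.join();
        } catch (InterruptedException e) {
          throw new IOException(e);
        } finally {
          rpcExecutor.shutdown();
        }
      }
    }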





hadoop git commit: YARN-8626. Create HomePolicyManager that sends all the requests to the home subcluster. Contributed by Inigo Goiri.

2018-08-07 Thread gifuma
Repository: hadoop
Updated Branches:
  refs/heads/trunk 861095f76 -> d838179d8


YARN-8626. Create HomePolicyManager that sends all the requests to the home 
subcluster. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d838179d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d838179d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d838179d

Branch: refs/heads/trunk
Commit: d838179d8dc257e582e8c7bb1cf312d4c0d3f733
Parents: 861095f
Author: Giovanni Matteo Fumarola 
Authored: Tue Aug 7 15:33:16 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Tue Aug 7 15:33:16 2018 -0700

--
 .../amrmproxy/AbstractAMRMProxyPolicy.java  |   8 ++
 .../amrmproxy/BroadcastAMRMProxyPolicy.java |   7 --
 .../policies/amrmproxy/HomeAMRMProxyPolicy.java |  74 +
 .../amrmproxy/RejectAMRMProxyPolicy.java|   8 --
 .../policies/manager/HomePolicyManager.java |  61 ++
 .../amrmproxy/TestHomeAMRMProxyPolicy.java  | 110 +++
 .../policies/manager/TestHomePolicyManager.java |  39 +++
 .../utils/FederationPoliciesTestUtil.java   |  16 ++-
 8 files changed, 305 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d838179d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/AbstractAMRMProxyPolicy.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/AbstractAMRMProxyPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/AbstractAMRMProxyPolicy.java
index e853744..07cd6db 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/AbstractAMRMProxyPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/AbstractAMRMProxyPolicy.java
@@ -20,9 +20,12 @@ package org.apache.hadoop.yarn.server.federation.policies.amrmproxy;
 
 import java.util.Map;
 
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.federation.policies.AbstractConfigurableFederationPolicy;
 import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
 import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
 
 /**
@@ -44,4 +47,9 @@ public abstract class AbstractAMRMProxyPolicy extends
     }
   }
 
+  @Override
+  public void notifyOfResponse(SubClusterId subClusterId,
+      AllocateResponse response) throws YarnException {
+    // By default, a stateless policy does not care about responses
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d838179d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
index 7fddb8e..eb83baa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
@@ -22,7 +22,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
@@ -65,10 +64,4 @@ public clas
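
The policy's core idea, reduced to an illustrative sketch: every resource
request is routed to the single home subcluster. HomeRouter and its Map shape
are stand-ins for HomeAMRMProxyPolicy#splitResourceRequests, not the actual
YARN federation API:

    import java.util.Collections;
    import java.util.List;
    import java.util.Map;

    final class HomeRouter<R> {
      private final String homeSubCluster;

      HomeRouter(String homeSubCluster) {
        this.homeSubCluster = homeSubCluster;
      }

      // All requests go to the home subcluster, unmodified.
      Map<String, List<R>> split(List<R> requests) {
        return Collections.singletonMap(homeSubCluster, requests);
      }
    }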

hadoop git commit: YARN-8407. Container launch exception in AM log should be printed in ERROR level. (Yesha Vora via wangda)

2018-08-07 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 450c791ec -> f2768eaa3


YARN-8407. Container launch exception in AM log should be printed in ERROR 
level. (Yesha Vora via wangda)

Change-Id: I154e873df1df3503a09c41d6b3874ca195af91d9
(cherry picked from commit 861095f761b40171e0dc25f769f486d910cc3e88)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2768eaa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2768eaa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2768eaa

Branch: refs/heads/branch-3.1
Commit: f2768eaa3830b9d0fde21831b7f11e44d440f76d
Parents: 450c791
Author: Wangda Tan 
Authored: Tue Aug 7 13:01:13 2018 -0700
Committer: Wangda Tan 
Committed: Tue Aug 7 13:07:28 2018 -0700

--
 .../component/instance/ComponentInstance.java   | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2768eaa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
index 44ae1e7..a1aa808 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
@@ -251,14 +251,20 @@ public class ComponentInstance implements EventHandler,
       // re-ask the failed container.
       comp.requestContainers(1);
       comp.reInsertPendingInstance(compInstance);
-      LOG.info(compInstance.getCompInstanceId()
-          + ": {} completed. Reinsert back to pending list and requested " +
-          "a new container." + System.lineSeparator() +
-          " exitStatus={}, diagnostics={}.",
-          event.getContainerId(), failureBeforeLaunch ? null :
-          event.getStatus().getExitStatus(),
-          failureBeforeLaunch ? FAILED_BEFORE_LAUNCH_DIAG :
-          event.getStatus().getDiagnostics());
+
+      StringBuilder builder = new StringBuilder();
+      builder.append(compInstance.getCompInstanceId()).append(": ");
+      builder.append(event.getContainerId()).append(" completed. Reinsert back to pending list and requested ");
+      builder.append("a new container.").append(System.lineSeparator());
+      builder.append(" exitStatus=").append(failureBeforeLaunch ? null : event.getStatus().getExitStatus());
+      builder.append(", diagnostics=");
+      builder.append(failureBeforeLaunch ? FAILED_BEFORE_LAUNCH_DIAG : event.getStatus().getDiagnostics());
+
+      if (event.getStatus().getExitStatus() != 0) {
+        LOG.error(builder.toString());
+      } else {
+        LOG.info(builder.toString());
+      }
     } else {
       // When no relaunch, update component's #succeeded/#failed
       // instances.





hadoop git commit: YARN-8407. Container launch exception in AM log should be printed in ERROR level. (Yesha Vora via wangda)

2018-08-07 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk c0599151b -> 861095f76


YARN-8407. Container launch exception in AM log should be printed in ERROR 
level. (Yesha Vora via wangda)

Change-Id: I154e873df1df3503a09c41d6b3874ca195af91d9


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/861095f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/861095f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/861095f7

Branch: refs/heads/trunk
Commit: 861095f761b40171e0dc25f769f486d910cc3e88
Parents: c059915
Author: Wangda Tan 
Authored: Tue Aug 7 13:01:13 2018 -0700
Committer: Wangda Tan 
Committed: Tue Aug 7 13:01:13 2018 -0700

--
 .../component/instance/ComponentInstance.java   | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/861095f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
index 3499d92..10128a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
@@ -256,14 +256,20 @@ public class ComponentInstance implements EventHandler,
       // re-ask the failed container.
       comp.requestContainers(1);
       comp.reInsertPendingInstance(compInstance);
-      LOG.info(compInstance.getCompInstanceId()
-          + ": {} completed. Reinsert back to pending list and requested " +
-          "a new container." + System.lineSeparator() +
-          " exitStatus={}, diagnostics={}.",
-          event.getContainerId(), failureBeforeLaunch ? null :
-          event.getStatus().getExitStatus(),
-          failureBeforeLaunch ? FAILED_BEFORE_LAUNCH_DIAG :
-          event.getStatus().getDiagnostics());
+
+      StringBuilder builder = new StringBuilder();
+      builder.append(compInstance.getCompInstanceId()).append(": ");
+      builder.append(event.getContainerId()).append(" completed. Reinsert back to pending list and requested ");
+      builder.append("a new container.").append(System.lineSeparator());
+      builder.append(" exitStatus=").append(failureBeforeLaunch ? null : event.getStatus().getExitStatus());
+      builder.append(", diagnostics=");
+      builder.append(failureBeforeLaunch ? FAILED_BEFORE_LAUNCH_DIAG : event.getStatus().getDiagnostics());
+
+      if (event.getStatus().getExitStatus() != 0) {
+        LOG.error(builder.toString());
+      } else {
+        LOG.info(builder.toString());
+      }
     } else {
       // When no relaunch, update component's #succeeded/#failed
       // instances.
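
The logging pattern the patch introduces, as a minimal SLF4J sketch: build
the message once, then choose ERROR or INFO from the container exit status.
The class and message text are illustrative:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class ExitStatusLogging {
      private static final Logger LOG =
          LoggerFactory.getLogger(ExitStatusLogging.class);

      static void logCompletion(String containerId, int exitStatus) {
        String msg = containerId + " completed, exitStatus=" + exitStatus;
        if (exitStatus != 0) {
          LOG.error(msg); // launch failures now surface at ERROR level
        } else {
          LOG.info(msg);
        }
      }
    }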





hadoop git commit: YARN-8629. Container cleanup fails while trying to delete Cgroups. (Suma Shivaprasad via wangda)

2018-08-07 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 947dc3422 -> 450c791ec


YARN-8629. Container cleanup fails while trying to delete Cgroups. (Suma 
Shivaprasad via wangda)

Change-Id: I392ef4f8baa84d5d7b1f2e438c560b5426b6d4f2
(cherry picked from commit d4258fcad71eabe2de3cf829cde36840200ab9b6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/450c791e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/450c791e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/450c791e

Branch: refs/heads/branch-3.1
Commit: 450c791ecf512bbfe9b0c9d66949933837dfc2da
Parents: 947dc34
Author: Wangda Tan 
Authored: Tue Aug 7 12:36:55 2018 -0700
Committer: Wangda Tan 
Committed: Tue Aug 7 12:41:55 2018 -0700

--
 .../linux/resources/CGroupsHandlerImpl.java | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/450c791e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index 287e171..7a980a6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -504,23 +504,29 @@ class CGroupsHandlerImpl implements CGroupsHandler {
   private boolean checkAndDeleteCgroup(File cgf) throws InterruptedException {
     boolean deleted = false;
     // FileInputStream in = null;
-    try (FileInputStream in = new FileInputStream(cgf + "/tasks")) {
-      if (in.read() == -1) {
+    if ( cgf.exists() ) {
+      try (FileInputStream in = new FileInputStream(cgf + "/tasks")) {
+        if (in.read() == -1) {
         /*
          * "tasks" file is empty, sleep a bit more and then try to delete the
          * cgroup. Some versions of linux will occasionally panic due to a race
          * condition in this area, hence the paranoia.
          */
-        Thread.sleep(deleteCGroupDelay);
-        deleted = cgf.delete();
-        if (!deleted) {
-          LOG.warn("Failed attempt to delete cgroup: " + cgf);
+          Thread.sleep(deleteCGroupDelay);
+          deleted = cgf.delete();
+          if (!deleted) {
+            LOG.warn("Failed attempt to delete cgroup: " + cgf);
+          }
+        } else{
+          logLineFromTasksFile(cgf);
         }
-      } else {
-        logLineFromTasksFile(cgf);
+      } catch (IOException e) {
+        LOG.warn("Failed to read cgroup tasks file. ", e);
       }
-    } catch (IOException e) {
-      LOG.warn("Failed to read cgroup tasks file. ", e);
+    } else {
+      LOG.info("Parent Cgroups directory {} does not exist. Skipping "
+          + "deletion", cgf.getPath());
+      deleted = true;
     }
     return deleted;
   }





[1/2] hadoop git commit: YARN-8629. Container cleanup fails while trying to delete Cgroups. (Suma Shivaprasad via wangda)

2018-08-07 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk b1a59b164 -> c0599151b


YARN-8629. Container cleanup fails while trying to delete Cgroups. (Suma 
Shivaprasad via wangda)

Change-Id: I392ef4f8baa84d5d7b1f2e438c560b5426b6d4f2


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4258fca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4258fca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4258fca

Branch: refs/heads/trunk
Commit: d4258fcad71eabe2de3cf829cde36840200ab9b6
Parents: b1a59b1
Author: Wangda Tan 
Authored: Tue Aug 7 12:36:55 2018 -0700
Committer: Wangda Tan 
Committed: Tue Aug 7 12:36:55 2018 -0700

--
 .../linux/resources/CGroupsHandlerImpl.java | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4258fca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index c3800b6..a547e8f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -504,23 +504,29 @@ class CGroupsHandlerImpl implements CGroupsHandler {
   private boolean checkAndDeleteCgroup(File cgf) throws InterruptedException {
     boolean deleted = false;
     // FileInputStream in = null;
-    try (FileInputStream in = new FileInputStream(cgf + "/tasks")) {
-      if (in.read() == -1) {
+    if ( cgf.exists() ) {
+      try (FileInputStream in = new FileInputStream(cgf + "/tasks")) {
+        if (in.read() == -1) {
         /*
          * "tasks" file is empty, sleep a bit more and then try to delete the
          * cgroup. Some versions of linux will occasionally panic due to a race
          * condition in this area, hence the paranoia.
          */
-        Thread.sleep(deleteCGroupDelay);
-        deleted = cgf.delete();
-        if (!deleted) {
-          LOG.warn("Failed attempt to delete cgroup: " + cgf);
+          Thread.sleep(deleteCGroupDelay);
+          deleted = cgf.delete();
+          if (!deleted) {
+            LOG.warn("Failed attempt to delete cgroup: " + cgf);
+          }
+        } else{
+          logLineFromTasksFile(cgf);
         }
-      } else {
-        logLineFromTasksFile(cgf);
+      } catch (IOException e) {
+        LOG.warn("Failed to read cgroup tasks file. ", e);
       }
-    } catch (IOException e) {
-      LOG.warn("Failed to read cgroup tasks file. ", e);
+    } else {
+      LOG.info("Parent Cgroups directory {} does not exist. Skipping "
+          + "deletion", cgf.getPath());
+      deleted = true;
     }
     return deleted;
   }
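
The fix expressed as a self-contained Java sketch: a missing cgroup directory
is treated as already deleted instead of failing when its tasks file cannot
be opened. checkAndDelete mirrors the structure of checkAndDeleteCgroup, not
the exact container logic:

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;

    class CgroupCleanupDemo {
      static boolean checkAndDelete(File cgroupDir) {
        if (!cgroupDir.exists()) {
          // Directory already gone: nothing to clean up, report success.
          return true;
        }
        try (FileInputStream in =
            new FileInputStream(new File(cgroupDir, "tasks"))) {
          if (in.read() == -1) { // "tasks" is empty, no processes left
            return cgroupDir.delete();
          }
        } catch (IOException e) {
          System.err.println("Failed to read cgroup tasks file: " + e);
        }
        return false;
      }
    }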





[2/2] hadoop git commit: YARN-7089. Mark the log-aggregation-controller APIs as public. (Zian Chen via wangda)

2018-08-07 Thread wangda
YARN-7089. Mark the log-aggregation-controller APIs as public. (Zian Chen via 
wangda)

Change-Id: I37851bdc5935d623a27d0973a206c997258716eb


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0599151
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0599151
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0599151

Branch: refs/heads/trunk
Commit: c0599151bb438d3dc0c6a54af93b2670770daefd
Parents: d4258fc
Author: Wangda Tan 
Authored: Tue Aug 7 12:37:32 2018 -0700
Committer: Wangda Tan 
Committed: Tue Aug 7 12:37:32 2018 -0700

--
 .../filecontroller/LogAggregationFileController.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0599151/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
index 6b3c9a4..fe65288 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
@@ -35,7 +35,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -65,7 +65,7 @@ import org.apache.hadoop.yarn.logaggregation.ContainerLogsRequest;
 /**
  * Base class to implement Log Aggregation File Controller.
  */
-@Private
+@Public
 @Unstable
 public abstract class LogAggregationFileController {
 





hadoop git commit: HDFS-13796. Allow verbosity of InMemoryLevelDBAliasMapServer to be configurable.

2018-08-07 Thread virajith
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6ed8593d1 -> b1a59b164


HDFS-13796. Allow verbosity of InMemoryLevelDBAliasMapServer to be configurable.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b1a59b16
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b1a59b16
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b1a59b16

Branch: refs/heads/trunk
Commit: b1a59b164412fbd9f641a7e992a7d1a3fd0f1a10
Parents: 6ed8593
Author: Virajith Jalaparti 
Authored: Tue Aug 7 10:15:28 2018 -0700
Committer: Virajith Jalaparti 
Committed: Tue Aug 7 10:15:28 2018 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 ++
 .../hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java | 8 +++++++-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 9 +++++++++
 3 files changed, 18 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1a59b16/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 5a1266c..4f21ee1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -96,6 +96,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_PROVIDED_ALIASMAP_INMEMORY_BATCH_SIZE_DEFAULT = 500;
   public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_ENABLED = "dfs.provided.aliasmap.inmemory.enabled";
   public static final boolean DFS_PROVIDED_ALIASMAP_INMEMORY_ENABLED_DEFAULT = false;
+  public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_SERVER_LOG = "dfs.provided.aliasmap.inmemory.server.log";
+  public static final boolean DFS_PROVIDED_ALIASMAP_INMEMORY_SERVER_LOG_DEFAULT = false;
 
   public static final String  DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1a59b16/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java
index 1d06f13..f201bfd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java
@@ -39,6 +39,8 @@ import java.util.Optional;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_BIND_HOST;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_SERVER_LOG;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_SERVER_LOG_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSUtil.getBindAddress;
 import static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.*;
 import static org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap.CheckedFunction2;
@@ -87,13 +89,17 @@ public class InMemoryLevelDBAliasMapServer implements InMemoryAliasMapProtocol,
         DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS_DEFAULT,
         DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_BIND_HOST);
 
+    boolean setVerbose = conf.getBoolean(
+        DFS_PROVIDED_ALIASMAP_INMEMORY_SERVER_LOG,
+        DFS_PROVIDED_ALIASMAP_INMEMORY_SERVER_LOG_DEFAULT);
+
     aliasMapServer = new RPC.Builder(conf)
         .setProtocol(AliasMapProtocolPB.class)
         .setInstance(aliasMapProtocolService)
         .setBindAddress(rpcAddress.getHostName())
         .setPort(rpcAddress.getPort())
         .setNumHandlers(1)
-        .setVerbose(true)
+        .setVerbose(setVerbose)
         .build();
 
     LOG.info("Starting InMemoryLevelDBAliasMapServer on {}", rpcAddress);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1a59b16/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
-
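
A small sketch of turning the new switch on; the key and its false default
come straight from the DFSConfigKeys hunk above, the Configuration usage is
standard Hadoop:

    import org.apache.hadoop.conf.Configuration;

    public class AliasMapVerbosity {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Default is false, so the RPC server no longer logs every call
        // unless an operator opts in:
        conf.setBoolean("dfs.provided.aliasmap.inmemory.server.log", true);
        System.out.println(conf.getBoolean(
            "dfs.provided.aliasmap.inmemory.server.log", false));
      }
    }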

[1/3] hadoop git commit: Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter)

2018-08-07 Thread szegedim
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 9ed62cbf3 -> 2a6b62655


Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter)

(cherry picked from commit 351cf87c92872d90f62c476f85ae4d02e485769c)
(cherry picked from commit d61d84279f7f22867c23dd95e8bfeb70ea7e0690)
(cherry picked from commit f5fd5aa025c904e9a2ff8c5fd932aaed2363a6a0)
(cherry picked from commit e20a840174bc2b27fcc0935e0086977bd6fbfcb3)

Conflicts:
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCgroups.md


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/883f082e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/883f082e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/883f082e

Branch: refs/heads/branch-2.8
Commit: 883f082efe206446e6ded58502d1006a0aebb237
Parents: 9ed62cb
Author: Robert Kanter 
Authored: Thu Jun 7 17:09:34 2018 -0700
Committer: Miklos Szegedi 
Committed: Thu Aug 2 07:05:17 2018 +

--
 .../impl/container-executor.c   | 57 
 .../impl/container-executor.h   |  3 ++
 .../main/native/container-executor/impl/main.c  | 19 ---
 .../src/site/markdown/NodeManagerCgroups.md | 10 
 4 files changed, 72 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/883f082e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 0b86586..848358d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -67,6 +67,8 @@ static const int DEFAULT_MIN_USERID = 1000;
 
 static const char* DEFAULT_BANNED_USERS[] = {"mapred", "hdfs", "bin", 0};
 
+static const int DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED = 0;
+
 //location of traffic control binary
 static const char* TC_BIN = "/sbin/tc";
 static const char* TC_MODIFY_STATE_OPTS [] = { "-b" , NULL};
@@ -1065,6 +1067,12 @@ int is_tc_support_enabled() {
     return is_feature_enabled(TC_SUPPORT_ENABLED_KEY,
         DEFAULT_TC_SUPPORT_ENABLED);
 }
+
+int is_mount_cgroups_support_enabled() {
+    return is_feature_enabled(MOUNT_CGROUP_SUPPORT_ENABLED_KEY,
+        DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED);
+}
+
 /**
  * Function to prepare the application directories for the container.
  */
@@ -2187,20 +2195,25 @@ void chown_dir_contents(const char *dir_path, uid_t 
uid, gid_t gid) {
   DIR *dp;
   struct dirent *ep;
 
-  char *path_tmp = malloc(strlen(dir_path) + NAME_MAX + 2);
+  size_t len = strlen(dir_path) + NAME_MAX + 2;
+  char *path_tmp = malloc(len);
   if (path_tmp == NULL) {
 return;
   }
 
-  char *buf = stpncpy(path_tmp, dir_path, strlen(dir_path));
-  *buf++ = '/';
-
   dp = opendir(dir_path);
   if (dp != NULL) {
 while ((ep = readdir(dp)) != NULL) {
-  stpncpy(buf, ep->d_name, strlen(ep->d_name));
-  buf[strlen(ep->d_name)] = '\0';
-  change_owner(path_tmp, uid, gid);
+  if (strcmp(ep->d_name, ".") != 0 &&
+  strcmp(ep->d_name, "..") != 0 &&
+  strstr(ep->d_name, "..") == NULL) {
+int result = snprintf(path_tmp, len, "%s/%s", dir_path, ep->d_name);
+if (result > 0 && result < len) {
+  change_owner(path_tmp, uid, gid);
+} else {
+  fprintf(LOGFILE, "Ignored %s/%s due to length", dir_path, ep->d_name);
+}
+  }
 }
 closedir(dp);
   }
@@ -2223,13 +2236,29 @@ int mount_cgroup(const char *pair, const char *hierarchy) {
   char *mount_path = malloc(strlen(pair));
   char hier_path[EXECUTOR_PATH_MAX];
   int result = 0;
+  size_t len = strlen(pair);
 
-  if (get_kv_key(pair, controller, strlen(pair)) < 0 ||
-  get_kv_value(pair, mount_path, strlen(pair)) < 0) {
+  if (controller == NULL || mount_path == NULL) {
+fprintf(LOGFILE, "Failed to mount cgroup controller; not enough memory\n"
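
Two points in this change are worth calling out. First, mounting cgroups is now
an opt-in feature: is_mount_cgroups_support_enabled() consults
is_feature_enabled() with MOUNT_CGROUP_SUPPORT_ENABLED_KEY (the key string
itself lives in container-executor.h and is not shown in this digest), and the
default is 0, so the container-executor refuses to mount cgroup controllers
unless the cluster admin explicitly enables the feature in its configuration.
Second, chown_dir_contents() no longer builds child paths with stpncpy() calls
that left the buffer without a NUL terminator; it uses a bounded snprintf() and
skips ".", "..", and any entry containing "..". A minimal, self-contained
sketch of that bounded path-join pattern, with illustrative names standing in
for the change_owner()/LOGFILE plumbing of the committed code:

#include <dirent.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch of the bounded path-join pattern from the chown_dir_contents fix.
 * apply_to_entries() stands in for the chown loop and fn stands in for
 * change_owner(); this is illustrative, not the committed implementation. */
static void apply_to_entries(const char *dir_path,
                             void (*fn)(const char *path)) {
  size_t len = strlen(dir_path) + NAME_MAX + 2;  /* dir + '/' + name + NUL */
  char *path_tmp = malloc(len);
  if (path_tmp == NULL) {
    return;
  }
  DIR *dp = opendir(dir_path);
  if (dp != NULL) {
    struct dirent *ep;
    while ((ep = readdir(dp)) != NULL) {
      /* Skip the self/parent links and anything containing "..". */
      if (strcmp(ep->d_name, ".") != 0 && strcmp(ep->d_name, "..") != 0 &&
          strstr(ep->d_name, "..") == NULL) {
        /* snprintf always NUL-terminates and reports the would-be length,
         * so an over-long name is detected rather than silently truncated. */
        int result = snprintf(path_tmp, len, "%s/%s", dir_path, ep->d_name);
        if (result > 0 && (size_t)result < len) {
          fn(path_tmp);
        } else {
          fprintf(stderr, "Ignored %s/%s due to length\n",
                  dir_path, ep->d_name);
        }
      }
    }
    closedir(dp);
  }
  free(path_tmp);
}

Because snprintf() returns the length the formatted string would have needed,
an over-long dir_path/d_name combination is skipped instead of being silently
truncated and handed to change_owner() as the wrong path.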

[2/3] hadoop git commit: Only mount non-empty directories for cgroups (miklos.szeg...@cloudera.com via rkanter)

2018-08-07 Thread szegedim
Only mount non-empty directories for cgroups (miklos.szeg...@cloudera.com via rkanter)

(cherry picked from commit 0838fe833738e04f5e6f6408e97866d77bebbf30)
(cherry picked from commit c1dc4ca2c6080377159157ce97bf5d72fa3285a1)
(cherry picked from commit 92f02f97fd8e8306fda7374b5180a633622f9636)
(cherry picked from commit 4328a7e0ed755883d9cf6c84e1c07b34f6368266)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/560ecf93
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/560ecf93
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/560ecf93

Branch: refs/heads/branch-2.8
Commit: 560ecf93d248c0f5ca66b521609eb228005c211b
Parents: 883f082
Author: Robert Kanter 
Authored: Mon Jul 9 10:37:20 2018 -0700
Committer: Miklos Szegedi 
Committed: Thu Aug 2 07:08:45 2018 +

--
 .../impl/container-executor.c   | 30 +++-
 .../test/test-container-executor.c  | 20 +
 2 files changed, 49 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/560ecf93/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 848358d..28a924a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -2221,6 +2221,28 @@ void chown_dir_contents(const char *dir_path, uid_t uid, gid_t gid) {
   free(path_tmp);
 }
 
+int is_empty(char *target_dir) {
+  DIR *dir = NULL;
+  struct dirent *entry = NULL;
+  dir = opendir(target_dir);
+  if (!dir) {
+fprintf(LOGFILE, "Could not open directory %s - %s\n", target_dir,
+strerror(errno));
+return 0;
+  }
+  while ((entry = readdir(dir)) != NULL) {
+if (strcmp(entry->d_name, ".") == 0) {
+  continue;
+}
+if (strcmp(entry->d_name, "..") == 0) {
+  continue;
+}
+fprintf(LOGFILE, "Directory is not empty %s\n", target_dir);
+return 0;
+  }
+  return 1;
+}
+
 /**
  * Mount a cgroup controller at the requested mount point and create
  * a hierarchy for the Hadoop NodeManager to manage.
@@ -2255,7 +2277,13 @@ int mount_cgroup(const char *pair, const char *hierarchy) {
 result = -1;
   } else {
 if (strstr(mount_path, "..") != NULL) {
-  fprintf(LOGFILE, "Unsupported cgroup mount path detected.\n");
+  fprintf(LOGFILE, "Unsupported cgroup mount path detected. %s\n",
+  mount_path);
+  result = INVALID_COMMAND_PROVIDED;
+  goto cleanup;
+}
+if (!is_empty(mount_path)) {
+  fprintf(LOGFILE, "cgroup mount path is not empty. %s\n", mount_path);
   result = INVALID_COMMAND_PROVIDED;
   goto cleanup;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/560ecf93/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 634aa05..871efed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -995,6 +995,23 @@ void test_get_docker_binary() {
   free(docker_binary_user);
 }
 
+void test_is_empty() {
+  printf("\nTesting is_empty function\n");
+  if (is_empty("/")) {
+printf("FAIL: / should not be empty\n");
+exit(1);
+  }
+  if (is_empty("/tmp/2938rf2983hcqnw8ud/noexist")) {
+printf("FAIL: /tmp/2938rf2983hcqnw8ud/noexist should not exist\n");
+exit(1);
+  }
+  mkdir("/tmp/2938rf2983hcqnw8ud/emptydir", S_IRWXU);
+  if (!is_empty("/tmp/2938rf2983hcqnw8ud/emptydir")) {
+printf("FAIL: /tmp/2938rf2983hcqnw8ud/emptydir be empty\n");
+exit(1);
+  }
+}
+
 // This 
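
The is_empty() gate above treats both an unopenable directory and one holding
any entry besides "." and ".." as non-empty, so mount_cgroup() refuses to
mount over it either way. Note that the committed version returns without
closing the DIR handle; a self-contained variant of the same check that
releases the handle (illustrative code, not the committed implementation):

#include <dirent.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Same emptiness check as is_empty() above, but closing the DIR handle on
 * every path. Returns 1 only for a readable, empty directory. */
static int is_empty_dir(const char *target_dir) {
  DIR *dir = opendir(target_dir);
  if (dir == NULL) {
    fprintf(stderr, "Could not open directory %s - %s\n", target_dir,
            strerror(errno));
    return 0;   /* unreadable counts as non-empty: do not mount over it */
  }
  int empty = 1;
  struct dirent *entry;
  while ((entry = readdir(dir)) != NULL) {
    if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
      continue;   /* these two links exist in every directory */
    }
    empty = 0;    /* any other entry means the mount point is in use */
    break;
  }
  closedir(dir);
  return empty;
}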

[3/3] hadoop git commit: YARN-8518. test-container-executor test_is_empty() is broken (Jim_Brennan via rkanter)

2018-08-07 Thread szegedim
YARN-8518. test-container-executor test_is_empty() is broken (Jim_Brennan via rkanter)

(cherry picked from commit 1bc106a738a6ce4f7ed025d556bb44c1ede022e3)
(cherry picked from commit 6e0db6fe1a8ce50977175567f2ba1f957e7b9c91)
(cherry picked from commit edb9d8b55419dabf5b8ace678e5ddb5cd559972b)
(cherry picked from commit d9b9c9125815b20ef63ba65c4c2394c89345be9c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a6b6265
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a6b6265
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a6b6265

Branch: refs/heads/branch-2.8
Commit: 2a6b6265501adfeb54d8e8485e00fee7f12f0b65
Parents: 560ecf9
Author: Robert Kanter 
Authored: Thu Jul 12 16:38:46 2018 -0700
Committer: Miklos Szegedi 
Committed: Thu Aug 2 07:09:03 2018 +

--
 .../container-executor/test/test-container-executor.c | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a6b6265/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 871efed..63398e8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -995,19 +995,23 @@ void test_get_docker_binary() {
   free(docker_binary_user);
 }
 
+int is_empty(char *name);
+
 void test_is_empty() {
   printf("\nTesting is_empty function\n");
   if (is_empty("/")) {
 printf("FAIL: / should not be empty\n");
 exit(1);
   }
-  if (is_empty("/tmp/2938rf2983hcqnw8ud/noexist")) {
-printf("FAIL: /tmp/2938rf2983hcqnw8ud/noexist should not exist\n");
+  char *noexist = TEST_ROOT "/noexist";
+  if (is_empty(noexist)) {
+printf("%s should not exist\n", noexist);
 exit(1);
   }
-  mkdir("/tmp/2938rf2983hcqnw8ud/emptydir", S_IRWXU);
-  if (!is_empty("/tmp/2938rf2983hcqnw8ud/emptydir")) {
-printf("FAIL: /tmp/2938rf2983hcqnw8ud/emptydir be empty\n");
+  char *emptydir = TEST_ROOT "/emptydir";
+  mkdir(emptydir, S_IRWXU);
+  if (!is_empty(emptydir)) {
+printf("FAIL: %s should be empty\n", emptydir);
 exit(1);
   }
 }
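
The previous version hardcoded paths under /tmp/2938rf2983hcqnw8ud, a parent
directory nothing ever created, so the mkdir() failed and the empty-directory
assertion could never pass. The fix moves both paths under TEST_ROOT, which
the test harness sets up, and forward-declares is_empty() so the test file can
call the definition linked in from container-executor.c. The paths rely on
C's compile-time concatenation of adjacent string literals; a tiny sketch
(TEST_ROOT's value below is an assumption for illustration):

#include <stdio.h>

/* Hypothetical value; the real macro is defined by the test harness. */
#define TEST_ROOT "/tmp/test-container-executor"

int main(void) {
  /* Adjacent string literals concatenate at compile time. */
  const char *noexist  = TEST_ROOT "/noexist";   /* ".../noexist" */
  const char *emptydir = TEST_ROOT "/emptydir";
  printf("%s\n%s\n", noexist, emptydir);
  return 0;
}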





hadoop git commit: HDDS-301. ozone command shell does not contain subcommand to run ozoneFS commands. Contributed by Nilotpal Nandi.

2018-08-07 Thread msingh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 900c0e114 -> 6ed8593d1


HDDS-301. ozone command shell does not contain subcommand to run ozoneFS commands. Contributed by Nilotpal Nandi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ed8593d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ed8593d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ed8593d

Branch: refs/heads/trunk
Commit: 6ed8593d180fe653f78f0a210478555338c4685a
Parents: 900c0e1
Author: Mukul Kumar Singh 
Authored: Tue Aug 7 16:09:53 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Tue Aug 7 16:09:53 2018 +0530

--
 .../hadoop-common/src/main/bin/hadoop-functions.sh   | 1 +
 .../acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot| 4 ++--
 .../src/test/acceptance/ozonefs/ozonesinglenode.robot| 4 ++--
 hadoop-ozone/common/src/main/bin/ozone   | 4 
 hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh  | 1 +
 5 files changed, 10 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ed8593d/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index cbedd972..71ba7ff 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -600,6 +600,7 @@ function hadoop_bootstrap
   HDDS_LIB_JARS_DIR=${HDDS_LIB_JARS_DIR:-"share/hadoop/hdds/lib"}
   OZONE_DIR=${OZONE_DIR:-"share/hadoop/ozone"}
   OZONE_LIB_JARS_DIR=${OZONE_LIB_JARS_DIR:-"share/hadoop/ozone/lib"}
+  OZONEFS_DIR=${OZONEFS_DIR:-"share/hadoop/ozonefs"}
 
   HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_HOME}}
   HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ed8593d/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot
--
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot
index ea473c0..1d3aa4b 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot
@@ -31,9 +31,9 @@ Create volume and bucket
     Execute on      datanode        ozone oz -createBucket http://ozoneManager/fstest/bucket1
 
 Check volume from ozonefs
-    ${result} =     Execute on      hadooplast      hdfs dfs -ls o3://bucket1.fstest/
+    ${result} =     Execute on      datanode        ozone fs -ls o3://bucket1.fstest/
 
 Create directory from ozonefs
-    Execute on      hadooplast      hdfs dfs -mkdir -p o3://bucket1.fstest/testdir/deep
+    Execute on      datanode        ozone fs -mkdir -p o3://bucket1.fstest/testdir/deep
     ${result} =     Execute on      ozoneManager    ozone oz -listKey o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
     Should contain  ${result}       testdir/deep

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ed8593d/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot
--
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot
index b844cee..a1a5189 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot
@@ -31,10 +31,10 @@ Create volume and bucket
     Execute on      datanode        ozone oz -createBucket http://ozoneManager/fstest/bucket1
 
 Check volume from ozonefs
-    ${result} =     Execute on      hadooplast      hdfs dfs -ls o3://bucket1.fstest/
+    ${result} =     Execute on      datanode        ozone fs -ls o3://bucket1.fstest/
 
 Create directory from ozonefs
-    Execute on      hadooplast      hdfs dfs -mkdir -p o3://bucket1.fstest/testdir/deep
+    Execute on      datanode        ozone fs -mkdir -p o3://bucket1.fstest/testdir/deep
     ${result} =     Execute on      ozoneManager    ozone oz -listKey o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
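
With this change the ozone launcher gains an fs subcommand, so filesystem
operations against an o3:// bucket can be run from the Ozone distribution
itself, for example ozone fs -ls o3://bucket1.fstest/ or
ozone fs -mkdir -p o3://bucket1.fstest/testdir/deep, as the updated acceptance
tests above now do, instead of going through an hdfs client. Per the diffstat,
the ozone bin script and its shellprofile register the subcommand, and
hadoop_bootstrap's new OZONEFS_DIR default points at the share/hadoop/ozonefs
layout directory, presumably so the ozonefs jar can be located at runtime.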

hadoop git commit: HDDS-230. ContainerStateMachine should implement readStateMachineData api to read data from Containers if required during replication. Contributed by Mukul Kumar Singh.

2018-08-07 Thread msingh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2e4e02b4d -> 900c0e114


HDDS-230. ContainerStateMachine should implement readStateMachineData api to read data from Containers if required during replication. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/900c0e11
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/900c0e11
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/900c0e11

Branch: refs/heads/trunk
Commit: 900c0e114f391f4dbf21a0e08a63c2cf22659eb7
Parents: 2e4e02b
Author: Mukul Kumar Singh 
Authored: Tue Aug 7 15:03:14 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Tue Aug 7 15:03:14 2018 +0530

--
 .../server/ratis/ContainerStateMachine.java | 142 ---
 .../server/ratis/XceiverServerRatis.java|  10 +-
 .../org/apache/hadoop/ozone/om/OMMetrics.java   |   2 +-
 hadoop-project/pom.xml  |   2 +-
 4 files changed, 129 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/900c0e11/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
--
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index c0dd0ba..15e991a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -19,20 +19,26 @@
 package org.apache.hadoop.ozone.container.common.transport.server.ratis;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.ratis.protocol.RaftGroupId;
+import org.apache.ratis.server.RaftServer;
 import org.apache.ratis.shaded.com.google.protobuf
 .InvalidProtocolBufferException;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Stage;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .WriteChunkRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+.ReadChunkRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+.ReadChunkResponseProto;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.ratis.conf.RaftProperties;
 import org.apache.ratis.protocol.Message;
 import org.apache.ratis.protocol.RaftClientRequest;
-import org.apache.ratis.protocol.RaftPeerId;
 import org.apache.ratis.server.storage.RaftStorage;
 import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.apache.ratis.shaded.proto.RaftProtos.LogEntryProto;
@@ -96,16 +102,16 @@ public class ContainerStateMachine extends BaseStateMachine {
   private final SimpleStateMachineStorage storage
   = new SimpleStateMachineStorage();
   private final ContainerDispatcher dispatcher;
-  private ThreadPoolExecutor writeChunkExecutor;
+  private ThreadPoolExecutor chunkExecutor;
  private final ConcurrentHashMap<Long, CompletableFuture<Message>>
      writeChunkFutureMap;
  private final ConcurrentHashMap<Long, CompletableFuture<Message>>
      createContainerFutureMap;
 
   ContainerStateMachine(ContainerDispatcher dispatcher,
-  ThreadPoolExecutor writeChunkExecutor) {
+  ThreadPoolExecutor chunkExecutor) {
 this.dispatcher = dispatcher;
-this.writeChunkExecutor = writeChunkExecutor;
+this.chunkExecutor = chunkExecutor;
 this.writeChunkFutureMap = new ConcurrentHashMap<>();
 this.createContainerFutureMap = new ConcurrentHashMap<>();
   }
@@ -117,9 +123,9 @@ public class ContainerStateMachine extends BaseStateMachine {
 
   @Override
   public void initialize(
-  RaftPeerId id, RaftProperties properties, RaftStorage raftStorage)
+  RaftServer server, RaftGroupId id, RaftStorage raftStorage)
   throws IOException {
-super.initialize(id, properties, raftStorage);
+super.initialize(server, id, raftStorage);
 storage.init(raftStorage);
 //  TODO handle snapshots
 
@@ -134,13 +140,13 @@ public class ContainerStateMachine exte
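
Context for the renames above: writeChunkExecutor becomes chunkExecutor,
suggesting the same thread pool now serves chunk reads as well as writes. Per
the commit summary, a WriteChunk entry's data may not be recoverable from the
Ratis log alone, so readStateMachineData is the state-machine hook that reads
it back out of the local container when needed during replication. The
initialize() signature change (RaftServer and RaftGroupId in place of
RaftPeerId and RaftProperties) tracks a Ratis API change, likely the version
bump visible in the hadoop-project/pom.xml line of the diffstat.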