[hadoop] branch trunk updated: BytesWritable causes OOME when array size reaches Integer.MAX_VALUE. (#393)

2020-05-12 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d60496e  BytesWritable causes OOME when array size reaches Integer.MAX_VALUE. (#393)
d60496e is described below

commit d60496e6c6673680e3299a3f995fd9b368123e3d
Author: Joseph Smith 
AuthorDate: Tue May 12 13:50:35 2020 -0500

BytesWritable causes OOME when array size reaches Integer.MAX_VALUE. (#393)
---
 .../src/main/java/org/apache/hadoop/io/BytesWritable.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java
index a81bc24..c5538c9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceStability.Stable
 public class BytesWritable extends BinaryComparable
     implements WritableComparable<BinaryComparable> {
+  private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
   private static final int LENGTH_BYTES = 4;
 
   private static final byte[] EMPTY_BYTES = new byte[0];
@@ -126,7 +127,7 @@ public class BytesWritable extends BinaryComparable
   public void setSize(int size) {
     if (size > getCapacity()) {
       // Avoid overflowing the int too early by casting to a long.
-      long newSize = Math.min(Integer.MAX_VALUE, (3L * size) / 2L);
+      long newSize = Math.min(MAX_ARRAY_SIZE, (3L * size) / 2L);
       setCapacity((int) newSize);
     }
     this.size = size;
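The cap exists because a JVM cannot hand out an array of exactly Integer.MAX_VALUE elements: a few words are reserved for the array header, so the allocation fails with an OutOfMemoryError ("Requested array size exceeds VM limit") even when the heap has room. A self-contained sketch of the growth logic the patch arrives at (the class below is illustrative, not part of the commit):

public class GrowthDemo {
  // Same headroom the patch introduces: leave room for the array header.
  private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

  // Grow capacity by 1.5x in long arithmetic (so 3 * size cannot
  // overflow int), then clamp to the largest safely allocatable size.
  static int newCapacity(int size) {
    return (int) Math.min(MAX_ARRAY_SIZE, (3L * size) / 2L);
  }

  public static void main(String[] args) {
    System.out.println(newCapacity(100));           // 150
    System.out.println(newCapacity(2_000_000_000)); // clamped to 2147483639
  }
}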





[hadoop] branch ozone-0.4.1 updated (ddb395d -> 8a83e16)

2019-10-13 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a change to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from ddb395d  HDDS-2226. S3 Secrets should use a strong RNG. (#1572)
 add 8a83e16  Preparing for release 0.4.1-alpha.

No new revisions were added by this update.

Summary of changes:
 hadoop-hdds/client/pom.xml  | 4 ++--
 hadoop-hdds/common/pom.xml  | 6 +++---
 hadoop-hdds/config/pom.xml  | 4 ++--
 hadoop-hdds/container-service/pom.xml   | 4 ++--
 hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md  | 4 ++--
 hadoop-hdds/docs/pom.xml| 4 ++--
 hadoop-hdds/framework/pom.xml   | 4 ++--
 hadoop-hdds/pom.xml | 4 ++--
 hadoop-hdds/server-scm/pom.xml  | 4 ++--
 hadoop-hdds/tools/pom.xml   | 4 ++--
 hadoop-ozone/Jenkinsfile| 2 +-
 hadoop-ozone/client/pom.xml | 4 ++--
 hadoop-ozone/common/pom.xml | 4 ++--
 hadoop-ozone/csi/pom.xml| 4 ++--
 hadoop-ozone/datanode/pom.xml   | 4 ++--
 hadoop-ozone/dist/pom.xml   | 4 ++--
 hadoop-ozone/fault-injection-test/network-tests/pom.xml | 2 +-
 hadoop-ozone/fault-injection-test/pom.xml   | 4 ++--
 hadoop-ozone/integration-test/pom.xml   | 4 ++--
 hadoop-ozone/objectstore-service/pom.xml| 4 ++--
 hadoop-ozone/ozone-manager/pom.xml  | 4 ++--
 hadoop-ozone/ozone-recon-codegen/pom.xml| 2 +-
 hadoop-ozone/ozone-recon/pom.xml| 2 +-
 hadoop-ozone/ozonefs-lib-current/pom.xml| 4 ++--
 hadoop-ozone/ozonefs-lib-legacy/pom.xml | 4 ++--
 hadoop-ozone/ozonefs/pom.xml| 4 ++--
 hadoop-ozone/pom.xml| 6 +++---
 hadoop-ozone/s3gateway/pom.xml  | 4 ++--
 hadoop-ozone/tools/pom.xml  | 4 ++--
 hadoop-ozone/upgrade/pom.xml| 4 ++--
 pom.ozone.xml   | 2 +-
 31 files changed, 59 insertions(+), 59 deletions(-)





[hadoop] annotated tag ozone-0.4.1-alpha created (now 687173f)

2019-10-13 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a change to annotated tag ozone-0.4.1-alpha
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at 687173f  (tag)
 tagging 8a83e16da081434e3e2d52fd545391d6bfb47952 (commit)
  by Nanda kumar
  on Sun Oct 13 18:21:04 2019 +0530

- Log -
HDDS-2028. Ozone 0.4.1 Release
-----BEGIN PGP SIGNATURE-----

iQJFBAABCAAvFiEEwYvqJzsezX8Fpk0czmyKsSBHgN8FAl2jHbkRHG5hbmRhQGFw
YWNoZS5vcmcACgkQzmyKsSBHgN+5XA/+NnGc/f1fIGY/sNdAa+k9M0Vifh/8V22x
McvRvHU0m9PIhFeaW8Jt+pY2nLDZYyFciu4GBmJQpYt4xWmpKdSCcN7QKJiTR29F
SFRoWLwd/aIYLi7i72IHgJNpNrNHElfXKVkZyG0wqTAEzaB45QyQeDrIg9ZQyYD+
w5HGvGwZrJc9LNHEYm2ZuYJ16aIJ61lXmQ7d9LD95Jb1baxQyUYvMp7ve0oBQh79
zaPWtzdRD921Wy1lRBOW8nBuIHG5jSkA0i6/cNfD9QoEyekm8ZW0DBE5X6aWccWa
FWE5VeERQK8KApcqw+bIPOVk1IJrNRWsVN2PwzC3sC1cqHs+jKng7axbuWY+zzhG
xHE68AqnhGyijBo59PD3OcaeTWgAX2qLDUvkaL+Ley86ybOgnnAOKzrZ/D1J+Zw2
fuY1PeWik7dTkBaeUxHLLGEhtlbmZ3qmaZRbgexrZTEgX3iPvez25aNbZtluMwb2
J5UM3YgjlfmXHQceo73rq/BEJOwYJuhCc3VryWti1i0Bf9Zhb4sb4pFioEtobd0N
lVNkjNSVhWh5dMcB4Wc4fWKNONugWwSJksnEP6aRNlT6DB03yqvuGIwYyk0Jce8y
UsPnyeh6B6jwl+dS5wZZUOkqV82G48IThuql5CNK6k2eSOGi6K5kO2MypiMQWV6n
wvQC1KCOvjU=
=SaAl
-----END PGP SIGNATURE-----
---

No new revisions were added by this update.





[hadoop] branch trunk updated (effe608 -> 4850b3a)

2019-10-10 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from effe608  HADOOP-16650. ITestS3AClosedFS failing.
 add 4850b3a  HDDS-2269. Provide config for fair/non-fair for OM RW Lock. (#1623)

No new revisions were added by this update.

Summary of changes:
 .../java/org/apache/hadoop/ozone/OzoneConfigKeys.java |  3 +++
 .../java/org/apache/hadoop/ozone/lock/ActiveLock.java | 11 +++
 .../org/apache/hadoop/ozone/lock/LockManager.java | 19 ---
 .../apache/hadoop/ozone/lock/PooledLockFactory.java   |  7 ++-
 .../common/src/main/resources/ozone-default.xml   | 11 +++
 .../apache/hadoop/ozone/om/lock/OzoneManagerLock.java |  7 ++-
 6 files changed, 49 insertions(+), 9 deletions(-)
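For reference, java.util.concurrent's ReentrantReadWriteLock exposes fairness only as a constructor flag, so a config like this gets plumbed through at lock-creation time. A minimal sketch of the trade-off (the property name below is illustrative, not the key this patch adds):

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class FairnessDemo {
  public static void main(String[] args) {
    // Fair mode grants the lock to the longest-waiting thread and avoids
    // writer starvation; non-fair mode allows barging and usually gives
    // higher throughput.
    boolean fair = Boolean.parseBoolean(
        System.getProperty("lock.fair", "false")); // illustrative key
    ReentrantReadWriteLock lock = new ReentrantReadWriteLock(fair);
    lock.writeLock().lock();
    try {
      System.out.println("fair = " + lock.isFair());
    } finally {
      lock.writeLock().unlock();
    }
  }
}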





[hadoop] 01/01: Preparing for release 0.4.1-alpha.

2019-10-04 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to annotated tag ozone-0.4.1-alpha-RC0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 8a83e16da081434e3e2d52fd545391d6bfb47952
Author: Nanda kumar 
AuthorDate: Fri Oct 4 12:33:16 2019 +0530

Preparing for release 0.4.1-alpha.
---
 hadoop-hdds/client/pom.xml  | 4 ++--
 hadoop-hdds/common/pom.xml  | 6 +++---
 hadoop-hdds/config/pom.xml  | 4 ++--
 hadoop-hdds/container-service/pom.xml   | 4 ++--
 hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md  | 4 ++--
 hadoop-hdds/docs/pom.xml| 4 ++--
 hadoop-hdds/framework/pom.xml   | 4 ++--
 hadoop-hdds/pom.xml | 4 ++--
 hadoop-hdds/server-scm/pom.xml  | 4 ++--
 hadoop-hdds/tools/pom.xml   | 4 ++--
 hadoop-ozone/Jenkinsfile| 2 +-
 hadoop-ozone/client/pom.xml | 4 ++--
 hadoop-ozone/common/pom.xml | 4 ++--
 hadoop-ozone/csi/pom.xml| 4 ++--
 hadoop-ozone/datanode/pom.xml   | 4 ++--
 hadoop-ozone/dist/pom.xml   | 4 ++--
 hadoop-ozone/fault-injection-test/network-tests/pom.xml | 2 +-
 hadoop-ozone/fault-injection-test/pom.xml   | 4 ++--
 hadoop-ozone/integration-test/pom.xml   | 4 ++--
 hadoop-ozone/objectstore-service/pom.xml| 4 ++--
 hadoop-ozone/ozone-manager/pom.xml  | 4 ++--
 hadoop-ozone/ozone-recon-codegen/pom.xml| 2 +-
 hadoop-ozone/ozone-recon/pom.xml| 2 +-
 hadoop-ozone/ozonefs-lib-current/pom.xml| 4 ++--
 hadoop-ozone/ozonefs-lib-legacy/pom.xml | 4 ++--
 hadoop-ozone/ozonefs/pom.xml| 4 ++--
 hadoop-ozone/pom.xml| 6 +++---
 hadoop-ozone/s3gateway/pom.xml  | 4 ++--
 hadoop-ozone/tools/pom.xml  | 4 ++--
 hadoop-ozone/upgrade/pom.xml| 4 ++--
 pom.ozone.xml   | 2 +-
 31 files changed, 59 insertions(+), 59 deletions(-)

diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml
index 1f139d7..ed19115 100644
--- a/hadoop-hdds/client/pom.xml
+++ b/hadoop-hdds/client/pom.xml
@@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-hdds</artifactId>
-    <version>0.4.1-SNAPSHOT</version>
+    <version>0.4.1-alpha</version>
   </parent>
 
   <artifactId>hadoop-hdds-client</artifactId>
-  <version>0.4.1-SNAPSHOT</version>
+  <version>0.4.1-alpha</version>
   <description>Apache Hadoop Distributed Data Store Client Library</description>
   <name>Apache Hadoop HDDS Client</name>
   <packaging>jar</packaging>
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index 5fc4a5e..207f474 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -20,16 +20,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-hdds</artifactId>
-    <version>0.4.1-SNAPSHOT</version>
+    <version>0.4.1-alpha</version>
   </parent>
   <artifactId>hadoop-hdds-common</artifactId>
-  <version>0.4.1-SNAPSHOT</version>
+  <version>0.4.1-alpha</version>
   <description>Apache Hadoop Distributed Data Store Common</description>
   <name>Apache Hadoop HDDS Common</name>
   <packaging>jar</packaging>
 
   <properties>
-    <hdds.version>0.4.1-SNAPSHOT</hdds.version>
+    <hdds.version>0.4.1-alpha</hdds.version>
     2.11.0
     3.4.2
     ${hdds.version}
diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml
index 880faa1..4143a3b 100644
--- a/hadoop-hdds/config/pom.xml
+++ b/hadoop-hdds/config/pom.xml
@@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-hdds</artifactId>
-    <version>0.4.1-SNAPSHOT</version>
+    <version>0.4.1-alpha</version>
   </parent>
   <artifactId>hadoop-hdds-config</artifactId>
-  <version>0.4.1-SNAPSHOT</version>
+  <version>0.4.1-alpha</version>
   <description>Apache Hadoop Distributed Data Store Config Tools</description>
   <name>Apache Hadoop HDDS Config</name>
   <packaging>jar</packaging>
diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml
index 730c1ab..2b66a5c 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-hdds</artifactId>
-    <version>0.4.1-SNAPSHOT</version>
+    <version>0.4.1-alpha</version>
   </parent>
   <artifactId>hadoop-hdds-container-service</artifactId>
-  <version>0.4.1-SNAPSHOT</version>
+  <version>0.4.1-alpha</version>
   <description>Apache Hadoop Distributed Data Store Container Service</description>
   <name>Apache Hadoop HDDS Container Service</name>
   <packaging>jar</packaging>
diff --git a/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md b/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md
index 7202ebd..1fc9155 100644
--- a/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md
+++ b/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md
@@ -88,7 +88,7 @@ _Note_: You may also use `org.apache.hadoop.fs.ozone.OzoneFileSystem` without th
 Copy the `ozonefs.jar` file from an ozone distribution (__use the legacy version!__)
 
 ```
-kubectl cp om-0:/opt/hadoop/share/ozone/lib/hadoop-ozone-filesystem-lib-

[hadoop] annotated tag ozone-0.4.1-alpha-RC0 created (now 9062dac)

2019-10-04 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a change to annotated tag ozone-0.4.1-alpha-RC0
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at 9062dac  (tag)
 tagging 8a83e16da081434e3e2d52fd545391d6bfb47952 (commit)
  by Nanda kumar
  on Fri Oct 4 13:15:45 2019 +0530

- Log -
Apache Hadoop Ozone 0.4.1-alpha RC0 release.
-----BEGIN PGP SIGNATURE-----

iQJFBAABCAAvFiEEwYvqJzsezX8Fpk0czmyKsSBHgN8FAl2W+O4RHG5hbmRhQGFw
YWNoZS5vcmcACgkQzmyKsSBHgN+QOA/6AlhzccUSD87ROsGYzMzNcB/uWPhYu3AC
7QWgYmTfEld4k4ipyQLmLaMGqrmtubaJr683v76HkDBkWeyMMs7iZOLRWkSF98gm
8VVHgLUPS608p/l9eaA0Bn60SNP+fGQ3SqOOWyI1atQbZhr1cN1kmoU73uhsBS9J
4jDlWe45G8xil56a4DVElimyLGeCnpeP5MmdUmoO1QrJEz5INS/FxK1hCOLK23W5
AFNQwSV7aE6rv/JpZGdFB0Ix24EO1imkVwE9Itj+Td5hDTiwbmjCXJW6OlOSYvMy
TK0qdFXCAXffX6jf1f/ebTz1aFP5zRzgCPVCEEQ2MSjo1tbGKIR5ujvKqhA8cA+n
tBmE7IFpOCMf1IiY52+Jcwd6R75mO3hnRykC/LNstgvN3+HXalCnpRR8p4bMatd5
EJ83fh00pRw8UQytX0AaFvaGAGkVV2KVAI5froaOJGij330IgTwN/gLMJKySMAmd
fm94Hh5buZAs8L6G8p3dbloYq50UTd0Ex55eFrYgxEUtN1S4g5d597cgPgxLdD/v
pg8nszz4g8/yYN0Knd+yAOlGLV6mUCgeFbmvjhKWkhIAEzi/y1Ar/pnNG5TpNFXV
j5JgM8qbVFGVA3001hVWOfFV/gj0gPBZgW8+E57Pej+bWaUtoo0sCBYeywM+2UiC
sadHZ8e5YBA=
=ptWC
-----END PGP SIGNATURE-----
---

This annotated tag includes the following new commits:

 new 8a83e16  Preparing for release 0.4.1-alpha.

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.






[hadoop] branch trunk updated: HDDS-2223. Support ReadWrite lock in LockManager. (#1564)

2019-10-03 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9700e20  HDDS-2223. Support ReadWrite lock in LockManager. (#1564)
9700e20 is described below

commit 9700e2003aa1b7e2c4072a2a08d8827acc5aa779
Author: Nanda kumar 
AuthorDate: Fri Oct 4 08:32:43 2019 +0530

HDDS-2223. Support ReadWrite lock in LockManager. (#1564)
---
 .../org/apache/hadoop/ozone/lock/ActiveLock.java   |  63 ++--
 .../org/apache/hadoop/ozone/lock/LockManager.java  | 166 ++---
 .../apache/hadoop/ozone/lock/TestLockManager.java  | 145 +++---
 3 files changed, 323 insertions(+), 51 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java
index c302084..49efad0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java
@@ -18,22 +18,22 @@
 package org.apache.hadoop.ozone.lock;
 
 import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /**
  * Lock implementation which also maintains counter.
  */
 public final class ActiveLock {
 
-  private Lock lock;
+  private ReadWriteLock lock;
   private AtomicInteger count;
 
   /**
* Use ActiveLock#newInstance to create instance.
*/
   private ActiveLock() {
-this.lock = new ReentrantLock();
+this.lock = new ReentrantReadWriteLock();
 this.count = new AtomicInteger(0);
   }
 
@@ -47,21 +47,58 @@ public final class ActiveLock {
   }
 
   /**
-   * Acquires the lock.
+   * Acquires read lock.
*
-   * If the lock is not available then the current thread becomes
-   * disabled for thread scheduling purposes and lies dormant until the
-   * lock has been acquired.
+   * Acquires the read lock if the write lock is not held by
+   * another thread and returns immediately.
+   *
+   * If the write lock is held by another thread then
+   * the current thread becomes disabled for thread scheduling
+   * purposes and lies dormant until the read lock has been acquired.
+   */
+  void readLock() {
+lock.readLock().lock();
+  }
+
+  /**
+   * Attempts to release the read lock.
+   *
+   * If the number of readers is now zero then the lock
+   * is made available for write lock attempts.
+   */
+  void readUnlock() {
+lock.readLock().unlock();
+  }
+
+  /**
+   * Acquires write lock.
+   *
+   * Acquires the write lock if neither the read nor write lock
+   * are held by another thread
+   * and returns immediately, setting the write lock hold count to
+   * one.
+   *
+   * If the current thread already holds the write lock then the
+   * hold count is incremented by one and the method returns
+   * immediately.
+   *
+   * If the lock is held by another thread then the current
+   * thread becomes disabled for thread scheduling purposes and
+   * lies dormant until the write lock has been acquired.
*/
-  public void lock() {
-lock.lock();
+  void writeLock() {
+lock.writeLock().lock();
   }
 
   /**
-   * Releases the lock.
+   * Attempts to release the write lock.
+   *
+   * If the current thread is the holder of this lock then
+   * the hold count is decremented. If the hold count is now
+   * zero then the lock is released.
*/
-  public void unlock() {
-lock.unlock();
+  void writeUnlock() {
+lock.writeLock().unlock();
   }
 
   /**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
index 5f76bd6..670d4d1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
@@ -25,42 +25,156 @@ import org.slf4j.LoggerFactory;
 
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Consumer;
 
 /**
  * Manages the locks on a given resource. A new lock is created for each
  * and every unique resource. Uniqueness of resource depends on the
  * {@code equals} implementation of it.
  */
-public class LockManager<T> {
+public class LockManager<R> {
 
   private static final Logger LOG = LoggerFactory.getLogger(LockManager.class);
 
-  private final Map<T, ActiveLock> activeLocks = new ConcurrentHashMap<>();
+  private final Map<R, ActiveLock> activeLocks = new ConcurrentHashMap<>();
   private final GenericObjectPool<ActiveLock> lockPool =
       new GenericObjectPool<>(new PooledLockFactory());
 
   /**
-   * Creates new LockManager instance.

[hadoop] 01/02: Revert "HDDS-2101. Ozone filesystem provider doesn't exist (#1473)"

2019-10-03 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 5001c6be03f9e495103757c6ce695baa26aa783b
Author: Nanda kumar 
AuthorDate: Fri Oct 4 08:16:02 2019 +0530

Revert "HDDS-2101. Ozone filesystem provider doesn't exist (#1473)"

This reverts commit 2eb41fb90aec72f1ab69aea7260eb21ab0105cbd.
---
 .../src/main/compose/ozone-mr/hadoop27/docker-config |  1 +
 .../src/main/compose/ozone-mr/hadoop31/docker-config |  1 +
 .../src/main/compose/ozone-mr/hadoop32/docker-config |  1 +
 .../META-INF/services/org.apache.hadoop.fs.FileSystem| 16 
 .../META-INF/services/org.apache.hadoop.fs.FileSystem| 16 
 5 files changed, 3 insertions(+), 32 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config
index fccdace..9e9cc04 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config
@@ -14,5 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.BasicOzoneFileSystem
 
CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.BasicOzFs
 
MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config
index d7ead21..f826c75 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config
@@ -14,5 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
 CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
 
MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config
index d7ead21..f826c75 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config
@@ -14,5 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
 CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
 
MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
diff --git a/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
deleted file mode 100644
index 0368002..0000000
--- a/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.fs.ozone.OzoneFileSystem
diff --git a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
deleted file mode 100644
index 39ca348..0000000
--- a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to 

[hadoop] 02/02: HDDS-2226. S3 Secrets should use a strong RNG. (#1572)

2019-10-03 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit ddb395d5cbf9152a3ab518f2a37a22a4dccef18a
Author: Anu Engineer 
AuthorDate: Thu Oct 3 09:28:41 2019 -0700

HDDS-2226. S3 Secrets should use a strong RNG. (#1572)

(cherry picked from commit d59bcbfa0f30fc6fedb0a7e1896292a524ff71c7)
---
 .../common/src/main/java/org/apache/hadoop/ozone/OmUtils.java | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index 25bfc29..5b47876 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -30,6 +30,7 @@ import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
+import java.security.SecureRandom;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Optional;
@@ -39,7 +40,6 @@ import com.google.common.base.Strings;
 import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
 import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.HddsServerUtil;
@@ -70,6 +70,8 @@ import org.slf4j.LoggerFactory;
  */
 public final class OmUtils {
   public static final Logger LOG = LoggerFactory.getLogger(OmUtils.class);
+  private static final SecureRandom SRAND = new SecureRandom();
+  private static byte[] randomBytes = new byte[32];
 
   private OmUtils() {
   }
@@ -246,9 +248,9 @@ public final class OmUtils {
 
   public static byte[] getSHADigest() throws IOException {
 try {
+  SRAND.nextBytes(randomBytes);
   MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
-  return sha.digest(RandomStringUtils.random(32)
-  .getBytes(StandardCharsets.UTF_8));
+  return sha.digest(randomBytes);
 } catch (NoSuchAlgorithmException ex) {
   throw new IOException("Error creating an instance of SHA-256 digest.\n" +
   "This could possibly indicate a faulty JRE");





[hadoop] branch ozone-0.4.1 updated (a0d210d -> ddb395d)

2019-10-03 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a change to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from a0d210d  HDDS-2020. Remove mTLS from Ozone GRPC. Contributed by Xiaoyu Yao. (#1524)
 new 5001c6b  Revert "HDDS-2101. Ozone filesystem provider doesn't exist (#1473)"
 new ddb395d  HDDS-2226. S3 Secrets should use a strong RNG. (#1572)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../src/main/java/org/apache/hadoop/ozone/OmUtils.java   |  8 +---
 .../src/main/compose/ozone-mr/hadoop27/docker-config |  1 +
 .../src/main/compose/ozone-mr/hadoop31/docker-config |  1 +
 .../src/main/compose/ozone-mr/hadoop32/docker-config |  1 +
 .../META-INF/services/org.apache.hadoop.fs.FileSystem| 16 
 .../META-INF/services/org.apache.hadoop.fs.FileSystem| 16 
 6 files changed, 8 insertions(+), 35 deletions(-)
 delete mode 100644 hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
 delete mode 100644 hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem





[hadoop] branch trunk updated (1dde3ef -> cdaa480)

2019-10-03 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 1dde3ef  HADOOP-16624. Upgrade hugo to the latest version in Dockerfile
 add cdaa480  HDDS-2198. SCM should not consider containers in CLOSING state to come out of safemode. (#1540)

No new revisions were added by this update.

Summary of changes:
 .../hdds/scm/safemode/ContainerSafeModeRule.java   | 26 +++---
 .../hdds/scm/safemode/TestSCMSafeModeManager.java  |  6 +++--
 2 files changed, 17 insertions(+), 15 deletions(-)





[hadoop] 01/04: HDDS-2110. Arbitrary file can be downloaded with the help of ProfilerServlet

2019-09-20 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 64d92deee02488a2602fd05e26f84f026a0352ee
Author: Márton Elek 
AuthorDate: Sat Sep 14 06:18:33 2019 +0200

HDDS-2110. Arbitrary file can be downloaded with the help of ProfilerServlet

Signed-off-by: Anu Engineer 
(cherry picked from commit f6d884cd118fdb6987eb3c369fc9a4c9317acf68)
---
 .../apache/hadoop/hdds/server/ProfileServlet.java  | 60 -
 .../hadoop/hdds/server/TestProfileServlet.java | 63 ++
 2 files changed, 109 insertions(+), 14 deletions(-)

diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
index e09e9b5..42944e1 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
@@ -32,7 +32,9 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.regex.Pattern;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import org.apache.commons.io.IOUtils;
 import org.slf4j.Logger;
@@ -111,6 +113,10 @@ public class ProfileServlet extends HttpServlet {
   private static final AtomicInteger ID_GEN = new AtomicInteger(0);
   static final Path OUTPUT_DIR =
   Paths.get(System.getProperty("java.io.tmpdir"), "prof-output");
+  public static final String FILE_PREFIX = "async-prof-pid-";
+
+  public static final Pattern FILE_NAME_PATTERN =
+  Pattern.compile(FILE_PREFIX + "[0-9]+-[0-9A-Za-z\\-_]+-[0-9]+\\.[a-z]+");
 
   private Lock profilerLock = new ReentrantLock();
   private Integer pid;
@@ -165,6 +171,26 @@ public class ProfileServlet extends HttpServlet {
 }
   }
 
+  @VisibleForTesting
+  protected static String generateFileName(Integer pid, Output output,
+  Event event) {
+return FILE_PREFIX + pid + "-" +
+event.name().toLowerCase() + "-" + ID_GEN.incrementAndGet()
++ "." +
+output.name().toLowerCase();
+  }
+
+  @VisibleForTesting
+  protected static String validateFileName(String filename) {
+if (!FILE_NAME_PATTERN.matcher(filename).matches()) {
+  throw new IllegalArgumentException(
+  "Invalid file name parameter " + filename + " doesn't match pattern "
+  + FILE_NAME_PATTERN);
+
+}
+return filename;
+  }
+
   @Override
   protected void doGet(final HttpServletRequest req,
   final HttpServletResponse resp) throws IOException {
@@ -195,7 +221,8 @@ public class ProfileServlet extends HttpServlet {
   return;
 }
 
-final int duration = getInteger(req, "duration", DEFAULT_DURATION_SECONDS);
+final int duration =
+getInteger(req, "duration", DEFAULT_DURATION_SECONDS);
 final Output output = getOutput(req);
 final Event event = getEvent(req);
 final Long interval = getLong(req, "interval");
@@ -213,11 +240,11 @@ public class ProfileServlet extends HttpServlet {
 int lockTimeoutSecs = 3;
 if (profilerLock.tryLock(lockTimeoutSecs, TimeUnit.SECONDS)) {
   try {
+//Should be in sync with FILE_NAME_PATTERN
 File outputFile =
-OUTPUT_DIR.resolve("async-prof-pid-" + pid + "-" +
-event.name().toLowerCase() + "-" + ID_GEN.incrementAndGet()
-+ "." +
-output.name().toLowerCase()).toFile();
+OUTPUT_DIR.resolve(
+ProfileServlet.generateFileName(pid, output, event))
+.toFile();
 List cmd = new ArrayList<>();
 cmd.add(asyncProfilerHome + PROFILER_SCRIPT);
 cmd.add("-e");
@@ -270,7 +297,8 @@ public class ProfileServlet extends HttpServlet {
 String relativeUrl = "/prof?file=" + outputFile.getName();
 resp.getWriter().write(
 "Started [" + event.getInternalName()
-+ "] profiling. This page will automatically redirect to " 
+
++ "] profiling. This page will automatically redirect to "
++
 relativeUrl + " after " + duration
 + " seconds.\n\ncommand:\n" + Joiner.on(" ").join(cmd));
 resp.getWriter().write(
@@ -320,9 +348,12 @@ public class ProfileServlet extends HttpServlet {
   final HttpServletResponse resp)

[hadoop] 03/04: HDDS-2156. Fix alignment issues in HDDS doc pages

2019-09-20 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 3dafc786bb5dd9f2f19a3d5c2c44e992bd1ab2f6
Author: Vivek Ratnavel Subramanian 
AuthorDate: Thu Sep 19 16:41:00 2019 -0700

HDDS-2156. Fix alignment issues in HDDS doc pages

Signed-off-by: Anu Engineer 
(cherry picked from commit 9be448b3368088967064305e78ec17ffaaeaedb2)
---
 hadoop-hdds/docs/content/security/SecurityAcls.md  |  3 +-
 hadoop-hdds/docs/content/security/_index.md|  2 +-
 .../themes/ozonedoc/layouts/_default/section.html  | 69 +-
 .../themes/ozonedoc/layouts/_default/single.html   |  2 +
 .../docs/themes/ozonedoc/static/css/ozonedoc.css   |  3 +
 5 files changed, 48 insertions(+), 31 deletions(-)

diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.md b/hadoop-hdds/docs/content/security/SecurityAcls.md
index b010233..dd9f893 100644
--- a/hadoop-hdds/docs/content/security/SecurityAcls.md
+++ b/hadoop-hdds/docs/content/security/SecurityAcls.md
@@ -2,7 +2,8 @@
 title: "Ozone ACLs"
 date: "2019-April-03"
 weight: 6
-summary: Native ACL support provides ACL functionality without Ranger integration.
+summary: Native Ozone Authorizer provides Access Control List (ACL) support for Ozone without Ranger integration.
+icon: transfer
 ---
 
 
 {{}}
-  Ozone is an enterprise class, secure storage system. There many
+  Ozone is an enterprise class, secure storage system. There are many
   optional security features in Ozone. Following pages discuss how
   you can leverage the security features of Ozone.
 {{}}
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/section.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/section.html
index 4150d07..5c01241 100644
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/section.html
+++ b/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/section.html
@@ -18,42 +18,53 @@
 
 
 
-  {{ partial "navbar.html" . }}
+{{ partial "navbar.html" . }}
 
-  
+
 
-  {{ partial "sidebar.html" . }}
-  
-{{ .Title }}
-
-  {{ .Content }}
-{{.Params.card}}
-  {{ if not (eq .Params.cards "false")}}
-  {{ range .Pages }}
-
-  
-
-  
-
-  {{ with .Params.Icon}}
-
-{{end}}
-  {{ .LinkTitle }}
-
-{{.Summary}}
-{{.LinkTitle}}
-  
+{{ partial "sidebar.html" . }}
+
+
+{{ .Title }}
 
-  
+
+{{ .Content }}
+{{.Params.card}}
+{{ if not (eq .Params.cards "false")}}
+{{ range $page_index, $page_val := .Pages }}
+
+{{ $page_count := len .Pages }}
+{{if (eq (mod $page_index 2) 0)}}
+
+{{end}}
+
+
+
+
+{{ with .Params.Icon}}
+
+{{end}}
+{{ .LinkTitle }}
+
+{{.Summary}}
+{{.LinkTitle}}
+
+
+
 
-  {{ end }}
-  {{end}}
+{{if (or (eq (mod $page_index 2) 1) (eq $page_index (sub $page_count 1)))}}
+
+{{end}}
+{{ end }}
+{{end}}
+
 
-  
 
-  
+
 
-  {{ partial "footer.html" . }}
+{{ partial "footer.html" . }}
 
 
 
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/single.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/single.html
index 31125ba..3679ddb 100644
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/single.html
+++ b/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/single.html
@@ -36,7 +36,9 @@
 
   
 
+  
 {{.Title}}
+  
 
   {{ .Content }}
 
diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/css/ozonedoc.css b/hadoop-hdds/docs/themes/ozonedoc/static/css/ozonedoc.css
index e004da0..6f812c8 100644
--- a/hadoop-hdds/docs/themes/ozonedoc/static/css/ozonedoc.css
+++ b/hadoop-hdds/docs/themes/ozonedoc/static/css/ozonedoc.css
@@ -160,6 +160,9 @@ h4 {
   padding: 30px;
 }
 
+h1 {
+  margin-bottom: 20px;
+}
 
 .card {
   padding: 20px;





[hadoop] branch ozone-0.4.1 updated (0fb42e5 -> 2eb41fb)

2019-09-20 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a change to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 0fb42e5  HDDS-2132. TestKeyValueContainer is failing (#1457).
 new 64d92de  HDDS-2110. Arbitrary file can be downloaded with the help of ProfilerServlet
 new 21402e2  HDDS-2127. Detailed Tools doc not reachable
 new 3dafc78  HDDS-2156. Fix alignment issues in HDDS doc pages
 new 2eb41fb  HDDS-2101. Ozone filesystem provider doesn't exist (#1473)

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 hadoop-hdds/docs/content/recipe/_index.md  |  2 +-
 hadoop-hdds/docs/content/security/SecurityAcls.md  |  3 +-
 hadoop-hdds/docs/content/security/_index.md|  2 +-
 hadoop-hdds/docs/content/tools/AuditParser.md  |  2 +-
 hadoop-hdds/docs/content/tools/Freon.md| 62 ---
 hadoop-hdds/docs/content/tools/Genconf.md  |  2 +-
 hadoop-hdds/docs/content/tools/SCMCLI.md   |  2 +-
 hadoop-hdds/docs/content/tools/TestTools.md|  2 +-
 hadoop-hdds/docs/content/tools/Tools.md| 19 --
 .../content/{beyond/Tools.md => tools/_index.md}   | 40 +
 .../themes/ozonedoc/layouts/_default/section.html  | 69 +-
 .../themes/ozonedoc/layouts/_default/single.html   |  2 +
 .../docs/themes/ozonedoc/static/css/ozonedoc.css   |  3 +
 .../apache/hadoop/hdds/server/ProfileServlet.java  | 60 ++-
 .../hadoop/hdds/server/TestProfileServlet.java | 63 
 .../main/compose/ozone-mr/hadoop27/docker-config   |  1 -
 .../main/compose/ozone-mr/hadoop31/docker-config   |  1 -
 .../main/compose/ozone-mr/hadoop32/docker-config   |  1 -
 .../services/org.apache.hadoop.fs.FileSystem   |  0
 .../services/org.apache.hadoop.fs.FileSystem   |  2 +-
 20 files changed, 191 insertions(+), 147 deletions(-)
 delete mode 100644 hadoop-hdds/docs/content/tools/Freon.md
 delete mode 100644 hadoop-hdds/docs/content/tools/Tools.md
 rename hadoop-hdds/docs/content/{beyond/Tools.md => tools/_index.md} (85%)
 create mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestProfileServlet.java
 copy hadoop-ozone/{ozonefs/src/test => ozonefs-lib-current/src/main}/resources/META-INF/services/org.apache.hadoop.fs.FileSystem (100%)
 copy hadoop-ozone/{ozonefs/src/test => ozonefs-lib-legacy/src/main}/resources/META-INF/services/org.apache.hadoop.fs.FileSystem (94%)





[hadoop] 02/04: HDDS-2127. Detailed Tools doc not reachable

2019-09-20 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 21402e2b30a3039f10c3deb2430ee5d24f31f1d5
Author: Márton Elek 
AuthorDate: Thu Sep 19 14:42:33 2019 +0200

HDDS-2127. Detailed Tools doc not reachable

Signed-off-by: Anu Engineer 
(cherry picked from commit f260b5aa5b26d85504e95f877b53300fb0cd70af)
---
 hadoop-hdds/docs/content/recipe/_index.md  |  2 +-
 hadoop-hdds/docs/content/tools/AuditParser.md  |  2 +-
 hadoop-hdds/docs/content/tools/Freon.md| 62 --
 hadoop-hdds/docs/content/tools/Genconf.md  |  2 +-
 hadoop-hdds/docs/content/tools/SCMCLI.md   |  2 +-
 hadoop-hdds/docs/content/tools/TestTools.md|  2 +-
 hadoop-hdds/docs/content/tools/Tools.md| 19 ---
 .../content/{beyond/Tools.md => tools/_index.md}   | 40 +-
 8 files changed, 33 insertions(+), 98 deletions(-)

diff --git a/hadoop-hdds/docs/content/recipe/_index.md b/hadoop-hdds/docs/content/recipe/_index.md
index beaab69..47053ab 100644
--- a/hadoop-hdds/docs/content/recipe/_index.md
+++ b/hadoop-hdds/docs/content/recipe/_index.md
@@ -2,7 +2,7 @@
 title: Recipes
 date: "2017-10-10"
 menu: main
-weight: 8
+weight: 9
 
 ---
 
-
-Overview
-
-
-Freon is a load-generator for Ozone. This tool is used for testing the functionality of ozone.
-
-### Random keys
-
-In randomkeys mode, the data written into ozone cluster is randomly generated.
-Each key will be of size 10 KB.
-
-The number of volumes/buckets/keys can be configured. The replication type and factor (eg. replicate with ratis to 3 nodes) Also can be configured.
-
-For more information use
-
-`bin/ozone freon --help`
-
-### Example
-
-{{< highlight bash >}}
-ozone freon randomkeys --numOfVolumes=10 --numOfBuckets 10 --numOfKeys 10  
--replicationType=RATIS --factor=THREE
-{{< /highlight >}}
-
-{{< highlight bash >}}
-***
-Status: Success
-Git Base Revision: 48aae081e5afacbb3240657556b26c29e61830c3
-Number of Volumes created: 10
-Number of Buckets created: 100
-Number of Keys added: 1000
-Ratis replication factor: THREE
-Ratis replication type: RATIS
-Average Time spent in volume creation: 00:00:00,035
-Average Time spent in bucket creation: 00:00:00,319
-Average Time spent in key creation: 00:00:03,659
-Average Time spent in key write: 00:00:10,894
-Total bytes written: 1024
-Total Execution time: 00:00:16,898
-***
-{{< /highlight >}}
diff --git a/hadoop-hdds/docs/content/tools/Genconf.md b/hadoop-hdds/docs/content/tools/Genconf.md
index 146dfdc..35d5e3d 100644
--- a/hadoop-hdds/docs/content/tools/Genconf.md
+++ b/hadoop-hdds/docs/content/tools/Genconf.md
@@ -1,7 +1,7 @@
 ---
 title: "Generate Configurations"
 date: 2018-12-18
-
+summary: Tool to generate default configuration
 ---
 
\ No newline at end of file
diff --git a/hadoop-hdds/docs/content/beyond/Tools.md b/hadoop-hdds/docs/content/tools/_index.md
similarity index 85%
rename from hadoop-hdds/docs/content/beyond/Tools.md
rename to hadoop-hdds/docs/content/tools/_index.md
index 7316089..d7c9270 100644
--- a/hadoop-hdds/docs/content/beyond/Tools.md
+++ b/hadoop-hdds/docs/content/tools/_index.md
@@ -2,8 +2,11 @@
 title: "Tools"
 date: "2017-10-10"
 summary: Ozone supports a set of tools that are handy for developers.Here is a quick list of command line tools.
-weight: 3
+menu:
+   main:
+  weight: 8
 ---
+
 

[hadoop] 04/04: HDDS-2101. Ozone filesystem provider doesn't exist (#1473)

2019-09-20 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 2eb41fb90aec72f1ab69aea7260eb21ab0105cbd
Author: Vivek Ratnavel Subramanian 
AuthorDate: Thu Sep 19 16:28:29 2019 -0700

HDDS-2101. Ozone filesystem provider doesn't exist (#1473)

(cherry picked from commit b7ae8a96cde5d78c7c73653e09b6e4b130b4d74b)
---
 .../src/main/compose/ozone-mr/hadoop27/docker-config |  1 -
 .../src/main/compose/ozone-mr/hadoop31/docker-config |  1 -
 .../src/main/compose/ozone-mr/hadoop32/docker-config |  1 -
 .../META-INF/services/org.apache.hadoop.fs.FileSystem| 16 
 .../META-INF/services/org.apache.hadoop.fs.FileSystem| 16 
 5 files changed, 32 insertions(+), 3 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config
index 9e9cc04..fccdace 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config
@@ -14,6 +14,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.BasicOzoneFileSystem
 
CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.BasicOzFs
 
MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config
index f826c75..d7ead21 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config
@@ -14,6 +14,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
 CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
 
MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config
index f826c75..d7ead21 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config
@@ -14,6 +14,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
 CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
 
MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
diff --git a/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
new file mode 100644
index 0000000..0368002
--- /dev/null
+++ b/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.hadoop.fs.ozone.OzoneFileSystem
diff --git a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
new file mode 100644
index 0000000..39ca348
--- /dev/null
+++ b/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
@@ -0,0 +1,16 @@
+# Licensed to

[hadoop] 01/03: HDDS-2111. XSS fragments can be injected to the S3g landing page

2019-09-17 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 6c0f86149c9765a581c09bbe9345d224a597eee6
Author: Márton Elek 
AuthorDate: Sat Sep 14 05:33:05 2019 +0200

HDDS-2111. XSS fragments can be injected to the S3g landing page

Signed-off-by: Anu Engineer 
(cherry picked from commit 2358e53e9c9f8489b24648b1017eb856d4bd42b0)
---
 .../src/main/resources/webapps/static/index.html   |  8 ++--
 .../src/main/resources/webapps/static/s3g.js   | 23 ++
 2 files changed, 29 insertions(+), 2 deletions(-)

diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html
index 68939ef..b20bf35 100644
--- a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html
+++ b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html
@@ -21,6 +21,7 @@
 
 
 
+
 
 
 
@@ -68,12 +69,15 @@
 
 For example with aws-cli:
 
-aws s3api --endpoint <script>document.write(window.location.href.replace("static/", ""))</script> create-bucket --bucket=wordcount
+aws s3api --endpoint <span id="s3gurl"></span> create-bucket --bucket=wordcount
 
 For more information, please check the documentation.
 
 
 
-
+
+
+
+
 
 
diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js
new file mode 100644
index 0000000..8b1e977
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+window.onload = function () {
+var safeurl = window.location.protocol + "//" + window.location.host + window.location.pathname;
+safeurl = safeurl.replace("static/", "");
+document.getElementById('s3gurl').innerHTML = safeurl;
+};





[hadoop] 02/03: HDDS-2114. Rename does not preserve non-explicitly created interim directories. Contributed by Lokesh Jain & Istvan Fajth.

2019-09-17 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 7f823d5e3c32817f06b3dd4e1b5ab8b8275dc030
Author: Mukul Kumar Singh 
AuthorDate: Tue Sep 17 10:47:00 2019 +0530

HDDS-2114. Rename does not preserve non-explicitly created interim directories. Contributed by Lokesh Jain & Istvan Fajth.

(cherry picked from commit 292bce7908bf4830c793a3f4e80376819c038379)
---
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java  | 24 -
 .../hadoop/fs/ozone/TestOzoneFileSystem.java   | 25 ++
 2 files changed, 44 insertions(+), 5 deletions(-)

diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index 27bc925..5299e6a 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -374,7 +374,11 @@ public class BasicOzoneFileSystem extends FileSystem {
   }
 }
 RenameIterator iterator = new RenameIterator(src, dst);
-return iterator.iterate();
+boolean result = iterator.iterate();
+if (result) {
+  createFakeParentDirectory(src);
+}
+return result;
   }
 
   private class DeleteIterator extends OzoneListingIterator {
@@ -459,10 +463,7 @@ public class BasicOzoneFileSystem extends FileSystem {
 if (result) {
   // If this delete operation removes all files/directories from the
   // parent direcotry, then an empty parent directory must be created.
-  Path parent = f.getParent();
-  if (parent != null && !parent.isRoot()) {
-createFakeDirectoryIfNecessary(parent);
-  }
+  createFakeParentDirectory(f);
 }
 
 return result;
@@ -475,6 +476,19 @@ public class BasicOzoneFileSystem extends FileSystem {
* @param f path to the fake parent directory
* @throws IOException
*/
+  private void createFakeParentDirectory(Path f) throws IOException {
+Path parent = f.getParent();
+if (parent != null && !parent.isRoot()) {
+  createFakeDirectoryIfNecessary(parent);
+}
+  }
+
+  /**
+   * Create a fake directory key if it does not already exist.
+   *
+   * @param f path to the fake directory
+   * @throws IOException
+   */
   private void createFakeDirectoryIfNecessary(Path f) throws IOException {
 String key = pathToKey(f);
 if (!key.isEmpty() && !o3Exists(f)) {
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index ac8f11f..434e8f0 100644
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -51,6 +51,7 @@ import org.junit.rules.Timeout;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -285,6 +286,30 @@ public class TestOzoneFileSystem {
 fileStatus2.equals(dir12.toString()));
   }
 
+  @Test
+  public void testNonExplicitlyCreatedPathExistsAfterItsLeafsWereRemoved()
+  throws Exception {
+Path source = new Path("/source");
+Path interimPath = new Path(source, "interimPath");
+Path leafInsideInterimPath = new Path(interimPath, "leaf");
+Path target = new Path("/target");
+Path leafInTarget = new Path(target, "leaf");
+
+fs.mkdirs(source);
+fs.mkdirs(target);
+fs.mkdirs(leafInsideInterimPath);
+assertTrue(fs.rename(leafInsideInterimPath, leafInTarget));
+
+// after rename listStatus for interimPath should succeed and
+// interimPath should have no children
+FileStatus[] statuses = fs.listStatus(interimPath);
+assertNotNull("liststatus returns a null array", statuses);
+assertEquals("Statuses array is not empty", 0, statuses.length);
+FileStatus fileStatus = fs.getFileStatus(interimPath);
+assertEquals("FileStatus does not point to interimPath",
+interimPath.getName(), fileStatus.getPath().getName());
+  }
+
   private KeyInfo getKey(Path keyPath, boolean isDirectory)
   throws IOException, OzoneException {
 String key = o3fs.pathToKey(keyPath);
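The underlying issue: o3fs has no real directories, so a directory "exists" only while some key carries its prefix, and renaming the last key out of a non-explicitly created directory used to make that directory vanish. The fix inserts an empty marker key for the source parent after a successful rename, as the delete path already did. A toy model of the idea (a plain Java map standing in for the key space; no Ozone APIs):

import java.util.Map;
import java.util.TreeMap;

public class MarkerKeyDemo {
  // A flat key space: "directories" are just key prefixes ending in '/'.
  private static final Map<String, byte[]> KEYS = new TreeMap<>();

  static void rename(String src, String dst) {
    KEYS.put(dst, KEYS.remove(src));
    // If the source parent lost its last child, add an empty marker key
    // so it still lists as a directory (the createFakeParentDirectory
    // step in the patch).
    String parent = src.substring(0, src.lastIndexOf('/') + 1);
    if (!parent.isEmpty()
        && KEYS.keySet().stream().noneMatch(k -> k.startsWith(parent))) {
      KEYS.put(parent, new byte[0]);
    }
  }

  public static void main(String[] args) {
    KEYS.put("source/interimPath/leaf", new byte[0]);
    rename("source/interimPath/leaf", "target/leaf");
    System.out.println(KEYS.keySet()); // [source/interimPath/, target/leaf]
  }
}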





[hadoop] 03/03: HDDS-2132. TestKeyValueContainer is failing (#1457).

2019-09-17 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 0fb42e514be56a43d732f1f7efc67171dd0224e1
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Tue Sep 17 11:01:33 2019 +0200

HDDS-2132. TestKeyValueContainer is failing (#1457).

(cherry picked from commit e54977f888e1a855e9f88b9fa41e0c8794bd0881)
---
 .../ozone/container/keyvalue/helpers/KeyValueContainerUtil.java  | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index c0e7d78..2c02ad5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -198,8 +198,9 @@ public final class KeyValueContainerUtil {
   kvContainerData.setKeyCount(liveKeys.size());
   byte[] bcsId = metadata.getStore().get(DFSUtil.string2Bytes(
   OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX));
-  Preconditions.checkNotNull(bcsId);
-  kvContainerData.updateBlockCommitSequenceId(Longs.fromByteArray(bcsId));
+  if (bcsId != null) {
+    kvContainerData.updateBlockCommitSequenceId(Longs.fromByteArray(bcsId));
+  }
 }
   }
 





[hadoop] branch ozone-0.4.1 updated (dafb1c0 -> 0fb42e5)

2019-09-17 nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a change to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from dafb1c0  HDDS-2124. Random next links
 new 6c0f861  HDDS-2111. XSS fragments can be injected to the S3g landing page
 new 7f823d5  HDDS-2114. Rename does not preserve non-explicitly created interim directories. Contributed by Lokesh Jain & Istvan Fajth.
 new 0fb42e5  HDDS-2132. TestKeyValueContainer is failing (#1457).

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../keyvalue/helpers/KeyValueContainerUtil.java|  5 +++--
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java  | 24 -
 .../hadoop/fs/ozone/TestOzoneFileSystem.java   | 25 ++
 .../src/main/resources/webapps/static/index.html   |  8 +--
 .../src/main/resources/webapps/static/s3g.js}  | 11 --
 5 files changed, 57 insertions(+), 16 deletions(-)
 copy 
hadoop-ozone/{ozone-recon/src/main/resources/webapps/recon/ozone-recon-web/src/constants/breadcrumbs.constants.tsx
 => s3gateway/src/main/resources/webapps/static/s3g.js} (77%)





[hadoop] branch ozone-0.4.1 updated: HDDS-2126. Ozone 0.4.1 branch build issue. (#1438)

2019-09-14 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 9580d40  HDDS-2126. Ozone 0.4.1 branch build issue. (#1438)
9580d40 is described below

commit 9580d40e068eb73124fa1f44d8ca46e9e6bc5977
Author: Nanda kumar 
AuthorDate: Sat Sep 14 11:48:54 2019 +0530

HDDS-2126. Ozone 0.4.1 branch build issue. (#1438)
---
 .../org/apache/hadoop/ozone/container/common/impl/ContainerData.java | 5 +
 1 file changed, 5 insertions(+)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index ec70dbd..e6858cd 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -544,4 +544,9 @@ public abstract class ContainerData {
* @return Protocol Buffer Message
*/
   public abstract ContainerProtos.ContainerDataProto getProtoBufMessage();
+
+  /**
+   * Returns the blockCommitSequenceId.
+   */
+  public abstract long getBlockCommitSequenceId();
 }


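The branch build broke because callers compiled against the ContainerData base type invoked a method that only concrete subclasses declared. A minimal sketch of why declaring the accessor abstract on the base type fixes compilation (class names are illustrative):

abstract class BaseContainerData {
  // Declared on the base type so code that only sees this type compiles.
  public abstract long getBlockCommitSequenceId();
}

class KvContainerData extends BaseContainerData {
  private long blockCommitSequenceId;

  @Override
  public long getBlockCommitSequenceId() {
    return blockCommitSequenceId;
  }
}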



[hadoop] branch ozone-0.4.1 updated: HDDS-2122. Broken logo image on category sub-pages (#1437)

2019-09-13 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new fd10837  HDDS-2122. Broken logo image on category sub-pages (#1437)
fd10837 is described below

commit fd10837a04dc7e1221fff1addc4c1695718b86e8
Author: Doroszlai, Attila 
AuthorDate: Fri Sep 13 16:11:00 2019 +0530

HDDS-2122. Broken logo image on category sub-pages (#1437)

Signed-off-by: Nanda kumar 
(cherry picked from commit 4a9a6a21b8ebe6c762b1050a802cb7dd80f004da)
---
 hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html 
b/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html
index 598ede6..0f26571 100644
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html
+++ b/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html
@@ -24,7 +24,7 @@
 
   
   
-
+
   
   
 Apache Hadoop Ozone/HDDS documentation





[hadoop] branch trunk updated: HDDS-2122. Broken logo image on category sub-pages (#1437)

2019-09-13 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4a9a6a2  HDDS-2122. Broken logo image on category sub-pages (#1437)
4a9a6a2 is described below

commit 4a9a6a21b8ebe6c762b1050a802cb7dd80f004da
Author: Doroszlai, Attila 
AuthorDate: Fri Sep 13 16:11:00 2019 +0530

HDDS-2122. Broken logo image on category sub-pages (#1437)

Signed-off-by: Nanda kumar 
---
 hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html 
b/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html
index 598ede6..0f26571 100644
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html
+++ b/hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html
@@ -24,7 +24,7 @@
 
   
   
-
+
   
   
 Apache Hadoop Ozone/HDDS documentation





[hadoop] branch ozone-0.4.1 updated: HDDS-2076. Read fails because the block cannot be located in the container (#1410)

2019-09-12 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new aaee64c  HDDS-2076. Read fails because the block cannot be located in 
the container (#1410)
aaee64c is described below

commit aaee64cfbcdea5e7c6c0e26f405278f5695f0f6b
Author: Shashikant Banerjee 
AuthorDate: Thu Sep 12 21:16:39 2019 +0530

HDDS-2076. Read fails because the block cannot be located in the container 
(#1410)

Signed-off-by: Nanda kumar 
(cherry picked from commit fe8cdf0ab846df9c2f3f59d1d4875185633a27ea)
---
 .../keyvalue/helpers/KeyValueContainerUtil.java|   7 +
 .../rpc/TestContainerReplicationEndToEnd.java  | 215 +
 2 files changed, 222 insertions(+)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index d5487b3..c0e7d78 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -24,12 +24,15 @@ import java.nio.file.Paths;
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerCommandResponseProto;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
@@ -193,6 +196,10 @@ public final class KeyValueContainerUtil {
   }).sum();
   kvContainerData.setBytesUsed(bytesUsed);
   kvContainerData.setKeyCount(liveKeys.size());
+  byte[] bcsId = metadata.getStore().get(DFSUtil.string2Bytes(
+  OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX));
+  Preconditions.checkNotNull(bcsId);
+  kvContainerData.updateBlockCommitSequenceId(Longs.fromByteArray(bcsId));
 }
   }
 
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
new file mode 100644
index 000..e5a3d2f
--- /dev/null
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.client.rpc;
+
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.ozone.HddsDatanodeService;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneCl

[hadoop] branch trunk updated: HDDS-2076. Read fails because the block cannot be located in the container (#1410)

2019-09-12 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new fe8cdf0  HDDS-2076. Read fails because the block cannot be located in 
the container (#1410)
fe8cdf0 is described below

commit fe8cdf0ab846df9c2f3f59d1d4875185633a27ea
Author: Shashikant Banerjee 
AuthorDate: Thu Sep 12 21:16:39 2019 +0530

HDDS-2076. Read fails because the block cannot be located in the container 
(#1410)

Signed-off-by: Nanda kumar 
---
 .../keyvalue/helpers/KeyValueContainerUtil.java|   7 +
 .../rpc/TestContainerReplicationEndToEnd.java  | 215 +
 2 files changed, 222 insertions(+)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index d5487b3..c0e7d78 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -24,12 +24,15 @@ import java.nio.file.Paths;
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerCommandResponseProto;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
@@ -193,6 +196,10 @@ public final class KeyValueContainerUtil {
   }).sum();
   kvContainerData.setBytesUsed(bytesUsed);
   kvContainerData.setKeyCount(liveKeys.size());
+  byte[] bcsId = metadata.getStore().get(DFSUtil.string2Bytes(
+  OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX));
+  Preconditions.checkNotNull(bcsId);
+  kvContainerData.updateBlockCommitSequenceId(Longs.fromByteArray(bcsId));
 }
   }
 
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
new file mode 100644
index 000..e5a3d2f
--- /dev/null
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.client.rpc;
+
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.ozone.HddsDatanodeService;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.io.KeyO

[hadoop] branch ozone-0.4.1 updated: HDDS-2075. Tracing in OzoneManager call is propagated with wrong parent (#1415)

2019-09-12 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new d8c98c5  HDDS-2075. Tracing in OzoneManager call is propagated with 
wrong parent (#1415)
d8c98c5 is described below

commit d8c98c5fc138734d51794bda5cf83984628095cd
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Wed Sep 11 20:59:01 2019 +0200

HDDS-2075. Tracing in OzoneManager call is propagated with wrong parent 
(#1415)

(cherry picked from commit 64ed6b177d6b00b22d45576a8517432dc6c03348)
---
 .../main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java| 7 +--
 .../om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java  | 5 ++---
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 0bbcd03..8e3deb1 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -150,8 +150,11 @@ public class RpcClient implements ClientProtocol {
 this.userRights = aclConfig.getUserDefaultRights();
 this.groupRights = aclConfig.getGroupDefaultRights();
 
-this.ozoneManagerClient = new OzoneManagerProtocolClientSideTranslatorPB(
-this.conf, clientId.toString(), ugi);
+this.ozoneManagerClient = TracingUtil.createProxy(
+new OzoneManagerProtocolClientSideTranslatorPB(
+this.conf, clientId.toString(), ugi),
+OzoneManagerProtocol.class, conf
+);
 long scmVersion =
 RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
 InetSocketAddress scmAddress = getScmAddressForClient();
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index 094e689..44db898 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -202,10 +202,9 @@ public final class 
OzoneManagerProtocolClientSideTranslatorPB
 OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY,
 OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_DEFAULT);
 
-this.rpcProxy = TracingUtil.createProxy(
+this.rpcProxy =
 createRetryProxy(omFailoverProxyProvider, maxRetries, maxFailovers,
-sleepBase, sleepMax),
-OzoneManagerProtocolPB.class, conf);
+sleepBase, sleepMax);
 this.clientID = clientId;
   }
 


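The fix moves the tracing wrapper from the low-level protobuf stub up to the client-facing interface, so spans are created while the caller's span is still active and therefore get the right parent. A minimal sketch of that pattern with java.lang.reflect.Proxy and a stand-in Tracer interface (the real code uses TracingUtil and OpenTracing, so treat these names as assumptions):

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;

final class TracingProxies {
  interface Tracer {
    AutoCloseable startSpan(String name);
  }

  @SuppressWarnings("unchecked")
  static <T> T trace(T delegate, Class<T> iface, Tracer tracer) {
    InvocationHandler handler = (proxy, method, args) -> {
      // Start the span here, at the outermost layer, so the caller's
      // active span is still the current one and becomes the parent.
      try (AutoCloseable span = tracer.startSpan(method.getName())) {
        return method.invoke(delegate, args);
      }
    };
    return (T) Proxy.newProxyInstance(
        iface.getClassLoader(), new Class<?>[]{iface}, handler);
  }
}

Wrapping a lower layer instead (here, the retry proxy inside the PB translator) starts spans after the caller's context has been lost, which is the wrong-parent bug this commit fixes.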



[hadoop] branch trunk updated: HDDS-2048: State check during container state transition in datanode should be lock protected (#1375)

2019-09-10 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c3beeb7  HDDS-2048: State check during container state transition in 
datanode should be lock protected (#1375)
c3beeb7 is described below

commit c3beeb7761a08f57ad1d45a2d31b4f8a35ff67d9
Author: Lokesh Jain 
AuthorDate: Tue Sep 10 14:14:52 2019 +0530

HDDS-2048: State check during container state transition in datanode should 
be lock protected (#1375)
---
 .../container/keyvalue/KeyValueContainer.java  |   6 +-
 .../ozone/container/keyvalue/KeyValueHandler.java  | 120 -
 .../container/keyvalue/impl/BlockManagerImpl.java  |  32 +++---
 3 files changed, 94 insertions(+), 64 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index ff57037..a6e914b 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -82,7 +82,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Class to perform KeyValue Container operations.
+ * Class to perform KeyValue Container operations. Any modifications to
+ * KeyValueContainer object should ideally be done via api exposed in
+ * KeyValueHandler class.
  */
 public class KeyValueContainer implements Container {
 
@@ -554,6 +556,8 @@ public class KeyValueContainer implements 
Container {
* Acquire write lock.
*/
   public void writeLock() {
+// TODO: The lock for KeyValueContainer object should not be exposed
+// publicly.
 this.lock.writeLock().lock();
   }
 
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index ab1d124..f39973f 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -881,75 +881,97 @@ public class KeyValueHandler extends Handler {
   @Override
   public void markContainerForClose(Container container)
   throws IOException {
-// Move the container to CLOSING state only if it's OPEN
-if (container.getContainerState() == State.OPEN) {
-  container.markContainerForClose();
-  sendICR(container);
+container.writeLock();
+try {
+  // Move the container to CLOSING state only if it's OPEN
+  if (container.getContainerState() == State.OPEN) {
+container.markContainerForClose();
+sendICR(container);
+  }
+} finally {
+  container.writeUnlock();
 }
   }
 
   @Override
   public void markContainerUnhealthy(Container container)
   throws IOException {
-if (container.getContainerState() != State.UNHEALTHY) {
-  try {
-container.markContainerUnhealthy();
-  } catch (IOException ex) {
-// explicitly catch IOException here since the this operation
-// will fail if the Rocksdb metadata is corrupted.
-long id = container.getContainerData().getContainerID();
-LOG.warn("Unexpected error while marking container "
-+id+ " as unhealthy", ex);
-  } finally {
-sendICR(container);
+container.writeLock();
+try {
+  if (container.getContainerState() != State.UNHEALTHY) {
+try {
+  container.markContainerUnhealthy();
+} catch (IOException ex) {
+  // explicitly catch IOException here since the this operation
+  // will fail if the Rocksdb metadata is corrupted.
+  long id = container.getContainerData().getContainerID();
+  LOG.warn("Unexpected error while marking container " + id
+  + " as unhealthy", ex);
+} finally {
+  sendICR(container);
+}
   }
+} finally {
+  container.writeUnlock();
 }
   }
 
   @Override
   public void quasiCloseContainer(Container container)
   throws IOException {
-final State state = container.getContainerState();
-// Quasi close call is idempotent.
-if (state == State.QUASI_CLOSED) {
-  return;
-}
-// The container has to be in CLOSING state.
-if (state != State.CLOSING) {
-  ContainerProtos.Result error = state == State.INVALID ?
-  INVALID_CONTAINER_STATE : CONTAINER_INTERNAL_ERROR;
-  throw new StorageContainerException("Cannot quasi close contain

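The recurring shape of this patch is turning an unguarded check-then-act on container state into a single critical section under the container's write lock. A minimal sketch of the pattern (a stand-in class, not the real KeyValueHandler):

import java.util.concurrent.locks.ReentrantReadWriteLock;

final class LockedContainer {
  enum State { OPEN, CLOSING, QUASI_CLOSED, CLOSED, UNHEALTHY }

  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private State state = State.OPEN;

  void markForCloseIfOpen() {
    lock.writeLock().lock();
    try {
      // Check and transition as one critical section; without the lock,
      // two handlers can both observe OPEN and both act on it.
      if (state == State.OPEN) {
        state = State.CLOSING;
      }
    } finally {
      lock.writeLock().unlock();
    }
  }
}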
[hadoop] branch trunk updated: HDDS-1561: Mark OPEN containers as QUASI_CLOSED as part of Ratis groupRemove (#1401)

2019-09-06 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 6e4cdf8  HDDS-1561: Mark OPEN containers as QUASI_CLOSED as part of 
Ratis groupRemove (#1401)
6e4cdf8 is described below

commit 6e4cdf89effb11c5ec36578da83a46d3d3c48c11
Author: Lokesh Jain 
AuthorDate: Fri Sep 6 13:15:49 2019 +0530

HDDS-1561: Mark OPEN containers as QUASI_CLOSED as part of Ratis 
groupRemove (#1401)
---
 .../ozone/container/common/impl/ContainerData.java |  8 ++
 .../CloseContainerCommandHandler.java  | 55 +++---
 .../server/ratis/ContainerStateMachine.java| 26 ++-
 .../transport/server/ratis/XceiverServerRatis.java | 48 +++-
 .../ozone/container/ozoneimpl/OzoneContainer.java  |  3 +-
 .../TestCloseContainerCommandHandler.java  | 10 ++-
 hadoop-hdds/pom.xml|  2 +-
 .../TestCloseContainerByPipeline.java  | 86 +++---
 .../transport/server/ratis/TestCSMMetrics.java |  4 +
 .../container/server/TestContainerServer.java  |  5 +-
 .../server/TestSecureContainerServer.java  |  6 +-
 .../ozone/om/ratis/OzoneManagerRatisServer.java|  2 +-
 hadoop-ozone/pom.xml   |  2 +-
 13 files changed, 188 insertions(+), 69 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index e6858cd..85738e2 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -285,6 +285,14 @@ public abstract class ContainerData {
   }
 
   /**
+   * checks if the container is unhealthy.
+   * @return - boolean
+   */
+  public synchronized boolean isUnhealthy() {
+return ContainerDataProto.State.UNHEALTHY == state;
+  }
+
+  /**
* Marks this container as quasi closed.
*/
   public synchronized void quasiCloseContainer() {
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
index d4c3ff7..881fea0 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
@@ -86,37 +86,38 @@ public class CloseContainerCommandHandler implements 
CommandHandler {
 return;
   }
 
-  if (container.getContainerState() ==
-  ContainerProtos.ContainerDataProto.State.CLOSED) {
-// Closing a container is an idempotent operation.
-return;
-  }
-
-  // Move the container to CLOSING state
+  // move the container to CLOSING if in OPEN state
   controller.markContainerForClose(containerId);
 
-  // If the container is part of open pipeline, close it via write channel
-  if (ozoneContainer.getWriteChannel()
-  .isExist(closeCommand.getPipelineID())) {
+  switch (container.getContainerState()) {
+  case OPEN:
+  case CLOSING:
+// If the container is part of open pipeline, close it via write 
channel
+if (ozoneContainer.getWriteChannel()
+.isExist(closeCommand.getPipelineID())) {
+  ContainerCommandRequestProto request =
+  getContainerCommandRequestProto(datanodeDetails,
+  closeCommand.getContainerID());
+  ozoneContainer.getWriteChannel()
+  .submitRequest(request, closeCommand.getPipelineID());
+} else {
+  // Container should not exist in CLOSING state without a pipeline
+  controller.markContainerUnhealthy(containerId);
+}
+break;
+  case QUASI_CLOSED:
 if (closeCommand.getForce()) {
-  LOG.warn("Cannot force close a container when the container is" +
-  " part of an active pipeline.");
-  return;
+  controller.closeContainer(containerId);
+  break;
 }
-ContainerCommandRequestProto request =
-getContainerCommandRequestProto(datanodeDetails,
-closeCommand.getContainerID());
-ozoneContainer.getWriteChannel().submitRequest(
-request, closeCommand.getPipelineID());
-return;
-  }
-  // If we reach here, there i

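The handler now dispatches on the post-transition state, which is what makes close idempotent and lets QUASI_CLOSED replicas be force-closed after a Ratis group is removed. A minimal sketch of the dispatch (method names are illustrative stand-ins for the controller and write-channel calls):

final class CloseFlowSketch {
  enum State { OPEN, CLOSING, QUASI_CLOSED, CLOSED, UNHEALTHY }

  void handleClose(State state, boolean pipelineExists, boolean force) {
    switch (state) {
    case OPEN:
    case CLOSING:
      if (pipelineExists) {
        submitCloseViaRatis();    // replicate the close through the pipeline
      } else {
        markUnhealthy();          // CLOSING without a pipeline is invalid
      }
      break;
    case QUASI_CLOSED:
      if (force) {
        closeDirectly();          // SCM picked this replica as authoritative
      }
      break;
    case CLOSED:
      break;                      // closing is idempotent
    default:
      break;                      // UNHEALTHY: nothing sensible to do
    }
  }

  private void submitCloseViaRatis() { }
  private void markUnhealthy() { }
  private void closeDirectly() { }
}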
[hadoop] branch trunk updated: HDDS-1898. GrpcReplicationService#download cannot replicate the container. (#1326)

2019-09-05 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2b16d53  HDDS-1898. GrpcReplicationService#download cannot replicate 
the container. (#1326)
2b16d53 is described below

commit 2b16d5377c39518ed0140fd27f5b000d464c2f43
Author: Nanda kumar 
AuthorDate: Thu Sep 5 18:12:36 2019 +0530

HDDS-1898. GrpcReplicationService#download cannot replicate the container. 
(#1326)
---
 .../ozone/container/common/interfaces/Handler.java | 16 ++---
 .../commandhandler/DeleteBlocksCommandHandler.java |  7 +-
 .../container/keyvalue/KeyValueContainer.java  |  9 +++
 .../ozone/container/keyvalue/KeyValueHandler.java  | 28 +-
 .../background/BlockDeletingService.java   |  6 +
 .../container/ozoneimpl/ContainerController.java   | 12 --
 .../OnDemandContainerReplicationSource.java| 18 --
 .../ozone/container/TestContainerReplication.java  |  8 +++
 8 files changed, 75 insertions(+), 29 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
index 97601ec..8c3b981 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
@@ -19,8 +19,9 @@
 package org.apache.hadoop.ozone.container.common.interfaces;
 
 
-import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -109,14 +110,23 @@ public abstract class Handler {
   DispatcherContext dispatcherContext);
 
   /**
-   * Import container data from a raw input stream.
+   * Imports container from a raw input stream.
*/
   public abstract Container importContainer(
   long containerID,
   long maxSize,
   String originPipelineId,
   String originNodeId,
-  FileInputStream rawContainerStream,
+  InputStream rawContainerStream,
+  TarContainerPacker packer)
+  throws IOException;
+
+  /**
+   * Exports container to the output stream.
+   */
+  public abstract void exportContainer(
+  Container container,
+  OutputStream outputStream,
   TarContainerPacker packer)
   throws IOException;
 
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index a5d4760..a4849f2 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -127,7 +127,12 @@ public class DeleteBlocksCommandHandler implements 
CommandHandler {
   case KeyValueContainer:
 KeyValueContainerData containerData = (KeyValueContainerData)
 cont.getContainerData();
-deleteKeyValueContainerBlocks(containerData, entry);
+cont.writeLock();
+try {
+  deleteKeyValueContainerBlocks(containerData, entry);
+} finally {
+  cont.writeUnlock();
+}
 txResultBuilder.setContainerID(containerId)
 .setSuccess(true);
 break;
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 53065cc..b7f46c9 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -330,6 +330,9 @@ public class KeyValueContainer implements 
Container {
 } finally {
   writeUnlock();
 }
+LOG.info("Container {} is closed with bcsId {}.",
+containerData.getContainerID(),
+containerData.getBlockCommitSequenceId());
   }
 
   /**
@@ -361,13 +364,10 @@ public class KeyValueContainer implements 
Container {
 }
   }
 
-  void compactDB() throws StorageContainerException {
+  private void compactDB() throws StorageContainerException {
 try {

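The signature change is the heart of the fix: by accepting a plain InputStream and adding an OutputStream-based export, the same packing code can serve both local tarballs and the gRPC replication stream, whereas FileInputStream tied import to a local file. A minimal sketch of the generalized shape (the Packer interface approximates TarContainerPacker and is an assumption, not the real API):

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

final class StreamingReplication {
  interface Packer {
    void pack(byte[] containerData, OutputStream out) throws IOException;
    byte[] unpack(InputStream in) throws IOException;
  }

  static void export(byte[] containerData, OutputStream out, Packer packer)
      throws IOException {
    packer.pack(containerData, out);   // works for files and gRPC sinks alike
  }

  static byte[] importContainer(InputStream in, Packer packer)
      throws IOException {
    return packer.unpack(in);          // source no longer has to be a file
  }
}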
[hadoop] branch ozone-0.4.1 updated (2d09c29c -> 58c626f)

2019-09-05 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a change to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 2d09c29c HDDS-2077. Add maven-gpg-plugin.version to pom.ozone.xml. 
(#1396)
 new 99e8e05  HDDS-2050. Error while compiling ozone-recon-web (#1374)
 new 58c626f  HDDS-2079. Fix TestSecureOzoneManager. Contributed by Xiaoyu 
Yao. (#1400)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../hadoop/ozone/om/TestSecureOzoneManager.java|   54 +-
 .../webapps/recon/ozone-recon-web/package.json |2 +
 .../src/components/Breadcrumbs/Breadcrumbs.tsx |2 +-
 .../src/components/NavBar/NavBar.tsx   |2 +-
 .../webapps/recon/ozone-recon-web/yarn.lock| 4846 +---
 5 files changed, 3179 insertions(+), 1727 deletions(-)





[hadoop] 02/02: HDDS-2079. Fix TestSecureOzoneManager. Contributed by Xiaoyu Yao. (#1400)

2019-09-05 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 58c626f22674094eef931ebd36ce68c6cbbf6c3a
Author: Xiaoyu Yao 
AuthorDate: Wed Sep 4 14:03:38 2019 -0700

HDDS-2079. Fix TestSecureOzoneManager. Contributed by Xiaoyu Yao. (#1400)

(cherry picked from commit ae287474c023ce0ed3faa81ad30ffd215327b47f)
---
 .../hadoop/ozone/om/TestSecureOzoneManager.java| 54 ++
 1 file changed, 14 insertions(+), 40 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java
index 728d170..48a9c6a 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java
@@ -36,7 +36,6 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
 
-import java.net.ConnectException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.security.KeyPair;
@@ -122,61 +121,44 @@ public class TestSecureOzoneManager {
 omLogs.clearOutput();
 
 // Case 1: When keypair as well as certificate is missing. Initial keypair
-// boot-up. Get certificate will fail no SCM is not running.
-LambdaTestUtils.intercept(ConnectException.class, "Connection " +
-"refused; For more detail",
-() -> OzoneManager.initializeSecurity(conf, omStorage));
+// boot-up. Get certificate will fail when SCM is not running.
 SecurityConfig securityConfig = new SecurityConfig(conf);
-CertificateClient client =
-new OMCertificateClient(securityConfig);
+CertificateClient client = new OMCertificateClient(securityConfig,
+omStorage.getOmCertSerialId());
+Assert.assertEquals(CertificateClient.InitResponse.GETCERT, client.init());
 privateKey = client.getPrivateKey();
 publicKey = client.getPublicKey();
 Assert.assertNotNull(client.getPrivateKey());
 Assert.assertNotNull(client.getPublicKey());
 Assert.assertNull(client.getCertificate());
-Assert.assertTrue(omLogs.getOutput().contains("Init response: GETCERT"));
-omLogs.clearOutput();
 
 // Case 2: If key pair already exist than response should be RECOVER.
-client = new OMCertificateClient(securityConfig);
-LambdaTestUtils.intercept(RuntimeException.class, " OM security" +
-" initialization failed",
-() -> OzoneManager.initializeSecurity(conf, omStorage));
+client = new OMCertificateClient(securityConfig,
+omStorage.getOmCertSerialId());
+Assert.assertEquals(CertificateClient.InitResponse.RECOVER, client.init());
 Assert.assertNotNull(client.getPrivateKey());
 Assert.assertNotNull(client.getPublicKey());
 Assert.assertNull(client.getCertificate());
-Assert.assertTrue(omLogs.getOutput().contains("Init response: RECOVER"));
-Assert.assertTrue(omLogs.getOutput().contains(" OM certificate is " +
-"missing"));
-omLogs.clearOutput();
 
 // Case 3: When public key as well as certificate is missing.
 client = new OMCertificateClient(securityConfig);
 FileUtils.deleteQuietly(Paths.get(securityConfig.getKeyLocation(COMPONENT)
 .toString(), securityConfig.getPublicKeyFileName()).toFile());
-LambdaTestUtils.intercept(RuntimeException.class, " OM security" +
-" initialization failed",
-() -> OzoneManager.initializeSecurity(conf, omStorage));
+Assert.assertEquals(CertificateClient.InitResponse.FAILURE, client.init());
 Assert.assertNotNull(client.getPrivateKey());
 Assert.assertNull(client.getPublicKey());
 Assert.assertNull(client.getCertificate());
-Assert.assertTrue(omLogs.getOutput().contains("Init response: FAILURE"));
-omLogs.clearOutput();
 
 // Case 4: When private key and certificate is missing.
 client = new OMCertificateClient(securityConfig);
-FileUtils.deleteQuietly(Paths.get(securityConfig.getKeyLocation(COMPONENT)
-.toString(), securityConfig.getPrivateKeyFileName()).toFile());
 KeyCodec keyCodec = new KeyCodec(securityConfig, COMPONENT);
 keyCodec.writePublicKey(publicKey);
-LambdaTestUtils.intercept(RuntimeException.class, " OM security" +
-" initialization failed",
-() -> OzoneManager.initializeSecurity(conf, omStorage));
+FileUtils.deleteQuietly(Paths.get(securityConfig.getKeyLocation(COMPONENT)
+.toString(), securityConfig.getPrivateKeyFileName()).toFile());
+Assert.assertEquals(CertificateClient.InitResponse.FAILURE, client.init());

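The four test cases above exercise a small decision table: which startup response the certificate client returns depends on which artifacts already exist on disk. A minimal sketch of that table (enum values mirror the test; the method is an illustrative reduction of client.init(), not the real implementation):

final class CertInitSketch {
  enum InitResponse { SUCCESS, GETCERT, RECOVER, FAILURE }

  static InitResponse init(boolean hasPrivateKey, boolean hasPublicKey,
      boolean hasCertificate) {
    if (hasPrivateKey && hasPublicKey && hasCertificate) {
      return InitResponse.SUCCESS;   // everything present, just load it
    }
    if (!hasPrivateKey && !hasPublicKey && !hasCertificate) {
      return InitResponse.GETCERT;   // fresh start: make keys, request cert
    }
    if (hasPrivateKey && hasPublicKey) {
      return InitResponse.RECOVER;   // key pair exists, certificate missing
    }
    return InitResponse.FAILURE;     // half a key pair: cannot proceed safely
  }
}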
[hadoop] branch ozone-0.4.1 updated: HDDS-2077. Add maven-gpg-plugin.version to pom.ozone.xml. (#1396)

2019-09-04 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 2d09c29c HDDS-2077. Add maven-gpg-plugin.version to pom.ozone.xml. 
(#1396)
2d09c29c is described below

commit 2d09c29cf37b0e58dfb81237edf9a640b3e9b2e4
Author: Nanda kumar 
AuthorDate: Wed Sep 4 15:28:59 2019 +0530

HDDS-2077. Add maven-gpg-plugin.version to pom.ozone.xml. (#1396)

(cherry picked from commit 1ae775975bc43bfc20ca0e61ad045a521e227f7c)
---
 pom.ozone.xml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pom.ozone.xml b/pom.ozone.xml
index c082186..38a932c 100644
--- a/pom.ozone.xml
+++ b/pom.ozone.xml
@@ -78,6 +78,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
 
+    <maven-gpg-plugin.version>1.5</maven-gpg-plugin.version>
     <shell-executable>bash</shell-executable>
 
   





[hadoop] branch trunk updated: HDDS-2077. Add maven-gpg-plugin.version to pom.ozone.xml. (#1396)

2019-09-04 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1ae7759  HDDS-2077. Add maven-gpg-plugin.version to pom.ozone.xml. 
(#1396)
1ae7759 is described below

commit 1ae775975bc43bfc20ca0e61ad045a521e227f7c
Author: Nanda kumar 
AuthorDate: Wed Sep 4 15:28:59 2019 +0530

HDDS-2077. Add maven-gpg-plugin.version to pom.ozone.xml. (#1396)
---
 pom.ozone.xml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pom.ozone.xml b/pom.ozone.xml
index 8a18d89..b866c35 100644
--- a/pom.ozone.xml
+++ b/pom.ozone.xml
@@ -78,6 +78,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
 
+    <maven-gpg-plugin.version>1.5</maven-gpg-plugin.version>
     <shell-executable>bash</shell-executable>
 
   





[hadoop] branch trunk updated: HDDS-2069. Default values of properties hdds.datanode.storage.utilization.{critical | warning}.threshold are not reasonable. (#1393)

2019-09-03 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0f549ec  HDDS-2069. Default values of properties 
hdds.datanode.storage.utilization.{critical | warning}.threshold are not 
reasonable. (#1393)
0f549ec is described below

commit 0f549ec02a2c4421c22a5b719371d6f8c31e7f70
Author: Sammi Chen 
AuthorDate: Tue Sep 3 20:28:48 2019 +0800

HDDS-2069. Default values of properties 
hdds.datanode.storage.utilization.{critical | warning}.threshold are not 
reasonable. (#1393)
---
 .../common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 879e8c2..9050ebd 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -358,12 +358,12 @@ public final class OzoneConfigKeys {
   HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD =
   "hdds.datanode.storage.utilization.warning.threshold";
   public static final double
-  HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD_DEFAULT = 0.95;
+  HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD_DEFAULT = 0.75;
   public static final String
   HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD =
   "hdds.datanode.storage.utilization.critical.threshold";
   public static final double
-  HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT = 0.75;
+  HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT = 0.95;
 
   public static final String OZONE_SECURITY_ENABLED_KEY =
   "ozone.security.enabled";


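The two defaults were inverted: a warning threshold of 0.95 above a critical threshold of 0.75 means the critical alarm fires long before the warning ever could. A minimal sketch of the corrected ordering (only the two constant values come from the commit; the classify method is illustrative):

final class UtilizationCheck {
  static final double WARNING_THRESHOLD = 0.75;    // was 0.95 before this fix
  static final double CRITICAL_THRESHOLD = 0.95;   // was 0.75 before this fix

  static String classify(double usedFraction) {
    // A warning must trip before the critical alarm, so warning < critical.
    if (usedFraction >= CRITICAL_THRESHOLD) {
      return "CRITICAL";
    } else if (usedFraction >= WARNING_THRESHOLD) {
      return "WARNING";
    }
    return "OK";
  }
}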



[hadoop] branch trunk updated: HADOOP-16534. Exclude submarine from hadoop source build. (#1356)

2019-09-03 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ac5a0ae  HADOOP-16534. Exclude submarine from hadoop source build. 
(#1356)
ac5a0ae is described below

commit ac5a0ae6d0de6cf08040e2c1a95d9c6657fcf17a
Author: Nanda kumar 
AuthorDate: Tue Sep 3 17:40:38 2019 +0530

HADOOP-16534. Exclude submarine from hadoop source build. (#1356)
---
 hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml | 1 +
 pom.xml| 3 +--
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
index e31a30a..b47b4bc 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
@@ -58,6 +58,7 @@
         <exclude>**/SecurityAuth.audit*</exclude>
         <exclude>hadoop-ozone/**</exclude>
         <exclude>hadoop-hdds/**</exclude>
+        <exclude>hadoop-submarine/**</exclude>
   
 
   
diff --git a/pom.xml b/pom.xml
index 2671f83..b8c428c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -406,8 +406,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
https://maven.apache.org/x
            <exclude>**/*.js</exclude>
            <exclude>hadoop-hdds/**/</exclude>
            <exclude>hadoop-ozone/**/</exclude>
-           <exclude>hadoop-submarine/target/**</exclude>
-           <exclude>hadoop-submarine/**/target/**</exclude>
+           <exclude>hadoop-submarine/**/</exclude>
            <exclude>licenses/**</exclude>
            <exclude>licenses-binary/**</exclude>
  





[hadoop] branch trunk updated: HDDS-1810. SCM command to Activate and Deactivate pipelines. (#1224)

2019-09-03 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0b9704f  HDDS-1810. SCM command to Activate and Deactivate pipelines. 
(#1224)
0b9704f is described below

commit 0b9704f6106587d9df06c8b3860a23afbd43
Author: Nanda kumar 
AuthorDate: Tue Sep 3 16:50:57 2019 +0530

HDDS-1810. SCM command to Activate and Deactivate pipelines. (#1224)
---
 .../hdds/scm/client/ContainerOperationClient.java  | 12 +
 .../apache/hadoop/hdds/scm/client/ScmClient.java   | 16 ++
 .../apache/hadoop/hdds/scm/pipeline/Pipeline.java  |  4 +-
 .../protocol/StorageContainerLocationProtocol.java | 16 ++
 ...inerLocationProtocolClientSideTranslatorPB.java | 32 
 .../org/apache/hadoop/ozone/audit/SCMAction.java   |  2 +
 ...inerLocationProtocolServerSideTranslatorPB.java | 34 +
 .../proto/StorageContainerLocationProtocol.proto   | 22 
 hadoop-hdds/common/src/main/proto/hdds.proto   |  3 +-
 .../hadoop/hdds/scm/pipeline/PipelineManager.java  | 17 +++
 .../hdds/scm/pipeline/PipelineStateManager.java| 24 +
 .../hadoop/hdds/scm/pipeline/PipelineStateMap.java |  2 +-
 .../hdds/scm/pipeline/RatisPipelineProvider.java   |  1 +
 .../hdds/scm/pipeline/SCMPipelineManager.java  | 26 ++
 .../hdds/scm/server/SCMClientProtocolServer.java   | 18 +++
 .../org/apache/hadoop/hdds/scm/cli/SCMCLI.java |  4 ++
 .../cli/pipeline/ActivatePipelineSubcommand.java   | 53 
 .../cli/pipeline/DeactivatePipelineSubcommand.java | 53 
 .../scm/pipeline/TestPipelineStateManager.java |  7 +++
 .../hdds/scm/pipeline/TestSCMPipelineManager.java  | 58 +++---
 20 files changed, 395 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
index e2856d7..c97354f 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
@@ -227,6 +227,18 @@ public class ContainerOperationClient implements ScmClient 
{
   }
 
   @Override
+  public void activatePipeline(HddsProtos.PipelineID pipelineID)
+  throws IOException {
+storageContainerLocationClient.activatePipeline(pipelineID);
+  }
+
+  @Override
+  public void deactivatePipeline(HddsProtos.PipelineID pipelineID)
+  throws IOException {
+storageContainerLocationClient.deactivatePipeline(pipelineID);
+  }
+
+  @Override
   public void closePipeline(HddsProtos.PipelineID pipelineID)
   throws IOException {
 storageContainerLocationClient.closePipeline(pipelineID);
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index c2dd5f9..226ceda 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -181,6 +181,22 @@ public interface ScmClient extends Closeable {
   List listPipelines() throws IOException;
 
   /**
+   * Activates the pipeline given a pipeline ID.
+   *
+   * @param pipelineID PipelineID to activate.
+   * @throws IOException In case of exception while activating the pipeline
+   */
+  void activatePipeline(HddsProtos.PipelineID pipelineID) throws IOException;
+
+  /**
+   * Deactivates the pipeline given a pipeline ID.
+   *
+   * @param pipelineID PipelineID to deactivate.
+   * @throws IOException In case of exception while deactivating the pipeline
+   */
+  void deactivatePipeline(HddsProtos.PipelineID pipelineID) throws IOException;
+
+  /**
* Closes the pipeline given a pipeline ID.
*
* @param pipelineID PipelineID to close.
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index a84118a..1627569 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -354,7 +354,7 @@ public final class Pipeline {
* Possible Pipeline states in SCM.
*/
   public enum PipelineState {
-ALLOCATED, OPEN, CLOSED;
+ALLOCATED, OPEN, DORMANT, CLOSED;
 
 public static PipelineState fromProtobuf(HddsProtos.PipelineState state)
 throws UnknownPipelineStateException {
@@ -362,6 +362,7 @@ public final class Pipeline {
   switch (state) {
   case PIPELINE_ALLOCATED: return ALLOCATED

[hadoop] 01/02: HDDS-1942. Support copy during S3 multipart upload part creation
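The new DORMANT state is what makes activate/deactivate meaningful: a deactivated pipeline stops receiving new container allocations without being torn down. A minimal sketch of the transitions implied by the two subcommands (the exact SCM state machine may permit more, so treat these rules as assumptions):

final class PipelineStateSketch {
  enum PipelineState { ALLOCATED, OPEN, DORMANT, CLOSED }

  static PipelineState deactivate(PipelineState s) {
    if (s != PipelineState.OPEN) {
      throw new IllegalStateException("only OPEN pipelines can be deactivated");
    }
    return PipelineState.DORMANT;   // no new containers are allocated here
  }

  static PipelineState activate(PipelineState s) {
    if (s != PipelineState.DORMANT) {
      throw new IllegalStateException("only DORMANT pipelines can be activated");
    }
    return PipelineState.OPEN;      // back into the allocation pool
  }
}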

2019-08-29 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 1845d5a970819e36d06bfc4d3c693bd2b64ed893
Author: Márton Elek 
AuthorDate: Sun Aug 11 14:45:02 2019 +0200

HDDS-1942. Support copy during S3 multipart upload part creation

Signed-off-by: Anu Engineer 
(cherry picked from commit 2fcd0da7dcbc15793041efb079210e06272482a4)
---
 .../src/main/smoketest/s3/MultipartUpload.robot|  52 +
 .../hadoop/ozone/s3/endpoint/CopyPartResult.java   |  69 ++
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   |  79 +--
 .../org/apache/hadoop/ozone/s3/util/S3Consts.java  |   2 +
 .../hadoop/ozone/client/OzoneBucketStub.java   |  15 +-
 .../s3/endpoint/TestMultipartUploadWithCopy.java   | 233 +
 .../ozone/s3/endpoint/TestObjectEndpoint.java  |  53 +
 7 files changed, 483 insertions(+), 20 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot 
b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
index 0133d50..df95f4d 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
@@ -200,3 +200,55 @@ Test Multipart Upload with the simplified aws s3 cp API
 Execute AWSS3Clicp s3://${BUCKET}/mpyawscli 
/tmp/part1.result
 Execute AWSS3Clirm s3://${BUCKET}/mpyawscli
 Compare files   /tmp/part1
/tmp/part1.result
+
+Test Multipart Upload Put With Copy
+Run Keyword Create Random file  5
+${result} = Execute AWSS3APICli put-object --bucket ${BUCKET} 
--key copytest/source --body /tmp/part1
+
+
+${result} = Execute AWSS3APICli create-multipart-upload 
--bucket ${BUCKET} --key copytest/destination
+
+${uploadID} =   Execute and checkrc  echo '${result}' | jq -r 
'.UploadId'0
+Should contain   ${result}${BUCKET}
+Should contain   ${result}UploadId
+
+${result} = Execute AWSS3APICli  upload-part-copy --bucket 
${BUCKET} --key copytest/destination --upload-id ${uploadID} --part-number 1 
--copy-source ${BUCKET}/copytest/source
+Should contain   ${result}${BUCKET}
+Should contain   ${result}ETag
+Should contain   ${result}LastModified
+${eTag1} =  Execute and checkrc  echo '${result}' | jq -r 
'.CopyPartResult.ETag'   0
+
+
+Execute AWSS3APICli complete-multipart-upload 
--upload-id ${uploadID} --bucket ${BUCKET} --key copytest/destination 
--multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1}]'
+Execute AWSS3APICli get-object --bucket ${BUCKET} 
--key copytest/destination /tmp/part-result
+
+Compare files   /tmp/part1
/tmp/part-result
+
+Test Multipart Upload Put With Copy and range
+Run Keyword Create Random file  10
+${result} = Execute AWSS3APICli put-object --bucket ${BUCKET} 
--key copyrange/source --body /tmp/part1
+
+
+${result} = Execute AWSS3APICli create-multipart-upload 
--bucket ${BUCKET} --key copyrange/destination
+
+${uploadID} =   Execute and checkrc  echo '${result}' | jq -r 
'.UploadId'0
+Should contain   ${result}${BUCKET}
+Should contain   ${result}UploadId
+
+${result} = Execute AWSS3APICli  upload-part-copy --bucket 
${BUCKET} --key copyrange/destination --upload-id ${uploadID} --part-number 1 
--copy-source ${BUCKET}/copyrange/source --copy-source-range bytes=0-10485758
+Should contain   ${result}${BUCKET}
+Should contain   ${result}ETag
+Should contain   ${result}LastModified
+${eTag1} =  Execute and checkrc  echo '${result}' | jq -r 
'.CopyPartResult.ETag'   0
+
+${result} = Execute AWSS3APICli  upload-part-copy --bucket 
${BUCKET} --key copyrange/destination --upload-id ${uploadID} --part-number 2 
--copy-source ${BUCKET}/copyrange/source --copy-source-range 
bytes=10485758-10485760
+Should contain   ${result}${BUCKET}
+Should contain   ${result}ETag
+Should contain   ${result}LastModified
+${eTag2} =  Execute and checkrc  echo '${result}' | jq -r 
'.CopyPartResult.ETag'   0
+
+
+Execute AWSS3APICli complete-multipart-upload 
--upload-id ${uploadID} --bucket ${BUCKET} --key copyrange/destination

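The robot test drives upload-part-copy through the AWS CLI; the same flow through the AWS SDK for Java v1 looks roughly like the sketch below. Bucket and key names are placeholders, and endpoint/credential configuration for the Ozone S3 gateway is omitted:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.CopyPartRequest;
import com.amazonaws.services.s3.model.CopyPartResult;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.PartETag;

import java.util.Collections;

public class UploadPartCopyExample {
  public static void main(String[] args) {
    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

    String uploadId = s3.initiateMultipartUpload(
        new InitiateMultipartUploadRequest("bucket", "copytest/destination"))
        .getUploadId();

    // Server-side copy of an existing object as part 1 of the new upload;
    // no bytes travel through the client.
    CopyPartResult part = s3.copyPart(new CopyPartRequest()
        .withSourceBucketName("bucket")
        .withSourceKey("copytest/source")
        .withDestinationBucketName("bucket")
        .withDestinationKey("copytest/destination")
        .withUploadId(uploadId)
        .withPartNumber(1));

    s3.completeMultipartUpload(new CompleteMultipartUploadRequest(
        "bucket", "copytest/destination", uploadId,
        Collections.singletonList(new PartETag(1, part.getETag()))));
  }
}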
[hadoop] branch ozone-0.4.1 updated (d8226cb -> e5c64a8)

2019-08-29 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a change to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from d8226cb  HDDS-1915. Remove hadoop script from ozone distribution
 new 1845d5a  HDDS-1942. Support copy during S3 multipart upload part 
creation
 new e5c64a8  HDDS-1950. S3 MPU part-list call fails if there are no parts

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../src/main/smoketest/s3/MultipartUpload.robot|  52 +
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  24 ++-
 .../apache/hadoop/ozone/om/TestKeyManagerUnit.java | 111 ++
 ...CopyObjectResponse.java => CopyPartResult.java} |  22 +-
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   |  79 +--
 .../org/apache/hadoop/ozone/s3/util/S3Consts.java  |   2 +
 .../hadoop/ozone/client/OzoneBucketStub.java   |  15 +-
 ...plete.java => TestMultipartUploadWithCopy.java} | 237 +++--
 ...BucketResponse.java => TestObjectEndpoint.java} |  29 ++-
 9 files changed, 421 insertions(+), 150 deletions(-)
 create mode 100644 
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
 copy 
hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/{CopyObjectResponse.java
 => CopyPartResult.java} (86%)
 copy 
hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/{TestMultipartUploadComplete.java
 => TestMultipartUploadWithCopy.java} (55%)
 copy 
hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/{TestBucketResponse.java
 => TestObjectEndpoint.java} (54%)





[hadoop] 02/02: HDDS-1950. S3 MPU part-list call fails if there are no parts

2019-08-29 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit e5c64a8f523d0ae302e5b428d575ad6fd59ecb2a
Author: Márton Elek 
AuthorDate: Sun Aug 11 14:32:00 2019 +0200

HDDS-1950. S3 MPU part-list call fails if there are no parts

Signed-off-by: Anu Engineer 
(cherry picked from commit aef6a4fe0d04fe0d42fa36dc04cac2cc53ae8efd)
---
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  24 -
 .../apache/hadoop/ozone/om/TestKeyManagerUnit.java | 111 +
 2 files changed, 133 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 30b4604..d351320 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -1296,8 +1296,9 @@ public class KeyManagerImpl implements KeyManager {
 multipartKeyInfo.getPartKeyInfoMap();
    Iterator<Map.Entry<Integer, PartKeyInfo>> partKeyInfoMapIterator =
 partKeyInfoMap.entrySet().iterator();
-HddsProtos.ReplicationType replicationType =
-partKeyInfoMap.firstEntry().getValue().getPartKeyInfo().getType();
+
+HddsProtos.ReplicationType replicationType = null;
+
 int count = 0;
    List<OmPartInfo> omPartInfoList = new ArrayList<>();
 
@@ -1314,11 +1315,30 @@ public class KeyManagerImpl implements KeyManager {
 partKeyInfo.getPartKeyInfo().getModificationTime(),
 partKeyInfo.getPartKeyInfo().getDataSize());
 omPartInfoList.add(omPartInfo);
+
+//if there are parts, use replication type from one of the parts
 replicationType = partKeyInfo.getPartKeyInfo().getType();
 count++;
   }
 }
 
+if (replicationType == null) {
+  //if there are no parts, use the replicationType from the open key.
+
+  OmKeyInfo omKeyInfo =
+  metadataManager.getOpenKeyTable().get(multipartKey);
+
+  if (omKeyInfo == null) {
+throw new IllegalStateException(
+"Open key is missing for multipart upload " + multipartKey);
+  }
+
+  replicationType = omKeyInfo.getType();
+
+}
+Preconditions.checkNotNull(replicationType,
+"Replication type can't be identified");
+
 if (partKeyInfoMapIterator.hasNext()) {
      Map.Entry<Integer, PartKeyInfo> partKeyInfoEntry =
   partKeyInfoMapIterator.next();
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
new file mode 100644
index 000..a5a446c
--- /dev/null
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.hadoop.ozone.om;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.StorageType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs.Builder;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
+import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+

[hadoop] 03/03: HDDS-1915. Remove hadoop script from ozone distribution

2019-08-28 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit d8226cb42016680599a6393cc1bccf7d2794c2ca
Author: Márton Elek 
AuthorDate: Tue Aug 6 10:10:52 2019 +0200

HDDS-1915. Remove hadoop script from ozone distribution

Signed-off-by: Anu Engineer 
(cherry picked from commit 15545c8bf1318e936fe2251bc2ef7522a36af7cd)
---
 hadoop-ozone/dist/dev-support/bin/dist-layout-stitching | 2 --
 1 file changed, 2 deletions(-)

diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching 
b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
index d95242e..5def094 100755
--- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
+++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
@@ -94,8 +94,6 @@ run cp 
"${ROOT}/hadoop-ozone/dist/src/main/conf/ozone-site.xml" "etc/hadoop"
 run cp -f "${ROOT}/hadoop-ozone/dist/src/main/conf/log4j.properties" 
"etc/hadoop"
 run cp 
"${ROOT}/hadoop-hdds/common/src/main/resources/network-topology-default.xml" 
"etc/hadoop"
 run cp 
"${ROOT}/hadoop-hdds/common/src/main/resources/network-topology-nodegroup.xml" 
"etc/hadoop"
-run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop" "bin/"
-run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd" 
"bin/"
 run cp "${ROOT}/hadoop-ozone/common/src/main/bin/ozone" "bin/"
 run cp -r "${ROOT}/hadoop-ozone/dist/src/main/dockerbin" "bin/docker"
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4.1 updated (3a099ca -> d8226cb)

2019-08-28 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a change to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 3a099ca  HDDS-2037. Fix hadoop version in pom.ozone.xml. (#1352)
 new 2b7b5de  HDDS-1999. Basic acceptance test and SCM/OM web UI broken by 
Bootstrap upgrade (#1327)
 new aff53ba  HDDS-2000. Don't depend on bootstrap/jquery versions from 
hadoop-trunk snapshot
 new d8226cb  HDDS-1915. Remove hadoop script from ozone distribution

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../themes/ozonedoc/layouts/partials/footer.html   |2 +-
 .../themes/ozonedoc/layouts/partials/navbar.html   |7 +-
 .../ozonedoc/static/css/bootstrap-theme.min.css|6 +-
 .../static/css/bootstrap-theme.min.css.map |2 +-
 .../themes/ozonedoc/static/css/bootstrap.min.css   |6 +-
 .../ozonedoc/static/css/bootstrap.min.css.map  |2 +-
 .../themes/ozonedoc/static/js/bootstrap.min.js |7 +-
 .../themes/ozonedoc/static/js/jquery-3.4.1.min.js  |2 +
 .../docs/themes/ozonedoc/static/js/jquery.min.js   |5 -
 hadoop-hdds/framework/pom.xml  |   27 -
 .../bootstrap-3.4.1}/css/bootstrap-editable.css|0
 .../static/bootstrap-3.4.1/css/bootstrap-theme.css |  587 ++
 .../bootstrap-3.4.1/css/bootstrap-theme.css.map|1 +
 .../bootstrap-3.4.1}/css/bootstrap-theme.min.css   |6 +-
 .../css/bootstrap-theme.min.css.map|1 +
 .../static/bootstrap-3.4.1/css/bootstrap.css   | 6834 
 .../static/bootstrap-3.4.1/css/bootstrap.css.map   |1 +
 .../static/bootstrap-3.4.1/css/bootstrap.min.css   |6 +
 .../bootstrap-3.4.1/css/bootstrap.min.css.map  |1 +
 .../fonts/glyphicons-halflings-regular.eot |  Bin
 .../fonts/glyphicons-halflings-regular.svg |0
 .../fonts/glyphicons-halflings-regular.ttf |  Bin
 .../fonts/glyphicons-halflings-regular.woff|  Bin
 .../fonts/glyphicons-halflings-regular.woff2   |  Bin
 .../bootstrap-3.4.1}/js/bootstrap-editable.min.js  |0
 .../webapps/static/bootstrap-3.4.1/js/bootstrap.js | 2580 
 .../static/bootstrap-3.4.1/js/bootstrap.min.js |6 +
 .../src/main/resources/webapps/static/hadoop.css   |  331 +
 .../resources/webapps/static/jquery-3.4.1.min.js   |2 +
 hadoop-hdds/pom.xml|2 +
 .../src/main/resources/webapps/scm/index.html  |6 +-
 .../dist/dev-support/bin/dist-layout-stitching |2 -
 .../dist/src/main/smoketest/basic/basic.robot  |2 +-
 .../main/resources/webapps/ozoneManager/index.html |6 +-
 hadoop-ozone/pom.xml   |4 +-
 .../s3gateway/src/main/resources/browser.html  |6 +-
 .../src/main/resources/webapps/static/index.html   |4 +-
 37 files changed, 10389 insertions(+), 65 deletions(-)
 create mode 100644 
hadoop-hdds/docs/themes/ozonedoc/static/js/jquery-3.4.1.min.js
 delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/js/jquery.min.js
 copy {hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7 
=> 
hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1}/css/bootstrap-editable.css
 (100%)
 create mode 100644 
hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css
 create mode 100644 
hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css.map
 copy hadoop-hdds/{docs/themes/ozonedoc/static => 
framework/src/main/resources/webapps/static/bootstrap-3.4.1}/css/bootstrap-theme.min.css
 (90%)
 create mode 100644 
hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.min.css.map
 create mode 100644 
hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css
 create mode 100644 
hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css.map
 create mode 100644 
hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.min.css
 create mode 100644 
hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.min.css.map
 copy {hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7 
=> 
hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1}/fonts/glyphicons-halflings-regular.eot
 (100%)
 copy {hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7 
=> 
hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1}/fonts/glyphicons-halflings-regular.svg
 (100%)
 copy {hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7 
=> 
hadoop-hdds/framewor

[hadoop] 01/03: HDDS-1999. Basic acceptance test and SCM/OM web UI broken by Bootstrap upgrade (#1327)

2019-08-28 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 2b7b5de43332fede133cc439282b79969a089491
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Wed Aug 21 21:16:14 2019 +0200

HDDS-1999. Basic acceptance test and SCM/OM web UI broken by Bootstrap 
upgrade (#1327)

(cherry picked from commit 2ae7f444bdef15fda202f920232bcc1b639e8900)
---
 hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html| 6 +++---
 hadoop-ozone/dist/src/main/smoketest/basic/basic.robot  | 2 +-
 .../src/main/resources/webapps/ozoneManager/index.html  | 6 +++---
 hadoop-ozone/s3gateway/src/main/resources/browser.html  | 6 +++---
 hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html | 4 ++--
 5 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html 
b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html
index 2c943b6..1c5a334 100644
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html
+++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html
@@ -26,7 +26,7 @@
 
 HDFS Storage Container Manager
 
-
+
 
 
 
@@ -63,7 +63,7 @@
 
 
 
-
+
 
 
 
@@ -71,6 +71,6 @@
 
 
 
-
+
 
 
diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot 
b/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
index c750521..edaee5e 100644
--- a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
@@ -26,7 +26,7 @@ ${DATANODE_HOST}datanode
 
 Check webui static resources
    Run Keyword if    '${SECURITY_ENABLED}' == 'true'    Kinit HTTP user
-    ${result} =    Execute    curl --negotiate -u : -s -I http://scm:9876/static/bootstrap-3.3.7/js/bootstrap.min.js
+    ${result} =    Execute    curl --negotiate -u : -s -I http://scm:9876/static/bootstrap-3.4.1/js/bootstrap.min.js
    Should contain    ${result}    200
 
 Start freon testing
diff --git 
a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html 
b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html
index ba54cb2..1b5e693 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html
+++ 
b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html
@@ -26,7 +26,7 @@
 
 Ozone Manager
 
-
+
 
 
 
@@ -57,7 +57,7 @@
 
 
 
-
+
 
 
 
@@ -65,6 +65,6 @@
 
 
 
-
+
 
 
diff --git a/hadoop-ozone/s3gateway/src/main/resources/browser.html 
b/hadoop-ozone/s3gateway/src/main/resources/browser.html
index a1f2338..0405b17e 100644
--- a/hadoop-ozone/s3gateway/src/main/resources/browser.html
+++ b/hadoop-ozone/s3gateway/src/main/resources/browser.html
@@ -24,7 +24,7 @@ permissions and limitations under the License.
 
 
-<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">
+<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/css/bootstrap.min.css">
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.2.0/css/all.css">
 
 
-<script src="https://code.jquery.com/jquery-3.3.1.min.js"></script>
-<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>
+<script src="https://code.jquery.com/jquery-3.4.1.min.js"></script>
+<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/js/bootstrap.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/bootbox.js/4.4.0/bootbox.min.js"></script>
<script src="https://sdk.amazonaws.com/js/aws-sdk-2.207.0.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.22.0/moment.min.js"></script>
diff --git 
a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html 
b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html
index c4b791b..68939ef 100644
--- a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html
+++ b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html
@@ -26,7 +26,7 @@
 
 S3 gateway -- Apache Hadoop Ozone
 
-
+
 
 
 
@@ -74,6 +74,6 @@
 
 
 
-
+
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4.1 updated: HDDS-2037. Fix hadoop version in pom.ozone.xml. (#1352)

2019-08-28 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 3a099ca  HDDS-2037. Fix hadoop version in pom.ozone.xml. (#1352)
3a099ca is described below

commit 3a099ca1cc24a67d645ea41e65c82a25ad575ace
Author: Nanda kumar 
AuthorDate: Wed Aug 28 15:43:51 2019 +0530

HDDS-2037. Fix hadoop version in pom.ozone.xml. (#1352)
---
 hadoop-hdds/pom.xml  | 6 ++
 hadoop-hdds/server-scm/pom.xml   | 4 
 hadoop-ozone/ozone-recon/pom.xml | 2 +-
 pom.ozone.xml| 2 +-
 4 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index ec5cfdb..1397117 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -43,6 +43,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 
   
+    <hadoop.version>3.2.0</hadoop.version>
 
 0.4.1-SNAPSHOT
 
@@ -209,14 +210,17 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd;>
 
   org.apache.hadoop
   hadoop-common
+      <version>${hadoop.version}</version>
 
 
   org.apache.hadoop
   hadoop-hdfs
+      <version>${hadoop.version}</version>
 
 
   org.apache.hadoop
   hadoop-hdfs-client
+      <version>${hadoop.version}</version>
   
 
   com.squareup.okhttp
@@ -227,12 +231,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd;>
 
   org.apache.hadoop
   hadoop-common
+      <version>${hadoop.version}</version>
   test
   test-jar
 
 
   org.apache.hadoop
   hadoop-hdfs
+      <version>${hadoop.version}</version>
   test
   test-jar
 
diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml
index 942674b..2f73323 100644
--- a/hadoop-hdds/server-scm/pom.xml
+++ b/hadoop-hdds/server-scm/pom.xml
@@ -101,10 +101,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd;>
   bcprov-jdk15on
 
 
-  io.dropwizard.metrics
-  metrics-core
-
-
   com.google.code.findbugs
   findbugs
   provided
diff --git a/hadoop-ozone/ozone-recon/pom.xml b/hadoop-ozone/ozone-recon/pom.xml
index 59036de..ece8468 100644
--- a/hadoop-ozone/ozone-recon/pom.xml
+++ b/hadoop-ozone/ozone-recon/pom.xml
@@ -189,7 +189,7 @@
 
   org.apache.hadoop
   hadoop-ozone-reconcodegen
-      <version>${version}</version>
+      <version>${ozone.version}</version>
 
 
   org.apache.hadoop
diff --git a/pom.ozone.xml b/pom.ozone.xml
index 039bed8..cbcd8a0 100644
--- a/pom.ozone.xml
+++ b/pom.ozone.xml
@@ -65,7 +65,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 
   
 
-    <hadoop.version>3.3.0-SNAPSHOT</hadoop.version>
+    <hadoop.version>3.2.0</hadoop.version>
 
 apache.snapshots.https
 Apache Development Snapshot 
Repository


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4.1 updated: HDDS-2002. Update documentation for 0.4.1 release.

2019-08-24 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 1df1a7c  HDDS-2002. Update documentation for 0.4.1 release.
1df1a7c is described below

commit 1df1a7c230adab693e8493b4862729d9809cf4ed
Author: Nanda kumar 
AuthorDate: Wed Aug 21 22:47:41 2019 +0530

HDDS-2002. Update documentation for 0.4.1 release.

Signed-off-by: Anu Engineer 
(cherry picked from commit b661dcf563c0b3cb6fe6f22bb3a39f87e3ec1c57)
---
 hadoop-hdds/docs/content/beyond/Containers.md  |  65 +---
 .../docs/content/beyond/DockerCheatSheet.md|   7 +-
 hadoop-hdds/docs/content/beyond/RunningWithHDFS.md |   2 +-
 hadoop-hdds/docs/content/concept/Datanodes.md  |   6 +-
 hadoop-hdds/docs/content/concept/Hdds.md   |   2 +-
 hadoop-hdds/docs/content/concept/Overview.md   |   6 +-
 hadoop-hdds/docs/content/concept/OzoneManager.md   |  20 +--
 hadoop-hdds/docs/content/interface/JavaApi.md  |   8 +-
 hadoop-hdds/docs/content/interface/OzoneFS.md  |   8 +-
 hadoop-hdds/docs/content/interface/S3.md   |  18 +--
 hadoop-hdds/docs/content/recipe/Prometheus.md  |  22 +--
 hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md |  38 +++--
 hadoop-hdds/docs/content/recipe/_index.md  |   3 +-
 .../docs/content/security/SecuityWithRanger.md |   4 +-
 hadoop-hdds/docs/content/security/SecureOzone.md   | 169 +++--
 .../docs/content/security/SecuringDatanodes.md |  13 +-
 hadoop-hdds/docs/content/security/SecuringS3.md|   6 +-
 hadoop-hdds/docs/content/security/SecuringTDE.md   |   5 +-
 hadoop-hdds/docs/content/security/SecurityAcls.md  |  35 ++---
 hadoop-hdds/docs/content/shell/BucketCommands.md   |  30 +---
 hadoop-hdds/docs/content/shell/KeyCommands.md  |  24 ++-
 hadoop-hdds/docs/content/shell/VolumeCommands.md   |  20 ++-
 hadoop-hdds/docs/content/start/Kubernetes.md   |   2 +-
 hadoop-hdds/docs/content/start/OnPrem.md   |   7 +-
 .../docs/content/start/StartFromDockerHub.md   |   7 +-
 25 files changed, 269 insertions(+), 258 deletions(-)

diff --git a/hadoop-hdds/docs/content/beyond/Containers.md 
b/hadoop-hdds/docs/content/beyond/Containers.md
index b4dc94f..ea7e3b1 100644
--- a/hadoop-hdds/docs/content/beyond/Containers.md
+++ b/hadoop-hdds/docs/content/beyond/Containers.md
@@ -25,8 +25,9 @@ Docker heavily is used at the ozone development with three 
principal use-cases:
 * __dev__:
  * We use docker to start local pseudo-clusters (docker provides unified 
environment, but no image creation is required)
 * __test__:
- * We create docker images from the dev branches to test ozone in 
kubernetes and other container orchestator system
- * We provide _apache/ozone_ images for each release to make it easier the 
evaluation of Ozone. These images are __not__ created __for production__ usage.
+ * We create docker images from the dev branches to test ozone in 
kubernetes and other container orchestrator system
+ * We provide _apache/ozone_ images for each release to make it easier for 
evaluation of Ozone.
+ These images are __not__ created __for production__ usage.
 
 
 We strongly recommend that you create your own custom images when you
@@ -36,7 +37,7 @@ shipped container images and k8s resources as examples and 
guides to help you
 
 
 * __production__:
- * We document how can you create your own docker image for your 
production cluster.
+ * We have documentation on how you can create your own docker image for 
your production cluster.
 
 Let's check out each of the use-cases in more detail:
 
@@ -46,38 +47,41 @@ Ozone artifact contains example docker-compose directories 
to make it easier to
 
 From distribution:
 
-```
+```bash
 cd compose/ozone
 docker-compose up -d
 ```
 
-After a local build
+After a local build:
 
-```
+```bash
 cd  hadoop-ozone/dist/target/ozone-*/compose
 docker-compose up -d
 ```
 
 These environments are very important tools to start different type of Ozone 
clusters at any time.
 
-To be sure that the compose files are up-to-date, we also provide acceptance 
test suites which start the cluster and check the basic behaviour.
+To be sure that the compose files are up-to-date, we also provide acceptance 
test suites which start
+the cluster and check the basic behaviour.
 
-The acceptance tests are part of the distribution, and you can find the test 
definitions in `./smoketest` directory.
+The acceptance tests are part of the distribution, and you can find the test 
definitions in `smoketest` directory.
 
 You can start the tests from any compose directory:
 
 For example:
 
-```
+```bash
 cd compose/ozone
 ./test.sh
 ```
 
 ### Implementation details
 
-`./compose` tests are based on the apache/hadoop-runner docker image. The 
image itself doesn't contain any Ozone jar file

[hadoop] branch trunk updated: HDDS-1978. Create helper script to run blockade tests. (#1310)

2019-08-23 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 20064b6  HDDS-1978. Create helper script to run blockade tests. (#1310)
20064b6 is described below

commit 20064b69a8a7926f2d80776b029da28d5f98f730
Author: Nanda kumar 
AuthorDate: Fri Aug 23 22:26:30 2019 +0530

HDDS-1978. Create helper script to run blockade tests. (#1310)
---
 hadoop-ozone/dev-support/checks/blockade.sh| 28 ++
 .../src/test/blockade/ozone/cluster.py | 14 ---
 2 files changed, 39 insertions(+), 3 deletions(-)

diff --git a/hadoop-ozone/dev-support/checks/blockade.sh 
b/hadoop-ozone/dev-support/checks/blockade.sh
new file mode 100755
index 000..f8b25c1
--- /dev/null
+++ b/hadoop-ozone/dev-support/checks/blockade.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+cd "$DIR/../../.." || exit 1
+
+OZONE_VERSION=$(grep "<ozone.version>" "$DIR/../../pom.xml" | sed 's/<[^>]*>//g'|  sed 's/^[ \t]*//')
+cd "$DIR/../../dist/target/ozone-$OZONE_VERSION/tests" || exit 1
+
+source 
${DIR}/../../dist/target/ozone-${OZONE_VERSION}/compose/ozoneblockade/.env
+export HADOOP_RUNNER_VERSION
+export HDDS_VERSION
+
+python -m pytest -s blockade
+exit $?
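
Usage is a single invocation from a source checkout; a hedged sketch, assuming the distribution has already been built under dist/target and that docker, docker-compose, blockade and pytest are installed (exact build flags may differ):

```bash
# Build the distribution first.
mvn -DskipTests clean install

# The helper resolves the version from the pom, sources the compose .env for
# HADOOP_RUNNER_VERSION/HDDS_VERSION, and runs pytest against the blockade suite.
hadoop-ozone/dev-support/checks/blockade.sh
```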
diff --git 
a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py
 
b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py
index f83ad25..1434266 100644
--- 
a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py
+++ 
b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py
@@ -19,7 +19,9 @@ import logging
 import os
 import re
 import subprocess
+import sys
 import yaml
+import time
 
 
 from os import environ
@@ -146,11 +148,17 @@ class OzoneCluster(object):
 """
 Start Ozone Cluster in docker containers.
 """
-# check if proper env $HDDS_VERSION and $HADOOP_RUNNER_VERSION
-# are set.
 
 # check if docker is up.
 
+if "HADOOP_RUNNER_VERSION" not in os.environ:
+self.__logger__.error("HADOOP_RUNNER_VERSION is not set.")
+sys.exit(1)
+
+if "HDDS_VERSION" not in os.environ:
+self.__logger__.error("HDDS_VERSION is not set.")
+sys.exit(1)
+
 self.__logger__.info("Starting Ozone Cluster")
 if Blockade.blockade_status() == 0:
 Blockade.blockade_destroy()
@@ -162,7 +170,7 @@ class OzoneCluster(object):
   "datanode=" + str(self.conf.datanode_count)])
 self.__logger__.info("Waiting 10s for cluster start up...")
 # Remove the sleep and wait only till the cluster is out of safemode
-# time.sleep(10)
+time.sleep(10)
 output = subprocess.check_output([Command.docker_compose, "-f",
   self.docker_compose_file, "ps"])
 node_list = []


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4.1 updated: HDDS-1948. S3 MPU can't be created with octet-stream content-type (#1266)

2019-08-23 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new d0d9c49  HDDS-1948. S3 MPU can't be created with octet-stream 
content-type  (#1266)
d0d9c49 is described below

commit d0d9c4977d3d46b3de8024092bd3b966a8299a04
Author: Elek, Márton 
AuthorDate: Fri Aug 23 08:15:43 2019 +0200

HDDS-1948. S3 MPU can't be created with octet-stream content-type  (#1266)

(cherry picked from commit edd708527d34d0bf3b09dc35a7f645f49e7becb3)
---
 .../apache/hadoop/ozone/s3/HeaderPreprocessor.java | 31 ++--
 .../hadoop/ozone/s3/S3GatewayHttpServer.java   |  5 ++
 .../hadoop/ozone/s3/VirtualHostStyleFilter.java|  4 ++
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   | 56 --
 .../s3/endpoint/TestAbortMultipartUpload.java  |  2 +-
 .../s3/endpoint/TestInitiateMultipartUpload.java   |  4 +-
 .../hadoop/ozone/s3/endpoint/TestListParts.java|  2 +-
 .../s3/endpoint/TestMultipartUploadComplete.java   |  4 +-
 .../hadoop/ozone/s3/endpoint/TestPartUpload.java   |  4 +-
 9 files changed, 63 insertions(+), 49 deletions(-)

diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java
index 0a1480a..db94bbb 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java
@@ -17,39 +17,60 @@
  */
 package org.apache.hadoop.ozone.s3;
 
+import javax.annotation.Priority;
 import javax.ws.rs.container.ContainerRequestContext;
 import javax.ws.rs.container.ContainerRequestFilter;
 import javax.ws.rs.container.PreMatching;
 import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
 import javax.ws.rs.ext.Provider;
 import java.io.IOException;
 
 /**
 * Filter to adjust request headers for compatibility reasons.
+ *
+ * It should be executed AFTER signature check (VirtualHostStyleFilter) as the
+ * original Content-Type could be part of the base of the signature.
  */
-
 @Provider
 @PreMatching
+@Priority(VirtualHostStyleFilter.PRIORITY
++ S3GatewayHttpServer.FILTER_PRIORITY_DO_AFTER)
 public class HeaderPreprocessor implements ContainerRequestFilter {
 
+  public static final String MULTIPART_UPLOAD_MARKER = "ozone/mpu";
+
   @Override
   public void filter(ContainerRequestContext requestContext) throws
   IOException {
-if (requestContext.getUriInfo().getQueryParameters()
-.containsKey("delete")) {
+    MultivaluedMap<String, String> queryParameters =
+requestContext.getUriInfo().getQueryParameters();
+
+if (queryParameters.containsKey("delete")) {
   //aws cli doesn't send proper Content-Type and by default POST requests
   //processed as form-url-encoded. Here we can fix this.
   requestContext.getHeaders()
   .putSingle("Content-Type", MediaType.APPLICATION_XML);
 }
 
-if (requestContext.getUriInfo().getQueryParameters()
-.containsKey("uploadId")) {
+if (queryParameters.containsKey("uploadId")) {
   //aws cli doesn't send proper Content-Type and by default POST requests
   //processed as form-url-encoded. Here we can fix this.
   requestContext.getHeaders()
   .putSingle("Content-Type", MediaType.APPLICATION_XML);
+} else if (queryParameters.containsKey("uploads")) {
+  // uploads defined but uploadId is not --> this is the creation of the
+  // multi-part-upload requests.
+  //
+  //The AWS SDK for Go uses application/octet-stream, which also
+  //should be fixed to route the request to the right JAX-RS method.
+  //
+  //Should be empty instead of XML, as the empty body can not be
+  //serialized as a CompleteMultipartUploadRequest
+  requestContext.getHeaders()
+  .putSingle("Content-Type", MULTIPART_UPLOAD_MARKER);
 }
+
   }
 
 }
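
The rewrite can be observed with a plain HTTP client; a hedged sketch against a hypothetical local gateway (real requests additionally need AWS signature headers, omitted here):

```bash
# POST with an ?uploads query (and no uploadId) initiates a multipart upload.
# The Go SDK sends Content-Type: application/octet-stream; the filter above
# rewrites it to the ozone/mpu marker so JAX-RS routes the request to the
# multipart-initiation method instead of a form handler.
curl -X POST \
    -H 'Content-Type: application/octet-stream' \
    'http://localhost:9878/bucket1/mpukey?uploads'
```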
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java
index f20b928..f3d8341 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java
@@ -27,6 +27,11 @@ import org.apache.hadoop.hdds.server.BaseHttpServer;
  */
 public class S3GatewayHttpServer extends BaseHttpServer {
 
+  /**
+   * Default offset between two filters.
+   */
+  public static final int FILTER_PRIORITY_DO_AFTER = 50;
+
   public S3GatewayHttpServer(Configuration conf,
   String name) throws IOException {

[hadoop] branch ozone-0.4.1 updated: HDDS-1871. Remove anti-affinity rules from k8s minkube example

2019-08-22 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 658815f  HDDS-1871. Remove anti-affinity rules from k8s minkube example
658815f is described below

commit 658815fd94ef31329697ba5611493f0bedeefade
Author: Márton Elek 
AuthorDate: Mon Jul 29 14:44:58 2019 +0200

HDDS-1871. Remove anti-affinity rules from k8s minkube example

Signed-off-by: Anu Engineer 
(cherry picked from commit 8fc6567b946f1d536ffed4798b5403a365021464)
---
 .../main/k8s/definitions/ozone/datanode-ss.yaml|  6 +++---
 .../getting-started/datanode-statefulset.yaml  |  6 +++---
 .../examples/getting-started/om-statefulset.yaml   | 22 ++
 .../examples/getting-started/s3g-statefulset.yaml  |  5 +
 .../examples/getting-started/scm-statefulset.yaml  |  4 
 .../dist/src/main/k8s/examples/minikube/Flekszible |  9 +
 .../examples/minikube/datanode-statefulset.yaml| 22 ++
 .../main/k8s/examples/minikube/om-statefulset.yaml |  6 +++---
 .../k8s/examples/minikube/s3g-statefulset.yaml |  6 +++---
 .../k8s/examples/minikube/scm-statefulset.yaml | 12 ++--
 .../ozone-dev/csi/csi-ozone-clusterrole.yaml   |  2 +-
 .../csi/csi-ozone-clusterrolebinding.yaml  |  6 +++---
 .../ozone-dev/csi/csi-ozone-serviceaccount.yaml|  2 +-
 .../examples/ozone-dev/datanode-statefulset.yaml   |  6 +++---
 .../examples/ozone-dev/prometheus-clusterrole.yaml |  2 +-
 .../prometheus-operator-clusterrolebinding.yaml|  6 +++---
 .../examples/ozone/csi/csi-ozone-clusterrole.yaml  |  2 +-
 .../ozone/csi/csi-ozone-clusterrolebinding.yaml|  6 +++---
 .../ozone/csi/csi-ozone-serviceaccount.yaml|  2 +-
 .../k8s/examples/ozone/datanode-statefulset.yaml   |  6 +++---
 20 files changed, 68 insertions(+), 70 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml 
b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml
index 94dc570..88a4308 100644
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml
@@ -32,9 +32,9 @@ spec:
 app: ozone
 component: datanode
   annotations:
-prdatanodeetheus.io/scrape: "true"
-prdatanodeetheus.io/port: "9882"
-prdatanodeetheus.io/path: "/prom"
+prometheus.io/scrape: "true"
+prometheus.io/port: "9882"
+prometheus.io/path: "/prom"
 spec:
   affinity:
 podAntiAffinity:
diff --git 
a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml
 
b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml
index 6c8d1bf..c393ead 100644
--- 
a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml
+++ 
b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml
@@ -33,9 +33,9 @@ spec:
 app: ozone
 component: datanode
   annotations:
-prdatanodeetheus.io/scrape: "true"
-prdatanodeetheus.io/port: "9882"
-prdatanodeetheus.io/path: /prom
+prometheus.io/scrape: "true"
+prometheus.io/port: "9882"
+prometheus.io/path: /prom
 spec:
   affinity:
 podAntiAffinity:
diff --git 
a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml 
b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml
index c8ff81b..5de01f5 100644
--- 
a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml
+++ 
b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml
@@ -39,22 +39,6 @@ spec:
 spec:
   securityContext:
 fsGroup: 1000
-  initContainers:
-  - name: init
-image: '@docker.image@'
-args:
-- ozone
-- om
-- --init
-env:
-- name: WAITFOR
-  value: scm-0.scm:9876
-envFrom:
-- configMapRef:
-name: config
-volumeMounts:
-- name: data
-  mountPath: /data
   containers:
   - name: om
 image: '@docker.image@'
@@ -64,6 +48,12 @@ spec:
 env:
 - name: WAITFOR
   value: scm-0.scm:9876
+- name: ENSURE_OM_INITIALIZED
+  value: /data/metadata/om/current/VERSION
+livenessProbe:
+  tcpSocket:
+port: 9862
+  initialDelaySeconds: 30
 envFrom:
 - configMapRef:
 name: config
diff --git 
a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-statefulset.yaml 
b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-statefulset.y

[hadoop] branch ozone-0.4.1 updated: HDDS-1768. Audit xxxAcl methods in OzoneManager (#1204)

2019-08-21 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 071caba  HDDS-1768. Audit xxxAcl methods in OzoneManager (#1204)
071caba is described below

commit 071caba5021cb3b5782b516c177541e7aadf4eac
Author: dineshchitlangia 
AuthorDate: Thu Aug 15 11:43:47 2019 -0400

HDDS-1768. Audit xxxAcl methods in OzoneManager (#1204)

(cherry picked from commit c801f7a26c08d367e902d3b18000853ad7ba2c82)
---
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |   2 +
 .../org/apache/hadoop/ozone/audit/OMAction.java|   6 +
 .../apache/hadoop/ozone/security/acl/OzoneObj.java |  13 +
 .../rpc/TestOzoneRpcClientForAclAuditLog.java  | 284 +
 .../src/test/resources/log4j2.properties   |  76 ++
 .../org/apache/hadoop/ozone/om/OzoneManager.java   | 197 +-
 6 files changed, 510 insertions(+), 68 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 67bd22d..d9b33d8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -248,6 +248,7 @@ public final class OzoneConsts {
   public static final String MAX_KEYS = "maxKeys";
   public static final String PREFIX = "prefix";
   public static final String KEY_PREFIX = "keyPrefix";
+  public static final String ACL = "acl";
   public static final String ACLS = "acls";
   public static final String USER_ACL = "userAcl";
   public static final String ADD_ACLS = "addAcls";
@@ -255,6 +256,7 @@ public final class OzoneConsts {
   public static final String MAX_NUM_OF_BUCKETS = "maxNumOfBuckets";
   public static final String TO_KEY_NAME = "toKeyName";
   public static final String STORAGE_TYPE = "storageType";
+  public static final String RESOURCE_TYPE = "resourceType";
   public static final String IS_VERSION_ENABLED = "isVersionEnabled";
   public static final String CREATION_TIME = "creationTime";
   public static final String DATA_SIZE = "dataSize";
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
index e72beff..ebcd439 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
@@ -58,6 +58,12 @@ public enum OMAction implements AuditAction {
   LIST_MULTIPART_UPLOAD_PARTS,
   ABORT_MULTIPART_UPLOAD,
 
+  //ACL Actions
+  ADD_ACL,
+  GET_ACL,
+  SET_ACL,
+  REMOVE_ACL,
+
   //FS Actions
   GET_FILE_STATUS,
   CREATE_DIRECTORY,
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
index 6e9ac25..4a95e55 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
@@ -22,6 +22,8 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
 
+import java.util.LinkedHashMap;
+import java.util.Map;
 import static 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.StoreType.*;
 
 /**
@@ -131,4 +133,15 @@ public abstract class OzoneObj implements IOzoneObj {
   value = objType;
 }
   }
+
+  public Map<String, String> toAuditMap() {
+    Map<String, String> auditMap = new LinkedHashMap<>();
+auditMap.put(OzoneConsts.RESOURCE_TYPE, this.getResourceType().value);
+auditMap.put(OzoneConsts.STORAGE_TYPE, this.getStoreType().value);
+auditMap.put(OzoneConsts.VOLUME, this.getVolumeName());
+auditMap.put(OzoneConsts.BUCKET, this.getBucketName());
+auditMap.put(OzoneConsts.KEY, this.getKeyName());
+return auditMap;
+  }
+
 }
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
new file mode 100644
index 000..9320fec
--- /dev/null
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
@@ -0,0 +1,284 @@
+package org.apache.hadoop.ozone.client.rpc;
+
+import net.jcip.annotations.NotThreadSafe;
+import or

[hadoop] branch ozone-0.4.1 updated: HDDS-1887. Enable all the blockade test-cases. Contributed by NandaKumar (#1206).

2019-08-14 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new daf869c  HDDS-1887. Enable all the blockade test-cases. Contributed by 
NandaKumar (#1206).
daf869c is described below

commit daf869ca7916154688bd6c98b62897eabac33798
Author: Nanda kumar 
AuthorDate: Fri Aug 2 23:23:11 2019 +0530

HDDS-1887. Enable all the blockade test-cases. Contributed by 
NandaKumar (#1206).

(cherry picked from commit eaf350c414e3fdb8f8654cc695b54b471c094ca7)
---
 .../src/test/blockade/ozone/cluster.py | 12 +++-
 .../src/test/blockade/ozone/container.py   |  3 +++
 .../test/blockade/test_blockade_client_failure.py  |  4 
 .../blockade/test_blockade_datanode_isolation.py   | 22 +++---
 .../src/test/blockade/test_blockade_flaky.py   | 10 --
 .../test/blockade/test_blockade_mixed_failure.py   |  2 --
 .../test_blockade_mixed_failure_two_nodes.py   |  2 --
 7 files changed, 33 insertions(+), 22 deletions(-)

diff --git 
a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py
 
b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py
index 9888e86..f83ad25 100644
--- 
a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py
+++ 
b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py
@@ -230,6 +230,16 @@ class OzoneCluster(object):
 raise ContainerNotFoundError(container_id)
 return Container(container_id, self)
 
+def is_container_replica_exist(self, container_id, datanode):
+container_parent_path = "%s/hdds/%s/current/containerDir0" % \
+(self.datanode_dir, self.scm_uuid)
+command = "find %s -type f -name '%s.container'" % 
(container_parent_path, container_id)
+exit_code, output = util.run_docker_command(command, datanode)
+container_path = output.strip()
+if not container_path:
+return False
+return True
+
 def get_containers_on_datanode(self, datanode):
 """
 Returns all the container on given datanode.
@@ -284,7 +294,7 @@ class OzoneCluster(object):
 (self.datanode_dir, self.scm_uuid)
 command = "find %s -type f -name '%s.container'" % 
(container_parent_path, container_id)
 exit_code, output = util.run_docker_command(command, datanode)
-if exit_code == 0:
+if output.strip():
 result.append(datanode)
 return result
 
diff --git 
a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py
 
b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py
index 6e8c344..65c6b2f 100644
--- 
a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py
+++ 
b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py
@@ -26,6 +26,9 @@ class Container:
 self.container_id = container_id
 self.cluster = cluster
 
+def is_on(self, datanode):
+return self.cluster.is_container_replica_exist(self.container_id, 
datanode)
+
 def get_datanode_states(self):
 dns = self.cluster.get_container_datanodes(self.container_id)
 states = []
diff --git 
a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_client_failure.py
 
b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_client_failure.py
index beb192f..6420564 100644
--- 
a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_client_failure.py
+++ 
b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_client_failure.py
@@ -19,7 +19,6 @@ import re
 import time
 import logging
 import ozone.util
-import pytest
 
 from ozone.cluster import OzoneCluster
 
@@ -36,8 +35,6 @@ def teardown_function():
 cluster.stop()
 
 
-@pytest.mark.skip(reason="The test-case fails intermittently."
- "See HDDS-1817 for more info.")
 def test_client_failure_isolate_two_datanodes():
 """
 In this test, all DNs are isolated from each other.
@@ -79,7 +76,6 @@ def test_client_failure_isolate_two_datanodes():
 assert file_checksum == key_checksum
 
 
-@pytest.mark.skip(reason="HDDS-1817")
 def test_client_failure_isolate_one_datanode():
 """
 In this test, one of the DNs is isolated from all other nodes.
diff --git 
a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_datanode_isolation.py
 
b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/t

[hadoop] branch ozone-0.4.1 updated: HDDS-1964. TestOzoneClientProducer fails with ConnectException

2019-08-14 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new b1e4eee  HDDS-1964. TestOzoneClientProducer fails with ConnectException
b1e4eee is described below

commit b1e4eeef59632ca127f6dded46bde3af2ee8558b
Author: Doroszlai, Attila 
AuthorDate: Wed Aug 14 11:40:25 2019 +0200

HDDS-1964. TestOzoneClientProducer fails with ConnectException

Signed-off-by: Anu Engineer 
(cherry picked from commit 82420851645f1644f597e11e14a1d70bb8a7cc23)
---
 .../hadoop/ozone/s3/TestOzoneClientProducer.java|  2 ++
 .../s3gateway/src/test/resources/log4j.properties   | 21 +
 2 files changed, 23 insertions(+)

diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java
index 641b1e4..17cf7bc 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.s3;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -78,6 +79,7 @@ public class TestOzoneClientProducer {
 context = Mockito.mock(ContainerRequestContext.class);
 OzoneConfiguration config = new OzoneConfiguration();
 config.setBoolean(OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY, true);
+config.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "");
 setupContext();
 producer.setContext(context);
 producer.setOzoneConfiguration(config);
diff --git a/hadoop-ozone/s3gateway/src/test/resources/log4j.properties 
b/hadoop-ozone/s3gateway/src/test/resources/log4j.properties
new file mode 100644
index 000..b8ad21d
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/test/resources/log4j.properties
@@ -0,0 +1,21 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=info,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} 
(%F:%M(%L)) - %m%n
+
+log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4.1 updated: HDDS-1966. Wrong expected key ACL in acceptance test

2019-08-14 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new b6460f8  HDDS-1966. Wrong expected key ACL in acceptance test
b6460f8 is described below

commit b6460f8011231714b8044dca036c93eaf88c56cd
Author: Doroszlai, Attila 
AuthorDate: Wed Aug 14 14:27:10 2019 +0200

HDDS-1966. Wrong expected key ACL in acceptance test

Signed-off-by: Anu Engineer 
(cherry picked from commit 06d8ac95226ef45aa810668f175a70a0ce9b7cb1)
---
 hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot  | 8 
 .../dist/src/main/smoketest/security/ozone-secure-fs.robot| 6 +++---
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot 
b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
index 60a3f04..9606567 100644
--- a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
@@ -122,11 +122,11 @@ Test key Acls
 Should Match Regexp ${result}   \"type\" : 
\"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : 
. \"ALL\" .
 ${result} = Execute ozone sh key addacl 
${protocol}${server}/${volume}/bb1/key2 -a user:superuser1:rwxy
 ${result} = Execute ozone sh key getacl 
${protocol}${server}/${volume}/bb1/key2
-Should Match Regexp ${result}   \"type\" : 
\"GROUP\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : 
\"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
+Should Match Regexp ${result}   \"type\" : 
\"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : 
\"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
 ${result} = Execute ozone sh key removeacl 
${protocol}${server}/${volume}/bb1/key2 -a user:superuser1:xy
 ${result} = Execute ozone sh key getacl 
${protocol}${server}/${volume}/bb1/key2
-Should Match Regexp ${result}   \"type\" : 
\"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : 
\"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\"
+Should Match Regexp ${result}   \"type\" : 
\"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : 
\"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\"
 ${result} = Execute ozone sh key setacl 
${protocol}${server}/${volume}/bb1/key2 -al 
user:superuser1:rwxy,group:superuser1:a,user:testuser/s...@example.com:rwxyc
 ${result} = Execute ozone sh key getacl 
${protocol}${server}/${volume}/bb1/key2
-Should Match Regexp ${result}   \"type\" : 
\"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : 
\"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
-Should Match Regexp ${result}   \"type\" : 
\"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : 
\"ACCESS\",\n.*\"aclList\" : . \"ALL\" .
\ No newline at end of file
+Should Match Regexp ${result}   \"type\" : 
\"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : 
\"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
+Should Match Regexp ${result}   \"type\" : 
\"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : 
\"ACCESS\",\n.*\"aclList\" : . \"ALL\" .
diff --git 
a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot 
b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot
index 20f9a4f..ee4688c 100644
--- a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot
@@ -87,10 +87,10 @@ Test key Acls
 Should Match Regexp ${result}   \"type\" : 
\"USER\",\n.*\"name\&qu

[hadoop] branch ozone-0.4.1 updated: HDDS-1955. TestBlockOutputStreamWithFailures#test2DatanodesFailure failing because of assertion error.

2019-08-14 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new b4480cc  HDDS-1955. 
TestBlockOutputStreamWithFailures#test2DatanodesFailure failing because of 
assertion error.
b4480cc is described below

commit b4480cca596911f50cd2d41b733be491c1521f32
Author: Mukul Kumar Singh 
AuthorDate: Wed Aug 14 20:36:43 2019 +0530

HDDS-1955. TestBlockOutputStreamWithFailures#test2DatanodesFailure failing 
because of assertion error.

Signed-off-by: Nanda kumar 
(cherry picked from commit 2432356570140ec7f55e1ab56e442c373ff05a16)
---
 .../rpc/TestBlockOutputStreamWithFailures.java   | 20 
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
index 7a69e27..8649837 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
@@ -501,14 +501,18 @@ public class TestBlockOutputStreamWithFailures {
 // and one flush for partial chunk
 key.flush();
 
-// Since, 2 datanodes went down, if the pipeline gets destroyed quickly,
-// it will hit GroupMismatchException else, it will fail with
-// RaftRetryFailureException
-Assert.assertTrue((HddsClientUtils.
-checkForException(blockOutputStream
-.getIoException()) instanceof RaftRetryFailureException)
-|| HddsClientUtils.checkForException(
-blockOutputStream.getIoException()) instanceof GroupMismatchException);
+Throwable ioException = HddsClientUtils.checkForException(
+blockOutputStream.getIoException());
+// Since, 2 datanodes went down,
+// a) if the pipeline gets destroyed quickly it will hit
+//GroupMismatchException.
+// b) will hit close container exception if the container is closed
+//but pipeline is still not destroyed.
+// c) will fail with RaftRetryFailureException if the leader election
+//did not finish before the request retry count finishes.
+Assert.assertTrue(ioException instanceof RaftRetryFailureException
+|| ioException instanceof GroupMismatchException
+|| ioException instanceof ContainerNotOpenException);
 // Make sure the retryCount is reset after the exception is handled
 Assert.assertTrue(keyOutputStream.getRetryCount() == 0);
 // now close the stream, It will update the ack length after watchForCommit


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1955. TestBlockOutputStreamWithFailures#test2DatanodesFailure failing because of assertion error.

2019-08-14 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2432356  HDDS-1955. 
TestBlockOutputStreamWithFailures#test2DatanodesFailure failing because of 
assertion error.
2432356 is described below

commit 2432356570140ec7f55e1ab56e442c373ff05a16
Author: Mukul Kumar Singh 
AuthorDate: Wed Aug 14 20:36:43 2019 +0530

HDDS-1955. TestBlockOutputStreamWithFailures#test2DatanodesFailure failing 
because of assertion error.

Signed-off-by: Nanda kumar 
---
 .../rpc/TestBlockOutputStreamWithFailures.java   | 20 
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
index 7a69e27..8649837 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
@@ -501,14 +501,18 @@ public class TestBlockOutputStreamWithFailures {
 // and one flush for partial chunk
 key.flush();
 
-// Since, 2 datanodes went down, if the pipeline gets destroyed quickly,
-// it will hit GroupMismatchException else, it will fail with
-// RaftRetryFailureException
-Assert.assertTrue((HddsClientUtils.
-checkForException(blockOutputStream
-.getIoException()) instanceof RaftRetryFailureException)
-|| HddsClientUtils.checkForException(
-blockOutputStream.getIoException()) instanceof GroupMismatchException);
+Throwable ioException = HddsClientUtils.checkForException(
+blockOutputStream.getIoException());
+// Since, 2 datanodes went down,
+// a) if the pipeline gets destroyed quickly it will hit
+//GroupMismatchException.
+// b) will hit close container exception if the container is closed
+//but pipeline is still not destroyed.
+// c) will fail with RaftRetryFailureException if the leader election
+//did not finish before the request retry count finishes.
+Assert.assertTrue(ioException instanceof RaftRetryFailureException
+|| ioException instanceof GroupMismatchException
+|| ioException instanceof ContainerNotOpenException);
 // Make sure the retryCount is reset after the exception is handled
 Assert.assertTrue(keyOutputStream.getRetryCount() == 0);
 // now close the stream, It will update the ack length after watchForCommit
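
The rewritten assertion first unwraps the stream's IOException once and then matches it against the three possible failure modes. Below is a minimal, self-contained sketch of that unwrapping idea; findCause is a hypothetical helper, not the actual HddsClientUtils API, and the placeholder exception classes stand in for RaftRetryFailureException, GroupMismatchException and ContainerNotOpenException.

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

public class CauseMatcher {

  // Walk the cause chain and return the first throwable whose type matches
  // one of the expected classes, or null when nothing matches.
  static Throwable findCause(Throwable t,
      List<Class<? extends Throwable>> expected) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
      for (Class<? extends Throwable> c : expected) {
        if (c.isInstance(cur)) {
          return cur;
        }
      }
    }
    return null;
  }

  public static void main(String[] args) {
    IOException io = new IOException(
        new IllegalStateException("container closed"));
    // Matching against a set of candidate types, the way the test accepts
    // any one of several legitimate failure modes.
    Throwable match = findCause(io, Arrays.asList(
        IllegalStateException.class, ArithmeticException.class));
    System.out.println(match);
  }
}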


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1965. Compile error due to leftover ScmBlockLocationTestIngClient file (#1293)

2019-08-14 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 83e452e  HDDS-1965. Compile error due to leftover ScmBlockLocationTestIngClient file (#1293)
83e452e is described below

commit 83e452eceac63559c2f5146510ae3e89e310ac1e
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Wed Aug 14 15:09:45 2019 +0200

HDDS-1965. Compile error due to leftover ScmBlockLocationTestIngClient file (#1293)
---
 .../ozone/om/ScmBlockLocationTestIngClient.java| 195 -
 1 file changed, 195 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
deleted file mode 100644
index 982e87e..000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.client.ContainerBlockID;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.protocol.proto
-.ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result;
-import static org.apache.hadoop.hdds.protocol.proto
-.ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result.success;
-import static org.apache.hadoop.hdds.protocol.proto
-.ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result.unknownFailure;
-
-/**
- * This is a testing client that allows us to intercept calls from OzoneManager
- * to SCM.
- * 
- * TODO: OzoneManager#getScmBlockClient -- so that we can load this class up via
- * config setting into OzoneManager. Right now, we just pass this to
- * KeyDeletingService only.
- * 
- * TODO: Move this class to a generic test utils so we can use this class in
- * other Ozone Manager tests.
- */
-public class ScmBlockLocationTestingClient implements ScmBlockLocationProtocol {
-  private static final Logger LOG =
-  LoggerFactory.getLogger(ScmBlockLocationTestingClient.class);
-  private final String clusterID;
-  private final String scmId;
-
-  // 0 means no calls will fail, +1 means all calls will fail, +2 means every
-  // second call will fail, +3 means every third and so on.
-  private final int failCallsFrequency;
-  private int currentCall = 0;
-
-  /**
-   * If ClusterID or SCMID is blank a per instance ID is generated.
-   *
-   * @param clusterID - String or blank.
-   * @param scmId - String or Blank.
-   * @param failCallsFrequency - Set to 0 for no failures, 1 for always to fail,
-   * a positive number for that frequency of failure.
-   */
-  public ScmBlockLocationTestingClient(String clusterID, String scmId,
-  int failCallsFrequency) {
-this.clusterID = StringUtils.isNotBlank(clusterID) ? clusterID :
-UUID.randomUUID().toString();
-this.scmId = StringUtils

[hadoop] branch ozone-0.4.1 updated: HDDS-1952. Disable TestMiniChaosOzoneCluster in integration.sh. (#1284)

2019-08-13 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 569d75b  HDDS-1952. Disable TestMiniChaosOzoneCluster in integration.sh. (#1284)
569d75b is described below

commit 569d75bd363bdee626b3ee63fcb9a5576873703a
Author: Doroszlai, Attila 
AuthorDate: Tue Aug 13 19:07:19 2019 +0530

HDDS-1952. Disable TestMiniChaosOzoneCluster in integration.sh. (#1284)

Signed-off-by: Nanda kumar 
(cherry picked from commit 3dc22d6ef12157d804a43c28e029b86d88cc4b5b)
---
 hadoop-ozone/dev-support/checks/integration.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/dev-support/checks/integration.sh b/hadoop-ozone/dev-support/checks/integration.sh
index 02d9b8b..8170c2e 100755
--- a/hadoop-ozone/dev-support/checks/integration.sh
+++ b/hadoop-ozone/dev-support/checks/integration.sh
@@ -18,7 +18,8 @@ cd "$DIR/../../.." || exit 1
 
 export MAVEN_OPTS="-Xmx4096m"
 mvn -B install -f pom.ozone.xml -DskipTests
-mvn -B -fn test -f pom.ozone.xml -pl :hadoop-ozone-integration-test,:hadoop-ozone-filesystem,:hadoop-ozone-tools
+mvn -B -fn test -f pom.ozone.xml -pl :hadoop-ozone-integration-test,:hadoop-ozone-filesystem,:hadoop-ozone-tools \
+  -Dtest=\!TestMiniChaosOzoneCluster
 module_failed_tests=$(find "." -name 'TEST*.xml' -print0 \
 | xargs -0 -n1 "grep" -l -E "

[hadoop] branch trunk updated: HDDS-1952. Disable TestMiniChaosOzoneCluster in integration.sh. (#1284)

2019-08-13 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3dc22d6  HDDS-1952. Disable TestMiniChaosOzoneCluster in integration.sh. (#1284)
3dc22d6 is described below

commit 3dc22d6ef12157d804a43c28e029b86d88cc4b5b
Author: Doroszlai, Attila 
AuthorDate: Tue Aug 13 19:07:19 2019 +0530

HDDS-1952. Disable TestMiniChaosOzoneCluster in integration.sh. (#1284)

Signed-off-by: Nanda kumar 
---
 hadoop-ozone/dev-support/checks/integration.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/dev-support/checks/integration.sh b/hadoop-ozone/dev-support/checks/integration.sh
index 02d9b8b..8170c2e 100755
--- a/hadoop-ozone/dev-support/checks/integration.sh
+++ b/hadoop-ozone/dev-support/checks/integration.sh
@@ -18,7 +18,8 @@ cd "$DIR/../../.." || exit 1
 
 export MAVEN_OPTS="-Xmx4096m"
 mvn -B install -f pom.ozone.xml -DskipTests
-mvn -B -fn test -f pom.ozone.xml -pl :hadoop-ozone-integration-test,:hadoop-ozone-filesystem,:hadoop-ozone-tools
+mvn -B -fn test -f pom.ozone.xml -pl :hadoop-ozone-integration-test,:hadoop-ozone-filesystem,:hadoop-ozone-tools \
+  -Dtest=\!TestMiniChaosOzoneCluster
 module_failed_tests=$(find "." -name 'TEST*.xml' -print0 \
 | xargs -0 -n1 "grep" -l -E "

[hadoop] branch ozone-0.4.1 updated: HDDS-1908. TestMultiBlockWritesWithDnFailures is failing (#1282)

2019-08-13 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 2b7b0aa  HDDS-1908. TestMultiBlockWritesWithDnFailures is failing (#1282)
2b7b0aa is described below

commit 2b7b0aaa89db05e685bd9aeaa6853ff070cf3c0a
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Tue Aug 13 12:08:55 2019 +0200

HDDS-1908. TestMultiBlockWritesWithDnFailures is failing (#1282)

(cherry picked from commit 0b507d2ddf132985b43b4e2d3ad11d7fd2d90cd3)
---
 .../client/rpc/TestFailureHandlingByClient.java| 65 +-
 .../rpc/TestMultiBlockWritesWithDnFailures.java| 76 ++
 2 files changed, 67 insertions(+), 74 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index 7c014cc..9f95be5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.junit.After;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -65,7 +66,6 @@ public class TestFailureHandlingByClient {
   private String volumeName;
   private String bucketName;
   private String keyString;
-  private int maxRetries;
 
   /**
* Create a MiniDFSCluster for testing.
@@ -76,7 +76,6 @@ public class TestFailureHandlingByClient {
*/
   private void init() throws Exception {
 conf = new OzoneConfiguration();
-maxRetries = 100;
 chunkSize = (int) OzoneConsts.MB;
 blockSize = 4 * chunkSize;
 conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, 5,
@@ -114,7 +113,8 @@ public class TestFailureHandlingByClient {
   /**
* Shutdown MiniDFSCluster.
*/
-  private void shutdown() {
+  @After
+  public void shutdown() {
 if (cluster != null) {
   cluster.shutdown();
 }
@@ -159,61 +159,6 @@ public class TestFailureHandlingByClient {
 OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
 Assert.assertEquals(data.length, keyInfo.getDataSize());
 validateData(keyName, data);
-shutdown();
-  }
-
-
-  @Test
-  public void testMultiBlockWritesWithIntermittentDnFailures()
-  throws Exception {
-startCluster();
-String keyName = UUID.randomUUID().toString();
-OzoneOutputStream key =
-createKey(keyName, ReplicationType.RATIS, 6 * blockSize);
-String data = ContainerTestHelper
-.getFixedLengthString(keyString, blockSize + chunkSize);
-key.write(data.getBytes());
-
-// get the name of a valid container
-Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-KeyOutputStream keyOutputStream =
-(KeyOutputStream) key.getOutputStream();
-List streamEntryList =
-keyOutputStream.getStreamEntries();
-
-// Assert that 6 block will be preallocated
-Assert.assertEquals(6, streamEntryList.size());
-key.write(data.getBytes());
-key.flush();
-long containerId = streamEntryList.get(0).getBlockID().getContainerID();
-BlockID blockId = streamEntryList.get(0).getBlockID();
-ContainerInfo container =
-cluster.getStorageContainerManager().getContainerManager()
-.getContainer(ContainerID.valueof(containerId));
-Pipeline pipeline =
-cluster.getStorageContainerManager().getPipelineManager()
-.getPipeline(container.getPipelineID());
-List datanodes = pipeline.getNodes();
-cluster.shutdownHddsDatanode(datanodes.get(0));
-
-// The write will fail but exception will be handled and length will be
-// updated correctly in OzoneManager once the steam is closed
-key.write(data.getBytes());
-
-// shutdown the second datanode
-cluster.shutdownHddsDatanode(datanodes.get(1));
-key.write(data.getBytes());
-key.close();
-OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-.setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
-.setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName)
-.setRefreshPipeline(true)
-.build();
-OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-Assert.assertEquals(4 * data.getBytes().length, keyInfo.getDataSize());
-validateData(keyName,
-data.concat(data).concat(data).concat(data).ge
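
The structural change in this patch is worth noting: each test previously had to call shutdown() on its last line, so a failing assertion leaked the mini cluster. Moving teardown into a JUnit 4 @After method runs it whether the test passes or fails. A small runnable sketch of the pattern, with a StringBuilder standing in for the cluster:

import org.junit.After;
import org.junit.Assert;
import org.junit.Test;

public class AfterTeardownExample {

  private StringBuilder resource;   // stand-in for the mini cluster

  @Test
  public void testSomething() {
    resource = new StringBuilder("cluster");
    Assert.assertEquals("cluster", resource.toString());
    // No manual cleanup here: @After runs even if the assertion fails.
  }

  @After
  public void shutdown() {
    if (resource != null) {         // same null guard as in the patch
      resource.setLength(0);        // stands in for cluster.shutdown()
      resource = null;
    }
  }
}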

[hadoop] branch ozone-0.4.1 updated: HDDS-1951. Wrong symbolic release name on 0.4.1 branch. (#1273)

2019-08-13 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 7538798  HDDS-1951. Wrong symbolic release name on 0.4.1 branch. (#1273)
7538798 is described below

commit 7538798fa047047adc07b4b10d22e67d54401d89
Author: Márton Elek 
AuthorDate: Tue Aug 13 15:25:59 2019 +0530

HDDS-1951. Wrong symbolic release name on 0.4.1 branch. (#1273)

Signed-off-by: Nanda kumar 
---
 hadoop-ozone/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index be626bd..6a05b3a 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -31,7 +31,7 @@
 0.4.1-SNAPSHOT
 0.4.0-2337318-SNAPSHOT
 1.60
-Crater Lake
+Biscayne
 ${ozone.version}
 3.0.0-M1
 4.0


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4.1 updated: HDDS-1888. Add containers to node2container map in SCM as part of ICR processing.

2019-08-08 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 033c175  HDDS-1888. Add containers to node2container map in SCM as part of ICR processing.
033c175 is described below

commit 033c175cd682545340211cb6e0b08b5a45572e82
Author: Nanda kumar 
AuthorDate: Thu Aug 8 15:22:03 2019 +0530

HDDS-1888. Add containers to node2container map in SCM as part of ICR processing.

Signed-off-by: Nanda kumar 
(cherry picked from commit 397a5633af767eee99083c0ac4a8d4282f651911)
---
 .../IncrementalContainerReportHandler.java | 16 -
 .../apache/hadoop/hdds/scm/node/NodeManager.java   | 11 
 .../hadoop/hdds/scm/node/NodeStateManager.java | 15 +
 .../hadoop/hdds/scm/node/SCMNodeManager.java   |  7 +++
 .../hadoop/hdds/scm/node/states/NodeStateMap.java  | 72 +-
 .../hdds/scm/server/StorageContainerManager.java   |  3 +-
 .../hadoop/hdds/scm/container/MockNodeManager.java | 13 
 .../TestIncrementalContainerReportHandler.java |  9 ++-
 .../testutils/ReplicationNodeManagerMock.java  |  7 +++
 9 files changed, 132 insertions(+), 21 deletions(-)

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
index a7efb55..3dd3d9d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdds.scm.container;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos
 .ContainerReplicaProto;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
 .IncrementalContainerReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventHandler;
@@ -39,9 +42,13 @@ public class IncrementalContainerReportHandler extends
   private static final Logger LOG = LoggerFactory.getLogger(
   IncrementalContainerReportHandler.class);
 
+  private final NodeManager nodeManager;
+
   public IncrementalContainerReportHandler(
+  final NodeManager nodeManager,
   final ContainerManager containerManager)  {
 super(containerManager, LOG);
+this.nodeManager = nodeManager;
   }
 
   @Override
@@ -53,9 +60,16 @@ public class IncrementalContainerReportHandler extends
 for (ContainerReplicaProto replicaProto :
 report.getReport().getReportList()) {
   try {
-processContainerReplica(report.getDatanodeDetails(), replicaProto);
+final DatanodeDetails dd = report.getDatanodeDetails();
+final ContainerID id = ContainerID.valueof(
+replicaProto.getContainerID());
+nodeManager.addContainer(dd, id);
+processContainerReplica(dd, replicaProto);
   } catch (ContainerNotFoundException e) {
 LOG.warn("Container {} not found!", replicaProto.getContainerID());
+  } catch (NodeNotFoundException ex) {
+LOG.error("Received ICR from unknown datanode {} {}",
+report.getDatanodeDetails(), ex);
   } catch (IOException e) {
 LOG.error("Exception while processing ICR for container {}",
 replicaProto.getContainerID());
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 0ccbb82..d8890fb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -130,6 +130,17 @@ public interface NodeManager extends StorageContainerNodeProtocol,
   void removePipeline(Pipeline pipeline);
 
   /**
+   * Adds the given container to the specified datanode.
+   *
+   * @param datanodeDetails - DatanodeDetails
+   * @param containerId - containerID
+   * @throws NodeNotFoundException - if datanode is not known. For new datanode
+   *use addDatanodeInContainerMap call.
+   */
+  void addContainer(DatanodeDetails datanodeDetails,
+ContainerID containerId) throws NodeNotFoundException;
+
+  /**
* Remaps datanode to containers mapping to the new set of containers.
* @param datanodeDetails - DatanodeDetails
* @param containerIds - Set of containerIDs
di

[hadoop] branch trunk updated: HDDS-1888. Add containers to node2container map in SCM as part of ICR processing.

2019-08-08 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 397a563  HDDS-1888. Add containers to node2container map in SCM as part of ICR processing.
397a563 is described below

commit 397a5633af767eee99083c0ac4a8d4282f651911
Author: Nanda kumar 
AuthorDate: Thu Aug 8 15:22:03 2019 +0530

HDDS-1888. Add containers to node2container map in SCM as part of ICR processing.

Signed-off-by: Nanda kumar 
---
 .../IncrementalContainerReportHandler.java | 16 -
 .../apache/hadoop/hdds/scm/node/NodeManager.java   | 11 
 .../hadoop/hdds/scm/node/NodeStateManager.java | 15 +
 .../hadoop/hdds/scm/node/SCMNodeManager.java   |  7 +++
 .../hadoop/hdds/scm/node/states/NodeStateMap.java  | 72 +-
 .../hdds/scm/server/StorageContainerManager.java   |  3 +-
 .../hadoop/hdds/scm/container/MockNodeManager.java | 13 
 .../TestIncrementalContainerReportHandler.java |  9 ++-
 .../testutils/ReplicationNodeManagerMock.java  |  7 +++
 9 files changed, 132 insertions(+), 21 deletions(-)

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
index a7efb55..3dd3d9d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdds.scm.container;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos
 .ContainerReplicaProto;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
 .IncrementalContainerReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventHandler;
@@ -39,9 +42,13 @@ public class IncrementalContainerReportHandler extends
   private static final Logger LOG = LoggerFactory.getLogger(
   IncrementalContainerReportHandler.class);
 
+  private final NodeManager nodeManager;
+
   public IncrementalContainerReportHandler(
+  final NodeManager nodeManager,
   final ContainerManager containerManager)  {
 super(containerManager, LOG);
+this.nodeManager = nodeManager;
   }
 
   @Override
@@ -53,9 +60,16 @@ public class IncrementalContainerReportHandler extends
 for (ContainerReplicaProto replicaProto :
 report.getReport().getReportList()) {
   try {
-processContainerReplica(report.getDatanodeDetails(), replicaProto);
+final DatanodeDetails dd = report.getDatanodeDetails();
+final ContainerID id = ContainerID.valueof(
+replicaProto.getContainerID());
+nodeManager.addContainer(dd, id);
+processContainerReplica(dd, replicaProto);
   } catch (ContainerNotFoundException e) {
 LOG.warn("Container {} not found!", replicaProto.getContainerID());
+  } catch (NodeNotFoundException ex) {
+LOG.error("Received ICR from unknown datanode {} {}",
+report.getDatanodeDetails(), ex);
   } catch (IOException e) {
 LOG.error("Exception while processing ICR for container {}",
 replicaProto.getContainerID());
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 0ccbb82..d8890fb 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -130,6 +130,17 @@ public interface NodeManager extends StorageContainerNodeProtocol,
   void removePipeline(Pipeline pipeline);
 
   /**
+   * Adds the given container to the specified datanode.
+   *
+   * @param datanodeDetails - DatanodeDetails
+   * @param containerId - containerID
+   * @throws NodeNotFoundException - if datanode is not known. For new datanode
+   *use addDatanodeInContainerMap call.
+   */
+  void addContainer(DatanodeDetails datanodeDetails,
+ContainerID containerId) throws NodeNotFoundException;
+
+  /**
* Remaps datanode to containers mapping to the new set of containers.
* @param datanodeDetails - DatanodeDetails
* @param containerIds - Set of containerIDs
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeSta
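
The core of HDDS-1888 is that an incremental container report now updates the node-to-container map, and a report from a datanode SCM does not know about surfaces as NodeNotFoundException instead of being silently dropped. A self-contained sketch of that contract, with strings and longs as hypothetical stand-ins for DatanodeDetails and ContainerID:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class Node2ContainerMapDemo {

  static class NodeNotFoundException extends Exception {
    NodeNotFoundException(String msg) { super(msg); }
  }

  private final Map<String, Set<Long>> node2Containers = new HashMap<>();

  void registerNode(String datanodeUuid) {
    node2Containers.putIfAbsent(datanodeUuid, new HashSet<>());
  }

  // Mirrors the new NodeManager#addContainer contract: adding to an unknown
  // node is an error, not a silent insert, so stale reports from
  // unregistered datanodes become visible in the logs.
  void addContainer(String datanodeUuid, long containerId)
      throws NodeNotFoundException {
    Set<Long> containers = node2Containers.get(datanodeUuid);
    if (containers == null) {
      throw new NodeNotFoundException("Unknown datanode: " + datanodeUuid);
    }
    containers.add(containerId);
  }

  public static void main(String[] args) throws Exception {
    Node2ContainerMapDemo map = new Node2ContainerMapDemo();
    map.registerNode("dn-1");
    map.addContainer("dn-1", 42L);
    try {
      map.addContainer("dn-2", 7L);   // unregistered datanode: throws
    } catch (NodeNotFoundException e) {
      System.out.println(e.getMessage());
    }
  }
}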

[hadoop] branch ozone-0.4.1 updated: HDDS-1925. ozonesecure acceptance test broken by HTTP auth requirement (#1248)

2019-08-08 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new ed389df  HDDS-1925. ozonesecure acceptance test broken by HTTP auth requirement (#1248)
ed389df is described below

commit ed389df61562a4437530c6a77d206473420a205b
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Thu Aug 8 02:07:15 2019 +0200

HDDS-1925. ozonesecure acceptance test broken by HTTP auth requirement (#1248)

(cherry picked from commit ab6a5c9d07a50b49d696b983e1a1cd4f9ef2a44d)
---
 hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh   |  2 +-
 hadoop-ozone/dist/src/main/compose/testlib.sh| 16 +++-
 hadoop-ozone/dist/src/main/smoketest/basic/basic.robot   |  5 ++---
 .../dist/src/main/smoketest/basic/ozone-shell.robot  |  1 +
 hadoop-ozone/dist/src/main/smoketest/commonlib.robot |  6 +-
 .../dist/src/main/smoketest/s3/commonawslib.robot|  1 +
 hadoop-ozone/dist/src/main/smoketest/s3/webui.robot  |  7 ---
 7 files changed, 29 insertions(+), 9 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
index f13f010..01106b8 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
@@ -33,7 +33,7 @@ execute_robot_test scm security
 
 execute_robot_test scm ozonefs/ozonefs.robot
 
-execute_robot_test scm s3
+execute_robot_test s3g s3
 
 stop_docker_env
 
diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh
index 065c53f..462b9fa 100755
--- a/hadoop-ozone/dist/src/main/compose/testlib.sh
+++ b/hadoop-ozone/dist/src/main/compose/testlib.sh
@@ -28,6 +28,20 @@ mkdir -p "$RESULT_DIR"
 #Should be writeable from the docker containers where user is different.
 chmod ogu+w "$RESULT_DIR"
 
+## @description print the number of datanodes up
+## @param the docker-compose file
+count_datanodes() {
+  local compose_file=$1
+
+  local jmx_url='http://scm:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo'
+  if [[ "${SECURITY_ENABLED}" == 'true' ]]; then
+docker-compose -f "${compose_file}" exec -T scm bash -c "kinit -k HTTP/s...@example.com -t /etc/security/keytabs/HTTP.keytab && curl --negotiate -u : -s '${jmx_url}'"
+  else
+docker-compose -f "${compose_file}" exec -T scm curl -s "${jmx_url}"
+  fi \
+| jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value'
+}
+
 ## @description wait until datanodes are up (or 30 seconds)
 ## @param the docker-compose file
 ## @param number of datanodes to wait for (default: 3)
@@ -43,7 +57,7 @@ wait_for_datanodes(){
 
 #This line checks the number of HEALTHY datanodes registered in scm over the
 # jmx HTTP servlet
- datanodes=$(docker-compose -f "${compose_file}" exec -T scm curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value')
+ datanodes=$(count_datanodes "${compose_file}")
  if [[ "$datanodes" ]]; then
if [[ ${datanodes} -ge ${datanode_count} ]]; then
 
diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot b/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
index 88af097..c750521 100644
--- a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
@@ -25,9 +25,8 @@ ${DATANODE_HOST}datanode
 *** Test Cases ***
 
 Check webui static resources
-${result} =Executecurl -s -I http://scm:9876/static/bootstrap-3.3.7/js/bootstrap.min.js
-   Should contain ${result}200
-${result} =Executecurl -s -I http://om:9874/static/bootstrap-3.3.7/js/bootstrap.min.js
+Run Keyword if'${SECURITY_ENABLED}' == 'true'Kinit HTTP user
+${result} =Executecurl --negotiate -u : -s -I http://scm:9876/static/bootstrap-3.3.7/js/bootstrap.min.js
Should contain ${result}200
 
 Start freon testing
diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
index 690fa26..60a3f04 100644
--- a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
@@ -17,6 +17,7 @@
 Documentation   Test ozone shell CLI usage
 Library OperatingSystem
 Resource../commonlib.robot
+Test Setup  Run Keyword if'${SECURITY_

[hadoop] branch ozone-0.4.1 updated: HDDS-1907. TestOzoneRpcClientWithRatis is failing with ACL errors.

2019-08-07 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 8f55dc5  HDDS-1907. TestOzoneRpcClientWithRatis is failing with ACL errors.
8f55dc5 is described below

commit 8f55dc5feec831ea6eef8ea965d838f5ae9ecbb1
Author: Xiaoyu Yao 
AuthorDate: Wed Aug 7 18:34:05 2019 +0530

HDDS-1907. TestOzoneRpcClientWithRatis is failing with ACL errors.

Signed-off-by: Nanda kumar 
(cherry picked from commit 70f46746b17c01450d2ef57edb2ce5314ab53308)
---
 .../hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java| 10 ++
 1 file changed, 10 insertions(+)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 8cede59..4e426ba 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -120,6 +120,8 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeFalse;
+
 import org.junit.Ignore;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -2268,6 +2270,8 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testNativeAclsForVolume() throws Exception {
+assumeFalse("Remove this once ACL HA is supported",
+getClass().equals(TestOzoneRpcClientWithRatis.class));
 String volumeName = UUID.randomUUID().toString();
 store.createVolume(volumeName);
 
@@ -2282,6 +2286,8 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testNativeAclsForBucket() throws Exception {
+assumeFalse("Remove this once ACL HA is supported",
+getClass().equals(TestOzoneRpcClientWithRatis.class));
 String volumeName = UUID.randomUUID().toString();
 String bucketName = UUID.randomUUID().toString();
 
@@ -2342,6 +2348,8 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testNativeAclsForKey() throws Exception {
+assumeFalse("Remove this once ACL HA is supported",
+getClass().equals(TestOzoneRpcClientWithRatis.class));
 String volumeName = UUID.randomUUID().toString();
 String bucketName = UUID.randomUUID().toString();
 String key1 = "dir1/dir2" + UUID.randomUUID().toString();
@@ -2404,6 +2412,8 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testNativeAclsForPrefix() throws Exception {
+assumeFalse("Remove this once ACL HA is supported",
+getClass().equals(TestOzoneRpcClientWithRatis.class));
 String volumeName = UUID.randomUUID().toString();
 String bucketName = UUID.randomUUID().toString();
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1907. TestOzoneRpcClientWithRatis is failing with ACL errors.

2019-08-07 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 70f4674  HDDS-1907. TestOzoneRpcClientWithRatis is failing with ACL errors.
70f4674 is described below

commit 70f46746b17c01450d2ef57edb2ce5314ab53308
Author: Xiaoyu Yao 
AuthorDate: Wed Aug 7 18:34:05 2019 +0530

HDDS-1907. TestOzoneRpcClientWithRatis is failing with ACL errors.

Signed-off-by: Nanda kumar 
---
 .../hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java| 10 ++
 1 file changed, 10 insertions(+)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 8cede59..4e426ba 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -120,6 +120,8 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeFalse;
+
 import org.junit.Ignore;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -2268,6 +2270,8 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testNativeAclsForVolume() throws Exception {
+assumeFalse("Remove this once ACL HA is supported",
+getClass().equals(TestOzoneRpcClientWithRatis.class));
 String volumeName = UUID.randomUUID().toString();
 store.createVolume(volumeName);
 
@@ -2282,6 +2286,8 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testNativeAclsForBucket() throws Exception {
+assumeFalse("Remove this once ACL HA is supported",
+getClass().equals(TestOzoneRpcClientWithRatis.class));
 String volumeName = UUID.randomUUID().toString();
 String bucketName = UUID.randomUUID().toString();
 
@@ -2342,6 +2348,8 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testNativeAclsForKey() throws Exception {
+assumeFalse("Remove this once ACL HA is supported",
+getClass().equals(TestOzoneRpcClientWithRatis.class));
 String volumeName = UUID.randomUUID().toString();
 String bucketName = UUID.randomUUID().toString();
 String key1 = "dir1/dir2" + UUID.randomUUID().toString();
@@ -2404,6 +2412,8 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testNativeAclsForPrefix() throws Exception {
+assumeFalse("Remove this once ACL HA is supported",
+getClass().equals(TestOzoneRpcClientWithRatis.class));
 String volumeName = UUID.randomUUID().toString();
 String bucketName = UUID.randomUUID().toString();
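
The fix relies on JUnit 4's Assume: when an assumption fails, the framework reports the test as skipped rather than failed, which is why the Ratis subclass no longer breaks the build while ACL HA support is pending. A minimal sketch of the mechanism; the system property here is only an illustrative condition, not part of the Ozone tests:

import static org.junit.Assume.assumeFalse;

import org.junit.Assert;
import org.junit.Test;

public class AssumeExample {

  @Test
  public void skippedWhenConditionHolds() {
    // A true condition makes JUnit throw AssumptionViolatedException,
    // which marks the test as skipped instead of failed.
    assumeFalse("Remove this once the feature is supported",
        Boolean.getBoolean("feature.unsupported"));
    Assert.assertEquals(2, 1 + 1);
  }
}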
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1905. PipelineActionHandler is not closing the pipeline when close action is received. (#1227)

2019-08-06 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2662e20  HDDS-1905. PipelineActionHandler is not closing the pipeline when close action is received. (#1227)
2662e20 is described below

commit 2662e20580175c0824cbeb480edb05ffc00858e5
Author: Nanda kumar 
AuthorDate: Tue Aug 6 14:36:04 2019 +0530

HDDS-1905. PipelineActionHandler is not closing the pipeline when close action is received. (#1227)
---
 .../java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java  | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
index 34e974a..8d040f1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
@@ -61,6 +61,7 @@ public class PipelineActionHandler
   "Reason : {}", action.getAction(), pipeline,
   report.getDatanodeDetails(),
   action.getClosePipeline().getDetailedReason());
+  pipelineManager.finalizeAndDestroyPipeline(pipeline, true);
 } catch (IOException ioe) {
   LOG.error("Could not execute pipeline action={} pipeline={} {}",
   action, pipelineID, ioe);
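
The one-line fix above addresses a log-then-forget bug: the handler logged the close action but never told the pipeline manager to act on it. A runnable sketch of the shape of the fix, using simplified stand-in types rather than the real SCM classes:

public class CloseActionDemo {

  enum Action { CLOSE, OTHER }

  interface PipelineManager {
    void finalizeAndDestroyPipeline(String pipelineId, boolean onTimeout);
  }

  static void onAction(Action action, String pipelineId,
      PipelineManager manager) {
    if (action == Action.CLOSE) {
      System.out.println("Received close action for " + pipelineId);
      // Before the fix the handler stopped at the log line above;
      // the actual close call below was the missing piece.
      manager.finalizeAndDestroyPipeline(pipelineId, true);
    }
  }

  public static void main(String[] args) {
    onAction(Action.CLOSE, "pipeline-1",
        (id, timeout) -> System.out.println("Destroying " + id));
  }
}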


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4.1 updated: HDDS-1882. TestReplicationManager failed with NPE. (#1197)

2019-08-01 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new bb7ce03  HDDS-1882. TestReplicationManager failed with NPE. (#1197)
bb7ce03 is described below

commit bb7ce03cc06c5dffe089027343f0680eb6d0d738
Author: Sammi Chen 
AuthorDate: Fri Aug 2 03:33:57 2019 +0800

HDDS-1882. TestReplicationManager failed with NPE. (#1197)

(cherry picked from commit e111789aeb005c76e443c96418cd9fddf9bdb8a2)
---
 .../org/apache/hadoop/hdds/scm/container/ReplicationManager.java   | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
index 0557ea1..a8dff40 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
@@ -486,8 +486,11 @@ public class ReplicationManager {
 final List excludeList = replicas.stream()
 .map(ContainerReplica::getDatanodeDetails)
 .collect(Collectors.toList());
-inflightReplication.get(id).stream().map(r -> r.datanode)
-.forEach(excludeList::add);
+List actionList = inflightReplication.get(id);
+if (actionList != null) {
+  actionList.stream().map(r -> r.datanode)
+  .forEach(excludeList::add);
+}
 final List selectedDatanodes = containerPlacement
 .chooseDatanodes(excludeList, null, delta,
 container.getUsedBytes());


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1882. TestReplicationManager failed with NPE. (#1197)

2019-08-01 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e111789  HDDS-1882. TestReplicationManager failed with NPE. (#1197)
e111789 is described below

commit e111789aeb005c76e443c96418cd9fddf9bdb8a2
Author: Sammi Chen 
AuthorDate: Fri Aug 2 03:33:57 2019 +0800

HDDS-1882. TestReplicationManager failed with NPE. (#1197)
---
 .../org/apache/hadoop/hdds/scm/container/ReplicationManager.java   | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
index 0557ea1..a8dff40 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
@@ -486,8 +486,11 @@ public class ReplicationManager {
 final List excludeList = replicas.stream()
 .map(ContainerReplica::getDatanodeDetails)
 .collect(Collectors.toList());
-inflightReplication.get(id).stream().map(r -> r.datanode)
-.forEach(excludeList::add);
+List actionList = inflightReplication.get(id);
+if (actionList != null) {
+  actionList.stream().map(r -> r.datanode)
+  .forEach(excludeList::add);
+}
 final List selectedDatanodes = containerPlacement
 .chooseDatanodes(excludeList, null, delta,
 container.getUsedBytes());
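
The NPE came from calling .stream() on the result of Map.get() without a null check; a container with no in-flight replication simply has no entry in the map. A small sketch of the guard the patch adds, followed by an equivalent getOrDefault form:

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class NullGuardDemo {

  public static void main(String[] args) {
    Map<Long, List<String>> inflight = new HashMap<>();
    List<String> exclude = new ArrayList<>();

    // Before the fix: inflight.get(42L).stream()... throws NPE whenever the
    // container has no in-flight actions. Guarding the lookup avoids it.
    List<String> actions = inflight.get(42L);
    if (actions != null) {
      exclude.addAll(actions);
    }

    // A more compact alternative with the same behavior:
    exclude.addAll(inflight.getOrDefault(42L, Collections.emptyList()));

    System.out.println(exclude); // []
  }
}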


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4.1 updated: HDDS-1853. Fix failing blockade test-cases. (#1151)

2019-07-24 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 7cdb80b  HDDS-1853. Fix failing blockade test-cases. (#1151)
7cdb80b is described below

commit 7cdb80b16254355d0e7546b559a2862becb67218
Author: Nanda kumar 
AuthorDate: Thu Jul 25 00:02:28 2019 +0530

HDDS-1853. Fix failing blockade test-cases. (#1151)

(cherry picked from commit cb69700ac6b535e108b43f00a61f31712f2cecb2)
---
 .../src/main/compose/ozoneblockade/docker-config   |  6 ++--
 .../src/test/blockade/ozone/cluster.py |  7 +
 .../src/test/blockade/ozone/container.py   | 34 +-
 .../test/blockade/test_blockade_client_failure.py  |  8 +++--
 .../blockade/test_blockade_datanode_isolation.py   |  1 +
 .../test/blockade/test_blockade_mixed_failure.py   |  2 ++
 ...t_blockade_mixed_failure_three_nodes_isolate.py |  2 +-
 .../test_blockade_mixed_failure_two_nodes.py   |  2 ++
 .../test/blockade/test_blockade_scm_isolation.py   |  7 +++--
 9 files changed, 52 insertions(+), 17 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config
index 8347998..af72465 100644
--- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config
@@ -32,9 +32,9 @@ OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1
 OZONE-SITE.XML_ozone.scm.pipeline.destroy.timeout=15s
 OZONE-SITE.XML_hdds.heartbeat.interval=2s
 OZONE-SITE.XML_hdds.scm.wait.time.after.safemode.exit=30s
-OZONE-SITE.XML_hdds.scm.replication.thread.interval=5s
-OZONE-SITE.XML_hdds.scm.replication.event.timeout=7s
-OZONE-SITE.XML_dfs.ratis.server.failure.duration=25s
+OZONE-SITE.XML_hdds.scm.replication.thread.interval=6s
+OZONE-SITE.XML_hdds.scm.replication.event.timeout=10s
+OZONE-SITE.XML_dfs.ratis.server.failure.duration=35s
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
 LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py
index d137793..9888e86 100644
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py
+++ b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py
@@ -146,6 +146,11 @@ class OzoneCluster(object):
 """
 Start Ozone Cluster in docker containers.
 """
+# check if proper env $HDDS_VERSION and $HADOOP_RUNNER_VERSION
+# are set.
+
+# check if docker is up.
+
 self.__logger__.info("Starting Ozone Cluster")
 if Blockade.blockade_status() == 0:
 Blockade.blockade_destroy()
@@ -263,6 +268,8 @@ class OzoneCluster(object):
 
 # Reading the container file.
 exit_code, output = util.run_docker_command("cat " + container_path, datanode)
+if exit_code != 0:
+raise ContainerNotFoundError("Container not found!")
 data = output.split("\n")
 # Reading key value pairs from container file.
 key_value = [x for x in data if re.search(r"\w+:\s\w+", x)]
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py
index ffb6a3d..6e8c344 100644
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py
+++ b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py
@@ -83,20 +83,37 @@ class Container:
 for dn in dns:
 if self.cluster.get_container_state(self.container_id, dn) == 'CLOSED':
 return True
-else:
-return False
+return False
 
 util.wait_until(predicate, int(os.environ["CONTAINER_STATUS_SLEEP"]), 10)
 if not predicate():
 raise Exception("None of the container replica is closed!")
 
-def wait_until_all_replicas_are_closed(self):
+def wait_until_two_replicas_are_closed(self):
 def predicate():
 dns = self.cluster.get_container_datanodes(self.container_id)
+closed_count = 0
 for dn in dns:
-if self.cluster.get_container_state(self.container_id, dn) != 'CLOSED':
-return False
-return True
+if self.cluster.get_container_state(self.container_id, dn) == 'CLOSED':
+closed_count = closed_count + 1

[hadoop] branch trunk updated: HDDS-1853. Fix failing blockade test-cases. (#1151)

2019-07-24 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new cb69700  HDDS-1853. Fix failing blockade test-cases. (#1151)
cb69700 is described below

commit cb69700ac6b535e108b43f00a61f31712f2cecb2
Author: Nanda kumar 
AuthorDate: Thu Jul 25 00:02:28 2019 +0530

HDDS-1853. Fix failing blockade test-cases. (#1151)
---
 .../src/main/compose/ozoneblockade/docker-config   |  6 ++--
 .../src/test/blockade/ozone/cluster.py |  7 +
 .../src/test/blockade/ozone/container.py   | 34 +-
 .../test/blockade/test_blockade_client_failure.py  |  8 +++--
 .../blockade/test_blockade_datanode_isolation.py   |  1 +
 .../test/blockade/test_blockade_mixed_failure.py   |  2 ++
 ...t_blockade_mixed_failure_three_nodes_isolate.py |  2 +-
 .../test_blockade_mixed_failure_two_nodes.py   |  2 ++
 .../test/blockade/test_blockade_scm_isolation.py   |  7 +++--
 9 files changed, 52 insertions(+), 17 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config
index 8347998..af72465 100644
--- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config
@@ -32,9 +32,9 @@ OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1
 OZONE-SITE.XML_ozone.scm.pipeline.destroy.timeout=15s
 OZONE-SITE.XML_hdds.heartbeat.interval=2s
 OZONE-SITE.XML_hdds.scm.wait.time.after.safemode.exit=30s
-OZONE-SITE.XML_hdds.scm.replication.thread.interval=5s
-OZONE-SITE.XML_hdds.scm.replication.event.timeout=7s
-OZONE-SITE.XML_dfs.ratis.server.failure.duration=25s
+OZONE-SITE.XML_hdds.scm.replication.thread.interval=6s
+OZONE-SITE.XML_hdds.scm.replication.event.timeout=10s
+OZONE-SITE.XML_dfs.ratis.server.failure.duration=35s
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
 LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py
index d137793..9888e86 100644
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py
+++ b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py
@@ -146,6 +146,11 @@ class OzoneCluster(object):
 """
 Start Ozone Cluster in docker containers.
 """
+# check if proper env $HDDS_VERSION and $HADOOP_RUNNER_VERSION
+# are set.
+
+# check if docker is up.
+
 self.__logger__.info("Starting Ozone Cluster")
 if Blockade.blockade_status() == 0:
 Blockade.blockade_destroy()
@@ -263,6 +268,8 @@ class OzoneCluster(object):
 
 # Reading the container file.
 exit_code, output = util.run_docker_command("cat " + container_path, datanode)
+if exit_code != 0:
+raise ContainerNotFoundError("Container not found!")
 data = output.split("\n")
 # Reading key value pairs from container file.
 key_value = [x for x in data if re.search(r"\w+:\s\w+", x)]
diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py
index ffb6a3d..6e8c344 100644
--- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py
+++ b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py
@@ -83,20 +83,37 @@ class Container:
 for dn in dns:
 if self.cluster.get_container_state(self.container_id, dn) == 'CLOSED':
 return True
-else:
-return False
+return False
 
 util.wait_until(predicate, int(os.environ["CONTAINER_STATUS_SLEEP"]), 10)
 if not predicate():
 raise Exception("None of the container replica is closed!")
 
-def wait_until_all_replicas_are_closed(self):
+def wait_until_two_replicas_are_closed(self):
 def predicate():
 dns = self.cluster.get_container_datanodes(self.container_id)
+closed_count = 0
 for dn in dns:
-if self.cluster.get_container_state(self.container_id, dn) != 'CLOSED':
-return False
-return True
+if self.cluster.get_container_state(self.container_id, dn) == 'CLOSED':
+closed_count = closed_count + 1
+if closed_count > 1:
+return True
+return False
+
+ 
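
The fixed helper polls until at least two replicas report CLOSED instead of requiring all three, which tolerates the deliberately failed datanode. A generic Java sketch of that poll-a-predicate pattern; waitUntil here is a hypothetical stand-in for the test's util.wait_until, with an assumed (predicate, attempts, sleep) signature:

import java.util.Arrays;
import java.util.List;
import java.util.function.Supplier;

public class WaitUntilDemo {

  // Poll a predicate until it holds or the attempts run out.
  static boolean waitUntil(Supplier<Boolean> predicate, int attempts,
      long sleepMillis) throws InterruptedException {
    for (int i = 0; i < attempts; i++) {
      if (predicate.get()) {
        return true;
      }
      Thread.sleep(sleepMillis);
    }
    return predicate.get();
  }

  public static void main(String[] args) throws InterruptedException {
    List<String> states = Arrays.asList("CLOSED", "CLOSED", "OPEN");
    // "At least two replicas closed", as in the fixed test helper.
    boolean twoClosed = waitUntil(
        () -> states.stream().filter("CLOSED"::equals).count() >= 2,
        3, 10L);
    System.out.println(twoClosed); // true
  }
}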

[hadoop] 08/08: HDDS-1766. ContainerStateMachine is unable to increment lastAppliedTermIndex. Contributed by Mukul Kumar Singh. (#1072)

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 9b3c034695d55c1edd7b5874b6929781fe35318e
Author: Mukul Kumar Singh 
AuthorDate: Sun Jul 14 10:53:51 2019 +0530

HDDS-1766. ContainerStateMachine is unable to increment lastAppliedTermIndex. Contributed by Mukul Kumar Singh. (#1072)

(cherry picked from commit 0976f6fc30ed8bb774d823f09c58cea54be05ae7)
---
 .../server/ratis/ContainerStateMachine.java| 40 --
 1 file changed, 21 insertions(+), 19 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index f4a8008..87826e6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.proto.RaftProtos.RaftPeerRole;
 import org.apache.ratis.protocol.RaftGroupId;
 import org.apache.ratis.server.RaftServer;
@@ -195,17 +196,16 @@ public class ContainerStateMachine extends BaseStateMachine {
 if (snapshot == null) {
   TermIndex empty =
   TermIndex.newTermIndex(0, RaftLog.INVALID_LOG_INDEX);
-  LOG.info(
-  "The snapshot info is null." + "Setting the last applied index to:"
-  + empty);
+  LOG.info("{}: The snapshot info is null. Setting the last applied index" +
+  "to:{}", gid, empty);
   setLastAppliedTermIndex(empty);
-  return RaftLog.INVALID_LOG_INDEX;
+  return empty.getIndex();
 }
 
 final File snapshotFile = snapshot.getFile().getPath().toFile();
 final TermIndex last =
 SimpleStateMachineStorage.getTermIndexFromSnapshotFile(snapshotFile);
-LOG.info("Setting the last applied index to " + last);
+LOG.info("{}: Setting the last applied index to {}", gid, last);
 setLastAppliedTermIndex(last);
 
 // initialize the dispatcher with snapshot so that it build the missing
@@ -241,18 +241,20 @@ public class ContainerStateMachine extends BaseStateMachine {
   @Override
   public long takeSnapshot() throws IOException {
 TermIndex ti = getLastAppliedTermIndex();
-LOG.info("Taking snapshot at termIndex:" + ti);
+long startTime = Time.monotonicNow();
 if (ti != null && ti.getIndex() != RaftLog.INVALID_LOG_INDEX) {
   final File snapshotFile =
   storage.getSnapshotFile(ti.getTerm(), ti.getIndex());
-  LOG.info("Taking a snapshot to file {}", snapshotFile);
+  LOG.info("{}: Taking a snapshot at:{} file {}", gid, ti, snapshotFile);
   try (FileOutputStream fos = new FileOutputStream(snapshotFile)) {
 persistContainerSet(fos);
   } catch (IOException ioe) {
-LOG.warn("Failed to write snapshot file \"" + snapshotFile
-+ "\", last applied index=" + ti);
+LOG.info("{}: Failed to write snapshot at:{} file {}", gid, ti,
+snapshotFile);
 throw ioe;
   }
+  LOG.info("{}: Finished taking a snapshot at:{} file:{} time:{}",
+  gid, ti, snapshotFile, (Time.monotonicNow() - startTime));
   return ti.getIndex();
 }
 return -1;
@@ -326,7 +328,7 @@ public class ContainerStateMachine extends BaseStateMachine {
 
   private ContainerCommandResponseProto dispatchCommand(
   ContainerCommandRequestProto requestProto, DispatcherContext context) {
-LOG.trace("dispatch {} containerID={} pipelineID={} traceID={}",
+LOG.trace("{}: dispatch {} containerID={} pipelineID={} traceID={}", gid,
 requestProto.getCmdType(), requestProto.getContainerID(),
 requestProto.getPipelineID(), requestProto.getTraceID());
 if (isBlockTokenEnabled) {
@@ -344,7 +346,7 @@ public class ContainerStateMachine extends BaseStateMachine {
 }
 ContainerCommandResponseProto response =
 dispatcher.dispatch(requestProto, context);
-LOG.trace("response {}", response);
+LOG.trace("{}: response {}", gid, response);
 return response;
   }
 
@@ -384,18 +386,18 @@ public class ContainerStateMachine extends BaseStateMachine {
 .supplyAsync(() -> ru
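
Besides tagging every log line with the Raft group id, the patch brackets the snapshot write with Time.monotonicNow() to report how long it took. Hadoop's Time.monotonicNow() is a monotonic millisecond clock (a wrapper over System.nanoTime()), so the measurement pattern looks like this sketch using the JDK call directly:

public class SnapshotTimingDemo {

  public static void main(String[] args) throws InterruptedException {
    long startTime = System.nanoTime() / 1_000_000L;  // monotonic millis
    Thread.sleep(25);               // stands in for writing the snapshot file
    long elapsed = System.nanoTime() / 1_000_000L - startTime;
    System.out.println("Finished taking a snapshot, time:" + elapsed + "ms");
  }
}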

[hadoop] 01/08: HDDS-1705. Recon: Add estimatedTotalCount to the response of containers and containers/{id} endpoints. Contributed by Vivek Ratnavel Subramanian.

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 758d756a8e08c8928df83093439ba9e434ac162b
Author: Vivek Ratnavel Subramanian 
AuthorDate: Mon Jul 8 21:06:50 2019 -0700

HDDS-1705. Recon: Add estimatedTotalCount to the response of containers and containers/{id} endpoints. Contributed by Vivek Ratnavel Subramanian.

(cherry picked from commit 82d88a8d30790c5841fc4f71ea39cc12b470c41f)
---
 .../org/apache/hadoop/ozone/common/Storage.java|   6 +-
 .../common/src/main/resources/ozone-default.xml|   6 +-
 .../hadoop/ozone/om/OzoneManagerStarter.java   |   2 +-
 .../recon/codegen/ReconSchemaGenerationModule.java |   2 +
 .../ozone/recon/schema/StatsSchemaDefinition.java  |  61 
 .../apache/hadoop/ozone/recon/ReconConstants.java  |   8 +-
 .../org/apache/hadoop/ozone/recon/ReconServer.java |  22 ++
 .../ozone/recon/api/ContainerKeyService.java   |  22 +-
 .../ozone/recon/api/types/ContainersResponse.java  |  94 ++
 .../hadoop/ozone/recon/api/types/KeysResponse.java |  93 ++
 .../recon/spi/ContainerDBServiceProvider.java  |  58 +++-
 .../spi/impl/ContainerDBServiceProviderImpl.java   | 137 -
 .../recon/spi/impl/ReconContainerDBProvider.java   |   4 +
 .../ozone/recon/tasks/ContainerKeyMapperTask.java  |  57 +++-
 .../recon/GuiceInjectorUtilsForTestsImpl.java} |  25 +-
 .../ozone/recon/api/TestContainerKeyService.java   | 186 +++-
 .../recon/persistence/AbstractSqlDatabaseTest.java |  12 +-
 .../persistence/TestStatsSchemaDefinition.java | 147 ++
 .../impl/TestContainerDBServiceProviderImpl.java   | 326 +
 .../impl/TestOzoneManagerServiceProviderImpl.java  |  53 +---
 .../recon/tasks/TestContainerKeyMapperTask.java| 127 
 .../recon/types/GuiceInjectorUtilsForTests.java| 117 
 22 files changed, 1209 insertions(+), 356 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
index f393ed9..7992dad 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
@@ -81,7 +81,7 @@ public abstract class Storage {
 
   /**
* Gets the path of the Storage dir.
-   * @return Stoarge dir path
+   * @return Storage dir path
*/
   public String getStorageDir() {
 return storageDir.getAbsoluteFile().toString();
@@ -117,7 +117,7 @@ public abstract class Storage {
   }
 
   /**
-   * Retreives the storageInfo instance to read/write the common
+   * Retrieves the storageInfo instance to read/write the common
* version file properties.
* @return the instance of the storageInfo class
*/
@@ -128,7 +128,7 @@ public abstract class Storage {
   abstract protected Properties getNodeProperties();
 
   /**
-   * Sets the Node properties spaecific to OM/SCM.
+   * Sets the Node properties specific to OM/SCM.
*/
   private void setNodeProperties() {
 Properties nodeProperties = getNodeProperties();
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index c10aa33..219bd29 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -659,7 +659,7 @@
 
 OZONE, OM, SCM, CONTAINER, STORAGE, REQUIRED
 
-  This setting is the fallback location for SCM, OM and DataNodes
+  This setting is the fallback location for SCM, OM, Recon and DataNodes
   to store their metadata. This setting may be used only in test/PoC
   clusters to simplify configuration.
 
@@ -2457,7 +2457,7 @@
 
 OZONE, RECON
 
-  Ozone Recon datbase password.
+  Ozone Recon database password.
 
   
   
@@ -2484,7 +2484,7 @@
 
   The max active connections to the SQL database. The default SQLite
   database only allows single active connection, set this to a
-  resonable value like 10, for external production database.
+  reasonable value like 10, for external production database.
 
   
   
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
index 8a0c317..fa229aa 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
@@ -60,7 +60,7 @@ public class OzoneManagerStarter extends GenericCli {
   public Void call() throws Exception {
 /**
  * This method is invoked only when a sub-command is not called. Therefore
- * if someone runs "ozo

[hadoop] 03/08: HDDS-1611. Evaluate ACL on volume bucket key and prefix to authorize access. Contributed by Ajay Kumar. (#973)

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 84cdacbb2aabd915dc708322d9978b631f58bf0a
Author: Ajay Yadav <7813154+ajay...@users.noreply.github.com>
AuthorDate: Wed Jul 10 11:03:58 2019 -0700

HDDS-1611. Evaluate ACL on volume bucket key and prefix to authorize 
access. Contributed by Ajay Kumar. (#973)

(cherry picked from commit cdb20adfcce22beb4f232f91822b190119d098ce)
---
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java   |   6 +
 .../hdfs/server/diskbalancer/TestDiskBalancer.java |   1 -
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |   1 +
 .../java/org/apache/hadoop/ozone/OzoneAcl.java |  46 +-
 .../hadoop/ozone/om/helpers/OmOzoneAclMap.java |  80 +++-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |   2 +
 .../apache/hadoop/ozone/protocolPB/OMPBHelper.java |  15 +-
 .../ozone/security/acl/IAccessAuthorizer.java  |  15 +-
 .../apache/hadoop/ozone/web/utils/OzoneUtils.java  | 102 -
 .../src/main/proto/OzoneManagerProtocol.proto  |  13 +-
 .../org/apache/hadoop/ozone/TestOzoneAcls.java |   8 +-
 .../src/main/compose/ozonesecure-mr/docker-config  |   1 +
 .../src/main/compose/ozonesecure/docker-config |   5 +-
 .../dist/src/main/smoketest/__init__.robot |   2 +-
 .../src/main/smoketest/basic/ozone-shell.robot |  21 +-
 .../dist/src/main/smoketest/commonlib.robot|   5 +-
 .../dist/src/main/smoketest/createbucketenv.robot  |   2 +-
 .../dist/src/main/smoketest/createmrenv.robot  |   2 +-
 hadoop-ozone/dist/src/main/smoketest/kinit.robot   |   2 +-
 .../dist/src/main/smoketest/ozonefs/ozonefs.robot  |  12 +-
 .../dist/src/main/smoketest/s3/awss3.robot |   2 +-
 .../main/smoketest/security/ozone-secure-fs.robot  |  50 ++-
 .../hadoop/ozone/TestOzoneConfigurationFields.java |   3 +
 .../client/rpc/TestOzoneRpcClientAbstract.java |   5 +-
 .../org/apache/hadoop/ozone/om/TestOmAcls.java |  12 +-
 .../hadoop/ozone/om/TestOmBlockVersioning.java |   6 +-
 .../apache/hadoop/ozone/om/TestOzoneManager.java   |  13 +-
 .../security/acl/TestOzoneNativeAuthorizer.java| 464 +
 .../apache/hadoop/ozone/web/client/TestVolume.java |   4 +-
 .../apache/hadoop/ozone/om/BucketManagerImpl.java  |  46 ++
 .../java/org/apache/hadoop/ozone/om/IOzoneAcl.java |  13 +
 .../org/apache/hadoop/ozone/om/KeyManager.java |   2 +-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  98 -
 .../org/apache/hadoop/ozone/om/OzoneManager.java   | 141 +--
 .../apache/hadoop/ozone/om/PrefixManagerImpl.java  |  39 ++
 .../hadoop/ozone/om/S3BucketManagerImpl.java   |  24 +-
 .../apache/hadoop/ozone/om/VolumeManagerImpl.java  |  66 ++-
 .../protocolPB/OzoneManagerRequestHandler.java |   6 +-
 .../ozone/security/acl/OzoneNativeAuthorizer.java  | 120 ++
 .../hadoop/ozone/security/acl/package-info.java|  22 +
 .../web/ozShell/volume/ListVolumeHandler.java  |   2 +-
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java  |  19 +-
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java|   2 +-
 43 files changed, 1315 insertions(+), 185 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 98b3b56..1c82a7a 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -120,6 +120,10 @@ public final class OzoneConfigKeys {
* */
   public static final String OZONE_ADMINISTRATORS =
   "ozone.administrators";
+  /**
+   * Used only for testing purpose. Results in making every user an admin.
+   * */
+  public static final String OZONE_ADMINISTRATORS_WILDCARD = "*";
 
   public static final String OZONE_CLIENT_PROTOCOL =
   "ozone.client.protocol";
@@ -390,6 +394,8 @@ public final class OzoneConfigKeys {
   "ozone.acl.authorizer.class";
   public static final String OZONE_ACL_AUTHORIZER_CLASS_DEFAULT =
   "org.apache.hadoop.ozone.security.acl.OzoneAccessAuthorizer";
+  public static final String OZONE_ACL_AUTHORIZER_CLASS_NATIVE =
+  "org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer";
   public static final String OZONE_ACL_ENABLED =
   "ozone.acl.enabled";
   public static final boolean OZONE_ACL_ENABLED_DEFAULT =
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index b400391..931bdb4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ 
b/hadoop-hdfs-
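
The new constants above are enough to switch a cluster from the default no-op authorizer to the native one. A hedged sketch of the wiring, using only names visible in this diff (OzoneConfiguration is assumed to come from hadoop-hdds-common); per the new javadoc, the wildcard admin setting is strictly a test-cluster shortcut.

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.OzoneConfigKeys;

    public class NativeAclSetupSketch {
      public static OzoneConfiguration enableNativeAcls() {
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.setBoolean(OzoneConfigKeys.OZONE_ACL_ENABLED, true);
        conf.set(OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS,
            OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE);
        // Test clusters only: the wildcard makes every user an admin.
        conf.set(OzoneConfigKeys.OZONE_ADMINISTRATORS,
            OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD);
        return conf;
      }
    }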

[hadoop] 04/08: HDDS-1611.[Addendum] Evaluate ACL on volume bucket key and prefix to authorize access. Contributed by Ajay Kumar. (#973)

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 70c42faca9bc9f2247f95affdc1ab133d76fb850
Author: Anu Engineer 
AuthorDate: Wed Jul 10 11:28:18 2019 -0700

HDDS-1611.[Addendum] Evaluate ACL on volume bucket key and prefix to 
authorize access. Contributed by Ajay Kumar. (#973)

Fixes a build break in ozone.

(cherry picked from commit 6872efcabfd8fad5658642baa26df0e74399348b)
---
 .../main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java | 7 ++-
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index d8afb91..bd90b2d 100644
--- 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -43,10 +43,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
-
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
@@ -632,8 +629,8 @@ public class BasicOzoneFileSystem extends FileSystem {
 String key = pathToKey(qualifiedPath);
 FileStatus fileStatus = null;
 try {
-  fileStatus = adapter.getFileStatus(key)
-.makeQualified(uri, qualifiedPath, getUsername(), getUsername());
+  fileStatus = convertFileStatus(
+  adapter.getFileStatus(key, uri, qualifiedPath, getUsername()));
 } catch (OMException ex) {
   if (ex.getResult().equals(OMException.ResultCodes.KEY_NOT_FOUND)) {
 throw new FileNotFoundException("File not found. path:" + f);





[hadoop] 07/08: HDDS-1384. TestBlockOutputStreamWithFailures is failing

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 9be196740cd23ce5123895f7523b7dd0ec4dedcd
Author: Márton Elek 
AuthorDate: Thu Jul 11 12:46:39 2019 +0200

HDDS-1384. TestBlockOutputStreamWithFailures is failing

Closes #1029

(cherry picked from commit 9119ed07ff32143b548316bf69c49695196f8422)
---
 .../common/transport/server/XceiverServerGrpc.java | 37 +++---
 .../transport/server/ratis/XceiverServerRatis.java | 38 ---
 .../apache/hadoop/ozone/TestMiniOzoneCluster.java  | 56 --
 3 files changed, 83 insertions(+), 48 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index 6fe8fd4..e224045 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -21,6 +21,7 @@ package 
org.apache.hadoop.ozone.container.common.transport.server;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerCommandRequestProto;
@@ -51,9 +52,6 @@ import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.ServerSocket;
-import java.net.SocketAddress;
 import java.util.Collections;
 import java.util.List;
 import java.util.UUID;
@@ -70,6 +68,8 @@ public final class XceiverServerGrpc extends XceiverServer {
   private Server server;
   private final ContainerDispatcher storageContainer;
   private boolean isStarted;
+  private DatanodeDetails datanodeDetails;
+
 
   /**
* Constructs a Grpc server class.
@@ -83,25 +83,15 @@ public final class XceiverServerGrpc extends XceiverServer {
 Preconditions.checkNotNull(conf);
 
 this.id = datanodeDetails.getUuid();
+this.datanodeDetails = datanodeDetails;
 this.port = conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
 OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
-// Get an available port on current node and
-// use that as the container port
+
 if (conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT,
 OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) {
-  try (ServerSocket socket = new ServerSocket()) {
-socket.setReuseAddress(true);
-SocketAddress address = new InetSocketAddress(0);
-socket.bind(address);
-this.port = socket.getLocalPort();
-LOG.info("Found a free port for the server : {}", this.port);
-  } catch (IOException e) {
-LOG.error("Unable find a random free port for the server, "
-+ "fallback to use default port {}", this.port, e);
-  }
+  this.port = 0;
 }
-datanodeDetails.setPort(
-DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, port));
+
 NettyServerBuilder nettyServerBuilder =
 ((NettyServerBuilder) ServerBuilder.forPort(port))
 .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE);
@@ -164,6 +154,19 @@ public final class XceiverServerGrpc extends XceiverServer {
   public void start() throws IOException {
 if (!isStarted) {
   server.start();
+  int realPort = server.getPort();
+
+  if (port == 0) {
+LOG.info("{} {} is started using port {}", getClass().getSimpleName(),
+this.id, realPort);
+port = realPort;
+  }
+
+  //register the real port to the datanode details.
+  datanodeDetails.setPort(DatanodeDetails
+  .newPort(Name.STANDALONE,
+  realPort));
+
   isStarted = true;
 }
   }
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 246d58a..23c4ea5 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -66,6 +66,7 @@ import or
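
The removed block probed for a free port with a throwaway ServerSocket and then re-bound the gRPC server to that number, which can race with another process grabbing the port in between. The replacement binds the real server to port 0 and reads the kernel-assigned port after start(). A JDK-only illustration of the same idea, with the gRPC NettyServerBuilder swapped for a plain socket:

    import java.io.IOException;
    import java.net.ServerSocket;

    public class EphemeralPortSketch {
      public static void main(String[] args) throws IOException {
        try (ServerSocket server = new ServerSocket(0)) { // 0: let the OS pick a port
          int realPort = server.getLocalPort();           // query it after the bind
          // Only now is it safe to advertise the port, e.g. in DatanodeDetails.
          System.out.println("listening on " + realPort);
        }
      }
    }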

[hadoop] 05/08: HDDS-1784. Missing HostName and IpAddress in the response of register command.

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit f6135a5e1928bbfacaa7b7e1020b8f2a33edebf3
Author: Nanda kumar 
AuthorDate: Thu Jul 11 19:01:06 2019 +0530

HDDS-1784. Missing HostName and IpAddress in the response of register 
command.

(cherry picked from commit 0f399b0d57875c64f49df3942743111905fd2198)
---
 .../org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java| 2 ++
 1 file changed, 2 insertions(+)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index f07db62..cd78d3d 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -271,6 +271,8 @@ public class SCMDatanodeProtocolServer implements
 .setErrorCode(cmd.getError())
 .setClusterID(cmd.getClusterID())
 .setDatanodeUUID(cmd.getDatanodeUUID())
+.setIpAddress(cmd.getIpAddress())
+.setHostname(cmd.getHostName())
 .build();
   }
 





[hadoop] 02/08: HDDS-1718. Increase Ratis Leader election timeout default. Contributed by Aravindan Vijayan & Siddharth Wagle. (#1062)

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 31556ffd202d4fca85ddbf8800333ee1f6163f47
Author: Mukul Kumar Singh 
AuthorDate: Tue Jul 9 23:17:50 2019 +0530

HDDS-1718. Increase Ratis Leader election timeout default. Contributed by 
Aravindan Vijayan & Siddharth Wagle. (#1062)

(cherry picked from commit 96d05559132630288126d9e66a66ac31617334a4)
---
 .../src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java   | 2 +-
 hadoop-hdds/common/src/main/resources/ozone-default.xml   | 4 ++--
 .../statemachine/commandhandler/TestCloseContainerCommandHandler.java | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index a987399..1213dee 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -147,7 +147,7 @@ public final class ScmConfigKeys {
   "dfs.ratis.leader.election.minimum.timeout.duration";
   public static final TimeDuration
   DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
-  TimeDuration.valueOf(1, TimeUnit.SECONDS);
+  TimeDuration.valueOf(5, TimeUnit.SECONDS);
 
   public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY =
   "dfs.ratis.snapshot.threshold";
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 219bd29..27b02e6 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -270,10 +270,10 @@
   
   
 dfs.ratis.leader.election.minimum.timeout.duration
-1s
+5s
 OZONE, RATIS, MANAGEMENT
 The minimum timeout duration for ratis leader election.
-Default is 1s.
+Default is 5s.
 
   
   
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
index 1f6ed86..f802470 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
@@ -298,7 +298,7 @@ public class TestCloseContainerCommandHandler {
 maxOutstandingRequests,
 TimeDuration.valueOf(3, TimeUnit.SECONDS));
 Assert.assertTrue(client.groupAdd(group, peer.getId()).isSuccess());
-Thread.sleep(2000);
+Thread.sleep(1);
 final ContainerID containerId = ContainerID.valueof(
 random.nextLong() & Long.MAX_VALUE);
 ContainerProtos.ContainerCommandRequestProto.Builder request =
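
Deployments that need a different value can override the key shown in the diff; a sketch using Hadoop's standard time-duration setter (the 10s value is purely illustrative, not a recommendation from this commit):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    public class LeaderElectionTimeoutSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Key copied from ScmConfigKeys above; the value is an example choice.
        conf.setTimeDuration(
            "dfs.ratis.leader.election.minimum.timeout.duration",
            10, TimeUnit.SECONDS);
        System.out.println(
            conf.get("dfs.ratis.leader.election.minimum.timeout.duration"));
      }
    }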





[hadoop] 06/08: HDDS-1754. getContainerWithPipeline fails with PipelineNotFoundException. Contributed by Supratim Deka (#1081)

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 75af39398e914960aafb09e2b7ea05486a1b40eb
Author: supratimdeka <46919641+supratimd...@users.noreply.github.com>
AuthorDate: Fri Jul 12 10:31:48 2019 +0530

HDDS-1754. getContainerWithPipeline fails with PipelineNotFoundException. 
Contributed by Supratim Deka (#1081)

(cherry picked from commit 738fab3bff04ab0128146b401b4978d3d60ec97f)
---
 .../java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java  | 8 
 .../apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java| 2 +-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
index 7b5c467..0ecfdac 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
@@ -467,4 +467,12 @@ public class ContainerInfo implements Comparator,
 return state == HddsProtos.LifeCycleState.OPEN
 || state == HddsProtos.LifeCycleState.CLOSING;
   }
+
+  /**
+   * Check if a container is in Open state, but Close has not been initiated.
+   * @return true if Open, false otherwise.
+   */
+  public boolean isOpenNotClosing() {
+return state == HddsProtos.LifeCycleState.OPEN;
+  }
 }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 6a875e6..769f3ef 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -238,7 +238,7 @@ public class SCMClientProtocolServer implements
   getContainer(id);
   final Pipeline pipeline;
 
-  if (container.isOpen()) {
+  if (container.isOpenNotClosing()) {
 // Ratis pipeline
 pipeline = scm.getPipelineManager()
 .getPipeline(container.getPipelineID());





[hadoop] branch ozone-0.4.1 updated (5b99872 -> 9b3c034)

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a change to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 5b99872  HDDS-1791. Update network-tests/src/test/blockade/README.md 
file
 new 758d756  HDDS-1705. Recon: Add estimatedTotalCount to the response of 
containers and containers/{id} endpoints. Contributed by Vivek Ratnavel 
Subramanian.
 new 31556ff  HDDS-1718. Increase Ratis Leader election timeout default. 
Contributed by Aravindan Vijayan & Siddharth Wagle. (#1062)
 new 84cdacb  HDDS-1611. Evaluate ACL on volume bucket key and prefix to 
authorize access. Contributed by Ajay Kumar. (#973)
 new 70c42fa  HDDS-1611.[Addendum] Evaluate ACL on volume bucket key and 
prefix to authorize access. Contributed by Ajay Kumar. (#973)
 new f6135a5  HDDS-1784. Missing HostName and IpAddress in the response of 
register command.
 new 75af393  HDDS-1754. getContainerWithPipeline fails with 
PipelineNotFoundException. Contributed by Supratim Deka (#1081)
 new 9be1967  HDDS-1384. TestBlockOutputStreamWithFailures is failing
 new 9b3c034  HDDS-1766. ContainerStateMachine is unable to increment 
lastAppliedTermIndex. Contributed by  Mukul Kumar Singh. (#1072)

The 8 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |   2 +-
 .../hadoop/hdds/scm/container/ContainerInfo.java   |   8 +
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java   |   6 +
 .../org/apache/hadoop/ozone/common/Storage.java|   6 +-
 .../common/src/main/resources/ozone-default.xml|  10 +-
 .../common/transport/server/XceiverServerGrpc.java |  37 +-
 .../server/ratis/ContainerStateMachine.java|  40 +-
 .../transport/server/ratis/XceiverServerRatis.java |  38 +-
 .../TestCloseContainerCommandHandler.java  |   2 +-
 .../hdds/scm/server/SCMClientProtocolServer.java   |   2 +-
 .../hdds/scm/server/SCMDatanodeProtocolServer.java |   2 +
 .../hdfs/server/diskbalancer/TestDiskBalancer.java |   1 -
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |   1 +
 .../java/org/apache/hadoop/ozone/OzoneAcl.java |  46 +-
 .../hadoop/ozone/om/helpers/OmOzoneAclMap.java |  80 +++-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |   2 +
 .../apache/hadoop/ozone/protocolPB/OMPBHelper.java |  15 +-
 .../ozone/security/acl/IAccessAuthorizer.java  |  15 +-
 .../apache/hadoop/ozone/web/utils/OzoneUtils.java  | 102 -
 .../src/main/proto/OzoneManagerProtocol.proto  |  13 +-
 .../org/apache/hadoop/ozone/TestOzoneAcls.java |   8 +-
 .../src/main/compose/ozonesecure-mr/docker-config  |   1 +
 .../src/main/compose/ozonesecure/docker-config |   5 +-
 .../dist/src/main/smoketest/__init__.robot |   2 +-
 .../src/main/smoketest/basic/ozone-shell.robot |  21 +-
 .../dist/src/main/smoketest/commonlib.robot|   5 +-
 .../dist/src/main/smoketest/createbucketenv.robot  |   2 +-
 .../dist/src/main/smoketest/createmrenv.robot  |   2 +-
 hadoop-ozone/dist/src/main/smoketest/kinit.robot   |   2 +-
 .../dist/src/main/smoketest/ozonefs/ozonefs.robot  |  12 +-
 .../dist/src/main/smoketest/s3/awss3.robot |   2 +-
 .../main/smoketest/security/ozone-secure-fs.robot  |  50 ++-
 .../apache/hadoop/ozone/TestMiniOzoneCluster.java  |  56 ++-
 .../hadoop/ozone/TestOzoneConfigurationFields.java |   3 +
 .../client/rpc/TestOzoneRpcClientAbstract.java |   5 +-
 .../org/apache/hadoop/ozone/om/TestOmAcls.java |  12 +-
 .../hadoop/ozone/om/TestOmBlockVersioning.java |   6 +-
 .../apache/hadoop/ozone/om/TestOzoneManager.java   |  13 +-
 .../security/acl/TestOzoneNativeAuthorizer.java| 464 +
 .../apache/hadoop/ozone/web/client/TestVolume.java |   4 +-
 .../apache/hadoop/ozone/om/BucketManagerImpl.java  |  46 ++
 .../java/org/apache/hadoop/ozone/om/IOzoneAcl.java |  13 +
 .../org/apache/hadoop/ozone/om/KeyManager.java |   2 +-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  98 -
 .../org/apache/hadoop/ozone/om/OzoneManager.java   | 141 +--
 .../hadoop/ozone/om/OzoneManagerStarter.java   |   2 +-
 .../apache/hadoop/ozone/om/PrefixManagerImpl.java  |  39 ++
 .../hadoop/ozone/om/S3BucketManagerImpl.java   |  24 +-
 .../apache/hadoop/ozone/om/VolumeManagerImpl.java  |  66 ++-
 .../protocolPB/OzoneManagerRequestHandler.java |   6 +-
 .../ozone/security/acl/OzoneNativeAuthorizer.java  | 120 ++
 .../hadoop/ozone/security/acl/package-info.java|   2 +-
 .../web/ozShell/volume/ListVolumeHandler.java  |   2 +-
 .../recon/codegen/ReconSchemaGenerationModule.java |   2 +
 ...aDefinition.java => StatsSchemaDefinition.java} |  38 +-
 .../apache/hadoop/ozone/recon/ReconConstants.java  |   

[hadoop] branch ozone-0.4.1 updated: HDDS-1752. ConcurrentModificationException while handling DeadNodeHandler event. (#1080)

2019-07-12 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 24ed608   HDDS-1752. ConcurrentModificationException while handling 
DeadNodeHandler event. (#1080)
24ed608 is described below

commit 24ed608afc79591888d2218e44e2e6b5c7770361
Author: Hrishikesh Gadre 
AuthorDate: Fri Jul 12 01:45:48 2019 -0700

 HDDS-1752. ConcurrentModificationException while handling DeadNodeHandler 
event. (#1080)

(cherry picked from commit 14c43f85de86c8547dd23e228584cc5e83449870)
---
 .../java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
index 20fe797..f8633f9 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
@@ -22,9 +22,9 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 
-import java.util.HashSet;
 import java.util.Set;
 import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
 
 /**
  * This data structure maintains the list of pipelines which the given
@@ -59,7 +59,7 @@ public class Node2PipelineMap extends Node2ObjectsMap {
   public synchronized void addPipeline(Pipeline pipeline) {
 for (DatanodeDetails details : pipeline.getNodes()) {
   UUID dnId = details.getUuid();
-  dn2ObjectMap.computeIfAbsent(dnId, k -> new HashSet<>())
+  dn2ObjectMap.computeIfAbsent(dnId, k -> ConcurrentHashMap.newKeySet())
   .add(pipeline.getId());
 }
   }
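
The root cause: addPipeline mutates the per-datanode sets while other handlers (here, the dead-node path) iterate them concurrently, and a plain HashSet throws ConcurrentModificationException under that access pattern. A self-contained sketch of the repaired shape, with names simplified:

    import java.util.Map;
    import java.util.Set;
    import java.util.UUID;
    import java.util.concurrent.ConcurrentHashMap;

    public class ConcurrentMultimapSketch {
      private final Map<UUID, Set<String>> dn2Pipelines = new ConcurrentHashMap<>();

      public void addPipeline(UUID dnId, String pipelineId) {
        // ConcurrentHashMap.newKeySet() yields a thread-safe Set, so readers
        // may iterate while writers add; a HashSet makes no such promise.
        dn2Pipelines.computeIfAbsent(dnId, k -> ConcurrentHashMap.newKeySet())
            .add(pipelineId);
      }

      public Set<String> getPipelines(UUID dnId) {
        return dn2Pipelines.getOrDefault(dnId, ConcurrentHashMap.newKeySet());
      }
    }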





[hadoop] branch trunk updated: HDDS-1752. ConcurrentModificationException while handling DeadNodeHandler event. (#1080)

2019-07-12 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 14c43f8   HDDS-1752. ConcurrentModificationException while handling 
DeadNodeHandler event. (#1080)
14c43f8 is described below

commit 14c43f85de86c8547dd23e228584cc5e83449870
Author: Hrishikesh Gadre 
AuthorDate: Fri Jul 12 01:45:48 2019 -0700

 HDDS-1752. ConcurrentModificationException while handling DeadNodeHandler 
event. (#1080)
---
 .../java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
index 20fe797..f8633f9 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
@@ -22,9 +22,9 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 
-import java.util.HashSet;
 import java.util.Set;
 import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
 
 /**
  * This data structure maintains the list of pipelines which the given
@@ -59,7 +59,7 @@ public class Node2PipelineMap extends Node2ObjectsMap {
   public synchronized void addPipeline(Pipeline pipeline) {
 for (DatanodeDetails details : pipeline.getNodes()) {
   UUID dnId = details.getUuid();
-  dn2ObjectMap.computeIfAbsent(dnId, k -> new HashSet<>())
+  dn2ObjectMap.computeIfAbsent(dnId, k -> ConcurrentHashMap.newKeySet())
   .add(pipeline.getId());
 }
   }





[hadoop] branch trunk updated: HDDS-1754. getContainerWithPipeline fails with PipelineNotFoundException. Contributed by Supratim Deka (#1081)

2019-07-11 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 738fab3  HDDS-1754. getContainerWithPipeline fails with 
PipelineNotFoundException. Contributed by Supratim Deka (#1081)
738fab3 is described below

commit 738fab3bff04ab0128146b401b4978d3d60ec97f
Author: supratimdeka <46919641+supratimd...@users.noreply.github.com>
AuthorDate: Fri Jul 12 10:31:48 2019 +0530

HDDS-1754. getContainerWithPipeline fails with PipelineNotFoundException. 
Contributed by Supratim Deka (#1081)
---
 .../java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java  | 8 
 .../apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java| 2 +-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
index 7b5c467..0ecfdac 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
@@ -467,4 +467,12 @@ public class ContainerInfo implements Comparator,
 return state == HddsProtos.LifeCycleState.OPEN
 || state == HddsProtos.LifeCycleState.CLOSING;
   }
+
+  /**
+   * Check if a container is in Open state, but Close has not been initiated.
+   * @return true if Open, false otherwise.
+   */
+  public boolean isOpenNotClosing() {
+return state == HddsProtos.LifeCycleState.OPEN;
+  }
 }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 6a875e6..769f3ef 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -238,7 +238,7 @@ public class SCMClientProtocolServer implements
   getContainer(id);
   final Pipeline pipeline;
 
-  if (container.isOpen()) {
+  if (container.isOpenNotClosing()) {
 // Ratis pipeline
 pipeline = scm.getPipelineManager()
 .getPipeline(container.getPipelineID());





[hadoop] branch trunk updated: HDDS-1201. Reporting corrupted containers info to SCM (#1032)

2019-07-11 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new acef5e0  HDDS-1201. Reporting corrupted containers info to SCM (#1032)
acef5e0 is described below

commit acef5e0cec944d4e1d11541372f85487ec4d1d81
Author: Hrishikesh Gadre 
AuthorDate: Thu Jul 11 02:19:58 2019 -0700

HDDS-1201. Reporting corrupted containers info to SCM (#1032)
---
 .../container/common/interfaces/Container.java |   4 +-
 .../states/endpoint/HeartbeatEndpointTask.java |   6 +-
 .../container/keyvalue/KeyValueContainer.java  |  20 +-
 .../container/keyvalue/KeyValueContainerCheck.java |  46 ++---
 .../ozone/container/keyvalue/KeyValueHandler.java  |  13 +-
 .../container/ozoneimpl/ContainerController.java   |  12 ++
 .../container/ozoneimpl/ContainerScrubber.java |  31 +--
 .../keyvalue/TestKeyValueContainerCheck.java   |  11 +-
 .../IncrementalContainerReportHandler.java |   2 +
 .../hadoop/ozone/dn/scrubber/TestDataScrubber.java | 211 +
 10 files changed, 288 insertions(+), 68 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
index 1fcaaf5..10fec60 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
@@ -154,6 +154,8 @@ public interface Container extends RwLock {
 
   /**
* check and report the structural integrity of the container.
+   * @return true if the integrity checks pass
+   * false otherwise
*/
-  void check() throws StorageContainerException;
+  boolean check();
 }
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index 513043f..c50f457 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -135,8 +135,12 @@ public class HeartbeatEndpointTask
   addReports(requestBuilder);
   addContainerActions(requestBuilder);
   addPipelineActions(requestBuilder);
+  SCMHeartbeatRequestProto request = requestBuilder.build();
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Sending heartbeat message :: {}", request.toString());
+  }
   SCMHeartbeatResponseProto reponse = rpcEndpoint.getEndPoint()
-  .sendHeartbeat(requestBuilder.build());
+  .sendHeartbeat(request);
   processResponse(reponse, datanodeDetailsProto);
   rpcEndpoint.setLastSuccessfulHeartbeat(ZonedDateTime.now());
   rpcEndpoint.zeroMissedCount();
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 6a1ca86..a818b51 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -648,7 +648,7 @@ public class KeyValueContainer implements Container {
   /**
* run integrity checks on the Container metadata.
*/
-  public void check() throws StorageContainerException {
+  public boolean check() {
 ContainerCheckLevel level = ContainerCheckLevel.NO_CHECK;
 long containerId = containerData.getContainerID();
 
@@ -671,14 +671,12 @@ public class KeyValueContainer implements Container {
   containerData.getState());
   break;
 default:
-  throw new StorageContainerException(
-  "Invalid Container state found for Container : " + containerData
-  .getContainerID(), INVALID_CONTAINER_STATE);
+  break;
 }
 
 if (level == ContainerCheckLevel.NO_CHECK) {
   LOG.debug("Skipping integrity checks for Container Id : {}", 
containerId);
-  return;
+  return true;
 }
 
 KeyValueContainerCheck checker =
@@ -687,17 +685,11 @@ public class KeyValueContainer implements Container {
 
 switch (level) {
 case FAST_CHECK:
-  checker.fastCheck();
-  break;
+  return checker.fastCheck();
 case FULL_CHECK:
-  checker.fullCheck();
-  break;
-c
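
The interface change above turns corruption from control flow (an exception) into data (a boolean), so a scrubber can keep iterating and report each failure instead of aborting. A hedged sketch of that contract; the nested Container and Controller interfaces and the markContainerUnhealthy hook are hypothetical stand-ins, since the real reporting path is not visible in this truncated diff:

    public final class ScrubberSketch {
      interface Container { long getId(); boolean check(); }
      interface Controller { void markContainerUnhealthy(long containerId); }

      static void scrub(Iterable<Container> containers, Controller controller) {
        for (Container c : containers) {
          if (!c.check()) {
            // A failed integrity check no longer aborts the scan; the
            // container is reported and the loop moves on to the next one.
            controller.markContainerUnhealthy(c.getId());
          }
        }
      }
    }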

[hadoop] branch ozone-0.4.1 updated: Preparing for Ozone 0.4.1 development

2019-07-02 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 524a75f  Preparing for Ozone 0.4.1 development
524a75f is described below

commit 524a75fa70441f1c432c3381a4553e9072b8a5aa
Author: Nanda kumar 
AuthorDate: Tue Jul 2 19:08:45 2019 +0530

Preparing for Ozone 0.4.1 development
---
 hadoop-hdds/client/pom.xml| 4 ++--
 hadoop-hdds/common/pom.xml| 6 +++---
 hadoop-hdds/config/pom.xml| 4 ++--
 hadoop-hdds/container-service/pom.xml | 4 ++--
 hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md| 4 ++--
 hadoop-hdds/docs/pom.xml  | 4 ++--
 hadoop-hdds/framework/pom.xml | 4 ++--
 hadoop-hdds/pom.xml   | 4 ++--
 hadoop-hdds/server-scm/pom.xml| 4 ++--
 hadoop-hdds/tools/pom.xml | 4 ++--
 hadoop-ozone/Jenkinsfile  | 2 +-
 hadoop-ozone/client/pom.xml   | 4 ++--
 hadoop-ozone/common/pom.xml   | 4 ++--
 hadoop-ozone/csi/pom.xml  | 4 ++--
 hadoop-ozone/datanode/pom.xml | 4 ++--
 hadoop-ozone/dist/pom.xml | 4 ++--
 hadoop-ozone/dist/src/main/compose/ozone-mr/docker-config | 2 +-
 hadoop-ozone/fault-injection-test/network-tests/pom.xml   | 2 +-
 hadoop-ozone/fault-injection-test/pom.xml | 4 ++--
 hadoop-ozone/integration-test/pom.xml | 4 ++--
 hadoop-ozone/objectstore-service/pom.xml  | 4 ++--
 hadoop-ozone/ozone-manager/pom.xml| 4 ++--
 hadoop-ozone/ozone-recon-codegen/pom.xml  | 2 +-
 hadoop-ozone/ozone-recon/pom.xml  | 2 +-
 hadoop-ozone/ozonefs-lib-current/pom.xml  | 4 ++--
 hadoop-ozone/ozonefs-lib-legacy/pom.xml   | 4 ++--
 hadoop-ozone/ozonefs/pom.xml  | 4 ++--
 hadoop-ozone/pom.xml  | 6 +++---
 hadoop-ozone/s3gateway/pom.xml| 4 ++--
 hadoop-ozone/tools/pom.xml| 4 ++--
 hadoop-ozone/upgrade/pom.xml  | 4 ++--
 pom.ozone.xml | 2 +-
 32 files changed, 60 insertions(+), 60 deletions(-)

diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml
index 673af41..1f139d7 100644
--- a/hadoop-hdds/client/pom.xml
+++ b/hadoop-hdds/client/pom.xml
@@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 org.apache.hadoop
 hadoop-hdds
-0.5.0-SNAPSHOT
+0.4.1-SNAPSHOT
   
 
   hadoop-hdds-client
-  0.5.0-SNAPSHOT
+  0.4.1-SNAPSHOT
   Apache Hadoop Distributed Data Store Client 
Library
   Apache Hadoop HDDS Client
   jar
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index 85cdbdf..2f383f7 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -20,16 +20,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 org.apache.hadoop
 hadoop-hdds
-0.5.0-SNAPSHOT
+0.4.1-SNAPSHOT
   
   hadoop-hdds-common
-  0.5.0-SNAPSHOT
+  0.4.1-SNAPSHOT
   Apache Hadoop Distributed Data Store Common
   Apache Hadoop HDDS Common
   jar
 
   
-0.5.0-SNAPSHOT
+0.4.1-SNAPSHOT
 2.11.0
 3.4.2
 ${hdds.version}
diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml
index bf62949..880faa1 100644
--- a/hadoop-hdds/config/pom.xml
+++ b/hadoop-hdds/config/pom.xml
@@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 org.apache.hadoop
 hadoop-hdds
-0.5.0-SNAPSHOT
+0.4.1-SNAPSHOT
   
   hadoop-hdds-config
-  0.5.0-SNAPSHOT
+  0.4.1-SNAPSHOT
   Apache Hadoop Distributed Data Store Config Tools
   Apache Hadoop HDDS Config
   jar
diff --git a/hadoop-hdds/container-service/pom.xml 
b/hadoop-hdds/container-service/pom.xml
index 2f89fa2..730c1ab 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 org.apache.hadoop
 hadoop-hdds
-0.5.0-SNAPSHOT
+0.4.1-SNAPSHOT
   
   hadoop-hdds-container-service
-  0.5.0-SNAPSHOT
+  0.4.1-SNAPSHOT
   Apache Hadoop Distributed Data Store Container 
Service
   Apache Hadoop HDDS Container Service
   jar
diff --git a/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md 
b/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md
index c59789b..37b803a 100644
--- a/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md
+++ b/hadoop-hdds/docs/co

[hadoop] branch ozone-0.4.1 created (now e966edd)

2019-07-02 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a change to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at e966edd  YARN-9644. First RMContext object is always leaked during 
switch over. Contributed by Bibin A Chundatt.

No new revisions were added by this update.





[hadoop] branch trunk updated: HDDS-1706. Replication Manager thread running too frequently. Contributed by Nilotpal Nandi.

2019-06-22 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b28ddb2  HDDS-1706. Replication Manager thread running too frequently. 
Contributed by Nilotpal Nandi.
b28ddb2 is described below

commit b28ddb22a5f42915309a3aa2d8cb03b05fc09bb7
Author: Nanda kumar 
AuthorDate: Sat Jun 22 16:15:45 2019 +0530

HDDS-1706. Replication Manager thread running too frequently. Contributed 
by Nilotpal Nandi.
---
 .../java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
index a911e5a..33bf931 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
@@ -764,7 +764,7 @@ public class ReplicationManager {
 
 @Config(key = "thread.interval",
 type = ConfigType.TIME,
-defaultValue = "3s",
+defaultValue = "300s",
 tags = {SCM, OZONE},
 description = "When a heartbeat from the data node arrives on SCM, "
 + "It is queued for processing with the time stamp of when the "





[hadoop] branch trunk updated: HDDS-1454. GC other system pause events can trigger pipeline destroy for all the nodes in the cluster. Contributed by Supratim Deka (#852)

2019-06-19 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9d68425  HDDS-1454. GC other system pause events can trigger pipeline 
destroy for all the nodes in the cluster. Contributed by Supratim Deka (#852)
9d68425 is described below

commit 9d6842501c88304ca24062d2463480bc7fbe5e57
Author: supratimdeka <46919641+supratimd...@users.noreply.github.com>
AuthorDate: Wed Jun 19 20:11:16 2019 +0530

HDDS-1454. GC other system pause events can trigger pipeline destroy for 
all the nodes in the cluster. Contributed by Supratim Deka (#852)
---
 .../hadoop/hdds/scm/node/NodeStateManager.java | 167 +
 .../hadoop/hdds/scm/node/SCMNodeManager.java   |  29 
 .../hadoop/hdds/scm/node/TestSCMNodeManager.java   |  92 
 3 files changed, 258 insertions(+), 30 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
index c54944b..08a68be 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdds.scm.node;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.conf.Configuration;
@@ -43,6 +44,7 @@ import org.slf4j.LoggerFactory;
 import java.io.Closeable;
 import java.util.*;
 import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Predicate;
 
@@ -117,6 +119,26 @@ public class NodeStateManager implements Runnable, Closeable {
   private final long deadNodeIntervalMs;
 
   /**
+   * The future is used to pause/unpause the scheduled checks.
+   */
+  private ScheduledFuture healthCheckFuture;
+
+  /**
+   * Test utility - tracks if health check has been paused (unit tests).
+   */
+  private boolean checkPaused;
+
+  /**
+   * timestamp of the latest heartbeat check process.
+   */
+  private long lastHealthCheck;
+
+  /**
+   * number of times the heart beat check was skipped.
+   */
+  private long skippedHealthChecks;
+
+  /**
* Constructs a NodeStateManager instance with the given configuration.
*
* @param conf Configuration
@@ -143,10 +165,11 @@ public class NodeStateManager implements Runnable, Closeable {
 executorService = HadoopExecutors.newScheduledThreadPool(1,
 new ThreadFactoryBuilder().setDaemon(true)
 .setNameFormat("SCM Heartbeat Processing Thread - %d").build());
-//BUG:BUG TODO: The return value is ignored, if an exception is thrown in
-// the executing funtion, it will be ignored.
-executorService.schedule(this, heartbeatCheckerIntervalMs,
-TimeUnit.MILLISECONDS);
+
+skippedHealthChecks = 0;
+checkPaused = false; // accessed only from test functions
+
+scheduleNextHealthCheck();
   }
 
   /**
@@ -464,6 +487,42 @@ public class NodeStateManager implements Runnable, Closeable {
   @Override
   public void run() {
 
+if (shouldSkipCheck()) {
+  skippedHealthChecks++;
+  LOG.info("Detected long delay in scheduling HB processing thread. "
+  + "Skipping heartbeat checks for one iteration.");
+} else {
+  checkNodesHealth();
+}
+
+// we purposefully make this non-deterministic. Instead of using a
+// scheduleAtFixedFrequency  we will just go to sleep
+// and wake up at the next rendezvous point, which is currentTime +
+// heartbeatCheckerIntervalMs. This leads to the issue that we are now
+// heart beating not at a fixed cadence, but clock tick + time taken to
+// work.
+//
+// This time taken to work can skew the heartbeat processor thread.
+// The reason why we don't care is because of the following reasons.
+//
+// 1. checkerInterval is general many magnitudes faster than datanode HB
+// frequency.
+//
+// 2. if we have too much nodes, the SCM would be doing only HB
+// processing, this could lead to SCM's CPU starvation. With this
+// approach we always guarantee that  HB thread sleeps for a little while.
+//
+// 3. It is possible that we will never finish processing the HB's in the
+// thread. But that means we have a mis-configured system. We will warn
+// the users by logging that information.
+//
+// 4. And the most important reason, heartbeats are not blocked even if
+// this thread does not run, they will go into the processing queue.
+scheduleNextH
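
The diff is cut off before shouldSkipCheck(), so the following is a hedged reconstruction of the guard described in the comments above: if far more time than heartbeatCheckerIntervalMs has elapsed since the last run, the JVM was probably paused (GC, VM freeze) and one iteration is skipped instead of mass-marking datanodes stale. The field names mirror the diff, but the 2x multiplier is an assumption, not the committed threshold:

    import org.apache.hadoop.util.Time;

    class PauseDetectionSketch {
      private final long heartbeatCheckerIntervalMs;
      private long lastHealthCheck;

      PauseDetectionSketch(long heartbeatCheckerIntervalMs) {
        this.heartbeatCheckerIntervalMs = heartbeatCheckerIntervalMs;
        this.lastHealthCheck = Time.monotonicNow();
      }

      boolean shouldSkipCheck() {
        long gap = Time.monotonicNow() - lastHealthCheck;
        return gap > 2 * heartbeatCheckerIntervalMs; // assumed threshold
      }

      void onHealthCheckDone() {
        lastHealthCheck = Time.monotonicNow(); // record the last completed run
      }
    }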

[hadoop] branch trunk updated: HDDS-1650. Fix Ozone tests leaking volume checker thread. Contributed by Xiaoyu Yao. (#915)

2019-06-06 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c7e6f07  HDDS-1650. Fix Ozone tests leaking volume checker thread. 
Contributed by Xiaoyu Yao. (#915)
c7e6f07 is described below

commit c7e6f076df5b38702579db352475113e5f3ae5fb
Author: Xiaoyu Yao 
AuthorDate: Thu Jun 6 11:20:04 2019 -0700

HDDS-1650. Fix Ozone tests leaking volume checker thread. Contributed by 
Xiaoyu Yao. (#915)
---
 .../ozone/container/common/impl/TestHddsDispatcher.java  |  9 ++---
 .../volume/TestRoundRobinVolumeChoosingPolicy.java   | 13 -
 .../ozone/container/common/volume/TestVolumeSet.java |  1 +
 .../container/common/volume/TestVolumeSetDiskChecks.java |  3 +++
 .../ozone/container/keyvalue/TestKeyValueHandler.java|  7 ---
 .../ozone/container/ozoneimpl/TestOzoneContainer.java| 16 +++-
 6 files changed, 37 insertions(+), 12 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
index d425820..54dbe94 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -73,13 +73,15 @@ public class TestHddsDispatcher {
   public void testContainerCloseActionWhenFull() throws IOException {
 String testDir = GenericTestUtils.getTempPath(
 TestHddsDispatcher.class.getSimpleName());
+OzoneConfiguration conf = new OzoneConfiguration();
+DatanodeDetails dd = randomDatanodeDetails();
+VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf);
+
 try {
   UUID scmId = UUID.randomUUID();
-  OzoneConfiguration conf = new OzoneConfiguration();
   conf.set(HDDS_DATANODE_DIR_KEY, testDir);
-  DatanodeDetails dd = randomDatanodeDetails();
   ContainerSet containerSet = new ContainerSet();
-  VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf);
+
   DatanodeStateMachine stateMachine = Mockito.mock(
   DatanodeStateMachine.class);
   StateContext context = Mockito.mock(StateContext.class);
@@ -118,6 +120,7 @@ public class TestHddsDispatcher {
   .addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
 
 } finally {
+  volumeSet.shutdown();
   FileUtils.deleteDirectory(new File(testDir));
 }
 
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
index 80594d35..d0fbf10 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -40,10 +41,12 @@ public class TestRoundRobinVolumeChoosingPolicy {
 
   private RoundRobinVolumeChoosingPolicy policy;
   private List volumes;
+  private VolumeSet volumeSet;
 
   private final String baseDir = MiniDFSCluster.getBaseDirectory();
   private final String volume1 = baseDir + "disk1";
   private final String volume2 = baseDir + "disk2";
+
   private static final String DUMMY_IP_ADDR = "0.0.0.0";
 
   @Before
@@ -53,10 +56,18 @@ public class TestRoundRobinVolumeChoosingPolicy {
 conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
 policy = ReflectionUtils.newInstance(
 RoundRobinVolumeChoosingPolicy.class, null);
-VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
+volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
 volumes = volumeSet.getVolumesList();
   }
 
+  @After
+  public void cleanUp() {
+if (volumeSet != null) {
+  volumeSet.shutdown();
+  volumeSet = null;
+}
+  }
+
   @Test
   public void testRRVolumeChoosingPolicy() throws Exception {
 HddsVolume hddsVolume1 = volumes.get(0);
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop
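
These test fixes all follow one discipline: a VolumeSet owns a background disk-checker thread, so whoever constructs one in a test must shut it down on every exit path. The skeleton below condenses that pattern (JUnit 4, with the VolumeSet constructor signature as used in the diff):

    import java.util.UUID;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
    import org.junit.After;
    import org.junit.Before;

    public class VolumeSetLifecycleSketch {
      private VolumeSet volumeSet;

      @Before
      public void setUp() throws Exception {
        volumeSet = new VolumeSet(UUID.randomUUID().toString(),
            new OzoneConfiguration());
      }

      @After
      public void tearDown() {
        if (volumeSet != null) {
          volumeSet.shutdown(); // stops the checker thread even on test failure
          volumeSet = null;
        }
      }
    }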

[hadoop] branch trunk updated: HDDS-1652. HddsDispatcher should not shutdown volumeSet. Contributed by Xiaoyu Yao. (#916)

2019-06-06 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 76c0183  HDDS-1652. HddsDispatcher should not shutdown volumeSet. 
Contributed by Xiaoyu Yao. (#916)
76c0183 is described below

commit 76c0183ae3f7feeed108925a929a2bcc0fd31658
Author: Xiaoyu Yao 
AuthorDate: Thu Jun 6 11:17:59 2019 -0700

HDDS-1652. HddsDispatcher should not shutdown volumeSet. Contributed by 
Xiaoyu Yao. (#916)
---
 .../apache/hadoop/ozone/container/common/impl/HddsDispatcher.java| 2 --
 .../org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java | 5 +++--
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 30de893..39e163e 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -114,8 +114,6 @@ public class HddsDispatcher implements ContainerDispatcher, 
Auditor {
 
   @Override
   public void shutdown() {
-// Shutdown the volumes
-volumeSet.shutdown();
   }
 
   /**
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
index 91d0968..c05ecb9 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
@@ -87,6 +87,7 @@ public class BenchMarkDatanodeDispatcher {
   private List containers;
   private List keys;
   private List chunks;
+  private VolumeSet volumeSet;
 
   @Setup(Level.Trial)
   public void initialize() throws IOException {
@@ -103,7 +104,7 @@ public class BenchMarkDatanodeDispatcher {
 conf.set("dfs.datanode.data.dir", baseDir + File.separator + "data");
 
 ContainerSet containerSet = new ContainerSet();
-VolumeSet volumeSet = new VolumeSet(datanodeUuid, conf);
+volumeSet = new VolumeSet(datanodeUuid, conf);
 StateContext context = new StateContext(
 conf, DatanodeStates.RUNNING, null);
 ContainerMetrics metrics = ContainerMetrics.create(conf);
@@ -161,7 +162,7 @@ public class BenchMarkDatanodeDispatcher {
 
   @TearDown(Level.Trial)
   public void cleanup() throws IOException {
-dispatcher.shutdown();
+volumeSet.shutdown();
 FileUtils.deleteDirectory(new File(baseDir));
   }
 


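The fix encodes a simple ownership rule: whoever creates the VolumeSet is responsible for closing it, so HddsDispatcher.shutdown() must not tear down a resource it merely borrows; the benchmark now holds the reference and closes it in its own teardown. A minimal sketch of that rule, with illustrative names rather than Ozone's real classes:

import java.io.Closeable;
import java.io.IOException;

class Dispatcher {
  private final Closeable volumes; // borrowed reference, not owned

  Dispatcher(Closeable volumes) {
    this.volumes = volumes;
  }

  void shutdown() {
    // Deliberately does NOT close 'volumes': the creator owns its lifecycle.
  }
}

class OwnershipSketch {
  public static void main(String[] args) throws IOException {
    Closeable volumes = () -> System.out.println("volumes closed");
    Dispatcher dispatcher = new Dispatcher(volumes);
    try {
      dispatcher.shutdown(); // safe even if several dispatchers share 'volumes'
    } finally {
      volumes.close();       // the owner tears the resource down exactly once
    }
  }
}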



[hadoop] branch trunk updated: HDDS-1647 : Recon config tag does not show up on Ozone UI. (#914)

2019-06-06 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new fe06957  HDDS-1647 : Recon config tag does not show up on Ozone UI. 
(#914)
fe06957 is described below

commit fe069570d8962e6d679f38d0ca44a1838f2f287c
Author: avijayanhwx <14299376+avijayan...@users.noreply.github.com>
AuthorDate: Thu Jun 6 11:13:29 2019 -0700

HDDS-1647 : Recon config tag does not show up on Ozone UI. (#914)
---
 hadoop-hdds/common/src/main/resources/ozone-default.xml | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index d637979..33f0584 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1196,13 +1196,14 @@

   <property>
     <name>hadoop.tags.custom</name>
-    <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY</value>
+    <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,
+      CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,RECON</value>
   </property>

   <property>
     <name>ozone.tags.system</name>
     <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,
-      CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,TOKEN,TLS</value>
+      CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,TOKEN,TLS,RECON</value>
   </property>

 





[hadoop] branch trunk updated: HDDS-1201. Reporting Corruptions in Containers to SCM (#912)

2019-06-06 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c8276f3  HDDS-1201. Reporting Corruptions in Containers to SCM (#912)
c8276f3 is described below

commit c8276f3e7669c6fbafe3c6ce17cfa8d6c01431b0
Author: Shweta Yakkali 
AuthorDate: Thu Jun 6 11:06:48 2019 -0700

HDDS-1201. Reporting Corruptions in Containers to SCM (#912)
---
 .../ozone/container/common/impl/HddsDispatcher.java  |  2 +-
 .../ozone/container/common/interfaces/Handler.java   |  2 +-
 .../ozone/container/keyvalue/KeyValueHandler.java| 12 ++--
 .../container/ozoneimpl/ContainerController.java |  4 ++--
 .../ozone/container/ozoneimpl/ContainerScrubber.java | 20 +---
 5 files changed, 19 insertions(+), 21 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 6f56b3c..30de893 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -302,7 +302,7 @@ public class HddsDispatcher implements ContainerDispatcher, 
Auditor {
 containerState == State.OPEN || containerState == State.CLOSING);
 // mark and persist the container state to be unhealthy
 try {
-  handler.markContainerUhealthy(container);
+  handler.markContainerUnhealthy(container);
 } catch (IOException ioe) {
   // just log the error here in case marking the container fails,
   // Return the actual failure response to the client
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
index 52d14db..97413f4 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
@@ -135,7 +135,7 @@ public abstract class Handler {
* @param container container to update
* @throws IOException in case of exception
*/
-  public abstract void markContainerUhealthy(Container container)
+  public abstract void markContainerUnhealthy(Container container)
   throws IOException;
 
   /**
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 72f48fa..7249271 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -884,20 +884,20 @@ public class KeyValueHandler extends Handler {
   @Override
   public void markContainerForClose(Container container)
   throws IOException {
-State currentState = container.getContainerState();
 // Move the container to CLOSING state only if it's OPEN
-if (currentState == State.OPEN) {
+if (container.getContainerState() == State.OPEN) {
   container.markContainerForClose();
   sendICR(container);
 }
   }
 
   @Override
-  public void markContainerUhealthy(Container container)
+  public void markContainerUnhealthy(Container container)
   throws IOException {
-// this will mark the container unhealthy and a close container action will
-// be sent from the dispatcher ton SCM to close down this container.
-container.markContainerUnhealthy();
+if (container.getContainerState() != State.UNHEALTHY) {
+  container.markContainerUnhealthy();
+  sendICR(container);
+}
   }
 
   @Override
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
index 11cb8ee..10cb330 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
@@ -133,11 +133,11 @@ public class ContainerController {
* @param container Container
* @return handler of the container
*/
-  Handler getHandler(final Container container) {
+  private Handler getHandler(final Container

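The renamed markContainerUnhealthy also becomes a guarded, idempotent transition: the container is marked and an incremental container report (ICR) is sent only on the first move into UNHEALTHY. A small sketch of that pattern with hypothetical names (the real handler additionally persists the state):

import java.util.concurrent.atomic.AtomicReference;

class UnhealthyTransitionSketch {
  enum State { OPEN, CLOSING, UNHEALTHY }

  private final AtomicReference<State> state =
      new AtomicReference<>(State.OPEN);

  /** @return true only for the caller that actually performed the move. */
  boolean markUnhealthy() {
    State current;
    do {
      current = state.get();
      if (current == State.UNHEALTHY) {
        return false; // already unhealthy: no second report
      }
    } while (!state.compareAndSet(current, State.UNHEALTHY));
    return true;
  }

  public static void main(String[] args) {
    UnhealthyTransitionSketch c = new UnhealthyTransitionSketch();
    if (c.markUnhealthy()) {
      System.out.println("send incremental container report (ICR)");
    }
    if (c.markUnhealthy()) {
      System.out.println("never reached: the second call is a no-op");
    }
  }
}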
[hadoop] branch trunk updated: Opening of rocksDB in datanode fails with "No locks available"

2019-06-03 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 277e9a8  Opening of rocksDB in datanode fails with "No locks available"
277e9a8 is described below

commit 277e9a835b5b45af8df70b0dca52c03074f0d6b5
Author: Mukul Kumar Singh 
AuthorDate: Tue Jun 4 02:12:44 2019 +0530

Opening of rocksDB in datanode fails with "No locks available"

Signed-off-by: Nanda kumar 
---
 .../container/common/utils/ContainerCache.java |  14 +--
 .../container/common/utils/ReferenceCountedDB.java |  28 ++---
 .../ozone/container/common/TestContainerCache.java | 128 +
 3 files changed, 145 insertions(+), 25 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
index ef75ec1..d25e53b 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
@@ -77,7 +77,8 @@ public final class ContainerCache extends LRUMap {
   while (iterator.hasNext()) {
 iterator.next();
 ReferenceCountedDB db = (ReferenceCountedDB) iterator.getValue();
-db.setEvicted(true);
+Preconditions.checkArgument(db.cleanup(), "refCount:",
+db.getReferenceCount());
   }
   // reset the cache
   cache.clear();
@@ -92,14 +93,9 @@ public final class ContainerCache extends LRUMap {
   @Override
   protected boolean removeLRU(LinkEntry entry) {
 ReferenceCountedDB db = (ReferenceCountedDB) entry.getValue();
-String dbFile = (String)entry.getKey();
 lock.lock();
 try {
-  db.setEvicted(false);
-  return true;
-} catch (Exception e) {
-  LOG.error("Eviction for db:{} failed", dbFile, e);
-  return false;
+  return db.cleanup();
 } finally {
   lock.unlock();
 }
@@ -156,8 +152,8 @@ public final class ContainerCache extends LRUMap {
 try {
   ReferenceCountedDB db = (ReferenceCountedDB)this.get(containerDBPath);
   if (db != null) {
-// marking it as evicted will close the db as well.
-db.setEvicted(true);
+Preconditions.checkArgument(db.cleanup(), "refCount:",
+db.getReferenceCount());
   }
   this.remove(containerDBPath);
 } finally {
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
index 31aca64..81cde5b 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
@@ -24,7 +24,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.Closeable;
-import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
 /**
@@ -38,17 +37,19 @@ public class ReferenceCountedDB implements Closeable {
   private static final Logger LOG =
   LoggerFactory.getLogger(ReferenceCountedDB.class);
   private final AtomicInteger referenceCount;
-  private final AtomicBoolean isEvicted;
   private final MetadataStore store;
   private final String containerDBPath;
 
   public ReferenceCountedDB(MetadataStore store, String containerDBPath) {
 this.referenceCount = new AtomicInteger(0);
-this.isEvicted = new AtomicBoolean(false);
 this.store = store;
 this.containerDBPath = containerDBPath;
   }
 
+  public long getReferenceCount() {
+return referenceCount.get();
+  }
+
   public void incrementReference() {
 this.referenceCount.incrementAndGet();
 if (LOG.isDebugEnabled()) {
@@ -59,35 +60,30 @@ public class ReferenceCountedDB implements Closeable {
   }
 
   public void decrementReference() {
-this.referenceCount.decrementAndGet();
+int refCount = this.referenceCount.decrementAndGet();
+Preconditions.checkArgument(refCount >= 0, "refCount:", refCount);
 if (LOG.isDebugEnabled()) {
   LOG.debug("DecRef {} to refCnt {} \n", containerDBPath,
   referenceCount.get());
   new Exception().printStackTrace();
 }
-cleanup();
-  }
-
-  public void setEvicted(boolean checkNoReferences) {
-Preconditions.checkState(!checkNoReferences ||
-(referenceCount.get() == 0),
-"checkNoReferences:%b, referencount:%d, dbPath:%s",
-  

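The replacement for setEvicted() is plain reference counting: a cached DB handle may only be closed once the last user has released it; otherwise cleanup() refuses and the eviction is rejected. A stripped-down sketch of the idea; the names are illustrative, and unlike this toy the real ContainerCache serializes cleanup() under its lock:

import java.util.concurrent.atomic.AtomicInteger;

class RefCountedHandleSketch {
  private final AtomicInteger refCount = new AtomicInteger(0);
  private final AutoCloseable store; // stand-in for the RocksDB MetadataStore

  RefCountedHandleSketch(AutoCloseable store) {
    this.store = store;
  }

  void acquire() {
    refCount.incrementAndGet();
  }

  void release() {
    int refs = refCount.decrementAndGet();
    if (refs < 0) {
      throw new IllegalStateException("refCount went negative: " + refs);
    }
  }

  /** Closes the store only when nobody holds a reference. */
  boolean cleanup() {
    if (refCount.get() != 0) {
      return false; // still in use: eviction must be refused
    }
    try {
      store.close();
      return true;
    } catch (Exception e) {
      return false;
    }
  }

  public static void main(String[] args) {
    RefCountedHandleSketch h =
        new RefCountedHandleSketch(() -> System.out.println("db closed"));
    h.acquire();
    System.out.println(h.cleanup()); // false: a reader still holds the handle
    h.release();
    System.out.println(h.cleanup()); // true: last reference gone, db closed
  }
}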
[hadoop] branch trunk updated: HDDS-1625 : ConcurrentModificationException when SCM has containers of different owners. (#883)

2019-06-03 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 21de9af  HDDS-1625 : ConcurrentModificationException when SCM has 
containers of different owners. (#883)
21de9af is described below

commit 21de9af9038961e36e7335dc1f688f5f48056d1c
Author: avijayanhwx <14299376+avijayan...@users.noreply.github.com>
AuthorDate: Mon Jun 3 12:45:04 2019 -0700

HDDS-1625 : ConcurrentModificationException when SCM has containers of 
different owners. (#883)
---
 .../hdds/scm/container/SCMContainerManager.java|  9 +---
 .../TestContainerStateManagerIntegration.java  | 24 ++
 2 files changed, 30 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index 359731c..1c1ffe1 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -43,6 +43,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
@@ -469,15 +470,17 @@ public class SCMContainerManager implements 
ContainerManager {
*/
   private NavigableSet<ContainerID> getContainersForOwner(
   NavigableSet<ContainerID> containerIDs, String owner) {
-for (ContainerID cid : containerIDs) {
+Iterator<ContainerID> containerIDIterator = containerIDs.iterator();
+while (containerIDIterator.hasNext()) {
+  ContainerID cid = containerIDIterator.next();
   try {
 if (!getContainer(cid).getOwner().equals(owner)) {
-  containerIDs.remove(cid);
+  containerIDIterator.remove();
 }
   } catch (ContainerNotFoundException e) {
 LOG.error("Could not find container info for container id={} {}", cid,
 e);
-containerIDs.remove(cid);
+containerIDIterator.remove();
   }
 }
 return containerIDs;
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
index 9f90a2d..e4f1a37 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
@@ -123,6 +123,30 @@ public class TestContainerStateManagerIntegration {
   }
 
   @Test
+  public void testAllocateContainerWithDifferentOwner() throws IOException {
+
+// Allocate a container and verify the container info
+ContainerWithPipeline container1 = scm.getClientProtocolServer()
+.allocateContainer(xceiverClientManager.getType(),
+xceiverClientManager.getFactor(), containerOwner);
+ContainerInfo info = containerManager
+.getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
+container1.getPipeline());
+Assert.assertNotNull(info);
+
+String newContainerOwner = "OZONE_NEW";
+ContainerWithPipeline container2 = scm.getClientProtocolServer()
+.allocateContainer(xceiverClientManager.getType(),
+xceiverClientManager.getFactor(), newContainerOwner);
+ContainerInfo info2 = containerManager
+.getMatchingContainer(OzoneConsts.GB * 3, newContainerOwner,
+container1.getPipeline());
+Assert.assertNotNull(info2);
+
+Assert.assertNotEquals(info.containerID(), info2.containerID());
+  }
+
+  @Test
   public void testContainerStateManagerRestart() throws IOException,
   TimeoutException, InterruptedException, AuthenticationException {
 // Allocate 5 containers in ALLOCATED state and 5 in CREATING state


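The bug is the textbook fail-fast case: calling remove() on a set while an enhanced for loop iterates it throws ConcurrentModificationException on the next step, so removal has to go through the Iterator, exactly as the patch does. A self-contained before/after demonstration:

import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.NavigableSet;
import java.util.TreeSet;

public class IteratorRemoveDemo {
  public static void main(String[] args) {
    NavigableSet<Long> ids = new TreeSet<>();
    for (long i = 1; i <= 5; i++) {
      ids.add(i);
    }

    // Broken: structural modification behind the iterator's back.
    try {
      for (Long id : ids) {
        if (id % 2 == 0) {
          ids.remove(id);
        }
      }
    } catch (ConcurrentModificationException e) {
      System.out.println("fail-fast iterator detected the removal");
    }

    // Fixed: remove through the iterator itself.
    Iterator<Long> it = ids.iterator();
    while (it.hasNext()) {
      if (it.next() % 2 == 0) {
        it.remove();
      }
    }
    System.out.println(ids); // only odd ids remain
  }
}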



[hadoop] branch trunk updated: HDDS-1558. IllegalArgumentException while processing container Reports.

2019-06-03 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f327112  HDDS-1558. IllegalArgumentException while processing 
container Reports.
f327112 is described below

commit f3271126fc9a3ad178b7dadd8edf851e16cf76d0
Author: Shashikant Banerjee 
AuthorDate: Tue Jun 4 00:59:02 2019 +0530

HDDS-1558. IllegalArgumentException while processing container Reports.

Signed-off-by: Nanda kumar 
---
 .../container/common/impl/HddsDispatcher.java  | 15 +++-
 .../ozone/container/common/interfaces/Handler.java |  9 +++
 .../container/keyvalue/KeyValueContainer.java  |  6 +-
 .../ozone/container/keyvalue/KeyValueHandler.java  | 14 
 .../rpc/TestContainerStateMachineFailures.java | 85 ++
 5 files changed, 125 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 4e8d5b9..6f56b3c 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -67,6 +67,7 @@ import io.opentracing.Scope;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
@@ -299,8 +300,18 @@ public class HddsDispatcher implements 
ContainerDispatcher, Auditor {
 State containerState = container.getContainerData().getState();
 Preconditions.checkState(
 containerState == State.OPEN || containerState == State.CLOSING);
-container.getContainerData()
-.setState(ContainerDataProto.State.UNHEALTHY);
+// mark and persist the container state to be unhealthy
+try {
+  handler.markContainerUhealthy(container);
+} catch (IOException ioe) {
+  // just log the error here in case marking the container fails,
+  // Return the actual failure response to the client
+  LOG.error("Failed to mark container " + containerID + " UNHEALTHY. ",
+  ioe);
+}
+// in any case, the in memory state of the container should be 
unhealthy
+Preconditions.checkArgument(
+container.getContainerData().getState() == State.UNHEALTHY);
 sendCloseContainerActionIfNeeded(container);
   }
 
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
index a3bb34b..52d14db 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
@@ -130,6 +130,15 @@ public abstract class Handler {
   throws IOException;
 
   /**
+   * Marks the container Unhealthy. Moves the container to UHEALTHY state.
+   *
+   * @param container container to update
+   * @throws IOException in case of exception
+   */
+  public abstract void markContainerUhealthy(Container container)
+  throws IOException;
+
+  /**
* Moves the Container to QUASI_CLOSED state.
*
* @param container container to be quasi closed
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 38257c3..6a1ca86 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -339,8 +339,10 @@ public class KeyValueContainer implements 
Container<KeyValueContainerData> {
   updateContainerFile(containerFile);
 
 } catch (StorageContainerException ex) {
-  if (oldState != null) {
-// Failed to update .container file. Reset the state to CLOSING
+  if (oldState != null
+  && containerData.getState() != ContainerDataProto.State.UNHEALTHY) {
+// Failed to update .container file. Reset the state to old state only
+// if the current state is not unhealthy.
 containerData.setState(oldState);
   }
   throw ex;
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
 
b/hadoop-hdds/container-service/src/main/

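The KeyValueContainer hunk adds a precedence rule to the rollback path: when rewriting the .container file fails, the in-memory state is restored to what it was, except that UNHEALTHY always sticks, so a corrupt container is never silently "healed" in memory. A compact sketch of that rule (the state names mirror the diff; everything else is illustrative):

class StateRollbackSketch {
  enum State { OPEN, CLOSING, UNHEALTHY }

  private State state = State.OPEN;

  void transition(State target, Runnable persist) {
    State oldState = state;
    state = target;
    try {
      persist.run(); // e.g. rewrite the .container file on disk
    } catch (RuntimeException ex) {
      if (state != State.UNHEALTHY) {
        state = oldState; // roll back benign transitions only
      }
      throw ex;
    }
  }

  public static void main(String[] args) {
    StateRollbackSketch c = new StateRollbackSketch();
    try {
      c.transition(State.UNHEALTHY,
          () -> { throw new RuntimeException("disk full"); });
    } catch (RuntimeException expected) {
      System.out.println("state stays " + c.state); // UNHEALTHY survives
    }
  }
}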
[hadoop] branch trunk updated: HDDS-1448 : RatisPipelineProvider should only consider open pipeline while excluding dn for pipeline allocation. (#786)

2019-05-03 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f194540  HDDS-1448 : RatisPipelineProvider should only consider open 
pipeline while excluding dn for pipeline allocation. (#786)
f194540 is described below

commit f194540520dedd6f7f5b64bb40ef1ad148fe16cb
Author: avijayanhwx <14299376+avijayan...@users.noreply.github.com>
AuthorDate: Fri May 3 11:49:00 2019 -0700

HDDS-1448 : RatisPipelineProvider should only consider open pipeline while 
excluding dn for pipeline allocation. (#786)
---
 .../hdds/scm/pipeline/RatisPipelineProvider.java   |  4 +-
 .../hadoop/hdds/scm/container/MockNodeManager.java |  3 +-
 .../scm/pipeline/TestRatisPipelineProvider.java| 63 ++
 3 files changed, 68 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
index 6563e3f..df21420 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
@@ -91,7 +91,9 @@ public class RatisPipelineProvider implements 
PipelineProvider {
   public Pipeline create(ReplicationFactor factor) throws IOException {
 // Get set of datanodes already used for ratis pipeline
 Set<DatanodeDetails> dnsUsed = new HashSet<>();
-stateManager.getPipelines(ReplicationType.RATIS, factor)
+stateManager.getPipelines(ReplicationType.RATIS, factor).stream().filter(
+p -> p.getPipelineState().equals(PipelineState.OPEN) ||
+p.getPipelineState().equals(PipelineState.ALLOCATED))
 .forEach(p -> dnsUsed.addAll(p.getNodes()));
 
 // Get list of healthy nodes
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 129644e..c10bc44 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.assertj.core.util.Preconditions;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
@@ -184,7 +185,7 @@ public class MockNodeManager implements NodeManager {
*/
   @Override
  public List<DatanodeDetails> getAllNodes() {
-return null;
+return new ArrayList<>(nodeMetricMap.keySet());
   }
 
   /**
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
index 28f47cc..00144e4 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
@@ -135,4 +135,67 @@ public class TestRatisPipelineProvider {
 Pipeline.PipelineState.OPEN);
 Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
   }
+
+  @Test
+  public void testCreatePipelinesDnExclude() throws IOException {
+
+// We have 10 DNs in MockNodeManager.
+// Use up first 3 DNs for an open pipeline.
+List<DatanodeDetails> openPiplineDns = nodeManager.getAllNodes()
+.subList(0, 3);
+HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE;
+
+Pipeline openPipeline = Pipeline.newBuilder()
+.setType(HddsProtos.ReplicationType.RATIS)
+.setFactor(factor)
+.setNodes(openPiplineDns)
+.setState(Pipeline.PipelineState.OPEN)
+.setId(PipelineID.randomId())
+.build();
+
+stateManager.addPipeline(openPipeline);
+
+// Use up next 3 DNs also for an open pipeline.
+List<DatanodeDetails> moreOpenPiplineDns = nodeManager.getAllNodes()
+.subList(3, 6);
+Pipeline anotherOpenPipeline = Pipeline.newBuilder()
+.setType(HddsProtos.ReplicationType.RATIS)
+.setFactor(factor)
+.setNodes(moreOpenPiplineDns)
+.setState(Pipeline.PipelineState.OPEN)
+.setId(PipelineID.randomId())
+.build();
+stateManager.addPipeline(anotherOpenPipeline);
+
+// Use up next 3 DNs also for a closed pipeline.
+List<DatanodeDetails> closedPiplineDns = nodeManager.getAllNodes()
+.subList(6, 9);
+Pipeline anotherClosedPipeline = Pipeline.newBui

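The production change itself is one stream filter: only OPEN or ALLOCATED pipelines pin their datanodes, while nodes of CLOSED pipelines stay eligible for a new allocation. A sketch of the filtering step using simplified stand-in types:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class PipelineFilterSketch {
  enum PipelineState { ALLOCATED, OPEN, CLOSED }

  static class Pipeline {
    final PipelineState state;
    final List<String> nodes;

    Pipeline(PipelineState state, List<String> nodes) {
      this.state = state;
      this.nodes = nodes;
    }
  }

  // Only OPEN/ALLOCATED pipelines make their datanodes unavailable.
  static Set<String> usedNodes(List<Pipeline> pipelines) {
    Set<String> used = new HashSet<>();
    pipelines.stream()
        .filter(p -> p.state == PipelineState.OPEN
            || p.state == PipelineState.ALLOCATED)
        .forEach(p -> used.addAll(p.nodes));
    return used;
  }

  public static void main(String[] args) {
    List<Pipeline> pipelines = new ArrayList<>();
    pipelines.add(new Pipeline(PipelineState.OPEN,
        Arrays.asList("dn1", "dn2", "dn3")));
    pipelines.add(new Pipeline(PipelineState.CLOSED,
        Arrays.asList("dn4", "dn5", "dn6")));
    System.out.println(usedNodes(pipelines)); // dn4..dn6 remain eligible
  }
}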
[hadoop] branch trunk updated: HDDS-1411. Add unit test to check if SCM correctly sends close commands for containers in closing state after a restart. (#755)

2019-04-23 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 59ded76  HDDS-1411. Add unit test to check if SCM correctly sends 
close commands for containers in closing state after a restart. (#755)
59ded76 is described below

commit 59ded7641f5dfcaca6df96aba5243ead3610d005
Author: Siddharth 
AuthorDate: Tue Apr 23 08:34:14 2019 -0700

HDDS-1411. Add unit test to check if SCM correctly sends close commands for 
containers in closing state after a restart. (#755)
---
 .../hdds/scm/server/StorageContainerManager.java   |   4 +-
 .../hadoop/ozone/TestStorageContainerManager.java  | 127 +++--
 2 files changed, 122 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 270d356..cbd1ac2 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -400,14 +400,14 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
   new SCMPipelineManager(conf, scmNodeManager, eventQueue);
 }
 
-if(configurator.getContainerManager() != null) {
+if (configurator.getContainerManager() != null) {
   containerManager = configurator.getContainerManager();
 } else {
   containerManager = new SCMContainerManager(
   conf, scmNodeManager, pipelineManager, eventQueue);
 }
 
-if(configurator.getScmBlockManager() != null) {
+if (configurator.getScmBlockManager() != null) {
   scmBlockManager = configurator.getScmBlockManager();
 } else {
   scmBlockManager = new BlockManagerImpl(conf, this);
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index a0c58db..e882657 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -20,10 +20,17 @@ package org.apache.hadoop.ozone;
 import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
 import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
 import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 import java.io.IOException;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.Collections;
@@ -33,6 +40,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
+
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -46,33 +54,44 @@ import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.block.DeletedBlockLog;
 import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
+import org.apache.hadoop.hdds.scm.container.ReplicationManager;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager.StartupOption;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.server.events.TypedEvent;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
+import

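The new imports give the test's shape away: parts of SCM are mocked with Mockito so the test can assert that a close command is fired again for containers stuck in CLOSING after a restart. A minimal sketch of that verification style, against a hypothetical publisher interface rather than SCM's real EventPublisher:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

public class CloseCommandVerifySketch {
  // Hypothetical stand-in for the event publisher the code under test uses.
  interface Publisher {
    void fireCloseContainer(long containerId);
  }

  public static void main(String[] args) {
    Publisher publisher = mock(Publisher.class);

    // The code under test would do this when replaying CLOSING containers
    // after a restart:
    publisher.fireCloseContainer(42L);

    // The test asserts that the close command was (re)issued.
    verify(publisher).fireCloseContainer(42L);
    System.out.println("close command verified");
  }
}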
[hadoop] branch trunk updated: HDDS-1368. Cleanup old ReplicationManager code from SCM.

2019-04-23 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 7e1f8d3  HDDS-1368. Cleanup old ReplicationManager code from SCM.
7e1f8d3 is described below

commit 7e1f8d3a1b7b48d3debcce1d7096ed4c46fdeb0f
Author: Nanda kumar 
AuthorDate: Tue Apr 23 17:35:39 2019 +0530

HDDS-1368. Cleanup old ReplicationManager code from SCM.
---
 .../common/statemachine/StateContext.java  |  32 +-
 .../DeleteContainerCommandHandler.java |   4 -
 .../ReplicateContainerCommandHandler.java  |  28 +-
 .../scm/command/CommandStatusReportHandler.java|  43 +--
 .../container/DeleteContainerCommandWatcher.java   |  56 ---
 .../replication/ReplicationCommandWatcher.java |  56 ---
 .../container/replication/ReplicationManager.java  | 384 -
 .../container/replication/ReplicationQueue.java|  73 
 .../container/replication/ReplicationRequest.java  | 123 ---
 .../apache/hadoop/hdds/scm/events/SCMEvents.java   |  65 +---
 .../hadoop/hdds/scm/node/DeadNodeHandler.java  | 186 ++
 .../hadoop/hdds/scm/node/SCMNodeManager.java   |   4 +-
 .../hdds/scm/server/StorageContainerManager.java   |   2 +-
 .../hadoop/hdds/scm/block/TestBlockManager.java|  11 +-
 .../command/TestCommandStatusReportHandler.java|   5 -
 .../replication/TestReplicationManager.java| 290 
 .../replication/TestReplicationQueue.java  | 134 ---
 .../hadoop/hdds/scm/node/TestDeadNodeHandler.java  |  63 +---
 .../hadoop/hdds/scm/node/TestStatisticsUpdate.java |   4 +-
 .../ozone/container/common/TestEndPoint.java   |  13 +-
 20 files changed, 150 insertions(+), 1426 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index 7e06473..56151f8 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -19,10 +19,11 @@ package 
org.apache.hadoop.ozone.container.common.statemachine;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.GeneratedMessage;
 import java.util.Map;
-import java.util.Optional;
 import java.util.concurrent.ConcurrentHashMap;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.proto
+.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.PipelineAction;
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.ContainerAction;
@@ -34,8 +35,6 @@ import 
org.apache.hadoop.ozone.container.common.states.datanode
 import org.apache.hadoop.ozone.container.common.states.datanode
 .RunningDatanodeState;
 import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
-import org.apache.hadoop.ozone.protocol.commands.CommandStatus
-.CommandStatusBuilder;
 import org.apache.hadoop.ozone.protocol.commands
 .DeleteBlockCommandStatus.DeleteBlockCommandStatusBuilder;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
@@ -432,27 +431,14 @@ public class StateContext {
* @param cmd - {@link SCMCommand}.
*/
   public void addCmdStatus(SCMCommand cmd) {
-final Optional<CommandStatusBuilder> cmdStatusBuilder;
-switch (cmd.getType()) {
-case replicateContainerCommand:
-  cmdStatusBuilder = Optional.of(CommandStatusBuilder.newBuilder());
-  break;
-case deleteBlocksCommand:
-  cmdStatusBuilder = Optional.of(
-  DeleteBlockCommandStatusBuilder.newBuilder());
-  break;
-case deleteContainerCommand:
-  cmdStatusBuilder = Optional.of(CommandStatusBuilder.newBuilder());
-  break;
-default:
-  cmdStatusBuilder = Optional.empty();
+if (cmd.getType() == SCMCommandProto.Type.deleteBlocksCommand) {
+  addCmdStatus(cmd.getId(),
+  DeleteBlockCommandStatusBuilder.newBuilder()
+  .setCmdId(cmd.getId())
+  .setStatus(Status.PENDING)
+  .setType(cmd.getType())
+  .build());
 }
-cmdStatusBuilder.ifPresent(statusBuilder ->
-addCmdStatus(cmd.getId(), statusBuilder
-.setCmdId(cmd.getId())
-.setStatus(Status.PENDING)
-.setType(cmd.getType())
-.build()));
   }
 
   /**
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/contai

[hadoop] branch trunk updated: HDDS-1363. ozone.metadata.dirs doesn't pick multiple dirs (#691)

2019-04-12 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3b08ac4  HDDS-1363. ozone.metadata.dirs doesn't pick multiple dirs 
(#691)
3b08ac4 is described below

commit 3b08ac46591e11323d8b1b1be742028bf5502bc0
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Fri Apr 12 11:30:08 2019 +0200

HDDS-1363. ozone.metadata.dirs doesn't pick multiple dirs (#691)
---
 .../java/org/apache/hadoop/hdds/HddsUtils.java |  24 
 .../org/apache/hadoop/hdds/scm/HddsServerUtil.java |  22 
 .../apache/hadoop/ozone/HddsDatanodeService.java   |   5 +-
 .../common/states/datanode/InitDatanodeState.java  |   4 +-
 .../org/apache/hadoop/hdds/server/ServerUtils.java |  25 ++---
 .../apache/hadoop/hdds/server/TestServerUtils.java | 123 +
 .../java/org/apache/hadoop/hdds/scm/ScmUtils.java  |  20 +---
 .../hadoop/hdds/scm/TestHddsServerUtils.java   |  49 
 .../apache/hadoop/ozone/web/utils/OzoneUtils.java  |  10 --
 .../org/apache/hadoop/ozone/recon/ReconUtils.java  |   2 +-
 10 files changed, 164 insertions(+), 120 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 2ca42d5..92ed9b6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -24,7 +24,6 @@ import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
-import java.nio.file.Paths;
 import java.util.Calendar;
 import java.util.Collection;
 import java.util.HashSet;
@@ -50,7 +49,6 @@ import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
 
-import com.google.common.base.Strings;
 import com.google.common.net.HostAndPort;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY;
@@ -313,28 +311,6 @@ public final class HddsUtils {
 
 
   /**
-   * Get the path for datanode id file.
-   *
-   * @param conf - Configuration
-   * @return the path of datanode id as string
-   */
-  public static String getDatanodeIdFilePath(Configuration conf) {
-String dataNodeIDPath = conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID);
-if (dataNodeIDPath == null) {
-  String metaPath = conf.get(HddsConfigKeys.OZONE_METADATA_DIRS);
-  if (Strings.isNullOrEmpty(metaPath)) {
-// this means meta data is not found, in theory should not happen at
-// this point because should've failed earlier.
-throw new IllegalArgumentException("Unable to locate meta data" +
-"directory when getting datanode id path");
-  }
-  dataNodeIDPath = Paths.get(metaPath,
-  ScmConfigKeys.OZONE_SCM_DATANODE_ID_PATH_DEFAULT).toString();
-}
-return dataNodeIDPath;
-  }
-
-  /**
* Returns the hostname for this datanode. If the hostname is not
* explicitly configured in the given config, then it is determined
* via the DNS class.
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
index 3ff6e66..9d1880c 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
@@ -372,4 +372,26 @@ public final class HddsServerUtil {
 File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf);
 return (new File(metaDirPath, "ratis")).getPath();
   }
+
+  /**
+   * Get the path for datanode id file.
+   *
+   * @param conf - Configuration
+   * @return the path of datanode id as string
+   */
+  public static String getDatanodeIdFilePath(Configuration conf) {
+String dataNodeIDPath = conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID);
+if (dataNodeIDPath == null) {
+  File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf);
+  if (metaDirPath == null) {
+// this means meta data is not found, in theory should not happen at
+// this point because should've failed earlier.
+throw new IllegalArgumentException("Unable to locate meta data" +
+"directory when getting datanode id path");
+  }
+  dataNodeIDPath = new File(metaDirPath,
+  ScmConfigKeys.OZONE_SCM_DATANODE_ID_PATH_DEFAULT).toString();
+}
+return dataNodeIDPath;
+  }
 }
diff --git 
a/hadoop-hdds/container-service/src/ma

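The relocated getDatanodeIdFilePath is a two-step lookup: an explicitly configured path wins; otherwise the id file lives under the (now correctly resolved) Ozone metadata directory, and having neither is a hard error. A sketch of that fallback using plain java.io types and illustrative key and file names:

import java.io.File;
import java.util.Properties;

class DatanodeIdPathSketch {
  static String datanodeIdFilePath(Properties conf, File metaDir) {
    String path = conf.getProperty("ozone.scm.datanode.id");
    if (path != null) {
      return path; // explicit configuration wins
    }
    if (metaDir == null) {
      // Mirrors the commit: a metadata dir must exist by this point.
      throw new IllegalArgumentException("Unable to locate meta data "
          + "directory when getting datanode id path");
    }
    return new File(metaDir, "datanode.id").toString();
  }

  public static void main(String[] args) {
    Properties conf = new Properties(); // nothing configured explicitly
    System.out.println(
        datanodeIdFilePath(conf, new File("/tmp/ozone-meta")));
  }
}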
[hadoop] branch trunk updated: HDDS-1416. MiniOzoneCluster should set custom value for hdds.datanode.replication.work.dir. Contributed by chencan.

2019-04-12 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4003849  HDDS-1416. MiniOzoneCluster should set custom value for 
hdds.datanode.replication.work.dir. Contributed by chencan.
4003849 is described below

commit 4003849fa48f7ba041c35901272744f9bd089724
Author: Nanda kumar 
AuthorDate: Fri Apr 12 14:45:57 2019 +0530

HDDS-1416. MiniOzoneCluster should set custom value for 
hdds.datanode.replication.work.dir.
Contributed by chencan.
---
 .../src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java  | 5 +
 1 file changed, 5 insertions(+)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 8018bab..4cfc950 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -540,13 +540,18 @@ public class MiniOzoneClusterImpl implements 
MiniOzoneCluster {
 Path metaDir = Paths.get(datanodeBaseDir, "meta");
 Path dataDir = Paths.get(datanodeBaseDir, "data", "containers");
 Path ratisDir = Paths.get(datanodeBaseDir, "data", "ratis");
+Path wrokDir = Paths.get(datanodeBaseDir, "data", "replication",
+"work");
 Files.createDirectories(metaDir);
 Files.createDirectories(dataDir);
 Files.createDirectories(ratisDir);
+Files.createDirectories(wrokDir);
 dnConf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString());
 dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, 
dataDir.toString());
 dnConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
 ratisDir.toString());
+dnConf.set(OzoneConfigKeys.OZONE_CONTAINER_COPY_WORKDIR,
+wrokDir.toString());
 
 hddsDatanodes.add(
 HddsDatanodeService.createHddsDatanodeService(args, dnConf));





[hadoop] branch trunk updated: HDDS-1404. Fix typos in HDDS. Contributed by bianqi.

2019-04-12 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c298415  HDDS-1404. Fix typos in HDDS. Contributed by bianqi.
c298415 is described below

commit c298415eb5c1922f9bd75dd6d0c4d90fb60c0ed3
Author: Nanda kumar 
AuthorDate: Fri Apr 12 14:12:10 2019 +0530

HDDS-1404. Fix typos in HDDS. Contributed by bianqi.
---
 hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto | 4 ++--
 .../common/src/main/proto/StorageContainerLocationProtocol.proto  | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 7396eb3..fd572ad 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -462,6 +462,6 @@ service XceiverClientProtocolService {
 }
 
 service IntraDatanodeProtocolService {
-  // An intradatanode service to copy the raw containerdata betwen nodes
+  // An intradatanode service to copy the raw container data between nodes
   rpc download (CopyContainerRequestProto) returns (stream 
CopyContainerResponseProto);
-}
\ No newline at end of file
+}
diff --git 
a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto 
b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
index d0f6c13..ade54a4 100644
--- a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
@@ -34,7 +34,7 @@ import "hdds.proto";
 * Request send to SCM asking where the container should be created.
 */
 message ContainerRequestProto {
-  // Ozone only support replciation of either 1 or 3.
+  // Ozone only support replication of either 1 or 3.
   required ReplicationFactor replicationFactor = 2;
   required ReplicationType  replicationType = 3;
   required string owner = 4;





[hadoop] branch trunk updated: HDDS-1401. Static ContainerCache in Datanodes can result in overwrite of container db. Contributed by Mukul Kumar Singh. (#708)

2019-04-10 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new df01469  HDDS-1401. Static ContainerCache in Datanodes can result in 
overwrite of container db. Contributed by Mukul Kumar Singh. (#708)
df01469 is described below

commit df01469141e3933ca35785c25e1e29f59129cc85
Author: Mukul Kumar Singh 
AuthorDate: Wed Apr 10 18:00:10 2019 +0530

HDDS-1401. Static ContainerCache in Datanodes can result in overwrite of 
container db. Contributed by Mukul Kumar Singh. (#708)
---
 .../container/common/utils/ContainerCache.java | 36 --
 .../container/keyvalue/helpers/BlockUtils.java |  2 +-
 .../container/keyvalue/impl/BlockManagerImpl.java  |  2 +-
 .../container/keyvalue/TestKeyValueContainer.java  |  2 +-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java|  4 +--
 .../common/impl/TestContainerPersistence.java  |  2 +-
 .../TestCloseContainerByPipeline.java  | 17 ++
 7 files changed, 42 insertions(+), 23 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
index a533684..25d1bdf 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
@@ -69,15 +69,15 @@ public final class ContainerCache extends LRUMap {
   /**
* Closes a db instance.
*
-   * @param containerID - ID of the container to be closed.
+   * @param containerPath - path of the container db to be closed.
* @param db - db instance to close.
*/
-  private void closeDB(long containerID, MetadataStore db) {
+  private void closeDB(String containerPath, MetadataStore db) {
 if (db != null) {
   try {
 db.close();
-  } catch (IOException e) {
-LOG.error("Error closing DB. Container: " + containerID, e);
+  } catch (Exception e) {
+LOG.error("Error closing DB. Container: " + containerPath, e);
   }
 }
   }
@@ -93,7 +93,7 @@ public final class ContainerCache extends LRUMap {
   while (iterator.hasNext()) {
 iterator.next();
 MetadataStore db = (MetadataStore) iterator.getValue();
-closeDB(((Number)iterator.getKey()).longValue(), db);
+closeDB((String)iterator.getKey(), db);
   }
   // reset the cache
   cache.clear();
@@ -107,14 +107,18 @@ public final class ContainerCache extends LRUMap {
*/
   @Override
   protected boolean removeLRU(LinkEntry entry) {
+MetadataStore db = (MetadataStore) entry.getValue();
+String dbFile = (String)entry.getKey();
 lock.lock();
 try {
-  MetadataStore db = (MetadataStore) entry.getValue();
-  closeDB(((Number)entry.getKey()).longValue(), db);
+  closeDB(dbFile, db);
+  return true;
+} catch (Exception e) {
+  LOG.error("Eviction for db:{} failed", dbFile, e);
+  return false;
 } finally {
   lock.unlock();
 }
-return true;
   }
 
   /**
@@ -133,7 +137,7 @@ public final class ContainerCache extends LRUMap {
 "Container ID cannot be negative.");
 lock.lock();
 try {
-  MetadataStore db = (MetadataStore) this.get(containerID);
+  MetadataStore db = (MetadataStore) this.get(containerDBPath);
 
   if (db == null) {
 db = MetadataStoreBuilder.newBuilder()
@@ -142,7 +146,7 @@ public final class ContainerCache extends LRUMap {
 .setConf(conf)
 .setDBType(containerDBType)
 .build();
-this.put(containerID, db);
+this.put(containerDBPath, db);
   }
   return db;
 } catch (Exception e) {
@@ -157,16 +161,14 @@ public final class ContainerCache extends LRUMap {
   /**
* Remove a DB handler from cache.
*
-   * @param containerID - ID of the container.
+   * @param containerPath - path of the container db file.
*/
-  public void removeDB(long containerID) {
-Preconditions.checkState(containerID >= 0,
-"Container ID cannot be negative.");
+  public void removeDB(String containerPath) {
 lock.lock();
 try {
-  MetadataStore db = (MetadataStore)this.get(containerID);
-  closeDB(containerID, db);
-  this.remove(containerID);
+  MetadataStore db = (MetadataStore)this.get(containerPath);
+  closeDB(containerPath, db);
+  this.remove(containerPath);
 } finally {
   lock.unlock();
 }
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
 
b/hadoop-hdds/container-se

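Keying the cache by container ID collapses as soon as two datanodes share one JVM, as in MiniOzoneCluster: the same ID then refers to two different RocksDB directories and one handle silently overwrites the other. Keying by the DB file path gives every on-disk store its own entry. A compact LRU sketch of the corrected keying (illustrative types, not the real ContainerCache):

import java.util.LinkedHashMap;
import java.util.Map;

class DbHandleCacheSketch extends LinkedHashMap<String, AutoCloseable> {
  private final int maxSize;

  DbHandleCacheSketch(int maxSize) {
    super(16, 0.75f, true); // access-order gives LRU behavior
    this.maxSize = maxSize;
  }

  @Override
  protected boolean removeEldestEntry(Map.Entry<String, AutoCloseable> eldest) {
    if (size() <= maxSize) {
      return false;
    }
    try {
      eldest.getValue().close(); // close the evicted DB handle
    } catch (Exception e) {
      // eviction proceeds even if close fails, as in the commit's closeDB
    }
    return true;
  }

  public static void main(String[] args) {
    DbHandleCacheSketch cache = new DbHandleCacheSketch(2);
    // Same container id (5) on two datanodes: distinct keys, no overwrite.
    cache.put("/data/dn1/container-5/db", () -> System.out.println("dn1/5"));
    cache.put("/data/dn2/container-5/db", () -> System.out.println("dn2/5"));
    System.out.println(cache.size()); // 2
  }
}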
[hadoop] branch trunk updated: HDDS-1372. getContainerWithPipeline for a standalone pipeline fails with ConcurrentModificationException. (#682)

2019-04-09 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 73f43ac  HDDS-1372. getContainerWithPipeline for a standalone pipeline 
fails with ConcurrentModificationException. (#682)
73f43ac is described below

commit 73f43ac2dc53294a15e8f794586483a9b8309b2e
Author: Nanda kumar 
AuthorDate: Tue Apr 9 15:10:07 2019 +0530

HDDS-1372. getContainerWithPipeline for a standalone pipeline fails with 
ConcurrentModificationException. (#682)
---
 .../scm/container/states/ContainerStateMap.java|  3 +-
 .../scm/container/TestSCMContainerManager.java | 44 ++
 2 files changed, 45 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
index 7411055..d85028b 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
@@ -32,7 +32,6 @@ import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.HashSet;
 import java.util.Set;
 import java.util.Collections;
 import java.util.Map;
@@ -138,7 +137,7 @@ public class ContainerStateMap {
   ownerMap.insert(info.getOwner(), id);
   factorMap.insert(info.getReplicationFactor(), id);
   typeMap.insert(info.getReplicationType(), id);
-  replicaMap.put(id, new HashSet<>());
+  replicaMap.put(id, ConcurrentHashMap.newKeySet());
 
   // Flush the cache of this container type, will be added later when
   // get container queries are executed.
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
index 43aaa85..91a36b7 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
@@ -44,12 +44,15 @@ import org.junit.rules.ExpectedException;
 import java.io.File;
 import java.io.IOException;
 import java.util.Iterator;
+import java.util.Optional;
 import java.util.Random;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.stream.Collectors;
+import java.util.stream.IntStream;
 
 /**
  * Tests for Container ContainerManager.
@@ -195,6 +198,47 @@ public class TestSCMContainerManager {
   }
 
   @Test
+  public void testGetContainerReplicaWithParallelUpdate() throws Exception {
+testGetContainerWithPipeline();
+final Optional<ContainerID> id = containerManager.getContainerIDs()
+.stream().findFirst();
+Assert.assertTrue(id.isPresent());
+final ContainerID cId = id.get();
+final Optional<ContainerReplica> replica = containerManager
+.getContainerReplicas(cId).stream().findFirst();
+Assert.assertTrue(replica.isPresent());
+final ContainerReplica cReplica = replica.get();
+final AtomicBoolean runUpdaterThread =
+new AtomicBoolean(true);
+
+Thread updaterThread = new Thread(() -> {
+  while (runUpdaterThread.get()) {
+try {
+  containerManager.removeContainerReplica(cId, cReplica);
+  containerManager.updateContainerReplica(cId, cReplica);
+} catch (ContainerException e) {
+  Assert.fail("Container Exception: " + e.getMessage());
+}
+  }
+});
+
+updaterThread.setDaemon(true);
+updaterThread.start();
+
+IntStream.range(0, 100).forEach(i -> {
+  try {
+Assert.assertNotNull(containerManager
+.getContainerReplicas(cId)
+.stream().map(ContainerReplica::getDatanodeDetails)
+.collect(Collectors.toSet()));
+  } catch (ContainerNotFoundException e) {
+Assert.fail("Missing Container " + id);
+  }
+});
+runUpdaterThread.set(false);
+  }
+
+  @Test
   public void testgetNoneExistentContainer() {
 try {
   containerManager.getContainer(ContainerID.valueof(


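The production fix is one line: the replica set becomes ConcurrentHashMap.newKeySet(), whose weakly consistent iterators never throw ConcurrentModificationException, so getContainerReplicas() can stream the set while the test's updater thread keeps adding and removing a replica. A small self-contained sketch of the property being relied on:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class ConcurrentSetSketch {
  public static void main(String[] args) throws InterruptedException {
    Set<Integer> replicas = ConcurrentHashMap.newKeySet();
    replicas.add(1);
    replicas.add(2);

    // Writer thread mutates the set while the main thread iterates it.
    Thread writer = new Thread(() -> {
      for (int i = 0; i < 100_000; i++) {
        replicas.add(3);
        replicas.remove(3);
      }
    });
    writer.start();

    // Weakly consistent iteration: no ConcurrentModificationException.
    for (int i = 0; i < 100_000; i++) {
      replicas.forEach(r -> { });
    }
    writer.join();
    System.out.println("iterated concurrently without CME");
  }
}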


