[hadoop] branch trunk updated: HDDS-1379. Convert all OM Volume related operations to HA model. (#689)

2019-04-04 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 77fe51e  HDDS-1379. Convert all OM Volume related operations to HA 
model. (#689)
77fe51e is described below

commit 77fe51e13666f7e10ce5fa7bf53b35cdcd4602b6
Author: Bharat Viswanadham 
AuthorDate: Thu Apr 4 21:09:50 2019 -0700

HDDS-1379. Convert all OM Volume related operations to HA model. (#689)
---
 .../ozone/om/helpers/OmDeleteVolumeResponse.java   |  49 
 .../hadoop/ozone/om/helpers/OmVolumeArgs.java  |  19 +-
 .../om/helpers/OmVolumeOwnerChangeResponse.java|  56 
 .../ozone/om/protocol/OzoneManagerHAProtocol.java  |  78 ++
 .../src/main/proto/OzoneManagerProtocol.proto  |   7 +
 .../org/apache/hadoop/ozone/om/TestOmMetrics.java  |   6 +-
 .../apache/hadoop/ozone/om/TestOzoneManagerHA.java |  29 +++
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  77 ++
 .../hadoop/ozone/om/S3BucketManagerImpl.java   |  11 +-
 .../org/apache/hadoop/ozone/om/VolumeManager.java  |  52 +++-
 .../apache/hadoop/ozone/om/VolumeManagerImpl.java  | 289 ++---
 .../ozone/om/ratis/OzoneManagerStateMachine.java   |  65 +++--
 ...dler.java => OzoneManagerHARequestHandler.java} |  36 +--
 .../OzoneManagerHARequestHandlerImpl.java  | 247 ++
 .../protocolPB/OzoneManagerRequestHandler.java |   6 +-
 .../hadoop/ozone/protocolPB/RequestHandler.java|   2 +-
 .../om/ratis/TestOzoneManagerStateMachine.java |   8 +-
 17 files changed, 897 insertions(+), 140 deletions(-)

diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDeleteVolumeResponse.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDeleteVolumeResponse.java
new file mode 100644
index 000..6e96674
--- /dev/null
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDeleteVolumeResponse.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+.VolumeList;
+
+/**
+ * OM response for a delete volume request for an ozone volume.
+ */
+public class OmDeleteVolumeResponse {
+  private String volume;
+  private String owner;
+  private VolumeList updatedVolumeList;
+
+  public OmDeleteVolumeResponse(String volume, String owner,
+      VolumeList updatedVolumeList) {
+    this.volume = volume;
+    this.owner = owner;
+    this.updatedVolumeList = updatedVolumeList;
+  }
+
+  public String getVolume() {
+    return volume;
+  }
+
+  public String getOwner() {
+    return owner;
+  }
+
+  public VolumeList getUpdatedVolumeList() {
+    return updatedVolumeList;
+  }
+}
\ No newline at end of file
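
For orientation, a minimal usage sketch of the new helper. The VolumeList builder calls assume the usual protobuf-generated API, and the volume and owner names are illustrative only:

    // Hypothetical fragment, not part of the commit:
    VolumeList remaining = VolumeList.newBuilder()
        .addVolumeNames("vol2")   // volumes the owner keeps after the delete
        .build();
    OmDeleteVolumeResponse resp =
        new OmDeleteVolumeResponse("vol1", "ozoneAdmin", remaining);
    System.out.println(resp.getVolume() + " deleted; owner was " + resp.getOwner());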
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
index 08c17ec..7b25d78 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
@@ -36,10 +36,10 @@ import com.google.common.base.Preconditions;
  */
 public final class OmVolumeArgs extends WithMetadata implements Auditable {
   private final String adminName;
-  private final String ownerName;
+  private String ownerName;
   private final String volume;
-  private final long creationTime;
-  private final long quotaInBytes;
+  private long creationTime;
+  private long quotaInBytes;
   private final OmOzoneAclMap aclMap;
 
   /**
@@ -64,6 +64,19 @@ public final class OmVolumeArgs extends WithMetadata 
implements Auditable {
 this.creationTime = creationTime;
   }
 
+
+  public void setOwnerName(String newOwner) {
+    this.ownerName = newOwner;
+  }
+
+  public void setQuotaInBytes(long quotaInBytes) {
+    this.quotaInBytes = quotaInBytes;
+  }
+
+  public void setCreationTime(long time) {
+    this.creationTime = time;
+  }
+
   /**
* Returns 
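
Note on the change above: ownerName, quotaInBytes and creationTime drop their final modifiers so an HA request handler can apply a replicated owner or quota change onto a cached OmVolumeArgs rather than rebuilding it. A hedged sketch of that call pattern (method and parameter names are assumed, not taken from the commit):

    // Illustrative fragment only, not part of the commit:
    void applyVolumeUpdate(OmVolumeArgs volumeArgs, String newOwner, long newQuota) {
      volumeArgs.setOwnerName(newOwner);
      volumeArgs.setQuotaInBytes(newQuota);
    }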

[hadoop] branch branch-2.9 updated: HDFS-14407. Fix misuse of SLF4j logging API in DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.

2019-04-04 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-2.9
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.9 by this push:
 new 9855510  HDFS-14407. Fix misuse of SLF4j logging API in 
DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.
9855510 is described below

commit 98555100a6a0b7995f03cbdb49e853264a88da64
Author: Akira Ajisaka 
AuthorDate: Fri Apr 5 12:29:11 2019 +0900

HDFS-14407. Fix misuse of SLF4j logging API in 
DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.

(cherry picked from commit 67020f09502a4f07342dee457e47bb52b03441ae)
(cherry picked from commit 2abc7927d1ec65444e38e013eb5541bfa0b448b4)
---
 .../hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index d5f2035..cba6710 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -230,7 +230,7 @@ public class DatasetVolumeChecker {
 // Wait until our timeout elapses, after which we give up on
 // the remaining volumes.
 if (!latch.await(maxAllowedTimeForCheckMs, TimeUnit.MILLISECONDS)) {
-  LOG.warn("checkAllVolumes timed out after {} ms" +
+  LOG.warn("checkAllVolumes timed out after {} ms",
   maxAllowedTimeForCheckMs);
 }
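
The one-character change above fixes a classic SLF4J misuse: with '+', the timeout value is concatenated into the format string, so the "{}" placeholder is never substituted. A self-contained illustration (assumes slf4j-api and a binding on the classpath; the class name is invented for the demo):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class PlaceholderDemo {
      private static final Logger LOG =
          LoggerFactory.getLogger(PlaceholderDemo.class);

      public static void main(String[] args) {
        long timeoutMs = 1000;
        // Broken: logs "timed out after {} ms1000" -- placeholder left as-is.
        LOG.warn("timed out after {} ms" + timeoutMs);
        // Fixed: logs "timed out after 1000 ms" -- argument fills the placeholder.
        LOG.warn("timed out after {} ms", timeoutMs);
      }
    }

The identical fix is cherry-picked to branch-2, branch-3.0, branch-3.1, branch-3.2 and trunk in the messages that follow.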
 





[hadoop] branch branch-2 updated: HDFS-14407. Fix misuse of SLF4j logging API in DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.

2019-04-04 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 2abc792  HDFS-14407. Fix misuse of SLF4j logging API in 
DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.
2abc792 is described below

commit 2abc7927d1ec65444e38e013eb5541bfa0b448b4
Author: Akira Ajisaka 
AuthorDate: Fri Apr 5 12:29:11 2019 +0900

HDFS-14407. Fix misuse of SLF4j logging API in 
DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.

(cherry picked from commit 67020f09502a4f07342dee457e47bb52b03441ae)
---
 .../hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index dd4729b..0f59b84 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -232,7 +232,7 @@ public class DatasetVolumeChecker {
 // Wait until our timeout elapses, after which we give up on
 // the remaining volumes.
 if (!latch.await(maxAllowedTimeForCheckMs, TimeUnit.MILLISECONDS)) {
-  LOG.warn("checkAllVolumes timed out after {} ms" +
+  LOG.warn("checkAllVolumes timed out after {} ms",
   maxAllowedTimeForCheckMs);
 }
 





[hadoop] branch branch-3.0 updated: HDFS-14407. Fix misuse of SLF4j logging API in DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.

2019-04-04 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
 new f824f4d  HDFS-14407. Fix misuse of SLF4j logging API in 
DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.
f824f4d is described below

commit f824f4dccbc9ab20b797c5cc4ff3d0770a8ed427
Author: Akira Ajisaka 
AuthorDate: Fri Apr 5 12:29:11 2019 +0900

HDFS-14407. Fix misuse of SLF4j logging API in 
DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.

(cherry picked from commit 67020f09502a4f07342dee457e47bb52b03441ae)
---
 .../hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index 30602c0..2ddd4e4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -241,7 +241,7 @@ public class DatasetVolumeChecker {
 // Wait until our timeout elapses, after which we give up on
 // the remaining volumes.
 if (!latch.await(maxAllowedTimeForCheckMs, TimeUnit.MILLISECONDS)) {
-  LOG.warn("checkAllVolumes timed out after {} ms" +
+  LOG.warn("checkAllVolumes timed out after {} ms",
   maxAllowedTimeForCheckMs);
 }
 





[hadoop] branch branch-3.1 updated: HDFS-14407. Fix misuse of SLF4j logging API in DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.

2019-04-04 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new a70076b  HDFS-14407. Fix misuse of SLF4j logging API in 
DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.
a70076b is described below

commit a70076b3a51ff57717bcc095457ceaec27a225b7
Author: Akira Ajisaka 
AuthorDate: Fri Apr 5 12:29:11 2019 +0900

HDFS-14407. Fix misuse of SLF4j logging API in 
DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.

(cherry picked from commit 67020f09502a4f07342dee457e47bb52b03441ae)
---
 .../hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index 5c590f6..382d827 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -241,7 +241,7 @@ public class DatasetVolumeChecker {
 // Wait until our timeout elapses, after which we give up on
 // the remaining volumes.
 if (!latch.await(maxAllowedTimeForCheckMs, TimeUnit.MILLISECONDS)) {
-  LOG.warn("checkAllVolumes timed out after {} ms" +
+  LOG.warn("checkAllVolumes timed out after {} ms",
   maxAllowedTimeForCheckMs);
 }
 





[hadoop] branch branch-3.2 updated: HDFS-14407. Fix misuse of SLF4j logging API in DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.

2019-04-04 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new f020ac2  HDFS-14407. Fix misuse of SLF4j logging API in 
DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.
f020ac2 is described below

commit f020ac2c06597f97e0288190786df032ed9a7445
Author: Akira Ajisaka 
AuthorDate: Fri Apr 5 12:29:11 2019 +0900

HDFS-14407. Fix misuse of SLF4j logging API in 
DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.

(cherry picked from commit 67020f09502a4f07342dee457e47bb52b03441ae)
---
 .../hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index 5c590f6..382d827 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -241,7 +241,7 @@ public class DatasetVolumeChecker {
 // Wait until our timeout elapses, after which we give up on
 // the remaining volumes.
 if (!latch.await(maxAllowedTimeForCheckMs, TimeUnit.MILLISECONDS)) {
-  LOG.warn("checkAllVolumes timed out after {} ms" +
+  LOG.warn("checkAllVolumes timed out after {} ms",
   maxAllowedTimeForCheckMs);
 }
 





[hadoop] branch trunk updated: HDFS-14407. Fix misuse of SLF4j logging API in DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.

2019-04-04 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 67020f0  HDFS-14407. Fix misuse of SLF4j logging API in 
DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.
67020f0 is described below

commit 67020f09502a4f07342dee457e47bb52b03441ae
Author: Akira Ajisaka 
AuthorDate: Fri Apr 5 12:29:11 2019 +0900

HDFS-14407. Fix misuse of SLF4j logging API in 
DatasetVolumeChecker#checkAllVolumes. Contributed by Wanqiang Ji.
---
 .../hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index 7c13ed0..b492287 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -242,7 +242,7 @@ public class DatasetVolumeChecker {
 // Wait until our timeout elapses, after which we give up on
 // the remaining volumes.
 if (!latch.await(maxAllowedTimeForCheckMs, TimeUnit.MILLISECONDS)) {
-  LOG.warn("checkAllVolumes timed out after {} ms" +
+  LOG.warn("checkAllVolumes timed out after {} ms",
   maxAllowedTimeForCheckMs);
 }
 





[hadoop] branch HDFS-13891 updated: HDFS-13853. RBF: RouterAdmin update cmd is overwriting the entry not updating the existing. Contributed by Ayush Saxena.

2019-04-04 Thread ayushsaxena
This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch HDFS-13891
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/HDFS-13891 by this push:
 new 007b8ea  HDFS-13853. RBF: RouterAdmin update cmd is overwriting the 
entry not updating the existing. Contributed by Ayush Saxena.
007b8ea is described below

commit 007b8ea1a5a020162582968b527cf78567dc4e97
Author: Ayush Saxena 
AuthorDate: Fri Apr 5 08:11:16 2019 +0530

HDFS-13853. RBF: RouterAdmin update cmd is overwriting the entry not 
updating the existing. Contributed by Ayush Saxena.
---
 .../hadoop/hdfs/tools/federation/RouterAdmin.java  | 219 +++--
 .../federation/router/TestRouterAdminCLI.java  | 130 ++--
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md  |   4 +-
 3 files changed, 232 insertions(+), 121 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index 61da7e9..9d03a44 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -22,8 +22,10 @@ import java.net.InetSocketAddress;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.LinkedHashMap;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
@@ -138,9 +140,10 @@ public class RouterAdmin extends Configured implements 
Tool {
   + "[-readonly] [-faulttolerant] [-order HASH|LOCAL|RANDOM|HASH_ALL] "
   + "-owner  -group  -mode ]";
 } else if (cmd.equals("-update")) {
-  return "\t[-update   "
-  + " "
-  + "[-readonly] [-faulttolerant] [-order HASH|LOCAL|RANDOM|HASH_ALL] "
+  return "\t[-update "
+  + " [ ] "
+  + "[-readonly true|false] [-faulttolerant true|false]"
+  + " [-order HASH|LOCAL|RANDOM|HASH_ALL] "
   + "-owner  -group  -mode ]";
 } else if (cmd.equals("-rm")) {
   return "\t[-rm ]";
@@ -294,6 +297,8 @@ public class RouterAdmin extends Configured implements Tool 
{
   } else if ("-update".equals(cmd)) {
 if (updateMount(argv, i)) {
   System.out.println("Successfully updated mount point " + argv[i]);
+  System.out.println(
+  "WARN: Changing order/destinations may lead to inconsistencies");
 } else {
   exitCode = -1;
 }
@@ -366,6 +371,10 @@ public class RouterAdmin extends Configured implements 
Tool {
 e.printStackTrace();
 debugException = ex;
   }
+} catch (IOException ioe) {
+  exitCode = -1;
+  System.err.println(cmd.substring(1) + ": " + ioe.getLocalizedMessage());
+  printUsage(cmd);
 } catch (Exception e) {
   exitCode = -1;
   debugException = e;
@@ -473,17 +482,7 @@ public class RouterAdmin extends Configured implements 
Tool {
 mount = normalizeFileSystemPath(mount);
 // Get the existing entry
 MountTableManager mountTable = client.getMountTableManager();
-    GetMountTableEntriesRequest getRequest =
-        GetMountTableEntriesRequest.newInstance(mount);
-    GetMountTableEntriesResponse getResponse =
-        mountTable.getMountTableEntries(getRequest);
-    List<MountTable> results = getResponse.getEntries();
-    MountTable existingEntry = null;
-    for (MountTable result : results) {
-      if (mount.equals(result.getSourcePath())) {
-        existingEntry = result;
-      }
-    }
+    MountTable existingEntry = getMountEntry(mount, mountTable);
 
 if (existingEntry == null) {
   // Create and add the entry if it doesn't exist
@@ -579,100 +578,81 @@ public class RouterAdmin extends Configured implements 
Tool {
* @throws IOException If there is an error.
*/
   public boolean updateMount(String[] parameters, int i) throws IOException {
-// Mandatory parameters
 String mount = parameters[i++];
-String[] nss = parameters[i++].split(",");
-String dest = parameters[i++];
-
-// Optional parameters
-boolean readOnly = false;
-boolean faultTolerant = false;
-String owner = null;
-String group = null;
-FsPermission mode = null;
-DestinationOrder order = null;
-while (i < parameters.length) {
-  if (parameters[i].equals("-readonly")) {
-readOnly = true;
-  } else if (parameters[i].equals("-faulttolerant")) {
-faultTolerant = true;
-  } else if (parameters[i].equals("-order")) {
-i++;
-try {
-  order = DestinationOrder.valueOf(parameters[i]);
-} catch(Exceptio
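
The refactor above is the heart of the fix: -update previously rebuilt the mount entry from scratch, overwriting fields the user never mentioned, and now merges only the flags actually passed onto the existing entry fetched via getMountEntry. A toy model of that merge-vs-overwrite distinction (plain maps standing in for MountTable; not the actual RouterAdmin code):

    import java.util.HashMap;
    import java.util.Map;

    public class UpdateMountDemo {
      public static void main(String[] args) {
        Map<String, String> existing = new HashMap<>();
        existing.put("owner", "hdfs");
        existing.put("readonly", "true");

        // Only -owner was passed on the command line.
        Map<String, String> update = Map.of("owner", "ayush");

        // Merge instead of overwrite: readonly keeps its old value.
        existing.putAll(update);
        System.out.println(existing.get("owner") + ", readonly="
            + existing.get("readonly"));  // ayush, readonly=true
      }
    }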

[hadoop] branch trunk updated: HDDS-1189. Build failing due to rat check failure introduced by HDDS-1189. Contributed by Siddharth Wagle.

2019-04-04 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d2141ef  HDDS-1189. Build failing due to rat check failure introduced 
by HDDS-1189. Contributed by Siddharth Wagle.
d2141ef is described below

commit d2141ef886632d5ce53deb98bfbfb01fc24eff4c
Author: Bharat Viswanadham 
AuthorDate: Thu Apr 4 17:38:02 2019 -0700

HDDS-1189. Build failing due to rat check failure introduced by HDDS-1189. 
Contributed by Siddharth Wagle.
---
 hadoop-ozone/ozone-recon-codegen/pom.xml | 13 +
 1 file changed, 13 insertions(+)

diff --git a/hadoop-ozone/ozone-recon-codegen/pom.xml 
b/hadoop-ozone/ozone-recon-codegen/pom.xml
index 336fc1a..0854aac 100644
--- a/hadoop-ozone/ozone-recon-codegen/pom.xml
+++ b/hadoop-ozone/ozone-recon-codegen/pom.xml
@@ -1,4 +1,17 @@
 
+
 <project xmlns="http://maven.apache.org/POM/4.0.0"
          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
          http://maven.apache.org/xsd/maven-4.0.0.xsd">





[hadoop] branch ozone-0.4 updated: HDDS-1333. OzoneFileSystem can't work with spark/hadoop2.7 because incompatible security classes. Contributed by Elek, Marton. (#653)

2019-04-04 Thread ajay
This is an automated email from the ASF dual-hosted git repository.

ajay pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 7ec6a31  HDDS-1333. OzoneFileSystem can't work with spark/hadoop2.7 
because incompatible security classes. Contributed by Elek, Marton. (#653)
7ec6a31 is described below

commit 7ec6a31eb314b9ecc1c2affbcc7d63c07bd33523
Author: Elek, Márton 
AuthorDate: Thu Apr 4 23:00:54 2019 +0200

HDDS-1333. OzoneFileSystem can't work with spark/hadoop2.7 because 
incompatible security classes. Contributed by Elek, Marton. (#653)
---
 hadoop-hdds/docs/content/SparkOzoneFSK8S.md|   6 +-
 .../ozone/om/ha/OMFailoverProxyProvider.java   |   2 +-
 .../dist/dev-support/bin/dist-layout-stitching |   3 +-
 hadoop-ozone/dist/pom.xml  |  22 +
 .../src/main/compose/ozonefs/docker-compose.yaml   |  40 +-
 .../dist/src/main/compose/ozonefs/hadoopo3fs.robot |  56 ++
 .../src/main/compose/ozonesecure/docker-config |   2 +
 .../dist/src/main/smoketest/commonlib.robot|   6 +
 .../dist/src/main/smoketest/createbucketenv.robot  |  43 +
 .../src/main/smoketest/env-compose.robot}  |  18 +-
 ...rImpl.java => BasicOzoneClientAdapterImpl.java} |  89 +-
 ...neFileSystem.java => BasicOzoneFileSystem.java} | 177 ++--
 .../hadoop/fs/ozone/FilteredClassLoader.java   |   1 +
 .../apache/hadoop/fs/ozone/OzoneClientAdapter.java |   2 +-
 .../hadoop/fs/ozone/OzoneClientAdapterFactory.java |  30 +-
 .../hadoop/fs/ozone/OzoneClientAdapterImpl.java| 367 +---
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java| 922 +
 .../hadoop/fs/ozone/TestOzoneFileInterfaces.java   |   2 +-
 .../services/org.apache.hadoop.fs.FileSystem   |   0
 19 files changed, 352 insertions(+), 1436 deletions(-)

diff --git a/hadoop-hdds/docs/content/SparkOzoneFSK8S.md 
b/hadoop-hdds/docs/content/SparkOzoneFSK8S.md
index 3e598d9..fa6cacd 100644
--- a/hadoop-hdds/docs/content/SparkOzoneFSK8S.md
+++ b/hadoop-hdds/docs/content/SparkOzoneFSK8S.md
@@ -78,11 +78,13 @@ And create a custom `core-site.xml`:
 
 
     <name>fs.o3fs.impl</name>
-    <value>org.apache.hadoop.fs.ozone.OzoneFileSystem</value>
+    <value>org.apache.hadoop.fs.ozone.BasicOzoneFileSystem</value>
 
 
 ```
 
+_Note_: You may also use `org.apache.hadoop.fs.ozone.OzoneFileSystem` without 
the `Basic` prefix. The `Basic` version doesn't support FS statistics and 
encryption zones but can work together with older hadoop versions.
+
 Copy the `ozonefs.jar` file from an ozone distribution (__use the legacy 
version!__)
 
 ```
@@ -134,7 +136,7 @@ Write down the ozone filesystem uri as it should be used 
with the spark-submit c
 
 ```
 kubectl create serviceaccount spark -n yournamespace
-kubectl create clusterrolebinding spark-role --clusterrole=edit 
--serviceaccount=poc:yournamespace --namespace=yournamespace
+kubectl create clusterrolebinding spark-role --clusterrole=edit 
--serviceaccount=yournamespace:spark --namespace=yournamespace
 ```
 ## Execute the job
 
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
index d5baf9d..0e6d483 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
@@ -83,7 +83,7 @@ public class OMFailoverProxyProvider implements
   /**
* Class to store proxy information.
*/
-  public final class OMProxyInfo
+  public class OMProxyInfo
   extends FailoverProxyProvider.ProxyInfo {
 private InetSocketAddress address;
 private Text dtService;
diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching 
b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
index 9a470d5..840abbc 100755
--- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
+++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
@@ -114,6 +114,7 @@ run cp 
"${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore
 cp -r "${ROOT}/hadoop-hdds/docs/target/classes/docs" ./
 
 #Copy docker compose files
-run cp -p -R "${ROOT}/hadoop-ozone/dist/src/main/compose" .
+#compose files are preprocessed: properties (eg. project.version) are replaced 
first by maven.
+run cp -p -R "${ROOT}/hadoop-ozone/dist/target/compose" .
 run cp -p -r "${ROOT}/hadoop-ozone/dist/src/main/smoketest" .
 run cp -p -r "${ROOT}/hadoop-ozone/dist/src/main/blockade" .
diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml
index 5e9cbf5..a9cc6b7 100644
--- a/hadoop-ozone/dist/pom.xml
+++ b/hadoop-ozone/dist/pom.xml
@@ -121,6 +121,28 @@
 
   
   
+maven-resources-plugin
+3.1.0
+
+  
+copy-resources
+compile

[hadoop] branch trunk updated: HADOOP-16197 S3AUtils.translateException to map CredentialInitializationException to AccessDeniedException

2019-04-04 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 215ffc7  HADOOP-16197 S3AUtils.translateException to map 
CredentialInitializationException to AccessDeniedException
215ffc7 is described below

commit 215ffc792ef4be599e9434ee64f958744d2508f1
Author: Steve Loughran 
AuthorDate: Thu Apr 4 21:14:18 2019 +0100

HADOOP-16197 S3AUtils.translateException to map 
CredentialInitializationException to AccessDeniedException

Contributed by Steve Loughran.

Change-Id: Ie98ca5210bf0009f297edbcacf1fc6dfe5ea70cd.
---
 .../hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java| 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index f323554..8d204d7 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.s3a.auth.IAMInstanceCredentialsProvider;
-import org.apache.hadoop.fs.s3a.auth.NoAuthWithAWSException;
 import org.apache.hadoop.fs.s3native.S3xLoginHelper;
 import org.apache.hadoop.net.ConnectTimeoutException;
 import org.apache.hadoop.security.ProviderUtils;
@@ -193,7 +192,7 @@ public final class S3AUtils {
 // call considered an sign of connectivity failure
 return (EOFException)new EOFException(message).initCause(exception);
   }
-  if (exception instanceof NoAuthWithAWSException) {
+  if (exception instanceof CredentialInitializationException) {
 // the exception raised by AWSCredentialProvider list if the
 // credentials were not accepted.
 return (AccessDeniedException)new AccessDeniedException(path, null,
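
A compilable sketch of the mapping rule above; the exception type is stood in by a local class so the fragment is self-contained, and the real S3AUtils.translateException handles many more cases:

    import java.nio.file.AccessDeniedException;

    public class TranslateDemo {
      // Local stand-in for org.apache.hadoop.fs.s3a.CredentialInitializationException.
      static class CredentialInitializationException extends RuntimeException {
        CredentialInitializationException(String msg) { super(msg); }
      }

      static Exception translate(String path, Exception ex) {
        if (ex instanceof CredentialInitializationException) {
          // Credentials were not accepted -> surface as access denied.
          return (AccessDeniedException) new AccessDeniedException(
              path, null, ex.toString()).initCause(ex);
        }
        return ex;
      }

      public static void main(String[] args) {
        System.out.println(translate("s3a://bucket/key",
            new CredentialInitializationException("no credentials")));
      }
    }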





[hadoop] branch branch-3.1 updated: HADOOP-16208. Do Not Log InterruptedException in Client.

2019-04-04 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 41f1bc2  HADOOP-16208. Do Not Log InterruptedException in Client.
41f1bc2 is described below

commit 41f1bc20566584dfd97f7d82d7f46e96aa11f3ce
Author: David Mollitor 
AuthorDate: Thu Apr 4 21:13:09 2019 +0100

HADOOP-16208. Do Not Log InterruptedException in Client.

Contributed by David Mollitor.

(cherry picked from commit c90736350ba158c7872a39426e7a29c5e5e0bb48)
---
 .../hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 07a2f13..c6ac732 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1409,10 +1409,12 @@ public class Client implements AutoCloseable {
 connection.sendRpcRequest(call); // send the rpc 
request
   } catch (RejectedExecutionException e) {
 throw new IOException("connection has been closed", e);
-  } catch (InterruptedException e) {
+  } catch (InterruptedException ie) {
 Thread.currentThread().interrupt();
-LOG.warn("interrupted waiting to send rpc request to server", e);
-throw new IOException(e);
+IOException ioe = new InterruptedIOException(
+"Interrupted waiting to send RPC request to server");
+ioe.initCause(ie);
+throw ioe;
   }
 } catch(Exception e) {
   if (isAsynchronousMode()) {
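
The pattern adopted above restores the thread's interrupt flag and converts the InterruptedException into an InterruptedIOException instead of logging it and wrapping it in a bare IOException. A self-contained sketch (the blocking send is simulated; names are illustrative):

    import java.io.IOException;
    import java.io.InterruptedIOException;

    public class InterruptDemo {
      static void sendRpcRequest() throws IOException {
        try {
          Thread.sleep(10);  // stands in for the blocking send
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();  // preserve interrupt status
          IOException ioe = new InterruptedIOException(
              "Interrupted waiting to send RPC request to server");
          ioe.initCause(ie);
          throw ioe;
        }
      }

      public static void main(String[] args) {
        Thread.currentThread().interrupt();  // simulate an interrupted caller
        try {
          sendRpcRequest();
        } catch (IOException expected) {
          System.out.println(expected.getMessage());
        }
      }
    }

The same commit lands on trunk and branch-3.2 later in this digest.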





[hadoop] branch trunk updated: YARN-9441. Updated YARN app catalog name for consistency. Contributed by Weiwei Yang

2019-04-04 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2bf82ae  YARN-9441.  Updated YARN app catalog name for consistency.
 Contributed by Weiwei Yang
2bf82ae is described below

commit 2bf82aee821a1737aa70feb24481454e626abb8c
Author: Eric Yang 
AuthorDate: Thu Apr 4 13:21:30 2019 -0400

YARN-9441.  Updated YARN app catalog name for consistency.
Contributed by Weiwei Yang
---
 .../hadoop-yarn-applications-catalog-docker/pom.xml | 2 +-
 .../hadoop-yarn-applications-catalog-webapp/pom.xml | 2 +-
 .../hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml   | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml
index 7c0d3ef..c7129f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml
@@ -26,7 +26,7 @@
     <version>3.3.0-SNAPSHOT</version>
   </parent>
 
-  <name>YARN Application Catalog Docker Image</name>
+  <name>Apache Hadoop YARN Application Catalog Docker Image</name>
   <url>http://maven.apache.org</url>
   
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml
index 35beb5c..58646bc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml
@@ -26,7 +26,7 @@
     <version>3.3.0-SNAPSHOT</version>
   </parent>
 
-  <name>YARN Application Catalog Webapp</name>
+  <name>Apache Hadoop YARN Application Catalog Webapp</name>
 
   <url>http://hadoop.apache.org</url>
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml
index 770bf24..da395b8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml
@@ -26,7 +26,7 @@
     <artifactId>hadoop-yarn-applications-catalog</artifactId>
     <packaging>pom</packaging>
 
-    <name>YARN Application Catalog</name>
+    <name>Apache Hadoop YARN Application Catalog</name>
 
     <url>http://hadoop.apache.org</url>
 





[hadoop] branch trunk updated: YARN-9396. Fixed duplicated RM Container created event to ATS. Contributed by Prabhu Joseph

2019-04-04 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 8d15006  YARN-9396.  Fixed duplicated RM Container created event to 
ATS. Contributed by Prabhu Joseph
8d15006 is described below

commit 8d150067e2e4d6c15b319d250e2e1b804066b6b6
Author: Eric Yang 
AuthorDate: Thu Apr 4 13:01:56 2019 -0400

YARN-9396.  Fixed duplicated RM Container created event to ATS.
Contributed by Prabhu Joseph
---
 .../yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java| 5 -
 .../server/resourcemanager/rmcontainer/TestRMContainerImpl.java | 6 +++---
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 1185170..004c170 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -608,7 +608,10 @@ public class RMContainerImpl implements RMContainer {
   container.eventHandler.handle(new RMAppRunningOnNodeEvent(container
   .getApplicationAttemptId().getApplicationId(), container.nodeId));
 
-  publishNonAMContainerEventstoATS(container);
+  // Opportunistic containers move directly from NEW to ACQUIRED
+  if (container.getState() == RMContainerState.NEW) {
+publishNonAMContainerEventstoATS(container);
+  }
 }
   }
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
index 2dfbf20..256bd94 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
@@ -150,7 +150,7 @@ public class TestRMContainerImpl {
 RMContainerEventType.LAUNCHED));
 drainDispatcher.await();
 assertEquals(RMContainerState.RUNNING, rmContainer.getState());
-verify(publisher, times(2)).containerCreated(any(RMContainer.class),
+verify(publisher, times(1)).containerCreated(any(RMContainer.class),
 anyLong());
 
assertEquals("http://host:3465/node/containerlogs/container_1_0001_01_01/user";,
 rmContainer.getLogURL());
@@ -253,7 +253,7 @@ public class TestRMContainerImpl {
 RMContainerEventType.ACQUIRED));
 drainDispatcher.await();
 assertEquals(RMContainerState.ACQUIRED, rmContainer.getState());
-verify(publisher, times(2)).containerCreated(any(RMContainer.class),
+verify(publisher, times(1)).containerCreated(any(RMContainer.class),
 anyLong());
 
 rmContainer.handle(new RMContainerEvent(containerId,
@@ -345,7 +345,7 @@ public class TestRMContainerImpl {
 // RMContainer should be publishing system metrics for all containers.
 // Since there is 1 AM container and 1 non-AM container, there should be 2
 // container created events and 2 container finished events.
-verify(publisher, times(4)).containerCreated(any(RMContainer.class),
+verify(publisher, times(2)).containerCreated(any(RMContainer.class),
 anyLong());
 verify(publisher, times(2)).containerFinished(any(RMContainer.class), 
anyLong());
   }
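
The guard above publishes the ATS container-created event only while the container is still in the NEW state; opportunistic containers move straight from NEW to ACQUIRED, so without the guard the event fired twice (which is why the test expectations drop from times(2)/times(4) to times(1)/times(2)). A toy model of the dedup guard, with invented names:

    public class EventGuardDemo {
      enum State { NEW, ALLOCATED, ACQUIRED, RUNNING }

      static int createdEvents = 0;

      // Publish "created" only when the container arrives from NEW, so a
      // second pass through this transition cannot emit a duplicate.
      static void onTransition(State stateBeforeTransition) {
        if (stateBeforeTransition == State.NEW) {
          createdEvents++;  // stand-in for publishNonAMContainerEventstoATS()
        }
      }

      public static void main(String[] args) {
        onTransition(State.NEW);        // first pass: event published
        onTransition(State.ALLOCATED);  // later pass: no duplicate
        System.out.println("created events: " + createdEvents);  // 1
      }
    }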





[hadoop] branch trunk updated: HADOOP-16208. Do Not Log InterruptedException in Client.

2019-04-04 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c907363  HADOOP-16208. Do Not Log InterruptedException in Client.
c907363 is described below

commit c90736350ba158c7872a39426e7a29c5e5e0bb48
Author: David Mollitor 
AuthorDate: Thu Apr 4 16:15:57 2019 +0100

HADOOP-16208. Do Not Log InterruptedException in Client.

Contributed by David Mollitor.
---
 .../hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 0121967..3afa6d8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1445,10 +1445,12 @@ public class Client implements AutoCloseable {
 connection.sendRpcRequest(call); // send the rpc 
request
   } catch (RejectedExecutionException e) {
 throw new IOException("connection has been closed", e);
-  } catch (InterruptedException e) {
+  } catch (InterruptedException ie) {
 Thread.currentThread().interrupt();
-LOG.warn("interrupted waiting to send rpc request to server", e);
-throw new IOException(e);
+IOException ioe = new InterruptedIOException(
+"Interrupted waiting to send RPC request to server");
+ioe.initCause(ie);
+throw ioe;
   }
 } catch(Exception e) {
   if (isAsynchronousMode()) {





[hadoop] branch branch-3.2 updated: HADOOP-16208. Do Not Log InterruptedException in Client.

2019-04-04 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 50fc957  HADOOP-16208. Do Not Log InterruptedException in Client.
50fc957 is described below

commit 50fc9577c0296d6c89cac7608bf3689f00b17370
Author: David Mollitor 
AuthorDate: Thu Apr 4 16:16:57 2019 +0100

HADOOP-16208. Do Not Log InterruptedException in Client.

Contributed by David Mollitor.

(cherry picked from commit c90736350ba158c7872a39426e7a29c5e5e0bb48)
---
 .../hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 07a2f13..c6ac732 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1409,10 +1409,12 @@ public class Client implements AutoCloseable {
 connection.sendRpcRequest(call); // send the rpc 
request
   } catch (RejectedExecutionException e) {
 throw new IOException("connection has been closed", e);
-  } catch (InterruptedException e) {
+  } catch (InterruptedException ie) {
 Thread.currentThread().interrupt();
-LOG.warn("interrupted waiting to send rpc request to server", e);
-throw new IOException(e);
+IOException ioe = new InterruptedIOException(
+"Interrupted waiting to send RPC request to server");
+ioe.initCause(ie);
+throw ioe;
   }
 } catch(Exception e) {
   if (isAsynchronousMode()) {





[hadoop] branch trunk updated: HDDS-1353 : Metrics scm_pipeline_metrics_num_pipeline_creation_failed keeps increasing because of BackgroundPipelineCreator. (#681)

2019-04-04 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 993f36e  HDDS-1353 : Metrics 
scm_pipeline_metrics_num_pipeline_creation_failed keeps increasing because of 
BackgroundPipelineCreator. (#681)
993f36e is described below

commit 993f36ee3341a32945ee54644fe8cc76065135a6
Author: avijayanhwx <14299376+avijayan...@users.noreply.github.com>
AuthorDate: Thu Apr 4 06:48:32 2019 -0700

HDDS-1353 : Metrics scm_pipeline_metrics_num_pipeline_creation_failed keeps 
increasing because of BackgroundPipelineCreator. (#681)
---
 .../pipeline/InsufficientDatanodesException.java   | 36 +
 .../hdds/scm/pipeline/RatisPipelineProvider.java   |  2 +-
 .../hdds/scm/pipeline/SCMPipelineManager.java  |  2 +
 .../hdds/scm/pipeline/TestSCMPipelineManager.java  | 61 ++
 4 files changed, 100 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/InsufficientDatanodesException.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/InsufficientDatanodesException.java
new file mode 100644
index 000..a6a5a69
--- /dev/null
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/InsufficientDatanodesException.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.pipeline;
+
+import java.io.IOException;
+
+/**
+ * Exception thrown when there are not enough Datanodes to create a pipeline.
+ */
+public class InsufficientDatanodesException extends IOException {
+
+
+  public InsufficientDatanodesException() {
+    super();
+  }
+
+  public InsufficientDatanodesException(String message) {
+    super(message);
+  }
+}
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
index 6952200..6563e3f 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
@@ -105,7 +105,7 @@ public class RatisPipelineProvider implements 
PipelineProvider {
   String e = String
   .format("Cannot create pipeline of factor %d using %d nodes.",
   factor.getNumber(), dns.size());
-  throw new IOException(e);
+  throw new InsufficientDatanodesException(e);
 }
 
 Pipeline pipeline = Pipeline.newBuilder()
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
index f274829..c72a528 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
@@ -152,6 +152,8 @@ public class SCMPipelineManager implements PipelineManager {
   nodeManager.addPipeline(pipeline);
   metrics.incNumPipelineCreated();
   return pipeline;
+} catch (InsufficientDatanodesException idEx) {
+  throw idEx;
 } catch (IOException ex) {
   metrics.incNumPipelineCreationFailed();
   throw ex;
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
index 990d73a..53e968b 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
+import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
+import static org.apache.hadoop.test.Me
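
The SCMPipelineManager change above rethrows the new InsufficientDatanodesException before the generic IOException handler, so a transient shortage of datanodes no longer increments the pipeline-creation-failure counter. A condensed sketch of the pattern (counter and method simplified for illustration):

    import java.io.IOException;

    public class PipelineMetricDemo {
      static class InsufficientDatanodesException extends IOException {
        InsufficientDatanodesException(String m) { super(m); }
      }

      static int numPipelineCreationFailed = 0;

      static void createPipeline(int available, int factor) throws IOException {
        try {
          if (available < factor) {
            throw new InsufficientDatanodesException(String.format(
                "Cannot create pipeline of factor %d using %d nodes.",
                factor, available));
          }
          // ... allocate the pipeline ...
        } catch (InsufficientDatanodesException idEx) {
          throw idEx;                   // transient: bypass the failure metric
        } catch (IOException ex) {
          numPipelineCreationFailed++;  // only genuine failures are counted
          throw ex;
        }
      }
    }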

[hadoop] 01/01: upgrade

2019-04-04 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch HDDS-1383
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 84e9243f1a6ca1ed6104e44e6a22cfcb2a9554c4
Author: Márton Elek 
AuthorDate: Tue Apr 2 12:41:52 2019 +0200

upgrade
---
 hadoop-ozone/common/src/main/bin/ozone |  5 ++
 hadoop-ozone/dist/pom.xml  | 11 +
 hadoop-ozone/pom.xml   |  6 +++
 hadoop-ozone/upgrade/pom.xml   | 54 ++
 .../org/apache/hadoop/ozone/upgrade/Balance.java   | 38 +++
 .../org/apache/hadoop/ozone/upgrade/Execute.java   | 37 +++
 .../hadoop/ozone/upgrade/InPlaceUpgrade.java   | 39 
 .../java/org/apache/hadoop/ozone/upgrade/Plan.java | 38 +++
 8 files changed, 228 insertions(+)

diff --git a/hadoop-ozone/common/src/main/bin/ozone 
b/hadoop-ozone/common/src/main/bin/ozone
index a6ad1eb..26906c4 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -52,6 +52,7 @@ function hadoop_usage
   hadoop_add_subcommand "s3" client "command line interface for s3 related 
operations"
   hadoop_add_subcommand "version" client "print the version"
   hadoop_add_subcommand "dtutil" client "operations related to delegation 
tokens"
+  hadoop_add_subcommand "upgrade" client "HDFS to Ozone in-place upgrade tool"
 
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
 }
@@ -174,6 +175,10 @@ function ozonecmd_case
   HADOOP_CLASSNAME=org.apache.hadoop.security.token.DtUtilShell
   OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools"
 ;;
+upgrade)
+  HADOOP_CLASSNAME=org.apache.hadoop.ozone.upgrade.InPlaceUpgrade
+  OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-upgrade"
+;;
 *)
   HADOOP_CLASSNAME="${subcmd}"
   if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then
diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml
index dc9aab2..4b8dcf0 100644
--- a/hadoop-ozone/dist/pom.xml
+++ b/hadoop-ozone/dist/pom.xml
@@ -110,6 +110,13 @@
           <classifier>classpath</classifier>
           <destFileName>hadoop-ozone-recon.classpath</destFileName>
         </artifactItem>
+        <artifactItem>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-ozone-upgrade</artifactId>
+          <version>${ozone.version}</version>
+          <classifier>classpath</classifier>
+          <destFileName>hadoop-ozone-upgrade.classpath</destFileName>
+        </artifactItem>
       </artifactItems>
     </configuration>
   </execution>
@@ -233,5 +240,9 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-docs</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-upgrade</artifactId>
+    </dependency>
   </dependencies>
 </project>
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 7010878..c24e511 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -50,6 +50,7 @@
     <module>dist</module>
     <module>ozone-recon</module>
     <module>ozone-recon-codegen</module>
+    <module>upgrade</module>
   </modules>
 
   
@@ -167,6 +168,11 @@
   
   
         <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-ozone-upgrade</artifactId>
+        <version>${ozone.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-hdds-container-service</artifactId>
         <version>${hdds.version}</version>
         <type>test-jar</type>
diff --git a/hadoop-ozone/upgrade/pom.xml b/hadoop-ozone/upgrade/pom.xml
new file mode 100644
index 000..072f2b1
--- /dev/null
+++ b/hadoop-ozone/upgrade/pom.xml
@@ -0,0 +1,54 @@
+
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+         http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-ozone</artifactId>
+    <version>0.5.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>hadoop-ozone-upgrade</artifactId>
+  <version>0.5.0-SNAPSHOT</version>
+  <name>Apache Hadoop Ozone In-Place Upgrade</name>
+  <description>Apache Hadoop Ozone In-Place Upgrade</description>
+  <packaging>jar</packaging>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.code.findbugs</groupId>
+      <artifactId>findbugs</artifactId>
+      <version>3.0.1</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-core</artifactId>
+      <version>2.15.0</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+</project>
diff --git 
a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Balance.java
 
b/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Balance.java
new file mode 100644
index 000..2e034d8
--- /dev/null
+++ 
b/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Balance.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,

[hadoop] branch HDDS-1383 created (now 84e9243)

2019-04-04 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a change to branch HDDS-1383
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at 84e9243  upgrade

This branch includes the following new commits:

 new 84e9243  upgrade

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.






[hadoop] branch branch-3.2 updated: HDFS-14389. getAclStatus returns incorrect permissions and owner when an iNodeAttributeProvider is configured. Contributed by Stephen O'Donnell.

2019-04-04 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 388f445  HDFS-14389. getAclStatus returns incorrect permissions and 
owner when an iNodeAttributeProvider is configured. Contributed by Stephen 
O'Donnell.
388f445 is described below

commit 388f445dde577999b2d81f809adcfca8f0958499
Author: Stephen O'Donnell 
AuthorDate: Thu Apr 4 04:04:08 2019 -0700

HDFS-14389. getAclStatus returns incorrect permissions and owner when an 
iNodeAttributeProvider is configured. Contributed by Stephen O'Donnell.

Signed-off-by: Wei-Chiu Chuang 
(cherry picked from commit c528e427aa6745434672b1c1850738795ad1d6d2)
---
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java|  9 +++---
 .../namenode/TestINodeAttributeProvider.java   | 33 ++
 2 files changed, 37 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 8d77f89..31dc51a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -163,12 +163,11 @@ class FSDirAclOp {
   if (iip.isDotSnapshotDir() && fsd.getINode4DotSnapshot(iip) != null) {
 return new AclStatus.Builder().owner("").group("").build();
   }
-  INode inode = FSDirectory.resolveLastINode(iip);
-  int snapshotId = iip.getPathSnapshotId();
-  List acl = AclStorage.readINodeAcl(fsd.getAttributes(iip));
-  FsPermission fsPermission = inode.getFsPermission(snapshotId);
+  INodeAttributes inodeAttrs = fsd.getAttributes(iip);
+  List<AclEntry> acl = AclStorage.readINodeAcl(inodeAttrs);
+  FsPermission fsPermission = inodeAttrs.getFsPermission();
   return new AclStatus.Builder()
-  .owner(inode.getUserName()).group(inode.getGroupName())
+  .owner(inodeAttrs.getUserName()).group(inodeAttrs.getGroupName())
   .stickyBit(fsPermission.getStickyBit())
   .setPermission(fsPermission)
   .addEntries(acl).build();
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
index b3bab06..788ee30 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
@@ -405,4 +405,37 @@ public class TestINodeAttributeProvider {
   return null;
 });
   }
+
+  @Test
+  // HDFS-14389 - Ensure getAclStatus returns the owner, group and permissions
+  // from the Attribute Provider, and not from HDFS.
+  public void testGetAclStatusReturnsProviderOwnerPerms() throws Exception {
+FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
+final Path userPath = new Path("/user");
+final Path authz = new Path("/user/authz");
+final Path authzChild = new Path("/user/authz/child2");
+
+fs.mkdirs(userPath);
+fs.setPermission(userPath, new FsPermission(HDFS_PERMISSION));
+fs.mkdirs(authz);
+fs.setPermission(authz, new FsPermission(HDFS_PERMISSION));
+fs.mkdirs(authzChild);
+fs.setPermission(authzChild, new FsPermission(HDFS_PERMISSION));
+UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1",
+new String[]{"g1"});
+ugi.doAs(new PrivilegedExceptionAction<Void>() {
+  @Override
+  public Void run() throws Exception {
+FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
+Assert.assertEquals(PROVIDER_PERMISSION,
+fs.getFileStatus(authzChild).getPermission().toShort());
+
+Assert.assertEquals("foo", fs.getAclStatus(authzChild).getOwner());
+Assert.assertEquals("bar", fs.getAclStatus(authzChild).getGroup());
+Assert.assertEquals(PROVIDER_PERMISSION,
+fs.getAclStatus(authzChild).getPermission().toShort());
+return null;
+  }
+});
+  }
 }
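
The fix above boils down to resolving INodeAttributes once via
fsd.getAttributes(iip) and reading owner, group and permissions from that
single object, so getAclStatus can no longer mix provider-supplied ACL
entries with raw-HDFS ownership. For orientation, a minimal sketch of the
kind of provider the test exercises follows. It is not part of the patch:
the class name, the path test and the dynamic proxy are illustrative, and
only start/stop/getAttributes are assumed from the INodeAttributeProvider
contract.

import java.lang.reflect.Proxy;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;

public class StaticOwnerProvider extends INodeAttributeProvider {
  @Override
  public void start() { }  // no external authz service in this sketch

  @Override
  public void stop() { }

  @Override
  public INodeAttributes getAttributes(String[] pathElements,
      final INodeAttributes inode) {
    // Assumed path shape: components like {"user", "authz", "child2"}.
    if (pathElements.length < 2 || !"user".equals(pathElements[0])
        || !"authz".equals(pathElements[1])) {
      return inode;  // outside /user/authz, defer to what HDFS stores
    }
    // Forward everything to the stored attributes except owner, group
    // and permissions, which this provider overrides.
    return (INodeAttributes) Proxy.newProxyInstance(
        INodeAttributes.class.getClassLoader(),
        new Class<?>[] {INodeAttributes.class},
        (proxy, method, args) -> {
          switch (method.getName()) {
          case "getUserName":
            return "foo";
          case "getGroupName":
            return "bar";
          case "getFsPermission":
            return new FsPermission((short) 0770);
          case "getFsPermissionShort":
            return (short) 0770;
          default:
            return method.invoke(inode, args);
          }
        });
  }
}

With such a provider in place, the old code produced an AclStatus whose
entries came from the provider but whose owner/group/permission came from
the raw INode; the new code returns "foo"/"bar" and the provider's
permissions consistently, which is exactly what the added test asserts.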


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: HDFS-14389. getAclStatus returns incorrect permissions and owner when an iNodeAttributeProvider is configured. Contributed by Stephen O'Donnell.

2019-04-04 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new d989901  HDFS-14389. getAclStatus returns incorrect permissions and 
owner when an iNodeAttributeProvider is configured. Contributed by Stephen 
O'Donnell.
d989901 is described below

commit d9899015ebf8a27e9ac339d8a8b3c9d88bcbacb9
Author: Stephen O'Donnell 
AuthorDate: Thu Apr 4 04:04:08 2019 -0700

HDFS-14389. getAclStatus returns incorrect permissions and owner when an 
iNodeAttributeProvider is configured. Contributed by Stephen O'Donnell.

Signed-off-by: Wei-Chiu Chuang 
(cherry picked from commit c528e427aa6745434672b1c1850738795ad1d6d2)
(cherry picked from commit 388f445dde577999b2d81f809adcfca8f0958499)
---
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java|  9 +++---
 .../namenode/TestINodeAttributeProvider.java   | 33 ++
 2 files changed, 37 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 7b3471d..917708b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -153,12 +153,11 @@ class FSDirAclOp {
   if (iip.isDotSnapshotDir() && fsd.getINode4DotSnapshot(iip) != null) {
 return new AclStatus.Builder().owner("").group("").build();
   }
-  INode inode = FSDirectory.resolveLastINode(iip);
-  int snapshotId = iip.getPathSnapshotId();
-  List<AclEntry> acl = AclStorage.readINodeAcl(fsd.getAttributes(iip));
-  FsPermission fsPermission = inode.getFsPermission(snapshotId);
+  INodeAttributes inodeAttrs = fsd.getAttributes(iip);
+  List<AclEntry> acl = AclStorage.readINodeAcl(inodeAttrs);
+  FsPermission fsPermission = inodeAttrs.getFsPermission();
   return new AclStatus.Builder()
-  .owner(inode.getUserName()).group(inode.getGroupName())
+  .owner(inodeAttrs.getUserName()).group(inodeAttrs.getGroupName())
   .stickyBit(fsPermission.getStickyBit())
   .setPermission(fsPermission)
   .addEntries(acl).build();
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
index b3bab06..788ee30 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
@@ -405,4 +405,37 @@ public class TestINodeAttributeProvider {
   return null;
 });
   }
+
+  @Test
+  // HDFS-14389 - Ensure getAclStatus returns the owner, group and permissions
+  // from the Attribute Provider, and not from HDFS.
+  public void testGetAclStatusReturnsProviderOwnerPerms() throws Exception {
+FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
+final Path userPath = new Path("/user");
+final Path authz = new Path("/user/authz");
+final Path authzChild = new Path("/user/authz/child2");
+
+fs.mkdirs(userPath);
+fs.setPermission(userPath, new FsPermission(HDFS_PERMISSION));
+fs.mkdirs(authz);
+fs.setPermission(authz, new FsPermission(HDFS_PERMISSION));
+fs.mkdirs(authzChild);
+fs.setPermission(authzChild, new FsPermission(HDFS_PERMISSION));
+UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1",
+new String[]{"g1"});
+ugi.doAs(new PrivilegedExceptionAction<Void>() {
+  @Override
+  public Void run() throws Exception {
+FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
+Assert.assertEquals(PROVIDER_PERMISSION,
+fs.getFileStatus(authzChild).getPermission().toShort());
+
+Assert.assertEquals("foo", fs.getAclStatus(authzChild).getOwner());
+Assert.assertEquals("bar", fs.getAclStatus(authzChild).getGroup());
+Assert.assertEquals(PROVIDER_PERMISSION,
+fs.getAclStatus(authzChild).getPermission().toShort());
+return null;
+  }
+});
+  }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.0 updated: HDFS-14389. getAclStatus returns incorrect permissions and owner when an iNodeAttributeProvider is configured. Contributed by Stephen O'Donnell.

2019-04-04 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
 new 79714fd  HDFS-14389. getAclStatus returns incorrect permissions and 
owner when an iNodeAttributeProvider is configured. Contributed by Stephen 
O'Donnell.
79714fd is described below

commit 79714fdfabebb1216c2675bdaf4a386da5683454
Author: Stephen O'Donnell 
AuthorDate: Thu Apr 4 04:04:08 2019 -0700

HDFS-14389. getAclStatus returns incorrect permissions and owner when an 
iNodeAttributeProvider is configured. Contributed by Stephen O'Donnell.

Signed-off-by: Wei-Chiu Chuang 
(cherry picked from commit c528e427aa6745434672b1c1850738795ad1d6d2)
(cherry picked from commit 388f445dde577999b2d81f809adcfca8f0958499)
(cherry picked from commit d9899015ebf8a27e9ac339d8a8b3c9d88bcbacb9)
---
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java|  9 +++---
 .../namenode/TestINodeAttributeProvider.java   | 33 ++
 2 files changed, 37 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 7b3471d..917708b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -153,12 +153,11 @@ class FSDirAclOp {
   if (iip.isDotSnapshotDir() && fsd.getINode4DotSnapshot(iip) != null) {
 return new AclStatus.Builder().owner("").group("").build();
   }
-  INode inode = FSDirectory.resolveLastINode(iip);
-  int snapshotId = iip.getPathSnapshotId();
-  List<AclEntry> acl = AclStorage.readINodeAcl(fsd.getAttributes(iip));
-  FsPermission fsPermission = inode.getFsPermission(snapshotId);
+  INodeAttributes inodeAttrs = fsd.getAttributes(iip);
+  List<AclEntry> acl = AclStorage.readINodeAcl(inodeAttrs);
+  FsPermission fsPermission = inodeAttrs.getFsPermission();
   return new AclStatus.Builder()
-  .owner(inode.getUserName()).group(inode.getGroupName())
+  .owner(inodeAttrs.getUserName()).group(inodeAttrs.getGroupName())
   .stickyBit(fsPermission.getStickyBit())
   .setPermission(fsPermission)
   .addEntries(acl).build();
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
index b3bab06..788ee30 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
@@ -405,4 +405,37 @@ public class TestINodeAttributeProvider {
   return null;
 });
   }
+
+  @Test
+  // HDFS-14389 - Ensure getAclStatus returns the owner, group and permissions
+  // from the Attribute Provider, and not from HDFS.
+  public void testGetAclStatusReturnsProviderOwnerPerms() throws Exception {
+FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
+final Path userPath = new Path("/user");
+final Path authz = new Path("/user/authz");
+final Path authzChild = new Path("/user/authz/child2");
+
+fs.mkdirs(userPath);
+fs.setPermission(userPath, new FsPermission(HDFS_PERMISSION));
+fs.mkdirs(authz);
+fs.setPermission(authz, new FsPermission(HDFS_PERMISSION));
+fs.mkdirs(authzChild);
+fs.setPermission(authzChild, new FsPermission(HDFS_PERMISSION));
+UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1",
+new String[]{"g1"});
+ugi.doAs(new PrivilegedExceptionAction<Void>() {
+  @Override
+  public Void run() throws Exception {
+FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
+Assert.assertEquals(PROVIDER_PERMISSION,
+fs.getFileStatus(authzChild).getPermission().toShort());
+
+Assert.assertEquals("foo", fs.getAclStatus(authzChild).getOwner());
+Assert.assertEquals("bar", fs.getAclStatus(authzChild).getGroup());
+Assert.assertEquals(PROVIDER_PERMISSION,
+fs.getAclStatus(authzChild).getPermission().toShort());
+return null;
+  }
+});
+  }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-14389. getAclStatus returns incorrect permissions and owner when an iNodeAttributeProvider is configured. Contributed by Stephen O'Donnell.

2019-04-04 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d2637cb  HDFS-14389. getAclStatus returns incorrect permissions and 
owner when an iNodeAttributeProvider is configured. Contributed by Stephen 
O'Donnell.
d2637cb is described below

commit d2637cb1766674afb4a3d4627396f8981eaf9964
Author: Stephen O'Donnell 
AuthorDate: Thu Apr 4 04:04:08 2019 -0700

HDFS-14389. getAclStatus returns incorrect permissions and owner when an 
iNodeAttributeProvider is configured. Contributed by Stephen O'Donnell.

Signed-off-by: Wei-Chiu Chuang 
---
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java|  9 +++---
 .../namenode/TestINodeAttributeProvider.java   | 33 ++
 2 files changed, 37 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 8d77f89..31dc51a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -163,12 +163,11 @@ class FSDirAclOp {
   if (iip.isDotSnapshotDir() && fsd.getINode4DotSnapshot(iip) != null) {
 return new AclStatus.Builder().owner("").group("").build();
   }
-  INode inode = FSDirectory.resolveLastINode(iip);
-  int snapshotId = iip.getPathSnapshotId();
-  List<AclEntry> acl = AclStorage.readINodeAcl(fsd.getAttributes(iip));
-  FsPermission fsPermission = inode.getFsPermission(snapshotId);
+  INodeAttributes inodeAttrs = fsd.getAttributes(iip);
+  List<AclEntry> acl = AclStorage.readINodeAcl(inodeAttrs);
+  FsPermission fsPermission = inodeAttrs.getFsPermission();
   return new AclStatus.Builder()
-  .owner(inode.getUserName()).group(inode.getGroupName())
+  .owner(inodeAttrs.getUserName()).group(inodeAttrs.getGroupName())
   .stickyBit(fsPermission.getStickyBit())
   .setPermission(fsPermission)
   .addEntries(acl).build();
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
index b3bab06..788ee30 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
@@ -405,4 +405,37 @@ public class TestINodeAttributeProvider {
   return null;
 });
   }
+
+  @Test
+  // HDFS-14389 - Ensure getAclStatus returns the owner, group and permissions
+  // from the Attribute Provider, and not from HDFS.
+  public void testGetAclStatusReturnsProviderOwnerPerms() throws Exception {
+FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
+final Path userPath = new Path("/user");
+final Path authz = new Path("/user/authz");
+final Path authzChild = new Path("/user/authz/child2");
+
+fs.mkdirs(userPath);
+fs.setPermission(userPath, new FsPermission(HDFS_PERMISSION));
+fs.mkdirs(authz);
+fs.setPermission(authz, new FsPermission(HDFS_PERMISSION));
+fs.mkdirs(authzChild);
+fs.setPermission(authzChild, new FsPermission(HDFS_PERMISSION));
+UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1",
+new String[]{"g1"});
+ugi.doAs(new PrivilegedExceptionAction<Void>() {
+  @Override
+  public Void run() throws Exception {
+FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
+Assert.assertEquals(PROVIDER_PERMISSION,
+fs.getFileStatus(authzChild).getPermission().toShort());
+
+Assert.assertEquals("foo", fs.getAclStatus(authzChild).getOwner());
+Assert.assertEquals("bar", fs.getAclStatus(authzChild).getGroup());
+Assert.assertEquals(PROVIDER_PERMISSION,
+fs.getAclStatus(authzChild).getPermission().toShort());
+return null;
+  }
+});
+  }
 }
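
For completeness: an attribute provider only takes effect when the
NameNode is pointed at it. The wiring is a single standard HDFS key
(the provider class name below refers to the hypothetical sketch shown
after the branch-3.2 copy of this commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

Configuration conf = new Configuration();
// dfs.namenode.inode.attributes.provider.class
conf.set(DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY,
    "org.example.StaticOwnerProvider");

Without the fix above, a NameNode configured this way answered
getFileStatus with provider attributes but getAclStatus with a mix of
provider ACL entries and raw-HDFS owner/group/permissions.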


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1349. Remove watchClient from XceiverClientRatis. Contributed by Shashikant Banerjee.

2019-04-04 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 33e159b  HDDS-1349. Remove watchClient from XceiverClientRatis. 
Contributed by Shashikant Banerjee.
33e159b is described below

commit 33e159bf8de8781f6fd0bed3b6e0cfb2ed32c29d
Author: Shashikant Banerjee 
AuthorDate: Thu Apr 4 16:29:38 2019 +0530

HDDS-1349. Remove watchClient from XceiverClientRatis. Contributed by 
Shashikant Banerjee.
---
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java | 28 ++
 1 file changed, 2 insertions(+), 26 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index a2e65e2..899bba8 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -100,9 +100,6 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
   // Map to track commit index at every server
   private final ConcurrentHashMap<UUID, Long> commitInfoMap;
 
-  // create a separate RaftClient for watchForCommit API
-  private RaftClient watchClient;
-
   private XceiverClientMetrics metrics;
 
   /**
@@ -117,7 +114,6 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 this.maxOutstandingRequests = maxOutStandingChunks;
 this.retryPolicy = retryPolicy;
 commitInfoMap = new ConcurrentHashMap<>();
-watchClient = null;
 this.tlsConfig = tlsConfig;
 this.clientRequestTimeout = timeout;
 metrics = XceiverClientManager.getXceiverClientMetrics();
@@ -187,9 +183,6 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 if (c != null) {
   closeRaftClient(c);
 }
-if (watchClient != null) {
-  closeRaftClient(watchClient);
-}
   }
 
   private void closeRaftClient(RaftClient raftClient) {
@@ -255,31 +248,14 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
   return clientReply;
 }
 LOG.debug("commit index : {} watch timeout : {}", index, timeout);
-// create a new RaftClient instance for watch request
-if (watchClient == null) {
-  watchClient =
-  RatisHelper.newRaftClient(rpcType, getPipeline(), retryPolicy,
-  maxOutstandingRequests, tlsConfig, clientRequestTimeout);
-}
-CompletableFuture<RaftClientReply> replyFuture = watchClient
+CompletableFuture<RaftClientReply> replyFuture = getClient()
 .sendWatchAsync(index, RaftProtos.ReplicationLevel.ALL_COMMITTED);
 RaftClientReply reply;
 try {
   replyFuture.get(timeout, TimeUnit.MILLISECONDS);
 } catch (TimeoutException toe) {
   LOG.warn("3 way commit failed ", toe);
-
-  closeRaftClient(watchClient);
-  // generate a new raft client instance again so that next watch request
-  // does not get blocked for the previous one
-
-  // TODO : need to remove the code to create the new RaftClient instance
-  // here once the watch request bypassing sliding window in Raft Client
-  // gets fixed.
-  watchClient =
-  RatisHelper.newRaftClient(rpcType, getPipeline(), retryPolicy,
-  maxOutstandingRequests, tlsConfig, clientRequestTimeout);
-  reply = watchClient
+  reply = getClient()
   .sendWatchAsync(index, 
RaftProtos.ReplicationLevel.MAJORITY_COMMITTED)
   .get(timeout, TimeUnit.MILLISECONDS);
   List<RaftProtos.CommitInfoProto> commitInfoProtoList =


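The net effect, with watchClient removed, is that watchForCommit rides on
the same RaftClient as every other request and keeps only the
replication-level fallback. A condensed sketch of that retained control
flow, with names taken from the visible diff (a paraphrase, not the
verbatim method):

RaftClientReply watchForCommit(long index, long timeout) throws Exception {
  try {
    // Strongest guarantee first: every replica has committed the index.
    return getClient()
        .sendWatchAsync(index, RaftProtos.ReplicationLevel.ALL_COMMITTED)
        .get(timeout, TimeUnit.MILLISECONDS);
  } catch (TimeoutException toe) {
    LOG.warn("3 way commit failed ", toe);
    // Fall back to a majority so the write path keeps moving; a lagging
    // replica is handled later by the usual replication machinery.
    return getClient()
        .sendWatchAsync(index, RaftProtos.ReplicationLevel.MAJORITY_COMMITTED)
        .get(timeout, TimeUnit.MILLISECONDS);
  }
}
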
-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1207. Refactor Container Report Processing logic and plugin new Replication Manager. (#662)

2019-04-04 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 48a58bc  HDDS-1207. Refactor Container Report Processing logic and 
plugin new Replication Manager. (#662)
48a58bc is described below

commit 48a58bce37dfddf37a4a6888228f7c7fc80bccdd
Author: Nanda kumar 
AuthorDate: Thu Apr 4 16:32:59 2019 +0530

HDDS-1207. Refactor Container Report Processing logic and plugin new 
Replication Manager. (#662)
---
 .../hadoop/hdds/scm/container/ContainerInfo.java   |   7 -
 .../hdds/scm/chillmode/ChillModeHandler.java   |  36 +-
 .../container/AbstractContainerReportHandler.java  | 236 +
 .../hdds/scm/container/ContainerReportHandler.java | 234 -
 .../hdds/scm/container/ContainerStateManager.java  |  19 +-
 .../IncrementalContainerReportHandler.java |  47 +-
 .../hdds/scm/container/ReplicationManager.java |   9 +
 .../hdds/scm/container/ReportHandlerHelper.java| 365 -
 .../hdds/scm/container/SCMContainerManager.java|  23 +-
 .../scm/container/states/ContainerStateMap.java|   8 +-
 .../hadoop/hdds/scm/server/SCMConfigurator.java|   2 +-
 .../hdds/scm/server/StorageContainerManager.java   |  37 +-
 .../java/org/apache/hadoop/hdds/scm/TestUtils.java |   1 +
 .../hdds/scm/chillmode/TestChillModeHandler.java   |  28 +-
 .../scm/container/TestContainerReportHandler.java  | 585 -
 .../scm/container/TestContainerReportHelper.java   |  73 ---
 .../TestIncrementalContainerReportHandler.java | 158 +++---
 .../replication/TestReplicationActivityStatus.java |  85 ---
 .../scm/server/TestSCMClientProtocolServer.java|   8 +-
 19 files changed, 723 insertions(+), 1238 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
index 05d4e77..7b5c467 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
@@ -108,13 +108,6 @@ public class ContainerInfo implements Comparator<ContainerInfo>,
 this.replicationType = repType;
   }
 
-  public ContainerInfo(ContainerInfo info) {
-this(info.getContainerID(), info.getState(), info.getPipelineID(),
-info.getUsedBytes(), info.getNumberOfKeys(),
-info.getStateEnterTime(), info.getOwner(),
-info.getDeleteTransactionId(), info.getSequenceId(),
-info.getReplicationFactor(), info.getReplicationType());
-  }
   /**
* Needed for serialization findbugs.
*/
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeHandler.java
index 95e0d93..fff1fb2 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeHandler.java
@@ -20,8 +20,7 @@ package org.apache.hadoop.hdds.scm.chillmode;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.scm.block.BlockManager;
-import org.apache.hadoop.hdds.scm.container.replication.
-ReplicationActivityStatus;
+import org.apache.hadoop.hdds.scm.container.ReplicationManager;
 import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
 import 
org.apache.hadoop.hdds.scm.chillmode.SCMChillModeManager.ChillModeStatus;
 import org.apache.hadoop.hdds.server.events.EventHandler;
@@ -41,7 +40,7 @@ public class ChillModeHandler implements EventHandler<ChillModeStatus> {
   private final BlockManager scmBlockManager;
   private final long waitTime;
   private final AtomicBoolean isInChillMode = new AtomicBoolean(true);
-  private final ReplicationActivityStatus replicationActivityStatus;
+  private final ReplicationManager replicationManager;
 
 
   /**
@@ -49,27 +48,27 @@ public class ChillModeHandler implements EventHandler<ChillModeStatus> {
* @param configuration
* @param clientProtocolServer
* @param blockManager
-   * @param replicationStatus
+   * @param replicationManager
*/
   public ChillModeHandler(Configuration configuration,
   SCMClientProtocolServer clientProtocolServer,
   BlockManager blockManager,
-  ReplicationActivityStatus replicationStatus) {
+  ReplicationManager replicationManager) {
 Objects.requireNonNull(configuration, "Configuration cannot be null");
 Objects.requireNonNull(clientProtocolServer, "SCMClientProtocolServer " +
 "object cannot be null");
 Objects.requireNonNull(blockManager, "BlockManager object cannot be null");
-Objects.requireNonNull(replicationStatus, "Replic

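The archive cuts the HDDS-1207 diff off above, but the visible part of
ChillModeHandler shows the swap of ReplicationActivityStatus for the new
ReplicationManager. A rough sketch of how such a handler typically reacts
to a chill-mode status event; the shape is assumed since the event body is
truncated, and replicationManager.start() plus getChillModeStatus() are
hypothetical while isInChillMode and waitTime come from the visible
context:

@Override
public void onMessage(ChillModeStatus chillModeStatus,
    EventPublisher publisher) {
  // Record the new status for the dependent services to observe.
  isInChillMode.set(chillModeStatus.getChillModeStatus());
  if (!isInChillMode.get()) {
    // After leaving chill mode, wait out the configured grace period
    // before the new ReplicationManager starts issuing replicate and
    // delete commands based on freshly processed container reports.
    new Thread(() -> {
      try {
        Thread.sleep(waitTime);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
      replicationManager.start();
    }).start();
  }
}
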
[hadoop] branch branch-3.2 updated: YARN-9394. Use new API of RackResolver to get better performance. Contributed by Lantao Jin.

2019-04-04 Thread wwei
This is an automated email from the ASF dual-hosted git repository.

wwei pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 06cf8ce  YARN-9394. Use new API of RackResolver to get better 
performance. Contributed by Lantao Jin.
06cf8ce is described below

commit 06cf8ce30fe764d1fe6f57c858c7879e91ca8d1a
Author: Weiwei Yang 
AuthorDate: Thu Apr 4 18:05:01 2019 +0800

YARN-9394. Use new API of RackResolver to get better performance. 
Contributed by Lantao Jin.

(cherry picked from commit 945e8c60640ceb938ad8d27767d44eec53a15038)
---
 .../org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java | 7 ---
 .../yarn/client/api/impl/TestAMRMClientContainerRequest.java   | 4 ++--
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
index c05f7ac..84d67b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
@@ -40,6 +40,7 @@ import 
org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
@@ -763,10 +764,11 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
   private Set<String> resolveRacks(List<String> nodes) {
 Set<String> racks = new HashSet<String>();
 if (nodes != null) {
-  for (String node : nodes) {
+  List<Node> tmpList = RackResolver.resolve(nodes);
+  for (Node node : tmpList) {
+String rack = node.getNetworkLocation();
 // Ensure node requests are accompanied by requests for
 // corresponding rack
-String rack = RackResolver.resolve(node).getNetworkLocation();
 if (rack == null) {
   LOG.warn("Failed to resolve rack for node " + node + ".");
 } else {
@@ -774,7 +776,6 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
 }
   }
 }
-
 return racks;
   }
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java
index 595c3f7..5f77552 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.client.api.impl;
 
 import static org.junit.Assert.assertEquals;
 
-import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
@@ -254,7 +254,7 @@ public class TestAMRMClientContainerRequest {
 
 @Override
 public List<String> resolve(List<String> names) {
-  return Arrays.asList("/rack1");
+  return Collections.nCopies(names.size(), "/rack1");
 }
 
 @Override

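The performance point behind this change: RackResolver.resolve(String)
consults the topology mapping once per node, which with the common
script-based mapping can mean one script execution per node, while the
List overload hands the whole batch to DNSToSwitchMapping.resolve(List) in
a single call. The test tweak encodes the resulting contract that the
mapping must return exactly one rack per input name, in order. A trivial
standalone mapping honouring that contract (a hypothetical class, shown
only to illustrate the interface):

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.net.DNSToSwitchMapping;

public class SingleRackMapping implements DNSToSwitchMapping {
  @Override
  public List<String> resolve(List<String> names) {
    // One entry per input name; nCopies keeps the result aligned with
    // the request, which the old fixed Arrays.asList("/rack1") did not.
    return Collections.nCopies(names.size(), "/rack1");
  }

  @Override
  public void reloadCachedMappings() { }

  @Override
  public void reloadCachedMappings(List<String> names) { }
}
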

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9394. Use new API of RackResolver to get better performance. Contributed by Lantao Jin.

2019-04-04 Thread wwei
This is an automated email from the ASF dual-hosted git repository.

wwei pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 945e8c6  YARN-9394. Use new API of RackResolver to get better 
performance. Contributed by Lantao Jin.
945e8c6 is described below

commit 945e8c60640ceb938ad8d27767d44eec53a15038
Author: Weiwei Yang 
AuthorDate: Thu Apr 4 18:05:01 2019 +0800

YARN-9394. Use new API of RackResolver to get better performance. 
Contributed by Lantao Jin.
---
 .../org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java | 7 ---
 .../yarn/client/api/impl/TestAMRMClientContainerRequest.java   | 4 ++--
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
index c05f7ac..84d67b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
@@ -40,6 +40,7 @@ import 
org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
@@ -763,10 +764,11 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
   private Set<String> resolveRacks(List<String> nodes) {
 Set<String> racks = new HashSet<String>();
 if (nodes != null) {
-  for (String node : nodes) {
+  List<Node> tmpList = RackResolver.resolve(nodes);
+  for (Node node : tmpList) {
+String rack = node.getNetworkLocation();
 // Ensure node requests are accompanied by requests for
 // corresponding rack
-String rack = RackResolver.resolve(node).getNetworkLocation();
 if (rack == null) {
   LOG.warn("Failed to resolve rack for node " + node + ".");
 } else {
@@ -774,7 +776,6 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
 }
   }
 }
-
 return racks;
   }
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java
index 595c3f7..5f77552 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.client.api.impl;
 
 import static org.junit.Assert.assertEquals;
 
-import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
@@ -254,7 +254,7 @@ public class TestAMRMClientContainerRequest {
 
 @Override
 public List<String> resolve(List<String> names) {
-  return Arrays.asList("/rack1");
+  return Collections.nCopies(names.size(), "/rack1");
 }
 
 @Override


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1189. Recon Aggregate DB schema and ORM. Contributed by Siddharth Wagle.

2019-04-04 Thread yqlin
This is an automated email from the ASF dual-hosted git repository.

yqlin pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a92806d  HDDS-1189. Recon Aggregate DB schema and ORM. Contributed by 
Siddharth Wagle.
a92806d is described below

commit a92806d05a2eb1f586463fa07aa2f17ce9180401
Author: Yiqun Lin 
AuthorDate: Thu Apr 4 17:33:37 2019 +0800

HDDS-1189. Recon Aggregate DB schema and ORM. Contributed by Siddharth 
Wagle.
---
 .../common/src/main/resources/ozone-default.xml|  95 
 hadoop-hdds/tools/pom.xml  |   3 +-
 hadoop-ozone/ozone-recon-codegen/pom.xml   |  58 +++
 .../ozone/recon/codegen/JooqCodeGenerator.java | 170 +
 .../recon/codegen/ReconSchemaGenerationModule.java |  39 +
 .../ozone/recon/codegen/TableNamingStrategy.java   |  48 ++
 .../hadoop/ozone/recon/codegen/package-info.java   |  22 +++
 .../ozone/recon/schema/ReconSchemaDefinition.java  |  34 +
 .../recon/schema/UtilizationSchemaDefinition.java  |  69 +
 .../hadoop/ozone/recon/schema/package-info.java|  22 +++
 .../dev-support/findbugsExcludeFile.xml|  28 
 hadoop-ozone/ozone-recon/pom.xml   | 147 ++
 .../hadoop/ozone/recon/ReconControllerModule.java  | 102 -
 .../hadoop/ozone/recon/ReconServerConfigKeys.java  |  25 +++
 .../recon/persistence/DataSourceConfiguration.java |  86 +++
 .../persistence/DefaultDataSourceProvider.java |  74 +
 .../recon/persistence/JooqPersistenceModule.java   | 111 ++
 .../TransactionalMethodInterceptor.java|  76 +
 .../ozone/recon/persistence/package-info.java  |  22 +++
 .../recon/persistence/AbstractSqlDatabaseTest.java | 146 ++
 .../TestUtilizationSchemaDefinition.java   | 160 +++
 .../ozone/recon/persistence/package-info.java  |  22 +++
 hadoop-ozone/pom.xml   |   1 +
 23 files changed, 1524 insertions(+), 36 deletions(-)

diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 5580548..731bf28 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2394,4 +2394,99 @@
       If enabled, tracing information is sent to tracing server.
     </description>
   </property>
+  <property>
+    <name>ozone.recon.sql.db.driver</name>
+    <value>org.sqlite.JDBC</value>
+    <tag>OZONE, RECON</tag>
+    <description>
+      Database driver class name available on the
+      Ozone Recon classpath.
+    </description>
+  </property>
+  <property>
+    <name>ozone.recon.sql.db.jdbc.url</name>
+    <value>jdbc:sqlite:/${ozone.recon.db.dir}/ozone_recon_sqlite.db</value>
+    <tag>OZONE, RECON</tag>
+    <description>
+      Ozone Recon SQL database jdbc url.
+    </description>
+  </property>
+  <property>
+    <name>ozone.recon.sql.db.username</name>
+    <value/>
+    <tag>OZONE, RECON</tag>
+    <description>
+      Ozone Recon SQL database username.
+    </description>
+  </property>
+  <property>
+    <name>ozone.recon.sql.db.password</name>
+    <value/>
+    <tag>OZONE, RECON</tag>
+    <description>
+      Ozone Recon database password.
+    </description>
+  </property>
+  <property>
+    <name>ozone.recon.sql.db.auto.commit</name>
+    <value>false</value>
+    <tag>OZONE, RECON</tag>
+    <description>
+      Sets the Ozone Recon database connection property of auto-commit to
+      true/false.
+    </description>
+  </property>
+  <property>
+    <name>ozone.recon.sql.db.conn.timeout</name>
+    <value>3</value>
+    <tag>OZONE, RECON</tag>
+    <description>
+      Sets the time in milliseconds before a call to getConnection is
+      timed out.
+    </description>
+  </property>
+  <property>
+    <name>ozone.recon.sql.db.conn.max.active</name>
+    <value>1</value>
+    <tag>OZONE, RECON</tag>
+    <description>
+      The max active connections to the SQL database. The default SQLite
+      database only allows a single active connection; set this to a
+      reasonable value like 10 for an external production database.
+    </description>
+  </property>
+  <property>
+    <name>ozone.recon.sql.db.conn.max.age</name>
+    <value>1800</value>
+    <tag>OZONE, RECON</tag>
+    <description>
+      Sets the maximum time a connection can be active, in seconds.
+    </description>
+  </property>
+  <property>
+    <name>ozone.recon.sql.db.conn.idle.max.age</name>
+    <value>3600</value>
+    <tag>OZONE, RECON</tag>
+    <description>
+      Sets the maximum time to live for an idle connection, in seconds.
+    </description>
+  </property>
+  <property>
+    <name>ozone.recon.sql.db.conn.idle.test.period</name>
+    <value>60</value>
+    <tag>OZONE, RECON</tag>
+    <description>
+      This sets the time (in seconds) for a connection to remain idle before
+      sending a test query to the DB. This is useful to prevent a DB from
+      timing out connections on its end.
+    </description>
+  </property>
+  <property>
+    <name>ozone.recon.sql.db.conn.idle.test</name>
+    <value>SELECT 1</value>
+    <tag>OZONE, RECON</tag>
+    <description>
+      The query to send to the DB to maintain keep-alives and test for dead
+      connections.
+    </description>
+  </property>
 </configuration>
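
Since all of these keys feed a standard JDBC DataSource, they can be
smoke-tested outside Recon with plain JDBC. A minimal sketch, assuming
only the keys defined above (Recon's real wiring goes through the
DefaultDataSourceProvider and JooqPersistenceModule added by this patch;
the class below is illustrative):

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class ReconDbSmokeTest {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Fall back to the defaults documented above if nothing is configured.
    String driver = conf.get("ozone.recon.sql.db.driver", "org.sqlite.JDBC");
    String url = conf.get("ozone.recon.sql.db.jdbc.url");
    Class.forName(driver);  // make sure the configured driver registers
    try (Connection c = DriverManager.getConnection(url)) {
      // The configured keep-alive query doubles as a cheap liveness probe.
      c.createStatement().execute(
          conf.get("ozone.recon.sql.db.conn.idle.test", "SELECT 1"));
      System.out.println("Recon SQL database reachable at " + url);
    }
  }
}
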
diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml
index 0e39330..689bca7 100644
--- a/hadoop-hdds/tools/pom.xml
+++ b/hadoop-hdds/tools/pom.xml
@@ -49,9 +49,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <dependency>
       <groupId>org.xerial</groupId>
       <artifactId>sqlite-jdbc</artifactId>
-      <version>3.8.7</version>
+      <version>3.25.2</version>
     </dependency>
   </dependencies>
-
 </project>
diff --git a/hadoop-ozone/ozone-recon-codegen/pom.xml 
b/hadoop-ozone/ozone-recon-codegen/pom.xml
new file mode 100644
index 000..336fc