[hadoop] branch trunk updated (2ffec34 -> 6f0190d)

2019-11-04 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 2ffec34  HDFS-14946. Erasure Coding: Block recovery failed during 
decommissioning. Contributed by Fei Hui.
 add 6f0190d  HADOOP-16678: Review of ArrayWritable (#1692)

No new revisions were added by this update.

Summary of changes:
 .../java/org/apache/hadoop/io/ArrayWritable.java   | 30 ++
 .../org/apache/hadoop/io/TestArrayWritable.java| 21 +--
 2 files changed, 25 insertions(+), 26 deletions(-)
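
HADOOP-16678 is a cleanup of ArrayWritable rather than a behavior change. As a reminder of what the class does, here is a minimal sketch of the usual pattern, a serialization round trip over a typed Writable array, using only stock Hadoop I/O classes:

    import org.apache.hadoop.io.ArrayWritable;
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.Writable;

    public class ArrayWritableRoundTrip {
      public static void main(String[] args) throws Exception {
        // An ArrayWritable is bound to a single element type; here Text.
        ArrayWritable original = new ArrayWritable(Text.class);
        original.set(new Writable[] { new Text("alpha"), new Text("beta") });

        // Serialize, then deserialize into a fresh instance of the same type.
        DataOutputBuffer out = new DataOutputBuffer();
        original.write(out);
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());

        ArrayWritable copy = new ArrayWritable(Text.class);
        copy.readFields(in);
        for (String s : copy.toStrings()) {
          System.out.println(s);   // prints alpha, then beta
        }
      }
    }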





[hadoop] branch trunk updated: HDDS-2245. Use dynamic ports for SCM in TestSecureOzoneCluster Contributed by kevin su.

2019-10-07 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4fdf016  HDDS-2245. Use dynamic ports for SCM in 
TestSecureOzoneCluster Contributed by kevin su.
4fdf016 is described below

commit 4fdf01635835a1b8f1107a50c112a3601a6a61f9
Author: Anu Engineer 
AuthorDate: Mon Oct 7 15:41:42 2019 -0700

HDDS-2245. Use dynamic ports for SCM in TestSecureOzoneCluster
Contributed by kevin su.
---
 .../org/apache/hadoop/ozone/TestSecureOzoneCluster.java | 13 +
 1 file changed, 13 insertions(+)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index 68035f8..ca1f179 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.ozone.client.CertificateClientTestImpl;
 import org.apache.hadoop.ozone.common.Storage;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
@@ -153,6 +154,18 @@ public final class TestSecureOzoneCluster {
 try {
   conf = new OzoneConfiguration();
   conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost");
+
+  conf.setInt(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY, ServerSocketUtil
+  .getPort(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT, 100));
+  conf.setInt(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY, ServerSocketUtil
+  .getPort(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT, 100));
+  conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
+  ServerSocketUtil.getPort(ScmConfigKeys
+  .OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, 100));
+  conf.setInt(ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
+  ServerSocketUtil.getPort(ScmConfigKeys
+  .OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT, 100));
+
   DefaultMetricsSystem.setMiniClusterMode(true);
   final String path = folder.newFolder().toString();
   metaDirPath = Paths.get(path, "om-meta");
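
ServerSocketUtil.getPort probes the requested port and, when it is already bound, retries with random free ports, which is what makes the test resilient to collisions on busy CI hosts. A minimal sketch of the call (the port number below is illustrative, not an Ozone constant):

    import java.io.IOException;
    import org.apache.hadoop.net.ServerSocketUtil;

    public class FreePortExample {
      public static void main(String[] args) throws IOException {
        // Try the preferred port first; if it is taken, fall back to
        // random ports, up to 100 attempts, returning the first free one.
        int port = ServerSocketUtil.getPort(9860, 100);
        System.out.println("binding test server to port " + port);
      }
    }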





[hadoop] branch trunk updated: HDDS-2262. SLEEP_SECONDS: command not found

2019-10-07 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 012d897  HDDS-2262. SLEEP_SECONDS: command not found
012d897 is described below

commit 012d897e5b13228152ca31ad97fae87e4b1e4b54
Author: Doroszlai, Attila 
AuthorDate: Mon Oct 7 12:07:33 2019 +0200

HDDS-2262. SLEEP_SECONDS: command not found

Signed-off-by: Anu Engineer 
---
 hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh b/hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh
index f90942e..cb5f016 100755
--- a/hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh
+++ b/hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh
@@ -63,7 +63,7 @@ if [ -n "$KERBEROS_ENABLED" ]; then
   echo "KDC ISSUER_SERVER => $ISSUER_SERVER"
 
   if [ -n "$SLEEP_SECONDS" ]; then
-echo "Sleeping for $(SLEEP_SECONDS) seconds"
+echo "Sleeping for ${SLEEP_SECONDS} seconds"
 sleep "$SLEEP_SECONDS"
   fi
 





[hadoop] branch trunk updated: HDDS-2259. Container Data Scrubber computes wrong checksum

2019-10-07 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new aaa94c3  HDDS-2259. Container Data Scrubber computes wrong checksum
aaa94c3 is described below

commit aaa94c3da6e725cbf8118993d17502f852de6fc0
Author: Doroszlai, Attila 
AuthorDate: Sun Oct 6 08:45:37 2019 +0200

HDDS-2259. Container Data Scrubber computes wrong checksum

Signed-off-by: Anu Engineer 
---
 .../container/keyvalue/KeyValueContainerCheck.java | 34 +--
 .../keyvalue/TestKeyValueContainerCheck.java   | 69 +-
 2 files changed, 43 insertions(+), 60 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
index d2b26f9..a4bd376 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
@@ -110,7 +110,7 @@ public class KeyValueContainerCheck {
* @return true : integrity checks pass, false : otherwise.
*/
  public boolean fullCheck(DataTransferThrottler throttler, Canceler canceler) {
-boolean valid = false;
+boolean valid;
 
 try {
   valid = fastCheck();
@@ -141,7 +141,7 @@ public class KeyValueContainerCheck {
   private void checkDirPath(String path) throws IOException {
 
 File dirPath = new File(path);
-String errStr = null;
+String errStr;
 
 try {
   if (!dirPath.isDirectory()) {
@@ -162,7 +162,7 @@ public class KeyValueContainerCheck {
   }
 
   private void checkContainerFile() throws IOException {
-/**
+/*
  * compare the values in the container file loaded from disk,
  * with the values we are expecting
  */
@@ -193,10 +193,10 @@ public class KeyValueContainerCheck {
 }
 
 KeyValueContainerData kvData = onDiskContainerData;
-if (!metadataPath.toString().equals(kvData.getMetadataPath())) {
+if (!metadataPath.equals(kvData.getMetadataPath())) {
   String errStr =
       "Bad metadata path in Containerdata for " + containerID + "Expected ["
-  + metadataPath.toString() + "] Got [" + kvData.getMetadataPath()
+  + metadataPath + "] Got [" + kvData.getMetadataPath()
   + "]";
   throw new IOException(errStr);
 }
@@ -204,15 +204,12 @@ public class KeyValueContainerCheck {
 
   private void scanData(DataTransferThrottler throttler, Canceler canceler)
   throws IOException {
-/**
+/*
  * Check the integrity of the DB inside each container.
- * In Scope:
  * 1. iterate over each key (Block) and locate the chunks for the block
- * 2. garbage detection : chunks which exist in the filesystem,
- *but not in the DB. This function is implemented as HDDS-1202
- * Not in scope:
- * 1. chunk checksum verification. this is left to a separate
- * slow chunk scanner
+ * 2. garbage detection (TBD): chunks which exist in the filesystem,
+ *but not in the DB. This function will be implemented in HDDS-1202
+ * 3. chunk checksum verification.
  */
 Preconditions.checkState(onDiskContainerData != null,
 "invoke loadContainerData prior to calling this function");
@@ -255,21 +252,20 @@ public class KeyValueContainerCheck {
 chunk.getChecksumData().getType(),
 chunk.getChecksumData().getBytesPerChecksum(),
 chunk.getChecksumData().getChecksumsList());
+Checksum cal = new Checksum(cData.getChecksumType(),
+cData.getBytesPerChecksum());
 long bytesRead = 0;
 byte[] buffer = new byte[cData.getBytesPerChecksum()];
 try (InputStream fs = new FileInputStream(chunkFile)) {
-  int i = 0, v = 0;
-  for (; i < length; i++) {
-v = fs.read(buffer);
+  for (int i = 0; i < length; i++) {
+int v = fs.read(buffer);
 if (v == -1) {
   break;
 }
 bytesRead += v;
 throttler.throttle(v, canceler);
-Checksum cal = new Checksum(cData.getChecksumType(),
-cData.getBytesPerChecksum());
 ByteString expected = cData.getChecksums().get(i);
-ByteString actual = cal.computeChecksum(buffer)
+ByteString actual = cal.computeChecksum(buffer, 0, v)
 .getChecksums().get(0);
 if (!Arrays.equals(expected.toByteArray(),
  
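
The bug fixed here: the per-chunk read loop checksummed the entire fixed-size buffer instead of only the v bytes actually read, so a short final read included stale bytes, and it also rebuilt the Checksum object on every iteration. The same read-loop discipline, illustrated with the JDK's CRC32 rather than Ozone's Checksum class:

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.util.zip.CRC32;

    public class PartialReadChecksum {
      public static long crcOf(String path) throws IOException {
        CRC32 crc = new CRC32();            // created once, outside the loop
        byte[] buffer = new byte[4096];
        try (InputStream in = new FileInputStream(path)) {
          int n;
          while ((n = in.read(buffer)) != -1) {
            // Checksum only the bytes actually read; a short final read
            // would otherwise include leftovers from the previous pass.
            crc.update(buffer, 0, n);
          }
        }
        return crc.getValue();
      }
    }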

[hadoop] branch trunk updated: HDDS-2264. Improve output of TestOzoneContainer

2019-10-07 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new cfba6ac  HDDS-2264. Improve output of TestOzoneContainer
cfba6ac is described below

commit cfba6ac9512b180d598a7a477a1ee0ea251e7b41
Author: Doroszlai, Attila 
AuthorDate: Mon Oct 7 13:08:14 2019 +0200

HDDS-2264. Improve output of TestOzoneContainer

Signed-off-by: Anu Engineer 
---
 .../container/ozoneimpl/TestOzoneContainer.java| 23 --
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index b0d3a0f..2d679a1 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -41,12 +41,15 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.Random;
 import java.util.UUID;
@@ -62,6 +65,9 @@ import static org.junit.Assert.assertEquals;
  */
 public class TestOzoneContainer {
 
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestOzoneContainer.class);
+
   @Rule
   public TemporaryFolder folder = new TemporaryFolder();
 
@@ -148,7 +154,6 @@ public class TestOzoneContainer {
   @Test
   public void testContainerCreateDiskFull() throws Exception {
 long containerSize = (long) StorageUnit.MB.toBytes(100);
-boolean diskSpaceException = false;
 
 // Format the volumes
 for (HddsVolume volume : volumeSet.getVolumesList()) {
@@ -164,16 +169,14 @@ public class TestOzoneContainer {
 keyValueContainer = new KeyValueContainer(keyValueContainerData, conf);
 
 // we expect an out of space Exception
-try {
-  keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-} catch (StorageContainerException e) {
-  if (e.getResult() == DISK_OUT_OF_SPACE) {
-diskSpaceException = true;
-  }
+StorageContainerException e = LambdaTestUtils.intercept(
+StorageContainerException.class,
+() -> keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId)
+);
+if (!DISK_OUT_OF_SPACE.equals(e.getResult())) {
+  LOG.info("Unexpected error during container creation", e);
 }
-
-// Test failed if there was no exception
-assertEquals(true, diskSpaceException);
+assertEquals(DISK_OUT_OF_SPACE, e.getResult());
   }
 
   //verify committed space on each volume
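
LambdaTestUtils.intercept (from hadoop-common's test utilities) removes the try/catch-and-flag boilerplate: it fails the test when no exception is thrown and returns the caught exception so the test can assert on its details. A minimal sketch of that idiom:

    import org.apache.hadoop.test.LambdaTestUtils;
    import org.junit.Test;
    import static org.junit.Assert.assertEquals;

    public class InterceptSketch {

      private void allocate() {
        throw new IllegalStateException("disk full");  // stand-in for create()
      }

      @Test
      public void failsWithDiskFull() throws Exception {
        // intercept() fails the test if no exception is thrown, and hands
        // back the caught exception for follow-up assertions.
        IllegalStateException e = LambdaTestUtils.intercept(
            IllegalStateException.class, () -> allocate());
        assertEquals("disk full", e.getMessage());
      }
    }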





[hadoop] branch trunk updated: HDDS-2238. Container Data Scrubber spams log in empty cluster

2019-10-07 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1877312  HDDS-2238. Container Data Scrubber spams log in empty cluster
1877312 is described below

commit 187731244067f6bf817ad352851cb27850b81c92
Author: Doroszlai, Attila 
AuthorDate: Fri Oct 4 08:08:05 2019 +0200

HDDS-2238. Container Data Scrubber spams log in empty cluster

Signed-off-by: Anu Engineer 
---
 .../ozone/container/common/impl/ContainerSet.java  | 54 ++
 .../container/ozoneimpl/ContainerController.java   |  6 +--
 .../container/ozoneimpl/ContainerDataScanner.java  | 46 ++
 .../ozoneimpl/ContainerDataScrubberMetrics.java|  4 +-
 .../ozoneimpl/ContainerMetadataScanner.java| 19 
 .../ContainerMetadataScrubberMetrics.java  |  5 +-
 .../ozoneimpl/ContainerScrubberConfiguration.java  | 17 +++
 .../ozone/container/ozoneimpl/OzoneContainer.java  |  8 +---
 .../container/common/impl/TestContainerSet.java| 18 
 .../ozoneimpl/TestContainerScrubberMetrics.java| 25 +-
 .../container/common/TestBlockDeletingService.java | 24 +-
 .../hadoop/ozone/dn/scrubber/TestDataScrubber.java |  7 ++-
 12 files changed, 120 insertions(+), 113 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index 784f56c..680d683 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -32,6 +32,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.Set;
 import java.util.List;
@@ -40,7 +41,6 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.ConcurrentSkipListSet;
-import java.util.stream.Collectors;
 
 
 /**
@@ -50,17 +50,17 @@ public class ContainerSet {
 
  private static final Logger LOG = LoggerFactory.getLogger(ContainerSet.class);

-  private final ConcurrentSkipListMap<Long, Container> containerMap = new
+  private final ConcurrentSkipListMap<Long, Container<?>> containerMap = new
      ConcurrentSkipListMap<>();
   private final ConcurrentSkipListSet<Long> missingContainerSet =
      new ConcurrentSkipListSet<>();
   /**
* Add Container to container map.
-   * @param container
+   * @param container container to be added
* @return If container is added to containerMap returns true, otherwise
* false
*/
-  public boolean addContainer(Container container) throws
+  public boolean addContainer(Container<?> container) throws
   StorageContainerException {
 Preconditions.checkNotNull(container, "container cannot be null");
 
@@ -81,10 +81,10 @@ public class ContainerSet {
 
   /**
* Returns the Container with specified containerId.
-   * @param containerId
+   * @param containerId ID of the container to get
* @return Container
*/
-  public Container getContainer(long containerId) {
+  public Container<?> getContainer(long containerId) {
 Preconditions.checkState(containerId >= 0,
 "Container Id cannot be negative.");
 return containerMap.get(containerId);
@@ -92,14 +92,14 @@ public class ContainerSet {
 
   /**
* Removes the Container matching with specified containerId.
-   * @param containerId
+   * @param containerId ID of the container to remove
* @return If container is removed from containerMap returns true, otherwise
* false
*/
   public boolean removeContainer(long containerId) {
 Preconditions.checkState(containerId >= 0,
 "Container Id cannot be negative.");
-Container removed = containerMap.remove(containerId);
+Container<?> removed = containerMap.remove(containerId);
 if(removed == null) {
   LOG.debug("Container with containerId {} is not present in " +
   "containerMap", containerId);
@@ -122,9 +122,9 @@ public class ContainerSet {
 
   /**
* Return an container Iterator over {@link ContainerSet#containerMap}.
-   * @return {@literal Iterator<Container>}
+   * @return {@literal Iterator<Container<?>>}
   */
-  public Iterator<Container> getContainerIterator() {
+  public Iterator<Container<?>> getContainerIterator() {
 return containerMap.values().iterator();
   }
 
@@ -132,26 +132,23 @@ public class ContainerSet {
* Return an iterator of containers associated with the specified volume.
*
* @param  volume the HDDS volume which should be used to filter 

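
Much of the noise in this patch is the generics cleanup around Container. A compact sketch of the idea, with types simplified from the Ozone originals: parameterizing the map value as Container<?> accepts containers of any ContainerData subtype while keeping unchecked-conversion warnings out of every call site.

    import java.util.concurrent.ConcurrentSkipListMap;

    interface ContainerData { }
    interface Container<D extends ContainerData> { D getContainerData(); }

    class ContainerSetSketch {
      // Wildcard-typed values instead of the raw Container type.
      private final ConcurrentSkipListMap<Long, Container<?>> containerMap =
          new ConcurrentSkipListMap<>();

      Container<?> getContainer(long id) {
        return containerMap.get(id);
      }
    }
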
[hadoop] branch trunk updated (10bdc59 -> f3eaa84)

2019-10-04 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 10bdc59  HADOOP-16579. Upgrade to Apache Curator 4.2.0 excluding ZK 
(#1531). Contributed by Norbert Kalmár.
 add f3eaa84  HDDS-2164 : om.db.checkpoints is getting filling up fast. 
(#1536)

No new revisions were added by this update.

Summary of changes:
 .../hadoop/hdds/utils/db/RDBCheckpointManager.java |  2 +-
 .../hadoop/hdds/utils/db/RocksDBCheckpoint.java|  3 +-
 .../main/java/org/apache/hadoop/ozone/OmUtils.java | 97 ++
 .../java/org/apache/hadoop/ozone/TestOmUtils.java  | 79 ++
 .../hadoop/ozone/om/TestOMDbCheckpointServlet.java |  4 -
 .../hadoop/ozone/om/OMDBCheckpointServlet.java | 59 +
 .../java/org/apache/hadoop/ozone/om/OMMetrics.java | 10 ---
 .../org/apache/hadoop/ozone/recon/ReconUtils.java  | 61 ++
 .../apache/hadoop/ozone/recon/TestReconUtils.java  | 44 +-
 .../impl/TestOzoneManagerServiceProviderImpl.java  |  6 +-
 10 files changed, 240 insertions(+), 125 deletions(-)





[hadoop] branch trunk updated: HDDS-2200 : Recon does not handle the NULL snapshot from OM DB cleanly.

2019-10-03 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b7cb8fe  HDDS-2200 : Recon does not handle the NULL snapshot from OM 
DB cleanly.
b7cb8fe is described below

commit b7cb8fe07c25f31caae89d6406be54c505343f3c
Author: Aravindan Vijayan 
AuthorDate: Wed Oct 2 12:50:25 2019 -0700

HDDS-2200 : Recon does not handle the NULL snapshot from OM DB cleanly.

Signed-off-by: Anu Engineer 
---
 .../org/apache/hadoop/ozone/recon/ReconUtils.java  |  35 ++
 .../recon/recovery/ReconOmMetadataManagerImpl.java |  21 +++-
 .../spi/impl/ContainerDBServiceProviderImpl.java   |  28 +++--
 .../spi/impl/OzoneManagerServiceProviderImpl.java  |  33 +++--
 .../recon/spi/impl/ReconContainerDBProvider.java   |  32 -
 .../ozone/recon/AbstractOMMetadataManagerTest.java |   2 +-
 .../apache/hadoop/ozone/recon/TestReconUtils.java  |  31 +
 .../recovery/TestReconOmMetadataManagerImpl.java   | 133 ++---
 .../impl/TestOzoneManagerServiceProviderImpl.java  |  29 -
 .../spi/impl/TestReconContainerDBProvider.java |  13 --
 10 files changed, 268 insertions(+), 89 deletions(-)

diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index 95e6f9b..ecd47f2 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -175,4 +175,39 @@ public class ReconUtils {
 }
   }
 
+  /**
+   * Load last known DB in Recon.
+   * @param reconDbDir directory in which Recon keeps its DB snapshots
+   * @param fileNamePrefix file name prefix that snapshot files must match
+   * @return the newest matching snapshot file, or null if none exists
+   */
+  public File getLastKnownDB(File reconDbDir, String fileNamePrefix) {
+String lastKnownSnapshotFileName = null;
+long lastKnownSnapshotTs = Long.MIN_VALUE;
+if (reconDbDir != null) {
+  File[] snapshotFiles = reconDbDir.listFiles((dir, name) ->
+  name.startsWith(fileNamePrefix));
+  if (snapshotFiles != null) {
+for (File snapshotFile : snapshotFiles) {
+  String fileName = snapshotFile.getName();
+  try {
+String[] fileNameSplits = fileName.split("_");
+if (fileNameSplits.length <= 1) {
+  continue;
+}
+long snapshotTimestamp = Long.parseLong(fileNameSplits[1]);
+if (lastKnownSnapshotTs < snapshotTimestamp) {
+  lastKnownSnapshotTs = snapshotTimestamp;
+  lastKnownSnapshotFileName = fileName;
+}
+  } catch (NumberFormatException nfEx) {
+LOG.warn("Unknown file found in Recon DB dir : {}", fileName);
+  }
+}
+  }
+}
+return lastKnownSnapshotFileName == null ? null :
+new File(reconDbDir.getPath(), lastKnownSnapshotFileName);
+  }
+
 }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
index e554b25..3d55c99 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.ozone.recon.recovery;
 
+import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OM_SNAPSHOT_DB;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR;
+
 import java.io.File;
 import java.io.IOException;
 
@@ -28,6 +31,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.ozone.recon.ReconUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -42,17 +46,28 @@ public class ReconOmMetadataManagerImpl extends OmMetadataManagerImpl
   private static final Logger LOG =
   LoggerFactory.getLogger(ReconOmMetadataManagerImpl.class);
 
-  @Inject
   private OzoneConfiguration ozoneConfiguration;
+  private ReconUtils reconUtils;
 
   @Inject
-  public ReconOmMetadataManagerImpl(OzoneConfiguration configuration) {
+  public ReconOmMetadataManagerImpl(OzoneConfiguration configuration,
+ReconUtils reconUtils) {
+this.reconUtils = reconUtils;
 this.ozoneConfiguration = configuration;
   }
 
   @Override
   public void start(OzoneConfiguration configuration) throws IOException {
 LOG.info("Starting ReconOMMetadataManagerImpl");
+File reconDbDir =
+   
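
The new helper returns the newest file in reconDbDir whose name matches the <prefix>_<timestamp> convention, or null when nothing parses cleanly, which is what lets Recon start from an empty state instead of crashing on a NULL snapshot. A usage sketch; the directory, the prefix string, and the direct construction of ReconUtils (it is normally injected) are illustrative assumptions:

    import java.io.File;
    import org.apache.hadoop.ozone.recon.ReconUtils;

    public class LastKnownDbExample {
      public static void main(String[] args) {
        File dbDir = new File("/var/lib/recon/db");        // hypothetical path
        File latest = new ReconUtils().getLastKnownDB(dbDir, "om.snapshot.db");
        // null means no usable snapshot on disk; fall back to a fresh DB.
        System.out.println(latest == null ? "no snapshot found"
            : latest.getPath());
      }
    }
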

[hadoop] branch trunk updated: HDDS-1720 : Add ability to configure RocksDB logs for Ozone Manager.

2019-10-03 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 76605f1  HDDS-1720 : Add ability to configure RocksDB logs for Ozone 
Manager.
76605f1 is described below

commit 76605f17dd15a48bc40c1b2fe6c8d0c2f4631959
Author: Aravindan Vijayan 
AuthorDate: Fri Sep 27 00:10:08 2019 -0700

HDDS-1720 : Add ability to configure RocksDB logs for Ozone Manager.

Signed-off-by: Anu Engineer 
---
 .../hadoop/hdds/utils/db/DBStoreBuilder.java   | 24 +-
 .../hadoop/hdds/utils/db/RocksDBConfiguration.java | 62 ++
 .../hadoop/hdds/utils/db/TestDBStoreBuilder.java   | 16 ++--
 .../ozone/om/TestOzoneManagerRocksDBLogging.java   | 97 ++
 4 files changed, 189 insertions(+), 10 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index 4d5ecab..263864f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -22,11 +22,13 @@ package org.apache.hadoop.hdds.utils.db;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.eclipse.jetty.util.StringUtil;
 import org.rocksdb.ColumnFamilyDescriptor;
 import org.rocksdb.ColumnFamilyOptions;
 import org.rocksdb.DBOptions;
+import org.rocksdb.InfoLogLevel;
 import org.rocksdb.RocksDB;
 import org.rocksdb.Statistics;
 import org.rocksdb.StatsLevel;
@@ -54,6 +56,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKS
 public final class DBStoreBuilder {
   private static final Logger LOG =
   LoggerFactory.getLogger(DBStoreBuilder.class);
+  public static final Logger ROCKS_DB_LOGGER =
+  LoggerFactory.getLogger(RocksDB.class);
  private Set<TableConfig> tables;
   private DBProfile dbProfile;
   private DBOptions rocksDBOption;
@@ -63,8 +67,9 @@ public final class DBStoreBuilder {
   private Configuration configuration;
   private CodecRegistry registry;
   private String rocksDbStat;
+  private RocksDBConfiguration rocksDBConfiguration;
 
-  private DBStoreBuilder(Configuration configuration) {
+  private DBStoreBuilder(OzoneConfiguration configuration) {
 tables = new HashSet<>();
 tableNames = new LinkedList<>();
 this.configuration = configuration;
@@ -72,9 +77,11 @@ public final class DBStoreBuilder {
 this.rocksDbStat = configuration.getTrimmed(
 OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
 OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
+this.rocksDBConfiguration =
+configuration.getObject(RocksDBConfiguration.class);
   }
 
-  public static DBStoreBuilder newBuilder(Configuration configuration) {
+  public static DBStoreBuilder newBuilder(OzoneConfiguration configuration) {
 return new DBStoreBuilder(configuration);
   }
 
@@ -199,6 +206,19 @@ public final class DBStoreBuilder {
   option = dbProfile.getDBOptions();
 }
 
+if (rocksDBConfiguration.isRocksdbLoggingEnabled()) {
+  org.rocksdb.Logger logger = new org.rocksdb.Logger(option) {
+@Override
+protected void log(InfoLogLevel infoLogLevel, String s) {
+  ROCKS_DB_LOGGER.info(s);
+}
+  };
+  InfoLogLevel level = InfoLogLevel.valueOf(rocksDBConfiguration
+  .getRocksdbLogLevel() + "_LEVEL");
+  logger.setInfoLogLevel(level);
+  option.setLogger(logger);
+}
+
 if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
   Statistics statistics = new Statistics();
   statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java
new file mode 100644
index 000..1a8c846
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  
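
The interesting part of the patch is the bridge from RocksDB's native info log into SLF4J. Extracted into a standalone sketch, under the same RocksJava APIs the diff uses (org.rocksdb.Logger subclassed and attached to the DBOptions):

    import org.rocksdb.DBOptions;
    import org.rocksdb.InfoLogLevel;
    import org.slf4j.LoggerFactory;

    public class RocksDbLogBridge {
      private static final org.slf4j.Logger LOG =
          LoggerFactory.getLogger(RocksDbLogBridge.class);

      // Route RocksDB's internal log lines into SLF4J, as the new
      // DBStoreBuilder code does when rocksdb logging is enabled.
      static DBOptions withSlf4jLogging(DBOptions options, InfoLogLevel level) {
        org.rocksdb.Logger logger = new org.rocksdb.Logger(options) {
          @Override
          protected void log(InfoLogLevel infoLogLevel, String message) {
            LOG.info(message);
          }
        };
        logger.setInfoLogLevel(level);
        options.setLogger(logger);
        return options;
      }
    }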

[hadoop] branch trunk updated: HDDS-2231. test-single.sh cannot copy results (#1575)

2019-10-03 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9446686  HDDS-2231. test-single.sh cannot copy results (#1575)
9446686 is described below

commit 944668674b57291050262d2d6f84a39ca437671d
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Thu Oct 3 23:50:04 2019 +0200

HDDS-2231. test-single.sh cannot copy results (#1575)
---
 hadoop-ozone/dist/src/main/compose/test-single.sh | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/hadoop-ozone/dist/src/main/compose/test-single.sh b/hadoop-ozone/dist/src/main/compose/test-single.sh
index f1203d3..629a9bc 100755
--- a/hadoop-ozone/dist/src/main/compose/test-single.sh
+++ b/hadoop-ozone/dist/src/main/compose/test-single.sh
@@ -48,6 +48,8 @@ fi
 # shellcheck source=testlib.sh
 source "$COMPOSE_DIR/../testlib.sh"
 
+create_results_dir
+
 execute_robot_test "$1" "$2"
 
 generate_report





[hadoop] branch trunk updated (51eaeca -> 47d721d)

2019-10-03 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 51eaeca  HDDS-2211. Collect docker logs if env fails to start (#1553)
 add 47d721d  HDDS-2234. rat.sh fails due to ozone-recon-web/build files 
(#1580)

No new revisions were added by this update.

Summary of changes:
 hadoop-ozone/pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)





[hadoop] branch trunk updated (5a7483c -> d59bcbf)

2019-10-03 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 5a7483c  HDFS-14888. RBF: Enable Parallel Test Profile for builds. 
Contributed by Ayush Saxena.
 add d59bcbf  HDDS-2226. S3 Secrets should use a strong RNG. (#1572)

No new revisions were added by this update.

Summary of changes:
 .../common/src/main/java/org/apache/hadoop/ozone/OmUtils.java | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)
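
The change swaps the RNG behind S3 secret generation for a cryptographically strong one. The principle in a standalone sketch (not the exact OmUtils code): java.util.Random is predictable from its seed, while SecureRandom draws from the platform CSPRNG.

    import java.security.SecureRandom;

    public class SecretGen {
      // Never back a secret with java.util.Random; use the CSPRNG.
      public static String newSecret() {
        byte[] raw = new byte[32];
        new SecureRandom().nextBytes(raw);
        StringBuilder hex = new StringBuilder(raw.length * 2);
        for (byte b : raw) {
          hex.append(String.format("%02x", b));
        }
        return hex.toString();
      }
    }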





[hadoop] branch trunk updated: HDDS-2072. Make StorageContainerLocationProtocolService message based Contributed by Elek, Marton.

2019-10-02 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4c24f24  HDDS-2072. Make StorageContainerLocationProtocolService 
message based Contributed by Elek, Marton.
4c24f24 is described below

commit 4c24f2434dd8c09bb104ee660975855eca287fe6
Author: Anu Engineer 
AuthorDate: Wed Oct 2 16:15:31 2019 -0700

HDDS-2072. Make StorageContainerLocationProtocolService message based
Contributed by Elek, Marton.
---
 ...inerLocationProtocolClientSideTranslatorPB.java | 411 +-
 .../src/main/proto/ScmBlockLocationProtocol.proto  |   2 +-
 .../proto/StorageContainerLocationProtocol.proto   | 185 
 ...inerLocationProtocolServerSideTranslatorPB.java | 476 +++--
 .../hdds/scm/server/SCMClientProtocolServer.java   |  12 +-
 .../ozone/insight/BaseInsightSubCommand.java   |   3 +
 .../scm/ScmProtocolBlockLocationInsight.java   |   2 +-
 ...va => ScmProtocolContainerLocationInsight.java} |  22 +-
 8 files changed, 570 insertions(+), 543 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index ab3fcd1..01db597 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -16,64 +16,57 @@
  */
 package org.apache.hadoop.hdds.scm.protocolPB;
 
-import com.google.common.base.Preconditions;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Consumer;
+
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.GetScmInfoResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ActivatePipelineRequestProto;
-import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DeactivatePipelineRequestProto;
-import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto;
-import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ClosePipelineRequestProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DeactivatePipelineRequestProto;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeRequestProto;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto;
-import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineResponseProto;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto;
-import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeResponseProto;
-import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerRequestProto;
-import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerRequestProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryRequestProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
+imp

[hadoop] branch trunk updated (ffd4e52 -> 685918e)

2019-10-02 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from ffd4e52  HDDS-2073. Make SCMSecurityProtocol message based. 
Contributed by Elek, Marton.
 add 685918e  HDDS-2227. GDPR key generation could benefit from 
secureRandom. (#1574)

No new revisions were added by this update.

Summary of changes:
 .../main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java   | 3 ++-
 .../java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java   | 8 +---
 .../org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java| 4 +++-
 3 files changed, 10 insertions(+), 5 deletions(-)
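
Same theme as HDDS-2226: key material must come from a CSPRNG. A hypothetical sketch of generating a 128-bit AES key seeded from SecureRandom (the GDPR feature is AES-based, but the exact GDPRSymmetricKey internals differ):

    import java.security.NoSuchAlgorithmException;
    import java.security.SecureRandom;
    import javax.crypto.KeyGenerator;
    import javax.crypto.SecretKey;

    public class GdprKeySketch {
      // Key bytes come from the platform CSPRNG, the same idea the
      // commit applies to GDPR key generation.
      public static SecretKey newAesKey() throws NoSuchAlgorithmException {
        KeyGenerator kg = KeyGenerator.getInstance("AES");
        kg.init(128, new SecureRandom());
        return kg.generateKey();
      }
    }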





[hadoop] branch trunk updated: HDDS-2073. Make SCMSecurityProtocol message based. Contributed by Elek, Marton.

2019-10-02 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ffd4e52  HDDS-2073. Make SCMSecurityProtocol message based. 
Contributed by Elek, Marton.
ffd4e52 is described below

commit ffd4e527256389d91dd8e4c49ca1681f70a790e2
Author: Anu Engineer 
AuthorDate: Wed Oct 2 12:19:58 2019 -0700

HDDS-2073. Make SCMSecurityProtocol message based.
Contributed by Elek, Marton.
---
 .../SCMSecurityProtocolClientSideTranslatorPB.java | 104 +++-
 .../SCMSecurityProtocolServerSideTranslatorPB.java | 132 ---
 .../src/main/proto/SCMSecurityProtocol.proto   |  96 +++
 .../SCMSecurityProtocolServerSideTranslatorPB.java | 186 +
 .../hdds/scm/server/SCMSecurityProtocolServer.java |  27 ++-
 .../ozone/insight/BaseInsightSubCommand.java   |   6 +-
 .../insight/scm/ScmProtocolSecurityInsight.java|  71 
 7 files changed, 401 insertions(+), 221 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
index d7d53a4..efe79a7 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
@@ -16,22 +16,29 @@
  */
 package org.apache.hadoop.hdds.protocolPB;
 
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.function.Consumer;
+
+import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
 import 
org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCACertificateRequestProto;
 import 
org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
 import 
org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto;
-import 
org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto.Builder;
 import 
org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetDataNodeCertRequestProto;
-import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
+import 
org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest;
+import 
org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest.Builder;
+import 
org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityResponse;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.Type;
+import org.apache.hadoop.hdds.tracing.TracingUtil;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
 
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
 import static 
org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetOMCertRequestProto;
 
 /**
@@ -53,6 +60,28 @@ public class SCMSecurityProtocolClientSideTranslatorPB implements
   }
 
   /**
+   * Helper method to wrap the request and send the message.
+   */
+  private SCMSecurityResponse submitRequest(
+  SCMSecurityProtocolProtos.Type type,
+  Consumer<Builder> builderConsumer) throws IOException {
+final SCMSecurityResponse response;
+try {
+
+  Builder builder = SCMSecurityRequest.newBuilder()
+  .setCmdType(type)
+  .setTraceID(TracingUtil.exportCurrentSpan());
+  builderConsumer.accept(builder);
+  SCMSecurityRequest wrapper = builder.build();
+
+  response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper);
+} catch (ServiceException ex) {
+  throw ProtobufHelper.getRemoteException(ex);
+}
+return response;
+  }
+
+  /**
* Closes this stream and releases any system resources associated
* with it. If the stream is already closed then invoking this
* method has no effect.
@@ -87,8 +116,8 @@ public class SCMSecurityProtocolClientSideTranslatorPB implements
   /**
* Get SCM signed certificate for OM.
*
-   * @param omDetails   - OzoneManager Details.
-   * @param certSignReq - Certificate signing request.
+   * @param omDetails   - OzoneManager Details.
+   * @param certSignReq - Certificate signing request.
* @return byte[] - SCM signed certificate.
*/
   @Override
@@ -100,64 +129,61 @@ public class
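
The "message based" refactor here (and again in HDDS-2068 below) collapses many per-call RPC stubs into a single envelope: one request proto carrying a command-type enum, where each call contributes only its payload through a Consumer over the envelope's builder, so tracing and error handling live in one place. A schematic of that shape; every type below is a simplified stand-in for the generated protobuf classes:

    import java.io.IOException;
    import java.util.function.Consumer;

    public class MessageBasedClient {
      enum Type { GET_CERT, GET_CA_CERT }

      static class Request {
        Type cmdType;
        String payload;
        static class Builder {
          private final Request r = new Request();
          Builder setCmdType(Type t) { r.cmdType = t; return this; }
          Builder setPayload(String p) { r.payload = p; return this; }
          Request build() { return r; }
        }
      }

      private Request submitRequest(Type type, Consumer<Request.Builder> fill)
          throws IOException {
        Request.Builder builder = new Request.Builder().setCmdType(type);
        fill.accept(builder);      // each caller adds only its payload
        return builder.build();    // one wire path serves every RPC
      }

      public Request getCaCert() throws IOException {
        return submitRequest(Type.GET_CA_CERT, b -> b.setPayload("ca-cert-req"));
      }
    }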

[hadoop] branch trunk updated: HDDS-2068. Make StorageContainerDatanodeProtocolService message based

2019-10-02 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e8ae632  HDDS-2068. Make StorageContainerDatanodeProtocolService 
message based
e8ae632 is described below

commit e8ae632d4c4f13788b0c42dbf297c8f7b9d889f3
Author: Márton Elek 
AuthorDate: Mon Sep 23 16:40:08 2019 +0200

HDDS-2068. Make StorageContainerDatanodeProtocolService message based

Signed-off-by: Anu Engineer 
---
 ...inerDatanodeProtocolClientSideTranslatorPB.java |  60 ++-
 ...inerDatanodeProtocolServerSideTranslatorPB.java | 115 -
 .../proto/StorageContainerDatanodeProtocol.proto   |  58 ---
 .../ozone/container/common/SCMTestUtils.java   |   4 +-
 .../hdds/scm/server/SCMDatanodeProtocolServer.java | 102 --
 .../ozone/insight/BaseInsightSubCommand.java   |   4 +-
 .../scm/ScmProtocolBlockLocationInsight.java   |   6 +-
 ...nsight.java => ScmProtocolDatanodeInsight.java} |  27 ++---
 8 files changed, 207 insertions(+), 169 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
index 4e1e27e..9b44666 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
@@ -24,6 +24,9 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest.Builder;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeResponse;
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -38,6 +41,7 @@ import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.Type;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
@@ -45,6 +49,7 @@ import 
org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.function.Consumer;
 
 /**
  * This class is the client-side translator to translate the requests made on
@@ -97,6 +102,25 @@ public class StorageContainerDatanodeProtocolClientSideTranslatorPB
   }
 
   /**
+   * Helper method to wrap the request and send the message.
+   */
+  private SCMDatanodeResponse submitRequest(Type type,
+  Consumer<Builder> builderConsumer) throws IOException {
+final SCMDatanodeResponse response;
+try {
+  Builder builder = SCMDatanodeRequest.newBuilder()
+  .setCmdType(type);
+  builderConsumer.accept(builder);
+  SCMDatanodeRequest wrapper = builder.build();
+
+  response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper);
+} catch (ServiceException ex) {
+  throw ProtobufHelper.getRemoteException(ex);
+}
+return response;
+  }
+
+  /**
* Returns SCM version.
*
* @param unused - set to null and unused.
@@ -104,16 +128,11 @@ public class StorageContainerDatanodeProtocolClientSideTranslatorPB
*/
   @Override
   public SCMVersionResponseProto getVersion(SCMVersionRequestProto
-  unused) throws IOException {
-SCMVersionRequestProto request =
-SCMVersionRequestProto.newBuilder().build();
-final SCMVersionResponseProto response;
-try {
-  response = rpcProxy.getVersion(NULL_RPC_CONTROLLER, request);
-} catch (ServiceException ex) {
-  throw ProtobufHelper.getRemoteException(ex);
-}
-return response;
+  request) throws IOException {
+return submitRequest(Type.GetVersion,
+(builder) -> builder
+.setGetVersionRequest(SCMVersionRequestProto.newBuilder().build()))
+.getGetVersionResponse();
   }
 
   /**
@@ -126,13 +145,9 @@ public class StorageContainerDatanodeProtocolClientSideTranslatorPB
   @Override
   public SCMHeartbeatResponseProto sendHeartbeat(
   SCMHeartbeatReques

[hadoop] branch trunk updated (61a8436 -> 2e1fd44)

2019-10-02 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 61a8436  YARN-9870. Remove unused function from 
OpportunisticContainerAllocatorAMService. Contributed by Abhishek Modi.
 add 2e1fd44  HDDS-2201. Rename VolumeList to UserVolumeInfo. (#1566)

No new revisions were added by this update.

Summary of changes:
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |  4 +--
 ...lumeListCodec.java => UserVolumeInfoCodec.java} | 13 
 .../src/main/proto/OzoneManagerProtocol.proto  |  2 +-
 .../hadoop/ozone/om/OmMetadataManagerImpl.java | 25 
 .../apache/hadoop/ozone/om/VolumeManagerImpl.java  | 35 --
 .../request/s3/bucket/S3BucketCreateRequest.java   |  5 ++--
 .../om/request/volume/OMVolumeCreateRequest.java   |  5 ++--
 .../om/request/volume/OMVolumeDeleteRequest.java   |  2 +-
 .../ozone/om/request/volume/OMVolumeRequest.java   | 14 -
 .../om/request/volume/OMVolumeSetOwnerRequest.java |  4 +--
 .../om/response/volume/OMVolumeCreateResponse.java | 11 ---
 .../om/response/volume/OMVolumeDeleteResponse.java |  8 ++---
 .../response/volume/OMVolumeSetOwnerResponse.java  |  8 ++---
 .../ozone/om/request/TestOMRequestUtils.java   |  6 ++--
 .../hadoop/ozone/om/request}/package-info.java |  4 +--
 .../request/volume/TestOMVolumeCreateRequest.java  |  6 ++--
 .../volume/TestOMVolumeSetOwnerRequest.java|  4 +--
 .../ozone/om/response/TestOMResponseUtils.java |  6 ++--
 .../hadoop/ozone/om/response}/package-info.java|  4 +--
 .../volume/TestOMVolumeCreateResponse.java |  6 ++--
 .../volume/TestOMVolumeDeleteResponse.java |  8 ++---
 .../volume/TestOMVolumeSetOwnerResponse.java   | 10 +++
 .../response/volume}/package-info.java |  4 +--
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java|  4 +--
 24 files changed, 100 insertions(+), 98 deletions(-)
 rename 
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/{VolumeListCodec.java
 => UserVolumeInfoCodec.java} (78%)
 copy hadoop-ozone/{integration-test/src/test/java/org/apache/hadoop/ozone/om 
=> 
ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request}/package-info.java
 (92%)
 copy {hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db => 
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response}/package-info.java
 (92%)
 copy 
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/{web/ozShell 
=> om/response/volume}/package-info.java (91%)





[hadoop] branch trunk updated: HDDS-2205. checkstyle.sh reports wrong failure count

2019-09-30 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e5bba59  HDDS-2205. checkstyle.sh reports wrong failure count
e5bba59 is described below

commit e5bba592a84a94e0545479b668e6925eb4b8858c
Author: Doroszlai, Attila 
AuthorDate: Mon Sep 30 09:35:14 2019 +0200

HDDS-2205. checkstyle.sh reports wrong failure count

Signed-off-by: Anu Engineer 
---
 hadoop-ozone/dev-support/checks/checkstyle.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh
index 7a218a4..685bf14 100755
--- a/hadoop-ozone/dev-support/checks/checkstyle.sh
+++ b/hadoop-ozone/dev-support/checks/checkstyle.sh
@@ -36,7 +36,7 @@ find "." -name checkstyle-errors.xml -print0 \
   | tee "$REPORT_FILE"
 
 ## generate counter
-wc -l "$REPORT_DIR/summary.txt" | awk '{print $1}'> "$REPORT_DIR/failures"
+grep -c ':' "$REPORT_FILE" > "$REPORT_DIR/failures"
 
 if [[ -s "${REPORT_FILE}" ]]; then
exit 1





[hadoop] branch trunk updated: HDDS-2149. Replace findbugs with spotbugs

2019-09-26 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9bf7a6e  HDDS-2149. Replace findbugs with spotbugs
9bf7a6e is described below

commit 9bf7a6e5b26a361fd08552793852208d817fdfbd
Author: Doroszlai, Attila 
AuthorDate: Tue Sep 24 09:23:48 2019 +0200

HDDS-2149. Replace findbugs with spotbugs

Signed-off-by: Anu Engineer 
---
 hadoop-hdds/common/pom.xml  |  4 ++--
 hadoop-hdds/container-service/pom.xml   |  9 -
 hadoop-hdds/pom.xml | 15 ---
 hadoop-hdds/server-scm/pom.xml  |  4 ++--
 hadoop-ozone/common/pom.xml |  4 ++--
 hadoop-ozone/csi/pom.xml|  4 ++--
 hadoop-ozone/dev-support/checks/findbugs.sh | 12 +---
 hadoop-ozone/insight/pom.xml|  9 -
 hadoop-ozone/ozone-manager/pom.xml  |  5 ++---
 hadoop-ozone/ozonefs-lib-current/pom.xml|  4 ++--
 hadoop-ozone/ozonefs-lib-legacy/pom.xml |  4 ++--
 hadoop-ozone/ozonefs/pom.xml|  5 ++---
 hadoop-ozone/pom.xml|  8 
 hadoop-ozone/recon/pom.xml  |  4 ++--
 hadoop-ozone/s3gateway/pom.xml  |  5 ++---
 hadoop-ozone/tools/pom.xml  |  9 -
 hadoop-ozone/upgrade/pom.xml|  5 ++---
 pom.ozone.xml   | 29 ++---
 18 files changed, 57 insertions(+), 82 deletions(-)

diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index 2a6d44a..9af807f 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -274,8 +274,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   
   
-org.codehaus.mojo
-findbugs-maven-plugin
+com.github.spotbugs
+spotbugs-maven-plugin
 
   
${basedir}/dev-support/findbugsExcludeFile.xml
 
diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml
index 2f89fa2..0eef961 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -55,9 +55,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
   1.16
 
 
-  com.google.code.findbugs
-  findbugs
-  3.0.1
+  com.github.spotbugs
+  spotbugs
   provided
 
   
@@ -93,8 +92,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   
   
-org.codehaus.mojo
-findbugs-maven-plugin
+com.github.spotbugs
+spotbugs-maven-plugin
 
   
${basedir}/dev-support/findbugsExcludeFile.xml
 
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 5c98e38..a1efb5b 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -195,13 +195,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
 ${junit.jupiter.version}
 test
   
-
-  
-com.google.code.findbugs
-findbugs
-3.0.1
-provided
-  
 
   
   
@@ -308,14 +301,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   
   
-org.codehaus.mojo
-findbugs-maven-plugin
-3.0.4
-
-  
-
-  
-  
 org.apache.maven.plugins
 maven-dependency-plugin
 
diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml
index 99d5922..68a5cd8 100644
--- a/hadoop-hdds/server-scm/pom.xml
+++ b/hadoop-hdds/server-scm/pom.xml
@@ -101,8 +101,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
   bcprov-jdk15on
 
 
-  com.google.code.findbugs
-  findbugs
+  com.github.spotbugs
+  spotbugs
   provided
 
   
diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml
index 9cbee56..09ac27a 100644
--- a/hadoop-ozone/common/pom.xml
+++ b/hadoop-ozone/common/pom.xml
@@ -154,8 +154,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   
   
-org.codehaus.mojo
-findbugs-maven-plugin
+com.github.spotbugs
+spotbugs-maven-plugin
 
   
${basedir}/dev-support/findbugsExcludeFile.xml
 
diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml
index fb04d8c..6e7b807 100644
--- a/hadoop-ozone/csi/pom.xml
+++ b/hadoop-ozone/csi/pom.xml
@@ -176,8 +176,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   
   
-org.codehaus.mojo
-findbugs-maven-plugin
+com.github.spotbugs
+spotbugs-maven-plugin
 
   ${basedir}/dev-support/findbugsExcludeFile.xml
   
diff --git a/hadoop-ozone/dev-support/checks/findbugs.sh b/hadoop-ozone/dev-support/checks/findbugs.sh
index 3108bdd..ccbf2e

[hadoop] branch trunk updated: HDDS-2179. ConfigFileGenerator fails with Java 10 or newer

2019-09-26 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0371e95  HDDS-2179. ConfigFileGenerator fails with Java 10 or newer
0371e95 is described below

commit 0371e953ac51d991f2bfed9ffd1724ff80733752
Author: Doroszlai, Attila 
AuthorDate: Wed Sep 25 21:43:33 2019 +0200

HDDS-2179. ConfigFileGenerator fails with Java 10 or newer

Signed-off-by: Anu Engineer 
---
 .../main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java   | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
index 64c20ac..471b679 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
@@ -33,6 +33,7 @@ import java.io.InputStream;
 import java.io.OutputStreamWriter;
 import java.io.Writer;
 import java.nio.charset.StandardCharsets;
+import java.nio.file.NoSuchFileException;
 import java.util.Set;
 
 /**
@@ -60,7 +61,7 @@ public class ConfigFileGenerator extends AbstractProcessor {
   .getResource(StandardLocation.CLASS_OUTPUT, "",
   OUTPUT_FILE_NAME).openInputStream()) {
 appender.load(input);
-  } catch (FileNotFoundException ex) {
+  } catch (FileNotFoundException | NoSuchFileException ex) {
 appender.init();
   }
 
@@ -105,7 +106,7 @@ public class ConfigFileGenerator extends AbstractProcessor {
 
 } catch (IOException e) {
   processingEnv.getMessager().printMessage(Kind.ERROR,
-  "Can't generate the config file from annotation: " + e.getMessage());
+  "Can't generate the config file from annotation: " + e);
 }
 return false;
   }
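
For context: java.io.FileNotFoundException and java.nio.file.NoSuchFileException
are sibling subclasses of IOException. Stream-based APIs raise the former,
while the NIO-backed Filer implementation in newer JDKs reports a missing
resource with the latter, which is why the multi-catch above is needed. A
self-contained sketch of the same load-or-initialize idiom (the file name
and fallback property are made-up examples, not the processor's actual
logic):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.NoSuchFileException;
    import java.nio.file.Paths;
    import java.util.Properties;

    public final class LoadOrInit {
      public static Properties load(String name) throws IOException {
        Properties props = new Properties();
        try (InputStream in = Files.newInputStream(Paths.get(name))) {
          props.load(in);                         // prior output exists: extend it
        } catch (FileNotFoundException | NoSuchFileException ex) {
          props.setProperty("generated", "true"); // first round: start fresh
        }
        return props;
      }
    }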


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-2193. Adding container related metrics in SCM.

2019-09-26 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b6ef8cc  HDDS-2193. Adding container related metrics in SCM.
b6ef8cc is described below

commit b6ef8cc1205d93a3c19d5f052b593758503d689c
Author: Bharat Viswanadham 
AuthorDate: Thu Sep 26 15:22:32 2019 -0700

    HDDS-2193. Adding container related metrics in SCM.

Signed-off-by: Anu Engineer 
---
 .../hdds/scm/container/SCMContainerManager.java|  31 +-
 .../metrics/SCMContainerManagerMetrics.java|  90 +
 .../hdds/scm/container/metrics/package-info.java   |  22 
 .../metrics/TestSCMContainerManagerMetrics.java| 112 +
 4 files changed, 252 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index 3c44c4e..8f82b57 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.metrics.SCMContainerManagerMetrics;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -72,6 +73,8 @@ public class SCMContainerManager implements ContainerManager {
   private final ContainerStateManager containerStateManager;
   private final int numContainerPerOwnerInPipeline;
 
+  private final SCMContainerManagerMetrics scmContainerManagerMetrics;
+
   /**
* Constructs a mapping class that creates mapping between container names
* and pipelines.
@@ -109,6 +112,8 @@ public class SCMContainerManager implements 
ContainerManager {
 ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT);
 
 loadExistingContainers();
+
+scmContainerManagerMetrics = SCMContainerManagerMetrics.create();
   }
 
   private void loadExistingContainers() throws IOException {
@@ -204,6 +209,7 @@ public class SCMContainerManager implements 
ContainerManager {
   int count) {
 lock.lock();
 try {
+  scmContainerManagerMetrics.incNumListContainersOps();
   final long startId = startContainerID == null ?
   0 : startContainerID.getId();
      final List<ContainerID> containersIds =
@@ -241,11 +247,17 @@ public class SCMContainerManager implements 
ContainerManager {
   public ContainerInfo allocateContainer(final ReplicationType type,
   final ReplicationFactor replicationFactor, final String owner)
   throws IOException {
-lock.lock();
 try {
-  final ContainerInfo containerInfo =
-  containerStateManager.allocateContainer(pipelineManager, type,
+  lock.lock();
+  ContainerInfo containerInfo = null;
+  try {
+containerInfo =
+containerStateManager.allocateContainer(pipelineManager, type,
   replicationFactor, owner);
+  } catch (IOException ex) {
+scmContainerManagerMetrics.incNumFailureCreateContainers();
+throw ex;
+  }
   // Add container to DB.
   try {
 addContainerToDB(containerInfo);
@@ -286,7 +298,9 @@ public class SCMContainerManager implements 
ContainerManager {
 LOG.warn("Unable to remove the container {} from container store," +
 " it's missing!", containerID);
   }
+  scmContainerManagerMetrics.incNumSuccessfulDeleteContainers();
 } catch (ContainerNotFoundException cnfe) {
+  scmContainerManagerMetrics.incNumFailureDeleteContainers();
   throw new SCMException(
   "Failed to delete container " + containerID + ", reason : " +
   "container doesn't exist.",
@@ -447,9 +461,16 @@ public class SCMContainerManager implements 
ContainerManager {
   containerInfo.getContainerID());
   containerStore.put(containerIDBytes,
   containerInfo.getProtobuf().toByteArray());
+  // Incrementing here, as allocateBlock to create a container calls
+  // getMatchingContainer() and finally calls this API to add newly
+  // created container to DB.
+  // Even allocateContainer calls this API to add newly allocated
+  // container to DB. So we need to increment metrics here.
+  scmContainerManagerMetrics.incNumSuccessfulCreateContainers();
 } catch (IOException 
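
The metrics class added here follows Hadoop's standard metrics2 source
pattern: annotated mutable counters registered once with the metrics
system. A hedged sketch of that pattern (field and source names below are
illustrative, not the actual SCMContainerManagerMetrics members):

    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.lib.MutableCounterLong;

    @Metrics(about = "Example container manager metrics", context = "dfs")
    public final class ExampleContainerMetrics {
      private static final String SOURCE = "ExampleContainerMetrics";

      // metrics2 discovers @Metric fields reflectively at registration.
      @Metric private MutableCounterLong numSuccessfulCreateContainers;
      @Metric private MutableCounterLong numListContainersOps;

      public static ExampleContainerMetrics create() {
        return DefaultMetricsSystem.instance().register(
            SOURCE, "Example container metrics", new ExampleContainerMetrics());
      }

      public void incNumSuccessfulCreateContainers() {
        numSuccessfulCreateContainers.incr();
      }

      public void incNumListContainersOps() {
        numListContainersOps.incr();
      }
    }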

[hadoop] branch trunk updated: HDDS-2174. Delete GDPR Encryption Key from metadata when a Key is deleted

2019-09-26 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c55ac6a  HDDS-2174. Delete GDPR Encryption Key from metadata when a 
Key is deleted
c55ac6a is described below

commit c55ac6a1c7d1dc65a0d2e735b315bbf6898f6ff1
Author: dchitlangia 
AuthorDate: Tue Sep 24 23:39:34 2019 -0400

HDDS-2174. Delete GDPR Encryption Key from metadata when a Key is deleted

Signed-off-by: Anu Engineer 
---
 .../main/java/org/apache/hadoop/ozone/OmUtils.java | 40 ++--
 .../client/rpc/TestOzoneRpcClientAbstract.java | 76 +-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 49 ++
 .../ozone/om/response/key/OMKeyDeleteResponse.java |  8 +--
 .../multipart/S3MultipartUploadAbortResponse.java  |  9 ++-
 .../S3MultipartUploadCommitPartResponse.java   | 34 +-
 .../ozone/om/request/TestOMRequestUtils.java   | 11 ++--
 7 files changed, 158 insertions(+), 69 deletions(-)

diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index b7a6c2f..1417d89 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -46,6 +46,9 @@ import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 
 import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
@@ -498,13 +501,36 @@ public final class OmUtils {
   }
 
   /**
-   * Returns the DB key name of a deleted key in OM metadata store. The
-   * deleted key name is the <key>_<timestamp>.
-   * @param key Original key name
-   * @param timestamp timestamp of deletion
-   * @return Deleted key name
+   * Prepares key info to be moved to deletedTable.
+   * 1. It strips GDPR metadata from key info
+   * 2. For given object key, if the repeatedOmKeyInfo instance is null, it
+   * implies that no entry for the object key exists in deletedTable so we
+   * create a new instance to include this key, else we update the existing
+   * repeatedOmKeyInfo instance.
+   * @param keyInfo args supplied by client
+   * @param repeatedOmKeyInfo key details from deletedTable
+   * @return {@link RepeatedOmKeyInfo}
+   * @throws IOException if I/O Errors when checking for key
*/
-  public static String getDeletedKeyName(String key, long timestamp) {
-return key + "_" + timestamp;
+  public static RepeatedOmKeyInfo prepareKeyForDelete(OmKeyInfo keyInfo,
+  RepeatedOmKeyInfo repeatedOmKeyInfo) throws IOException{
+// If this key is in a GDPR enforced bucket, then before moving
+// KeyInfo to deletedTable, remove the GDPR related metadata from
+// KeyInfo.
+if(Boolean.valueOf(keyInfo.getMetadata().get(OzoneConsts.GDPR_FLAG))) {
+  keyInfo.getMetadata().remove(OzoneConsts.GDPR_FLAG);
+  keyInfo.getMetadata().remove(OzoneConsts.GDPR_ALGORITHM);
+  keyInfo.getMetadata().remove(OzoneConsts.GDPR_SECRET);
+}
+
+if(repeatedOmKeyInfo == null) {
+  //The key doesn't exist in deletedTable, so create a new instance.
+  repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo);
+} else {
+  //The key exists in deletedTable, so update existing instance.
+  repeatedOmKeyInfo.addOmKeyInfo(keyInfo);
+}
+
+return repeatedOmKeyInfo;
   }
 }
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index d91f739..9189c2f 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -86,6 +86,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.s3.util.OzoneS3Util;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
 import org.apache.hadoop.ozone.security.acl.OzoneAclConfig;
@@ -2667,7 +2668,7 @@ public abstract class TestOzoneRp
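
A hedged sketch of how a caller is expected to use the new helper when
moving a key into the deletedTable (names are taken from the diff above;
the wrapper method itself is illustrative):

    import java.io.IOException;

    import org.apache.hadoop.ozone.OmUtils;
    import org.apache.hadoop.ozone.om.OMMetadataManager;
    import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
    import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;

    final class DeletedKeyMover {
      static void moveToDeletedTable(OMMetadataManager mm, String dbKey,
          OmKeyInfo keyInfo) throws IOException {
        // Merge with any earlier deletions of the same key name; the helper
        // also strips GDPR metadata before the key info is persisted.
        RepeatedOmKeyInfo existing = mm.getDeletedTable().get(dbKey);
        RepeatedOmKeyInfo updated = OmUtils.prepareKeyForDelete(keyInfo, existing);
        mm.getDeletedTable().put(dbKey, updated);
      }
    }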

[hadoop] branch trunk updated (18a8c24 -> 06998a1)

2019-09-26 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 18a8c24  YARN-9857. TestDelegationTokenRenewer throws NPE but tests 
pass. Contributed by Ahmed Hussein
 add 06998a1  HDDS-2180. Add Object ID and update ID on VolumeList Object. 
(#1526)

No new revisions were added by this update.

Summary of changes:
 .../common/src/main/proto/OzoneManagerProtocol.proto   |  2 ++
 .../om/request/s3/bucket/S3BucketCreateRequest.java|  3 ++-
 .../ozone/om/request/volume/OMVolumeCreateRequest.java |  2 +-
 .../ozone/om/request/volume/OMVolumeDeleteRequest.java |  3 ++-
 .../ozone/om/request/volume/OMVolumeRequest.java   | 18 ++
 .../om/request/volume/OMVolumeSetOwnerRequest.java |  6 --
 .../hadoop/ozone/om/request/TestOMRequestUtils.java|  8 ++--
 .../hadoop/ozone/om/response/TestOMResponseUtils.java  |  2 ++
 .../om/response/volume/TestOMVolumeCreateResponse.java |  1 +
 .../om/response/volume/TestOMVolumeDeleteResponse.java |  5 -
 .../response/volume/TestOMVolumeSetOwnerResponse.java  |  9 -
 11 files changed, 46 insertions(+), 13 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-2067. Create generic service facade with tracing/metrics/logging support

2019-09-25 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f647185  HDDS-2067. Create generic service facade with 
tracing/metrics/logging support
f647185 is described below

commit f647185905f6047fc9734b8aa37d6ef59b6082c2
Author: Márton Elek 
AuthorDate: Mon Sep 23 13:08:04 2019 +0200

HDDS-2067. Create generic service facade with tracing/metrics/logging 
support

Signed-off-by: Anu Engineer 
Co-Authored-By: Doroszlai, Attila 
<6454655+adorosz...@users.noreply.github.com>
---
 .../function/FunctionWithServiceException.java | 36 
 .../apache/hadoop/hdds/function/package-info.java  | 22 +
 .../server/OzoneProtocolMessageDispatcher.java | 88 
 ...lockLocationProtocolServerSideTranslatorPB.java | 56 -
 ...inerLocationProtocolServerSideTranslatorPB.java | 96 --
 .../hadoop/hdds/scm/protocol/package-info.java | 21 +
 .../hdds/scm/server/SCMBlockProtocolServer.java|  2 +-
 .../hdds/scm/server/SCMClientProtocolServer.java   |  3 +-
 .../scm/server/TestSCMBlockProtocolServer.java |  3 +-
 .../scm/ScmProtocolBlockLocationInsight.java   |  2 +-
 ...OzoneManagerProtocolServerSideTranslatorPB.java | 41 ++---
 11 files changed, 231 insertions(+), 139 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java
new file mode 100644
index 000..b9d7bce
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.function;
+
+import com.google.protobuf.ServiceException;
+
+/**
+ * Functional interface like java.util.function.Function but with
+ * checked exception.
+ */
+@FunctionalInterface
+public interface FunctionWithServiceException<T, R> {
+
+  /**
+   * Applies this function to the given argument.
+   *
+   * @param t the function argument
+   * @return the function result
+   */
+  R apply(T t) throws ServiceException;
+}
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java
new file mode 100644
index 000..915fe35
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional interfaces for ozone, similar to java.util.function.
+ */
+package org.apache.hadoop.hdds.function;
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneProtocolMessageDispatcher.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneProtocolMessageDispatcher.java
new file mode 100644
index 000..d67a759
--- /dev/null
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneProtocolMessageDispatcher.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreeme
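
The point of the facade is that every protobuf service handler can be run
through one wrapper that applies tracing, metrics, and logging uniformly.
A reduced sketch of that dispatch shape, built only on the
FunctionWithServiceException interface shown above (the type names and
stdout logging are illustrative; the real dispatcher uses a logger and
tracing spans instead):

    import com.google.protobuf.ServiceException;

    import org.apache.hadoop.hdds.function.FunctionWithServiceException;

    final class LoggingDispatcher {
      static <REQUEST, RESPONSE> RESPONSE dispatch(
          FunctionWithServiceException<REQUEST, RESPONSE> handler,
          REQUEST request, String name) throws ServiceException {
        // Cross-cutting concerns live here once, not in every handler.
        System.out.println("[" + name + "] request: " + request);
        RESPONSE response = handler.apply(request);
        System.out.println("[" + name + "] response: " + response);
        return response;
      }
    }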

[hadoop] branch trunk updated (afa1006 -> f16cf87)

2019-09-24 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from afa1006  HDFS-14843. Double Synchronization in 
BlockReportLeaseManager. Contributed by David Mollitor.
 add f16cf87  HDDS-2170. Add Object IDs and Update ID to Volume Object 
(#1510)

No new revisions were added by this update.

Summary of changes:
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |   2 +
 .../hadoop/ozone/om/helpers/OmVolumeArgs.java  | 100 ++---
 .../src/main/proto/OzoneManagerProtocol.proto  |  10 +--
 .../om/request/volume/OMVolumeCreateRequest.java   |   5 ++
 .../request/volume/TestOMVolumeCreateRequest.java  |  10 ++-
 5 files changed, 105 insertions(+), 22 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-2159. Fix Race condition in ProfileServlet#pid.

2019-09-23 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0a716bd  HDDS-2159. Fix Race condition in ProfileServlet#pid.
0a716bd is described below

commit 0a716bd3a5b38779bb07450acb3279e859bb7471
Author: Hanisha Koneru 
AuthorDate: Fri Sep 20 13:06:29 2019 -0700

HDDS-2159. Fix Race condition in ProfileServlet#pid.

Signed-off-by: Anu Engineer 
---
 .../java/org/apache/hadoop/hdds/server/ProfileServlet.java | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
index 016445c..7cea582 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
@@ -119,7 +119,7 @@ public class ProfileServlet extends HttpServlet {
   Pattern.compile(FILE_PREFIX + "[0-9]+-[0-9A-Za-z\\-_]+-[0-9]+\\.[a-z]+");
 
   private Lock profilerLock = new ReentrantLock();
-  private Integer pid;
+  private final Integer pid;
   private String asyncProfilerHome;
   private transient Process process;
 
@@ -208,11 +208,11 @@ public class ProfileServlet extends HttpServlet {
   return;
 }
 // if pid is explicitly specified, use it else default to current process
-pid = getInteger(req, "pid", pid);
+Integer processId = getInteger(req, "pid", pid);
 
 // if pid is not specified in query param and if current process pid
 // cannot be determined
-if (pid == null) {
+if (processId == null) {
   resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
   setResponseHeader(resp);
   resp.getWriter().write(
@@ -243,7 +243,7 @@ public class ProfileServlet extends HttpServlet {
 //Should be in sync with FILE_NAME_PATTERN
 File outputFile =
 OUTPUT_DIR.resolve(
-ProfileServlet.generateFileName(pid, output, event))
+ProfileServlet.generateFileName(processId, output, event))
 .toFile();
    List<String> cmd = new ArrayList<>();
 cmd.add(asyncProfilerHome + PROFILER_SCRIPT);
@@ -288,7 +288,7 @@ public class ProfileServlet extends HttpServlet {
 if (reverse) {
   cmd.add("--reverse");
 }
-cmd.add(pid.toString());
+cmd.add(processId.toString());
 process = runCmdAsync(cmd);
 
 // set response and set refresh header to output location
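
The underlying bug pattern: a servlet is a singleton, so a mutable instance
field written from a request handler is shared by all concurrent requests,
and one request could overwrite the pid another was about to profile. The
fix freezes the field and keeps per-request state in a local. A compact
sketch of the idiom (the class and method are illustrative, not the servlet
API):

    public final class RequestScopedPid {
      // Shared default, resolved once and never mutated afterwards.
      private final Integer defaultPid;

      public RequestScopedPid(Integer defaultPid) {
        this.defaultPid = defaultPid;
      }

      public String handle(Integer requestedPid) {
        // Per-request state lives in a local variable, so concurrent
        // callers can no longer clobber each other's pid.
        Integer processId = (requestedPid != null) ? requestedPid : defaultPid;
        return "profiling pid " + processId;
      }
    }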


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (3fd3d74 -> 6cbe5d3)

2019-09-23 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 3fd3d74  HDDS-2161. Create RepeatedKeyInfo structure to be saved in 
deletedTable
 add 6cbe5d3  HDDS-2160. Add acceptance test for ozonesecure-mr compose. 
Contributed by Xiaoyu Yao. (#1490)

No new revisions were added by this update.

Summary of changes:
 .../compose/{ozone-mr/hadoop32 => ozonesecure-mr}/test.sh| 12 
 .../src/main/smoketest/{kinit.robot => kinit-hadoop.robot}   |  2 +-
 hadoop-ozone/dist/src/main/smoketest/kinit.robot |  5 -
 hadoop-ozone/dist/src/main/smoketest/mapreduce.robot |  2 +-
 4 files changed, 14 insertions(+), 7 deletions(-)
 copy hadoop-ozone/dist/src/main/compose/{ozone-mr/hadoop32 => 
ozonesecure-mr}/test.sh (83%)
 copy hadoop-ozone/dist/src/main/smoketest/{kinit.robot => kinit-hadoop.robot} 
(94%)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-2161. Create RepeatedKeyInfo structure to be saved in deletedTable

2019-09-23 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3fd3d74  HDDS-2161. Create RepeatedKeyInfo structure to be saved in 
deletedTable
3fd3d74 is described below

commit 3fd3d746fc4033cb4ab2265c7b9c9aaf8b39c10c
Author: dchitlangia 
AuthorDate: Fri Sep 20 18:06:30 2019 -0400

HDDS-2161. Create RepeatedKeyInfo structure to be saved in deletedTable

Signed-off-by: Anu Engineer 
---
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |  6 +-
 .../ozone/om/codec/RepeatedOmKeyInfoCodec.java | 52 +
 .../hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java | 91 ++
 .../src/main/proto/OzoneManagerProtocol.proto  |  4 +
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 47 +--
 .../hadoop/ozone/om/OmMetadataManagerImpl.java | 90 +++--
 .../ozone/om/request/key/OMKeyDeleteRequest.java   |  3 +-
 .../multipart/S3MultipartUploadAbortRequest.java   |  5 +-
 .../S3MultipartUploadCommitPartRequest.java|  4 +-
 .../ozone/om/response/key/OMKeyDeleteResponse.java | 27 ---
 .../multipart/S3MultipartUploadAbortResponse.java  | 21 +++--
 .../S3MultipartUploadCommitPartResponse.java   | 34 +---
 .../ozone/om/request/TestOMRequestUtils.java   | 14 +++-
 .../om/response/key/TestOMKeyDeleteResponse.java   | 20 ++---
 .../s3/multipart/TestS3MultipartResponse.java  |  3 +-
 .../TestS3MultipartUploadAbortResponse.java| 19 ++---
 16 files changed, 324 insertions(+), 116 deletions(-)

diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index cc908fc..1d80f97 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -26,9 +26,11 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
-import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+.VolumeList;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -251,7 +253,7 @@ public interface OMMetadataManager {
*
* @return Deleted Table.
*/
-  Table<String, OmKeyInfo> getDeletedTable();
+  Table<String, RepeatedOmKeyInfo> getDeletedTable();
 
   /**
* Gets the OpenKeyTable.
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java
new file mode 100644
index 000..a0ef4a5
--- /dev/null
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.codec;
+
+import com.google.common.base.Preconditions;
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+.RepeatedKeyInfo;
+
+import java.io.IOException;
+
+/**
+ * Codec to encode RepeatedOmKeyInfo as byte array.
+ */
+public class RepeatedOmKeyInfoCodec implements Codec<RepeatedOmKeyInfo> {
+  @Override
+  public byte[] toPersistedFormat(RepeatedOmKeyInfo object)
+  throws IOException {
+Preconditions.checkNotNull(object,
+"Null object can't be converted to byte array.");
+r
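
The codec plugs into the RocksDB-backed table layer, whose Codec contract
is a pair of methods that must round-trip a value to and from bytes. A
trivial sketch of that contract, assuming the two-method Codec interface
of this era (a String codec stands in for the protobuf-based
RepeatedOmKeyInfoCodec):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    import org.apache.hadoop.hdds.utils.db.Codec;

    final class StringCodec implements Codec<String> {
      @Override
      public byte[] toPersistedFormat(String object) throws IOException {
        // Encode; RepeatedOmKeyInfoCodec serializes a RepeatedKeyInfo proto here.
        return object.getBytes(StandardCharsets.UTF_8);
      }

      @Override
      public String fromPersistedFormat(byte[] rawData) throws IOException {
        // Decode; must invert toPersistedFormat exactly.
        return new String(rawData, StandardCharsets.UTF_8);
      }
    }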

[hadoop] branch HDDS-1880-Decom updated (ee8f24c -> fd5e877)

2019-09-23 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch HDDS-1880-Decom
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from ee8f24c  HDDS-1982. Extend SCMNodeManager to support decommission and 
maintenance states. Contributed by Stephen O'Donnell.
 add 3f223be  HDFS-14844. Make buffer of 
BlockReaderRemote#newBlockReader#BufferedOutputStream configurable. Contributed 
by Lisheng Sun.
 add 5363730  HDDS-2157. checkstyle: print filenames relative to project 
root (#1485)
 add d7d6ec8  HDDS-2128. Make ozone sh command work with OM HA service ids 
(#1445)
 add aa93866  HDFS-14833. RBF: Router Update Doesn't Sync Quota. 
Contributed by Ayush Saxena.
 add efed445  HADOOP-16589. [pb-upgrade] Update docker image to make 3.7.1 
protoc as default (#1482). Contributed by Vinayakumar B.
 add dbdc612  HDDS-2163. Add 'Replication factor' to the output of list 
keys (#1493)
 add e02b102  HADOOP-16445. Allow separate custom signing algorithms for S3 
and DDB (#1332)
 add a94aa1f  HDDS-2150. Update dependency versions to avoid security 
vulnerabilities. (#1472)
 add 659c888  HDFS-14818. Check native pmdk lib by 'hadoop checknative' 
command. Contributed by Feilong He.
 add 4c0a7a9  Make upstream aware of 3.2.1 release.
 add 07c81e9  HADOOP-16558. [COMMON+HDFS] use protobuf-maven-plugin to 
generate protobuf classes (#1494). Contributed by Vinayakumar B.
 add aa664d7  HADOOP-16138. hadoop fs mkdir / of nonexistent abfs container 
raises NPE (#1302). Contributed by Gabor Bota.
 add 2b5fc95  HADOOP-16591 Fix S3A ITest*MRjob failures.
 add c30e495  HDFS-14853. NPE in 
DFSNetworkTopology#chooseRandomWithStorageType() when the excludedNode is not 
present. Contributed by Ranith Sardar.
 new fd5e877  Merge branch 'trunk' into HDDS-1880-Decom

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 dev-support/docker/Dockerfile  |  20 +-
 hadoop-common-project/hadoop-common/pom.xml|  66 +--
 .../hadoop-common/src/CMakeLists.txt   |   2 +-
 .../java/org/apache/hadoop/fs/shell/Mkdir.java |  11 +-
 .../org/apache/hadoop/io/nativeio/NativeIO.java|  28 +-
 .../apache/hadoop/util/NativeLibraryChecker.java   |  10 +
 .../src/org/apache/hadoop/io/nativeio/NativeIO.c   |  14 +-
 .../src/org/apache/hadoop/io/nativeio/pmdk_load.c  |  28 +-
 .../src/org/apache/hadoop/io/nativeio/pmdk_load.h  |   5 -
 .../hadoop-common/src/main/proto/FSProtos.proto|   2 +-
 .../src/main/proto/GenericRefreshProtocol.proto|   2 +-
 .../src/main/proto/GetUserMappingsProtocol.proto   |   2 +-
 .../src/main/proto/HAServiceProtocol.proto |   2 +-
 .../src/main/proto/IpcConnectionContext.proto  |   2 +-
 .../src/main/proto/ProtobufRpcEngine.proto |   2 +-
 .../src/main/proto/ProtocolInfo.proto  |   2 +-
 .../proto/RefreshAuthorizationPolicyProtocol.proto |   2 +-
 .../src/main/proto/RefreshCallQueueProtocol.proto  |   2 +-
 .../main/proto/RefreshUserMappingsProtocol.proto   |   2 +-
 .../hadoop-common/src/main/proto/RpcHeader.proto   |   2 +-
 .../hadoop-common/src/main/proto/Security.proto|   2 +-
 .../hadoop-common/src/main/proto/TraceAdmin.proto  |   2 +-
 .../src/main/proto/ZKFCProtocol.proto  |   2 +-
 .../site/markdown/release/3.2.1/CHANGELOG.3.2.1.md | 553 +
 .../markdown/release/3.2.1/RELEASENOTES.3.2.1.md   |  80 +++
 .../hadoop-common/src/test/proto/test.proto|   2 +-
 .../src/test/proto/test_rpc_service.proto  |   1 +
 hadoop-hdds/common/pom.xml |   2 +-
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml |  36 +-
 .../hadoop/hdfs/client/HdfsClientConfigKeys.java   |   3 +
 .../hdfs/client/impl/BlockReaderFactory.java   |   2 +-
 .../hadoop/hdfs/client/impl/BlockReaderRemote.java |  11 +-
 .../src/main/proto/ClientDatanodeProtocol.proto|   2 +-
 .../src/main/proto/ClientNamenodeProtocol.proto|   2 +-
 .../src/main/proto/ReconfigurationProtocol.proto   |   2 +-
 .../hadoop-hdfs-client/src/main/proto/acl.proto|   2 +-
 .../src/main/proto/datatransfer.proto  |   2 +-
 .../src/main/proto/encryption.proto|   2 +-
 .../src/main/proto/erasurecoding.proto |   2 +-
 .../hadoop-hdfs-client/src/main/proto/hdfs.proto   |   2 +-
 .../src/main/proto/inotify.proto   |   2 +-
 .../hadoop-hdfs-client/src/main/proto/xattr.proto  |   2 +-
 hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml|  32 +-
 .../federation/router/RouterAdminServer.java   |  98 ++--
 .../src/main/proto/FederationProtocol.proto|   2 +-
 .../src/main/proto/RouterProt

[hadoop] 01/01: Merge branch 'trunk' into HDDS-1880-Decom

2019-09-23 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch HDDS-1880-Decom
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit fd5e87750551e2fc3352c4d6acd7f43d5932cb32
Merge: ee8f24c c30e495
Author: Anu Engineer 
AuthorDate: Mon Sep 23 09:08:14 2019 -0700

Merge branch 'trunk' into HDDS-1880-Decom

 dev-support/docker/Dockerfile  |  20 +-
 hadoop-common-project/hadoop-common/pom.xml|  66 +-
 .../hadoop-common/src/CMakeLists.txt   |   2 +-
 .../java/org/apache/hadoop/fs/shell/Mkdir.java |  11 +-
 .../org/apache/hadoop/io/nativeio/NativeIO.java|  28 +-
 .../apache/hadoop/util/NativeLibraryChecker.java   |  10 +
 .../src/org/apache/hadoop/io/nativeio/NativeIO.c   |  14 +-
 .../src/org/apache/hadoop/io/nativeio/pmdk_load.c  |  28 +-
 .../src/org/apache/hadoop/io/nativeio/pmdk_load.h  |   5 -
 .../hadoop-common/src/main/proto/FSProtos.proto|   2 +-
 .../src/main/proto/GenericRefreshProtocol.proto|   2 +-
 .../src/main/proto/GetUserMappingsProtocol.proto   |   2 +-
 .../src/main/proto/HAServiceProtocol.proto |   2 +-
 .../src/main/proto/IpcConnectionContext.proto  |   2 +-
 .../src/main/proto/ProtobufRpcEngine.proto |   2 +-
 .../src/main/proto/ProtocolInfo.proto  |   2 +-
 .../proto/RefreshAuthorizationPolicyProtocol.proto |   2 +-
 .../src/main/proto/RefreshCallQueueProtocol.proto  |   2 +-
 .../main/proto/RefreshUserMappingsProtocol.proto   |   2 +-
 .../hadoop-common/src/main/proto/RpcHeader.proto   |   2 +-
 .../hadoop-common/src/main/proto/Security.proto|   2 +-
 .../hadoop-common/src/main/proto/TraceAdmin.proto  |   2 +-
 .../src/main/proto/ZKFCProtocol.proto  |   2 +-
 .../site/markdown/release/3.2.1/CHANGELOG.3.2.1.md | 553 +
 .../markdown/release/3.2.1/RELEASENOTES.3.2.1.md   |  80 +++
 .../hadoop-common/src/test/proto/test.proto|   2 +-
 .../src/test/proto/test_rpc_service.proto  |   1 +
 hadoop-hdds/common/pom.xml |   2 +-
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml |  36 +-
 .../hadoop/hdfs/client/HdfsClientConfigKeys.java   |   3 +
 .../hdfs/client/impl/BlockReaderFactory.java   |   2 +-
 .../hadoop/hdfs/client/impl/BlockReaderRemote.java |  11 +-
 .../src/main/proto/ClientDatanodeProtocol.proto|   2 +-
 .../src/main/proto/ClientNamenodeProtocol.proto|   2 +-
 .../src/main/proto/ReconfigurationProtocol.proto   |   2 +-
 .../hadoop-hdfs-client/src/main/proto/acl.proto|   2 +-
 .../src/main/proto/datatransfer.proto  |   2 +-
 .../src/main/proto/encryption.proto|   2 +-
 .../src/main/proto/erasurecoding.proto |   2 +-
 .../hadoop-hdfs-client/src/main/proto/hdfs.proto   |   2 +-
 .../src/main/proto/inotify.proto   |   2 +-
 .../hadoop-hdfs-client/src/main/proto/xattr.proto  |   2 +-
 hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml|  32 +-
 .../federation/router/RouterAdminServer.java   |  98 ++-
 .../src/main/proto/FederationProtocol.proto|   2 +-
 .../src/main/proto/RouterProtocol.proto|   2 +-
 .../server/federation/router/TestRouterQuota.java  |   9 +
 .../dev-support/jdiff/Apache_Hadoop_HDFS_3.2.1.xml | 674 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml|  48 +-
 .../apache/hadoop/hdfs/net/DFSNetworkTopology.java |   3 +
 .../datanode/erasurecode/StripedBlockReader.java   |   2 +-
 .../datanode/fsdataset/impl/FsDatasetCache.java|  15 +-
 .../src/main/proto/AliasMapProtocol.proto  |   2 +-
 .../src/main/proto/DatanodeLifelineProtocol.proto  |   2 +-
 .../src/main/proto/DatanodeProtocol.proto  |   2 +-
 .../hadoop-hdfs/src/main/proto/HAZKInfo.proto  |   2 +-
 .../hadoop-hdfs/src/main/proto/HdfsServer.proto|   2 +-
 .../src/main/proto/InterDatanodeProtocol.proto |   2 +-
 .../src/main/proto/InterQJournalProtocol.proto |   2 +-
 .../src/main/proto/JournalProtocol.proto   |   2 +-
 .../src/main/proto/NamenodeProtocol.proto  |   2 +-
 .../src/main/proto/QJournalProtocol.proto  |   2 +-
 .../hadoop-hdfs/src/main/proto/editlog.proto   |   2 +-
 .../hadoop-hdfs/src/main/proto/fsimage.proto   |   2 +-
 .../src/main/resources/hdfs-default.xml|  12 +
 .../hadoop/hdfs/net/TestDFSNetworkTopology.java|  16 +
 .../org/apache/hadoop/ozone/client/OzoneKey.java   |  17 +-
 .../hadoop/ozone/client/OzoneKeyDetails.java   |   4 +-
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |   5 +-
 hadoop-ozone/dev-support/checks/checkstyle.sh  |  12 +-
 .../hadoop/ozone/ozShell/TestOzoneShellHA.java | 343 +++
 .../hadoop/ozone/web/ozShell/OzoneAddress.java |  17 +-
 .../hadoop/ozone/client/OzoneBucketStub.java   |   3 +-
 hadoop-project-dist/pom.xml|   2 +-
 hadoop-project/pom.xml |  52 +-
 ..

[hadoop] branch trunk updated (5363730 -> d7d6ec8)

2019-09-20 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 5363730  HDDS-2157. checkstyle: print filenames relative to project 
root (#1485)
 add d7d6ec8  HDDS-2128. Make ozone sh command work with OM HA service ids 
(#1445)

No new revisions were added by this update.

Summary of changes:
 .../hadoop/ozone/ozShell/TestOzoneShellHA.java | 343 +
 .../hadoop/ozone/web/ozShell/OzoneAddress.java |  17 +-
 2 files changed, 359 insertions(+), 1 deletion(-)
 create mode 100644 
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShellHA.java


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (3f223be -> 5363730)

2019-09-20 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 3f223be  HDFS-14844. Make buffer of 
BlockReaderRemote#newBlockReader#BufferedOutputStream configurable. Contributed 
by Lisheng Sun.
 add 5363730  HDDS-2157. checkstyle: print filenames relative to project 
root (#1485)

No new revisions were added by this update.

Summary of changes:
 hadoop-ozone/dev-support/checks/checkstyle.sh | 12 +++-
 1 file changed, 11 insertions(+), 1 deletion(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch HDDS-1880-Decom updated: HDDS-1982. Extend SCMNodeManager to support decommission and maintenance states. Contributed by Stephen O'Donnell.

2019-09-20 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch HDDS-1880-Decom
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/HDDS-1880-Decom by this push:
 new ee8f24c  HDDS-1982. Extend SCMNodeManager to support decommission and 
maintenance states. Contributed by Stephen O'Donnell.
ee8f24c is described below

commit ee8f24ca4fcf364b12ae782beed790c7653bdafc
Author: Anu Engineer 
AuthorDate: Fri Sep 20 11:42:21 2019 -0700

HDDS-1982. Extend SCMNodeManager to support decommission and maintenance 
states.
Contributed by Stephen O'Donnell.
---
 hadoop-hdds/common/src/main/proto/hdds.proto   |  10 +-
 .../hdds/scm/block/SCMBlockDeletingService.java|   7 +-
 .../placement/algorithms/SCMCommonPolicy.java  |   4 +-
 .../apache/hadoop/hdds/scm/node/DatanodeInfo.java  |  50 +++-
 .../apache/hadoop/hdds/scm/node/NodeManager.java   |  31 ++-
 .../hadoop/hdds/scm/node/NodeStateManager.java | 291 +++--
 .../apache/hadoop/hdds/scm/node/NodeStatus.java|  93 +++
 .../hadoop/hdds/scm/node/SCMNodeManager.java   |  59 -
 .../hadoop/hdds/scm/node/SCMNodeMetrics.java   |   8 -
 .../hadoop/hdds/scm/node/states/NodeStateMap.java  | 241 -
 .../hdds/scm/pipeline/RatisPipelineProvider.java   |   4 +-
 .../hdds/scm/pipeline/SimplePipelineProvider.java  |   4 +-
 .../hdds/scm/server/SCMClientProtocolServer.java   |   4 +-
 .../hdds/scm/server/StorageContainerManager.java   |   3 +-
 .../hadoop/hdds/scm/block/TestBlockManager.java|   3 +-
 .../hadoop/hdds/scm/container/MockNodeManager.java |  37 ++-
 .../scm/container/TestContainerReportHandler.java  |  12 +-
 .../algorithms/TestContainerPlacementFactory.java  |   4 +-
 .../TestSCMContainerPlacementCapacity.java |   4 +-
 .../TestSCMContainerPlacementRackAware.java|   4 +-
 .../TestSCMContainerPlacementRandom.java   |   4 +-
 .../hdds/scm/node/TestContainerPlacement.java  |   2 +-
 .../hadoop/hdds/scm/node/TestNodeStateManager.java | 223 
 .../hadoop/hdds/scm/node/TestSCMNodeManager.java   |  83 +++---
 .../hdds/scm/node/states/TestNodeStateMap.java | 140 ++
 .../placement/TestContainerPlacement.java  |   6 +-
 .../testutils/ReplicationNodeManagerMock.java  |  36 ++-
 .../hadoop/hdds/scm/cli/TopologySubcommand.java|   4 -
 .../hadoop/ozone/TestStorageContainerManager.java  |   5 +-
 .../hadoop/ozone/scm/node/TestSCMNodeMetrics.java  |   4 -
 30 files changed, 1063 insertions(+), 317 deletions(-)

diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto 
b/hadoop-hdds/common/src/main/proto/hdds.proto
index d2bb355..294f2b7 100644
--- a/hadoop-hdds/common/src/main/proto/hdds.proto
+++ b/hadoop-hdds/common/src/main/proto/hdds.proto
@@ -100,8 +100,14 @@ enum NodeState {
 HEALTHY = 1;
 STALE = 2;
 DEAD = 3;
-DECOMMISSIONING = 4;
-DECOMMISSIONED = 5;
+}
+
+enum NodeOperationalState {
+IN_SERVICE = 1;
+DECOMMISSIONING = 2;
+DECOMMISSIONED = 3;
+ENTERING_MAINTENANCE = 4;
+IN_MAINTENANCE = 5;
 }
 
 enum QueryScope {
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
index ad77624..b5e5d16 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
@@ -23,9 +23,9 @@ import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.scm.node.NodeStatus;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
@@ -137,7 +137,10 @@ public class SCMBlockDeletingService extends 
BackgroundService {
   // to delete blocks.
   LOG.debug("Running DeletedBlockTransactionScanner");
   DatanodeDeletedBlockTransactions transactions = null;
-  List<DatanodeDetails> datanodes =
nodeManager.getNodes(NodeState.HEALTHY);
+  // TODO - DECOMM - should we be deleting blocks from decom nodes
+  //and what about entering maintenance.
+  List<DatanodeDetails> datanodes =
+  nodeManager.getNodes(NodeStatus.inServiceHealthy());
   Map transactionMap = null;
   if (datanodes != null) {
 transactions = new DatanodeDeletedBlockTransactions(containe
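
The core of the change is that a node's health (HEALTHY/STALE/DEAD) and its
operational state (IN_SERVICE, DECOMMISSIONING, ...) become two independent
axes, combined into a NodeStatus value. A hedged sketch of that shape,
mirroring the hdds.proto enums above (the Java class layout is illustrative,
not the actual NodeStatus source):

    enum NodeState { HEALTHY, STALE, DEAD }

    enum NodeOperationalState {
      IN_SERVICE, DECOMMISSIONING, DECOMMISSIONED,
      ENTERING_MAINTENANCE, IN_MAINTENANCE
    }

    final class NodeStatusSketch {
      private final NodeOperationalState operationalState;
      private final NodeState health;

      NodeStatusSketch(NodeOperationalState operationalState, NodeState health) {
        this.operationalState = operationalState;
        this.health = health;
      }

      // Convenience factory used by callers such as the block deleting
      // service above: healthy AND still taking normal traffic.
      static NodeStatusSketch inServiceHealthy() {
        return new NodeStatusSketch(NodeOperationalState.IN_SERVICE,
            NodeState.HEALTHY);
      }
    }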

[hadoop] branch HDDS-1880-Decom created (now b3173e1)

2019-09-20 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch HDDS-1880-Decom
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at b3173e1  HDDS-2001. Update Ratis version to 0.4.0.

No new revisions were added by this update.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-2001. Update Ratis version to 0.4.0.

2019-09-20 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b3173e1  HDDS-2001. Update Ratis version to 0.4.0.
b3173e1 is described below

commit b3173e1f580787dbbc7b307e6548ba9e9d19b376
Author: Nanda kumar 
AuthorDate: Fri Sep 20 16:30:06 2019 +0530

HDDS-2001. Update Ratis version to 0.4.0.

Signed-off-by: Anu Engineer 
---
 hadoop-hdds/pom.xml  | 2 +-
 hadoop-ozone/pom.xml | 2 +-
 pom.ozone.xml| 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index e234b4e..5c98e38 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -46,7 +46,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd";>
     <hdds.version>0.5.0-SNAPSHOT</hdds.version>
 
     <!-- Apache Ratis version -->
-    <ratis.version>0.4.0-78e95b9-SNAPSHOT</ratis.version>
+    <ratis.version>0.4.0</ratis.version>
 
     <bouncycastle.version>1.60</bouncycastle.version>
 
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 12998e5..b2d143e 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -28,7 +28,7 @@
   <properties>
     <hdds.version>0.5.0-SNAPSHOT</hdds.version>
     <ozone.version>0.5.0-SNAPSHOT</ozone.version>
-    <ratis.version>0.4.0-78e95b9-SNAPSHOT</ratis.version>
+    <ratis.version>0.4.0</ratis.version>
     <bouncycastle.version>1.60</bouncycastle.version>
     <ozone.release>Crater Lake</ozone.release>
     <declared.ozone.version>${ozone.version}</declared.ozone.version>
diff --git a/pom.ozone.xml b/pom.ozone.xml
index 0d60996..c550368 100644
--- a/pom.ozone.xml
+++ b/pom.ozone.xml
@@ -154,7 +154,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
     <ldap-api.version>1.0.0-M33</ldap-api.version>
 
     <!-- Apache Ratis version -->
-    <ratis.version>0.3.0-eca3531-SNAPSHOT</ratis.version>
+    <ratis.version>0.4.0</ratis.version>
     <jcache.version>1.0-alpha-1</jcache.version>
     <ehcache.version>3.3.1</ehcache.version>
     <hikari.version>2.4.12</hikari.version>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1949. Missing or error-prone test cleanup. Contributed by Doroszlai, Attila.

2019-09-20 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5553887  HDDS-1949. Missing or error-prone test cleanup. Contributed 
by Doroszlai, Attila.
5553887 is described below

commit 5553887d9592d8ef59e5a2871919ced195edf42c
Author: Anu Engineer 
AuthorDate: Fri Sep 20 09:55:57 2019 -0700

HDDS-1949. Missing or error-prone test cleanup.
Contributed by Doroszlai, Attila.
---
 .../org/apache/hadoop/ozone/MiniOzoneCluster.java  |   8 +-
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  | 101 ++---
 .../apache/hadoop/ozone/TestMiniOzoneCluster.java  |  16 +-
 .../hadoop/ozone/TestStorageContainerManager.java  | 240 +++--
 .../ozone/container/TestContainerReplication.java  |  25 ++-
 .../commandhandler/TestBlockDeletion.java  |   8 +
 .../commandhandler/TestCloseContainerHandler.java  |  33 ++-
 .../apache/hadoop/ozone/om/TestScmSafeMode.java|   2 +-
 .../hadoop/ozone/scm/TestSCMNodeManagerMXBean.java |   8 +
 9 files changed, 273 insertions(+), 168 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index 8620b0a..0aba968 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -240,7 +240,7 @@ public interface MiniOzoneCluster {
 protected static final int ACTIVE_OMS_NOT_SET = -1;
 
 protected final OzoneConfiguration conf;
-protected final String path;
+protected String path;
 
 protected String clusterId;
 protected String omServiceId;
@@ -269,9 +269,7 @@ public interface MiniOzoneCluster {
 
 protected Builder(OzoneConfiguration conf) {
   this.conf = conf;
-  this.clusterId = UUID.randomUUID().toString();
-  this.path = GenericTestUtils.getTempPath(
-  MiniOzoneClusterImpl.class.getSimpleName() + "-" + clusterId);
+  setClusterId(UUID.randomUUID().toString());
 }
 
 /**
@@ -283,6 +281,8 @@ public interface MiniOzoneCluster {
  */
 public Builder setClusterId(String id) {
   clusterId = id;
+  path = GenericTestUtils.getTempPath(
+  MiniOzoneClusterImpl.class.getSimpleName() + "-" + clusterId);
   return this;
 }
 
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index b0cbc6b..ac76482 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.ozone;
 
 import java.io.File;
 import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.Optional;
 import org.apache.commons.io.FileUtils;
@@ -317,6 +319,7 @@ public class MiniOzoneClusterImpl implements 
MiniOzoneCluster {
   stop();
   FileUtils.deleteDirectory(baseDir);
   ContainerCache.getInstance(conf).shutdownCache();
+  DefaultMetricsSystem.shutdown();
 } catch (IOException e) {
   LOG.error("Exception while shutting down the cluster.", e);
 }
@@ -325,26 +328,9 @@ public class MiniOzoneClusterImpl implements 
MiniOzoneCluster {
   @Override
   public void stop() {
 LOG.info("Stopping the Mini Ozone Cluster");
-if (ozoneManager != null) {
-  LOG.info("Stopping the OzoneManager");
-  ozoneManager.stop();
-  ozoneManager.join();
-}
-
-if (!hddsDatanodes.isEmpty()) {
-  LOG.info("Shutting the HddsDatanodes");
-  hddsDatanodes.parallelStream()
-  .forEach(dn -> {
-dn.stop();
-dn.join();
-  });
-}
-
-if (scm != null) {
-  LOG.info("Stopping the StorageContainerManager");
-  scm.stop();
-  scm.join();
-}
+stopOM(ozoneManager);
+stopDatanodes(hddsDatanodes);
+stopSCM(scm);
   }
 
   /**
@@ -385,6 +371,37 @@ public class MiniOzoneClusterImpl implements 
MiniOzoneCluster {
 this.caClient = client;
   }
 
+  private static void stopDatanodes(
+      Collection<HddsDatanodeService> hddsDatanodes) {
+if (!hddsDatanodes.isEmpty()) {
+  LOG.info("Stopping the HddsDatanodes");
+  hddsDatanodes.parallelStream()
+  .forEach(MiniOzoneClusterImpl::stopDatanode);
+}
+  }
+
+  private static void stopDatanode(HddsDatanodeService dn) {
+if (dn != null) {
+  dn.stop();
+  dn.join();
+}
+  }
+
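
Besides the null-safe stop helpers, the builder change above fixes a subtle
staleness bug: the temp-directory path was derived from the cluster id once
in the constructor, so a later setClusterId() left the two out of sync.
Deriving the path inside the setter keeps them consistent. A minimal sketch
(the path template is illustrative):

    import java.util.UUID;

    final class ClusterBuilderSketch {
      private String clusterId;
      private String path;

      ClusterBuilderSketch() {
        // Route the default through the setter so 'path' can never be
        // computed from a stale clusterId.
        setClusterId(UUID.randomUUID().toString());
      }

      ClusterBuilderSketch setClusterId(String id) {
        clusterId = id;
        path = "/tmp/MiniOzoneClusterImpl-" + clusterId;
        return this;
      }
    }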

[hadoop] branch trunk updated: HDDS-2020. Remove mTLS from Ozone GRPC. Contributed by Xiaoyu Yao.

2019-09-19 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d072d33  HDDS-2020. Remove mTLS from Ozone GRPC. Contributed by Xiaoyu 
Yao.
d072d33 is described below

commit d072d3304ce3fe33e22bb703839b41ab5107ad42
Author: Xiaoyu Yao 
AuthorDate: Wed Aug 28 08:56:33 2019 -0700

HDDS-2020. Remove mTLS from Ozone GRPC. Contributed by Xiaoyu Yao.

Signed-off-by: Anu Engineer 
---
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java  |  32 +++--
 .../hadoop/hdds/scm/XceiverClientManager.java  |  28 -
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java |   9 +-
 .../org/apache/hadoop/hdds/HddsConfigKeys.java |  26 +---
 .../org/apache/hadoop/hdds/ratis/RatisHelper.java  |  60 ++---
 .../hadoop/hdds/security/x509/SecurityConfig.java  | 137 -
 .../x509/certificate/client/CertificateClient.java |   6 +
 .../client/DefaultCertificateClient.java   |  31 -
 .../common/src/main/resources/ozone-default.xml|  27 
 .../container/common/helpers/ContainerMetrics.java |   9 +-
 .../common/transport/server/XceiverServerGrpc.java |  16 +--
 .../transport/server/ratis/XceiverServerRatis.java |   4 +-
 .../ozone/container/ozoneimpl/OzoneContainer.java  |   1 +
 .../apache/hadoop/hdds/server/ProfileServlet.java  |   1 -
 .../hadoop/hdds/server/TestProfileServlet.java |  11 +-
 .../hadoop/hdds/scm/pipeline/PipelineFactory.java  |   5 +-
 .../hadoop/hdds/scm/pipeline/PipelineManager.java  |   2 +
 .../hdds/scm/pipeline/PipelineReportHandler.java   |   3 +-
 .../hdds/scm/pipeline/RatisPipelineProvider.java   |   9 +-
 .../hdds/scm/pipeline/RatisPipelineUtils.java  |  15 +--
 .../hdds/scm/pipeline/SCMPipelineManager.java  |  15 ++-
 .../hdds/scm/server/StorageContainerManager.java   |  13 +-
 .../container/TestCloseContainerEventHandler.java  |   2 +-
 .../scm/container/TestSCMContainerManager.java |   2 +-
 .../hdds/scm/node/TestContainerPlacement.java  |  10 +-
 .../scm/pipeline/MockRatisPipelineProvider.java|   2 +-
 .../safemode/TestHealthyPipelineSafeModeRule.java  |   6 +-
 .../TestOneReplicaPipelineSafeModeRule.java|   2 +-
 .../hdds/scm/safemode/TestSCMSafeModeManager.java  |  10 +-
 .../ozone/client/OzoneMultipartUploadList.java |   1 -
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  54 ++--
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |   1 -
 .../ozone/om/helpers/OmMultipartKeyInfo.java   |   3 -
 .../ozone/om/helpers/OmMultipartUploadList.java|   3 -
 .../om/helpers/OmMultipartUploadListParts.java |   1 -
 ...MultipartUploadList.java => ServiceInfoEx.java} |  30 ++---
 .../ozone/om/protocol/OzoneManagerProtocol.java|   9 +-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |  21 +++-
 .../src/main/proto/OzoneManagerProtocol.proto  |   3 +
 .../hdds/scm/pipeline/TestSCMPipelineManager.java  |  15 ++-
 .../hadoop/ozone/TestStorageContainerManager.java  |  23 ++--
 .../ozone/client/CertificateClientTestImpl.java|   7 +-
 .../rpc/TestContainerReplicationEndToEnd.java  |  10 +-
 .../ozoneimpl/TestOzoneContainerWithTLS.java   | 104 +---
 .../hadoop/ozone/scm/TestXceiverClientManager.java |   6 +-
 .../java/org/apache/hadoop/ozone/om/OMMetrics.java |   1 -
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  11 ++
 .../protocolPB/OzoneManagerRequestHandler.java |   6 +-
 48 files changed, 375 insertions(+), 428 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index d8daaa7..b31da05 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -52,8 +52,8 @@ import 
org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
 import java.io.IOException;
+import java.security.cert.X509Certificate;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -80,6 +80,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   private boolean closed = false;
   private SecurityConfig secConfig;
   private final boolean topologyAwareRead;
+  private X509Certificate caCert;
 
   /**
* Constructs a client that can communicate with the Container framework on
@@ -87,8 +88,10 @@ public class XceiverClientGrpc extends XceiverClientSpi {
*
* @param pipeline - Pipeline that defines the machines.
* @param config   -- Ozone Config
+   * @param caCert   - SCM ca certificate.
*/
-  public XceiverClientGrpc(Pipeline pipeline, Configuration config) {
+  public XceiverClientG
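
The direction of the change: instead of mutual TLS, where every gRPC client
carries its own key and certificate, clients now hold only the SCM CA
certificate and use it to verify servers (one-way TLS). A much-simplified
sketch of what server verification reduces to under that model (real TLS
additionally checks validity periods, the full chain, and endpoint
identity):

    import java.security.cert.X509Certificate;

    final class OneWayTlsClientSketch {
      private final X509Certificate caCert;   // SCM CA certificate only

      OneWayTlsClientSketch(X509Certificate caCert) {
        this.caCert = caCert;
      }

      boolean trusts(X509Certificate serverCert) {
        try {
          // Server authentication boils down to: signed by our CA?
          serverCert.verify(caCert.getPublicKey());
          return true;
        } catch (Exception e) {
          return false;
        }
      }
    }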

[hadoop] branch trunk updated: HDDS-2156. Fix alignment issues in HDDS doc pages

2019-09-19 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9be448b  HDDS-2156. Fix alignment issues in HDDS doc pages
9be448b is described below

commit 9be448b3368088967064305e78ec17ffaaeaedb2
Author: Vivek Ratnavel Subramanian 
AuthorDate: Thu Sep 19 16:41:00 2019 -0700

HDDS-2156. Fix alignment issues in HDDS doc pages

Signed-off-by: Anu Engineer 
---
 hadoop-hdds/docs/content/security/SecurityAcls.md  |  3 +-
 hadoop-hdds/docs/content/security/_index.md|  2 +-
 .../themes/ozonedoc/layouts/_default/section.html  | 69 +-
 .../themes/ozonedoc/layouts/_default/single.html   |  2 +
 .../docs/themes/ozonedoc/static/css/ozonedoc.css   |  3 +
 5 files changed, 48 insertions(+), 31 deletions(-)

diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.md 
b/hadoop-hdds/docs/content/security/SecurityAcls.md
index b85dcca..31bbb0a 100644
--- a/hadoop-hdds/docs/content/security/SecurityAcls.md
+++ b/hadoop-hdds/docs/content/security/SecurityAcls.md
@@ -2,7 +2,8 @@
 title: "Ozone ACLs"
 date: "2019-April-03"
 weight: 6
-summary: Native ACL support provides ACL functionality without Ranger 
integration.
+summary: Native Ozone Authorizer provides Access Control List (ACL) support 
for Ozone without Ranger integration.
+icon: transfer
 ---
 
 
 {{}}
-  Ozone is an enterprise class, secure storage system. There many
+  Ozone is an enterprise class, secure storage system. There are many
   optional security features in Ozone. Following pages discuss how
   you can leverage the security features of Ozone.
 {{}}
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/section.html 
b/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/section.html
index 4150d07..5c01241 100644
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/section.html
+++ b/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/section.html
@@ -18,42 +18,53 @@
 
 
 
-  {{ partial "navbar.html" . }}
+{{ partial "navbar.html" . }}
 
-  
+
 
-  {{ partial "sidebar.html" . }}
-  
-{{ .Title }}
-
-  {{ .Content }}
-{{.Params.card}}
-  {{ if not (eq .Params.cards "false")}}
-  {{ range .Pages }}
-
-  
-
-  
-
-  {{ with .Params.Icon}}
-
-{{end}}
-  {{ .LinkTitle }}
-
-{{.Summary}}
-{{.LinkTitle}}
-  
+{{ partial "sidebar.html" . }}
+
+
+{{ .Title }}
 
-  
+
+{{ .Content }}
+{{.Params.card}}
+{{ if not (eq .Params.cards "false")}}
+{{ range $page_index, $page_val := .Pages }}
+
+{{ $page_count := len .Pages }}
+{{if (eq (mod $page_index 2) 0)}}
+
+{{end}}
+
+
+
+
+{{ with .Params.Icon}}
+
+{{end}}
+{{ .LinkTitle }}
+
+{{.Summary}}
+{{.LinkTitle}}
+
+
+
 
-  {{ end }}
-  {{end}}
+{{if (or (eq (mod $page_index 2) 1) (eq $page_index (sub 
$page_count 1)))}}
+
+{{end}}
+{{ end }}
+{{end}}
+
 
-  
 
-  
+
 
-  {{ partial "footer.html" . }}
+{{ partial "footer.html" . }}
 
 
 
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/single.html 
b/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/single.html
index 31125ba..3679ddb 100644
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/single.html
+++ b/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/single.html
@@ -36,7 +36,9 @@
 
   
 
+  
 {{.Title}}
+  
 
   {{ .Content }}
 
diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/css/ozonedoc.css 
b/hadoop-hdds/docs/themes/ozonedoc/static/css/ozonedoc.css
index e004da0..6f812c8 100644
--- a/hadoop-hdds/docs/themes/ozonedoc/static/css/ozonedoc.css
+++ b/hadoop-hdds/docs/themes/ozonedoc/static/css/ozonedoc.css
@@ -160,6 +160,9 @@ h4 {
   padding: 30px;
 }
 
+h1 {
+  margin-bottom: 20px;
+}
 
 .card {
   padding: 20px;


-

[hadoop] branch trunk updated: HDDS-2101. Ozone filesystem provider doesn't exist (#1473)

2019-09-19 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b7ae8a9  HDDS-2101. Ozone filesystem provider doesn't exist (#1473)
b7ae8a9 is described below

commit b7ae8a96cde5d78c7c73653e09b6e4b130b4d74b
Author: Vivek Ratnavel Subramanian 
AuthorDate: Thu Sep 19 16:28:29 2019 -0700

HDDS-2101. Ozone filesystem provider doesn't exist (#1473)
---
 .../src/main/compose/ozone-mr/hadoop27/docker-config |  1 -
 .../src/main/compose/ozone-mr/hadoop31/docker-config |  1 -
 .../src/main/compose/ozone-mr/hadoop32/docker-config |  1 -
 .../META-INF/services/org.apache.hadoop.fs.FileSystem| 16 
 .../META-INF/services/org.apache.hadoop.fs.FileSystem| 16 
 5 files changed, 32 insertions(+), 3 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config
index 9e9cc04..fccdace 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config
@@ -14,6 +14,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.BasicOzoneFileSystem
 
CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.BasicOzFs
 
MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config
index f826c75..d7ead21 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config
@@ -14,6 +14,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
 CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
 
MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config
index f826c75..d7ead21 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config
@@ -14,6 +14,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
 CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
 
MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
diff --git 
a/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
 
b/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
new file mode 100644
index 000..0368002
--- /dev/null
+++ 
b/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.hadoop.fs.ozone.OzoneFileSystem
diff --git 
a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
 
b/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
new file mode 100644
index 000..39ca348
--- /dev/null
+++ 
b/hadoop-ozone/ozonefs-lib-lega
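
The second registration file is truncated above, but both files serve the same purpose: Hadoop discovers FileSystem implementations through the JDK ServiceLoader mechanism, where each META-INF/services/org.apache.hadoop.fs.FileSystem file on the classpath lists provider classes one per line; that is why the fs.o3fs.impl entries in docker-config become redundant. Here is a self-contained sketch of that mechanism, with hypothetical types standing in for the Hadoop ones.

```java
import java.util.ServiceLoader;

// Hypothetical SPI; in Hadoop the interface is org.apache.hadoop.fs.FileSystem.
interface StorageScheme {
  String scheme();
}

// A provider becomes discoverable once a file named
// META-INF/services/StorageScheme (the interface's fully qualified name)
// lists "OzoneLikeScheme" on a line of its own, exactly like the two
// files added by this commit.
class OzoneLikeScheme implements StorageScheme {
  @Override
  public String scheme() {
    return "o3fs";
  }
}

public class ServiceLoaderDemo {
  public static void main(String[] args) {
    // ServiceLoader merges every registration file on the classpath,
    // which is why shipping the provider jar alone is enough: no
    // explicit core-site.xml mapping is required any more.
    for (StorageScheme scheme : ServiceLoader.load(StorageScheme.class)) {
      System.out.println("discovered scheme: " + scheme.scheme());
    }
  }
}
```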

[hadoop] branch trunk updated: HDDS-2127. Detailed Tools doc not reachable

2019-09-19 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f260b5a  HDDS-2127. Detailed Tools doc not reachable
f260b5a is described below

commit f260b5aa5b26d85504e95f877b53300fb0cd70af
Author: Márton Elek 
AuthorDate: Thu Sep 19 14:42:33 2019 +0200

HDDS-2127. Detailed Tools doc not reachable

Signed-off-by: Anu Engineer 
---
 hadoop-hdds/docs/content/recipe/_index.md  |  2 +-
 hadoop-hdds/docs/content/tools/AuditParser.md  |  2 +-
 hadoop-hdds/docs/content/tools/Freon.md| 62 --
 hadoop-hdds/docs/content/tools/Genconf.md  |  2 +-
 hadoop-hdds/docs/content/tools/SCMCLI.md   |  2 +-
 hadoop-hdds/docs/content/tools/TestTools.md|  2 +-
 hadoop-hdds/docs/content/tools/Tools.md| 19 ---
 .../content/{beyond/Tools.md => tools/_index.md}   | 40 +-
 8 files changed, 33 insertions(+), 98 deletions(-)

diff --git a/hadoop-hdds/docs/content/recipe/_index.md 
b/hadoop-hdds/docs/content/recipe/_index.md
index beaab69..47053ab 100644
--- a/hadoop-hdds/docs/content/recipe/_index.md
+++ b/hadoop-hdds/docs/content/recipe/_index.md
@@ -2,7 +2,7 @@
 title: Recipes
 date: "2017-10-10"
 menu: main
-weight: 8
+weight: 9
 
 ---
 
-
-Overview
-
-
-Freon is a load-generator for Ozone. This tool is used for testing the 
functionality of ozone.
-
-### Random keys
-
-In randomkeys mode, the data written into ozone cluster is randomly generated.
-Each key will be of size 10 KB.
-
-The number of volumes/buckets/keys can be configured. The replication type and
-factor (eg. replicate with ratis to 3 nodes) Also can be configured.
-
-For more information use
-
-`bin/ozone freon --help`
-
-### Example
-
-{{< highlight bash >}}
-ozone freon randomkeys --numOfVolumes=10 --numOfBuckets 10 --numOfKeys 10  
--replicationType=RATIS --factor=THREE
-{{< /highlight >}}
-
-{{< highlight bash >}}
-***
-Status: Success
-Git Base Revision: 48aae081e5afacbb3240657556b26c29e61830c3
-Number of Volumes created: 10
-Number of Buckets created: 100
-Number of Keys added: 1000
-Ratis replication factor: THREE
-Ratis replication type: RATIS
-Average Time spent in volume creation: 00:00:00,035
-Average Time spent in bucket creation: 00:00:00,319
-Average Time spent in key creation: 00:00:03,659
-Average Time spent in key write: 00:00:10,894
-Total bytes written: 1024
-Total Execution time: 00:00:16,898
-***
-{{< /highlight >}}
diff --git a/hadoop-hdds/docs/content/tools/Genconf.md 
b/hadoop-hdds/docs/content/tools/Genconf.md
index 146dfdc..35d5e3d 100644
--- a/hadoop-hdds/docs/content/tools/Genconf.md
+++ b/hadoop-hdds/docs/content/tools/Genconf.md
@@ -1,7 +1,7 @@
 ---
 title: "Generate Configurations"
 date: 2018-12-18
-
+summary: Tool to generate default configuration
 ---
 
\ No newline at end of file
diff --git a/hadoop-hdds/docs/content/beyond/Tools.md 
b/hadoop-hdds/docs/content/tools/_index.md
similarity index 85%
rename from hadoop-hdds/docs/content/beyond/Tools.md
rename to hadoop-hdds/docs/content/tools/_index.md
index 7316089..d7c9270 100644
--- a/hadoop-hdds/docs/content/beyond/Tools.md
+++ b/hadoop-hdds/docs/content/tools/_index.md
@@ -2,8 +2,11 @@
 title: "Tools"
 date: "2017-10-10"
 summary: Ozone supports a set of tools that are handy for developers. Here is a 
quick list of command line tools.
-weight: 3
+menu:
+   main:
+  weight: 8
 ---
+
 

[hadoop] branch trunk updated: HDDS-2110. Arbitrary file can be downloaded with the help of ProfilerServlet

2019-09-19 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f6d884c  HDDS-2110. Arbitrary file can be downloaded with the help of 
ProfilerServlet
f6d884c is described below

commit f6d884cd118fdb6987eb3c369fc9a4c9317acf68
Author: Márton Elek 
AuthorDate: Sat Sep 14 06:18:33 2019 +0200

HDDS-2110. Arbitrary file can be downloaded with the help of ProfilerServlet

Signed-off-by: Anu Engineer 
---
 .../apache/hadoop/hdds/server/ProfileServlet.java  | 60 -
 .../hadoop/hdds/server/TestProfileServlet.java | 63 ++
 2 files changed, 109 insertions(+), 14 deletions(-)

diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
index e09e9b5..42944e1 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
@@ -32,7 +32,9 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.regex.Pattern;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import org.apache.commons.io.IOUtils;
 import org.slf4j.Logger;
@@ -111,6 +113,10 @@ public class ProfileServlet extends HttpServlet {
   private static final AtomicInteger ID_GEN = new AtomicInteger(0);
   static final Path OUTPUT_DIR =
   Paths.get(System.getProperty("java.io.tmpdir"), "prof-output");
+  public static final String FILE_PREFIX = "async-prof-pid-";
+
+  public static final Pattern FILE_NAME_PATTERN =
+  Pattern.compile(FILE_PREFIX + "[0-9]+-[0-9A-Za-z\\-_]+-[0-9]+\\.[a-z]+");
 
   private Lock profilerLock = new ReentrantLock();
   private Integer pid;
@@ -165,6 +171,26 @@ public class ProfileServlet extends HttpServlet {
 }
   }
 
+  @VisibleForTesting
+  protected static String generateFileName(Integer pid, Output output,
+  Event event) {
+return FILE_PREFIX + pid + "-" +
+event.name().toLowerCase() + "-" + ID_GEN.incrementAndGet()
++ "." +
+output.name().toLowerCase();
+  }
+
+  @VisibleForTesting
+  protected static String validateFileName(String filename) {
+if (!FILE_NAME_PATTERN.matcher(filename).matches()) {
+  throw new IllegalArgumentException(
+  "Invalid file name parameter " + filename + " doesn't match pattern "
+  + FILE_NAME_PATTERN);
+
+}
+return filename;
+  }
+
   @Override
   protected void doGet(final HttpServletRequest req,
   final HttpServletResponse resp) throws IOException {
@@ -195,7 +221,8 @@ public class ProfileServlet extends HttpServlet {
   return;
 }
 
-final int duration = getInteger(req, "duration", DEFAULT_DURATION_SECONDS);
+final int duration =
+getInteger(req, "duration", DEFAULT_DURATION_SECONDS);
 final Output output = getOutput(req);
 final Event event = getEvent(req);
 final Long interval = getLong(req, "interval");
@@ -213,11 +240,11 @@ public class ProfileServlet extends HttpServlet {
 int lockTimeoutSecs = 3;
 if (profilerLock.tryLock(lockTimeoutSecs, TimeUnit.SECONDS)) {
   try {
+//Should be in sync with FILE_NAME_PATTERN
 File outputFile =
-OUTPUT_DIR.resolve("async-prof-pid-" + pid + "-" +
-event.name().toLowerCase() + "-" + ID_GEN.incrementAndGet()
-+ "." +
-output.name().toLowerCase()).toFile();
+OUTPUT_DIR.resolve(
+ProfileServlet.generateFileName(pid, output, event))
+.toFile();
 List cmd = new ArrayList<>();
 cmd.add(asyncProfilerHome + PROFILER_SCRIPT);
 cmd.add("-e");
@@ -270,7 +297,8 @@ public class ProfileServlet extends HttpServlet {
 String relativeUrl = "/prof?file=" + outputFile.getName();
 resp.getWriter().write(
 "Started [" + event.getInternalName()
-+ "] profiling. This page will automatically redirect to " 
+
++ "] profiling. This page will automatically redirect to "
++
 relativeUrl + " after " + duration
 + " seconds.\n\ncommand:\n" + Joiner.on(" ").join(cmd));
   

[hadoop] branch trunk updated: HDFS-14846: libhdfs tests are failing on trunk due to jni usage bugs

2019-09-17 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3cf6e42  HDFS-14846: libhdfs tests are failing on trunk due to jni 
usage bugs
3cf6e42 is described below

commit 3cf6e4272f192c69a161307ad9d35142c5a845c5
Author: Sahil Takiar 
AuthorDate: Thu Sep 12 15:51:05 2019 -0700

HDFS-14846: libhdfs tests are failing on trunk due to jni usage bugs

Signed-off-by: Anu Engineer 
---
 .../native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c  | 3 ---
 .../src/main/native/libhdfs-tests/native_mini_dfs.c| 2 +-
 .../hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c   | 1 -
 3 files changed, 1 insertion(+), 5 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c
index 402ffd5..b463679 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c
@@ -199,8 +199,5 @@ done:
   if (ginfo) {
 hadoop_group_info_free(ginfo);
   }
-  if (jgroupname) {
-(*env)->DeleteLocalRef(env, jgroupname);
-  }
   return jgroups;
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
index 3af56f1..a69c6ef 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
@@ -184,8 +184,8 @@ struct NativeMiniDfsCluster* nmdCreate(struct 
NativeMiniDfsConf *conf)
   "Builder::numDataNodes");
 goto error;
 }
+(*env)->DeleteLocalRef(env, val.l);
 }
-(*env)->DeleteLocalRef(env, val.l);
 jthr = findClassAndInvokeMethod(env, &val, INSTANCE, bld, 
MINIDFS_CLUSTER_BUILDER,
 "build", "()L" MINIDFS_CLUSTER ";");
 if (jthr) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
index b8442b4..e6b2010 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
@@ -3480,7 +3480,6 @@ done:
 destroyLocalReference(env, jUserName);
 destroyLocalReference(env, jGroupName);
 destroyLocalReference(env, jPermission);
-destroyLocalReference(env, jPath);
 return jthr;
 }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-2111. XSS fragments can be injected to the S3g landing page

2019-09-16 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2358e53  HDDS-2111. XSS fragments can be injected to the S3g landing 
page
2358e53 is described below

commit 2358e53e9c9f8489b24648b1017eb856d4bd42b0
Author: Márton Elek 
AuthorDate: Sat Sep 14 05:33:05 2019 +0200

HDDS-2111. XSS fragments can be injected to the S3g landing page

Signed-off-by: Anu Engineer 
---
 .../src/main/resources/webapps/static/index.html   |  8 ++--
 .../src/main/resources/webapps/static/s3g.js   | 23 ++
 2 files changed, 29 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html 
b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html
index 68939ef..b20bf35 100644
--- a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html
+++ b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html
@@ -21,6 +21,7 @@
 
 
 
+
 
 
 
@@ -68,12 +69,15 @@
 
 For example with aws-cli:
 
-aws s3api --endpoint 
document.write(window.location.href.replace("static/", "")) 
create-bucket --bucket=wordcount
+aws s3api --endpoint  create-bucket 
--bucket=wordcount
 
 For more information, please check the documentation.
 
 
 
-
+
+
+
+
 
 
diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js 
b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js
new file mode 100644
index 000..8b1e977
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+window.onload = function () {
+var safeurl = window.location.protocol + "//" + window.location.host + 
window.location.pathname;
+safeurl = safeurl.replace("static/", "");
+document.getElementById('s3gurl').innerHTML = safeurl;
+};


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-2030. Generate simplified reports by the dev-support/checks/*.sh scripts

2019-09-16 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c8d61ff  HDDS-2030. Generate simplified reports by the 
dev-support/checks/*.sh scripts
c8d61ff is described below

commit c8d61ffef66ebc1d1ce2324787d0f1164d61e53f
Author: Márton Elek 
AuthorDate: Sat Aug 24 23:57:29 2019 +0200

HDDS-2030. Generate simplified reports by the dev-support/checks/*.sh scripts

Signed-off-by: Anu Engineer 
Co-Authored-By: Doroszlai, Attila 
<6454655+adorosz...@users.noreply.github.com>
---
 hadoop-ozone/dev-support/checks/README.md  | 27 +
 .../dev-support/checks/_mvn_unit_report.sh | 66 ++
 hadoop-ozone/dev-support/checks/acceptance.sh  | 17 +-
 hadoop-ozone/dev-support/checks/author.sh  | 16 --
 hadoop-ozone/dev-support/checks/checkstyle.sh  | 16 --
 hadoop-ozone/dev-support/checks/findbugs.sh| 20 +++
 hadoop-ozone/dev-support/checks/integration.sh | 12 ++--
 hadoop-ozone/dev-support/checks/rat.sh | 11 +++-
 hadoop-ozone/dev-support/checks/shellcheck.sh  | 12 ++--
 hadoop-ozone/dev-support/checks/unit.sh| 17 --
 10 files changed, 174 insertions(+), 40 deletions(-)

diff --git a/hadoop-ozone/dev-support/checks/README.md 
b/hadoop-ozone/dev-support/checks/README.md
new file mode 100755
index 000..ba7202c
--- /dev/null
+++ b/hadoop-ozone/dev-support/checks/README.md
@@ -0,0 +1,27 @@
+
+
+# Ozone checks
+
+This directory contains a collection of easy-to-use helper scripts to execute 
various types of tests on the ozone/hdds codebase.
+
+The contract of the scripts is very simple:
+
+ 1. Executing the scripts without any parameter will check the hdds/ozone 
project
+ 2. Shell exit code represents the result of the check (if failed, exits with 
non-zero code)
+ 3. Detailed information may be saved to the $OUTPUT_DIR (if it's not set, 
root level ./target will be used).
+ 4. The standard output should contain all the logs about the build AND the 
results.
+ 5. The content of the $OUTPUT_DIR can be:
+* `summary.html`/`summary.md`/`summary.txt`: contains a human readable 
overview about the failed tests (used by reporting)
+* `failures`: contains a simple number (used by reporting)
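
Any harness can drive a check through this contract. The hypothetical Java runner below (the script path and the default target output directory are assumptions) launches one check, honors its exit code, and reads the failures artifact.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class CheckRunner {
  public static void main(String[] args)
      throws IOException, InterruptedException {
    Path outputDir = Paths.get("target");  // the default $OUTPUT_DIR
    ProcessBuilder pb =
        new ProcessBuilder("hadoop-ozone/dev-support/checks/rat.sh");
    pb.environment().put("OUTPUT_DIR", outputDir.toString());
    pb.inheritIO();                        // contract item 4: logs on stdout
    int exitCode = pb.start().waitFor();   // contract item 2: exit code
    System.out.println("check " + (exitCode == 0 ? "passed" : "failed"));

    Path failures = outputDir.resolve("failures");  // contract item 5
    if (Files.exists(failures)) {
      System.out.println("failure count: "
          + new String(Files.readAllBytes(failures)).trim());
    }
  }
}
```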
diff --git a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh 
b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh
new file mode 100755
index 000..bb29d40
--- /dev/null
+++ b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh
@@ -0,0 +1,66 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+REPORT_DIR=${REPORT_DIR:-$PWD}
+
+## generate summary txt file
+find "." -name 'TEST*.xml' -print0 \
+| xargs -n1 -0 "grep" -l -E "> "${REPORT_DIR}/summary.txt"
+
+#Collect all of the report files of FAILED tests
+while IFS= read -r -d '' dir; do
+   while IFS=$'\n' read -r file; do
+  DIR_OF_TESTFILE=$(dirname "$file")
+  NAME_OF_TESTFILE=$(basename "$file")
+  NAME_OF_TEST="${NAME_OF_TESTFILE%.*}"
+  DESTDIRNAME=$(realpath --relative-to="$PWD" "$DIR_OF_TESTFILE/../..")
+  mkdir -p "$REPORT_DIR/$DESTDIRNAME"
+  #shellcheck disable=SC2086
+  cp -r "$DIR_OF_TESTFILE"/*$NAME_OF_TEST* "$REPORT_DIR/$DESTDIRNAME/"
+   done < <(grep -l -r FAILURE --include="*.txt" "$dir" | grep -v output.txt)
+done < <(find "." -name surefire-reports -print0)
+
+## generate summary markdown file
+export SUMMARY_FILE="$REPORT_DIR/summary.md"
+for TEST_RESULT_FILE in $(find "$REPORT_DIR" -name "*.txt" | grep -v output); 
do
+
+FAILURES=$(grep FAILURE "$TEST_RESULT_FILE" | grep "Tests run" | awk 
'{print $18}' | sort | uniq)
+
+for FAILURE in $FAILURES; do
+TEST_RESULT_LOCATION="$(realpath --relative-to="$REPORT_DIR" 
"$TEST_RESULT

[hadoop] branch trunk updated: HDDS-2057. Incorrect Default OM Port in Ozone FS URI Error Message.

2019-09-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 95010a4  HDDS-2057. Incorrect Default OM Port in Ozone FS URI Error 
Message.
95010a4 is described below

commit 95010a41fcea6ecf5dfd46d6e6f6f38c8b3e2a66
Author: sdeka 
AuthorDate: Thu Aug 29 21:23:50 2019 +0530

HDDS-2057. Incorrect Default OM Port in Ozone FS URI Error Message.

Signed-off-by: Anu Engineer 
---
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java  | 26 +++---
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index 4d7bfd9..06eedba 100644
--- 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -87,11 +88,20 @@ public class BasicOzoneFileSystem extends FileSystem {
   private static final Pattern URL_SCHEMA_PATTERN =
   Pattern.compile("([^\\.]+)\\.([^\\.]+)\\.{0,1}(.*)");
 
-  private static final String URI_EXCEPTION_TEXT = "Ozone file system URL " +
-  "should be one of the following formats: " +
-  "o3fs://bucket.volume/key  OR " +
-  "o3fs://bucket.volume.om-host.example.com/key  OR " +
-  "o3fs://bucket.volume.om-host.example.com:5678/key";
+  private OzoneConfiguration getOzoneConf(Configuration conf) {
+
+return (conf instanceof OzoneConfiguration) ?
+(OzoneConfiguration) conf : new OzoneConfiguration(conf);
+  }
+
+  private String getUriExceptionText(Configuration conf) {
+
+return "Ozone file system URL should be one of the following formats: "
++ "o3fs://bucket.volume/key  OR "
++ "o3fs://bucket.volume.om-host.example.com/key  OR "
++ "o3fs://bucket.volume.om-host.example.com:"
++ OmUtils.getOmRpcPort(getOzoneConf(conf)) + "/key";
+  }
 
   @Override
   public void initialize(URI name, Configuration conf) throws IOException {
@@ -106,7 +116,7 @@ public class BasicOzoneFileSystem extends FileSystem {
 Matcher matcher = URL_SCHEMA_PATTERN.matcher(authority);
 
 if (!matcher.matches()) {
-  throw new IllegalArgumentException(URI_EXCEPTION_TEXT);
+  throw new IllegalArgumentException(getUriExceptionText(conf));
 }
 String bucketStr = matcher.group(1);
 String volumeStr = matcher.group(2);
@@ -118,14 +128,14 @@ public class BasicOzoneFileSystem extends FileSystem {
   String[] parts = remaining.split(":");
   // Array length should be either 1(host) or 2(host:port)
   if (parts.length > 2) {
-throw new IllegalArgumentException(URI_EXCEPTION_TEXT);
+throw new IllegalArgumentException(getUriExceptionText(conf));
   }
   omHost = parts[0];
   if (parts.length == 2) {
 try {
   omPort = Integer.parseInt(parts[1]);
 } catch (NumberFormatException e) {
-  throw new IllegalArgumentException(URI_EXCEPTION_TEXT);
+  throw new IllegalArgumentException(getUriExceptionText(conf));
 }
   } else {
 // If port number is not specified, read it from config
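
With this change the error message quotes whatever port OmUtils.getOmRpcPort derives from the configuration instead of a hard-coded value. The authority itself is still split by the URL_SCHEMA_PATTERN shown above; the small demo below shows how that regex decomposes the three accepted URI shapes (the sample host names are illustrative).

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/** Demo of the o3fs authority split: bucket.volume[.om-host[:port]]. */
public class O3fsAuthorityDemo {

  // Same pattern as the patch: group(1)=bucket, group(2)=volume,
  // group(3)=the optional "om-host[:port]" remainder.
  private static final Pattern URL_SCHEMA_PATTERN =
      Pattern.compile("([^\\.]+)\\.([^\\.]+)\\.{0,1}(.*)");

  public static void main(String[] args) {
    String[] authorities = {
        "bucket.volume",
        "bucket.volume.om-host.example.com",
        "bucket.volume.om-host.example.com:5678",
    };
    for (String authority : authorities) {
      Matcher m = URL_SCHEMA_PATTERN.matcher(authority);
      if (m.matches()) {
        System.out.printf("bucket=%s volume=%s rest=%s%n",
            m.group(1), m.group(2), m.group(3));
      }
    }
  }
}
```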


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-2015. Encrypt/decrypt key using symmetric key while writing/reading

2019-09-06 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b15c116  HDDS-2015. Encrypt/decrypt key using symmetric key while 
writing/reading
b15c116 is described below

commit b15c116c1edaa71a3de86dbbab822ced9df37dbd
Author: dchitlangia 
AuthorDate: Fri Aug 30 17:17:53 2019 -0400

HDDS-2015. Encrypt/decrypt key using symmetric key while writing/reading

Signed-off-by: Anu Engineer 
---
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |  3 +-
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  | 54 ++
 .../hadoop/ozone/security/GDPRSymmetricKey.java| 10 ++-
 .../ozone/security/TestGDPRSymmetricKey.java   |  7 +-
 .../client/rpc/TestOzoneRpcClientAbstract.java | 86 ++
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 11 ++-
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  |  5 +-
 .../protocolPB/OzoneManagerRequestHandler.java |  2 +
 .../ozone/web/ozShell/keys/PutKeyHandler.java  |  8 +-
 9 files changed, 175 insertions(+), 11 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 398cce2..d6e079a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -313,8 +313,9 @@ public final class OzoneConsts {
   public static final int S3_BUCKET_MAX_LENGTH = 64;
 
   //GDPR
+  public static final String GDPR_FLAG = "gdprEnabled";
   public static final String GDPR_ALGORITHM_NAME = "AES";
-  public static final int GDPR_RANDOM_SECRET_LENGTH = 32;
+  public static final int GDPR_DEFAULT_RANDOM_SECRET_LENGTH = 16;
   public static final String GDPR_CHARSET = "UTF-8";
   public static final String GDPR_LENGTH = "length";
   public static final String GDPR_SECRET = "secret";
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 003bcc4..d9e6c37 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -82,6 +82,7 @@ import org.apache.hadoop.hdds.scm.protocolPB
 .StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.protocolPB
 .StorageContainerLocationProtocolPB;
+import org.apache.hadoop.ozone.security.GDPRSymmetricKey;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
@@ -96,9 +97,13 @@ import org.apache.ratis.protocol.ClientId;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.crypto.Cipher;
+import javax.crypto.CipherInputStream;
+import javax.crypto.CipherOutputStream;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.security.InvalidKeyException;
 import java.util.*;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
@@ -601,6 +606,22 @@ public class RpcClient implements ClientProtocol {
 HddsClientUtils.verifyResourceName(volumeName, bucketName);
 HddsClientUtils.checkNotNull(keyName, type, factor);
 String requestId = UUID.randomUUID().toString();
+
+if(Boolean.valueOf(metadata.get(OzoneConsts.GDPR_FLAG))){
+  try{
+GDPRSymmetricKey gKey = new GDPRSymmetricKey();
+metadata.putAll(gKey.getKeyDetails());
+  }catch (Exception e) {
+if(e instanceof InvalidKeyException &&
+e.getMessage().contains("Illegal key size or default parameters")) 
{
+  LOG.error("Missing Unlimited Strength Policy jars. Please install " +
+  "Java Cryptography Extension (JCE) Unlimited Strength " +
+  "Jurisdiction Policy Files");
+}
+throw new IOException(e);
+  }
+}
+
 OmKeyArgs keyArgs = new OmKeyArgs.Builder()
 .setVolumeName(volumeName)
 .setBucketName(bucketName)
@@ -1062,6 +1083,22 @@ public class RpcClient implements ClientProtocol {
   OzoneKMSUtil.getCryptoCodec(conf, feInfo),
   decrypted.getMaterial(), feInfo.getIV());
   return new OzoneInputStream(cryptoIn);
+} else {
+  try{
+GDPRSymmetricKey gk;
+Map keyInfoMetadata = keyInfo.getMetadata();
+if(Boolean.valueOf(keyInfoMetadata.get(OzoneConsts.GDPR_FLAG))){
+  gk = new GDPRSymmetricKe
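
The read path is truncated here, but the write path above tells the story: when the gdprEnabled flag is set, a fresh symmetric key is generated, stored in the key metadata, and used to encrypt the data stream. Below is a minimal sketch of the underlying idea in plain javax.crypto, not the real GDPRSymmetricKey class; note that the new 16-byte default secret is a 128-bit AES key, which works without the JCE unlimited-strength policy files the error message warns about.

```java
import java.nio.charset.StandardCharsets;
import java.security.SecureRandom;
import javax.crypto.Cipher;
import javax.crypto.spec.SecretKeySpec;

/** Toy AES round trip with a random per-key secret (names illustrative). */
public class SymmetricKeyDemo {
  public static void main(String[] args) throws Exception {
    // 16 random bytes = a 128-bit AES key, which needs no JCE
    // unlimited-strength policy files (a 32-byte, 256-bit key would).
    byte[] secret = new byte[16];
    new SecureRandom().nextBytes(secret);
    SecretKeySpec key = new SecretKeySpec(secret, "AES");

    Cipher encrypt = Cipher.getInstance("AES");
    encrypt.init(Cipher.ENCRYPT_MODE, key);
    byte[] cipherText =
        encrypt.doFinal("hello ozone".getBytes(StandardCharsets.UTF_8));

    // Reading back works only while the secret survives in the key
    // metadata; deleting the secret renders the ciphertext unreadable,
    // which is the GDPR "forget" property this feature builds on.
    Cipher decrypt = Cipher.getInstance("AES");
    decrypt.init(Cipher.DECRYPT_MODE, key);
    System.out.println(
        new String(decrypt.doFinal(cipherText), StandardCharsets.UTF_8));
  }
}
```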

[hadoop] branch trunk updated: HDDS-1708. Add container scrubber metrics. Contributed by Hrishikesh Gadre.

2019-09-05 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new acbea8d  HDDS-1708. Add container scrubber metrics. Contributed by 
Hrishikesh Gadre.
acbea8d is described below

commit acbea8d9760656c15c8493f9840c22fea89a53a0
Author: Anu Engineer 
AuthorDate: Thu Sep 5 14:33:06 2019 -0700

HDDS-1708. Add container scrubber metrics.
Contributed by Hrishikesh Gadre.
---
 .../keyvalue/impl/ChunkManagerFactory.java |   1 -
 .../container/ozoneimpl/ContainerDataScanner.java  |  94 +
 .../ozoneimpl/ContainerDataScrubberMetrics.java| 115 +
 .../ozoneimpl/ContainerMetadataScanner.java|  59 ---
 .../ContainerMetadataScrubberMetrics.java  |  93 +
 .../ozone/container/ozoneimpl/OzoneContainer.java  |   4 +-
 .../ozoneimpl/TestContainerScrubberMetrics.java| 113 
 .../hadoop/ozone/dn/scrubber/TestDataScrubber.java |   2 +-
 8 files changed, 442 insertions(+), 39 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
index 046bfdd..673e289 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.container.keyvalue.impl;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java
index 2b0f3f3..799c8fe 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java
@@ -19,7 +19,10 @@ package org.apache.hadoop.ozone.container.ozoneimpl;
 
 import java.io.IOException;
 import java.util.Iterator;
+import java.util.concurrent.TimeUnit;
 
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
@@ -42,6 +45,7 @@ public class ContainerDataScanner extends Thread {
   private final ContainerController controller;
   private final DataTransferThrottler throttler;
   private final Canceler canceler;
+  private final ContainerDataScrubberMetrics metrics;
 
   /**
* True if the thread is stopping.
@@ -50,12 +54,15 @@ public class ContainerDataScanner extends Thread {
   private volatile boolean stopping = false;
 
 
-  public ContainerDataScanner(ContainerController controller,
+  public ContainerDataScanner(Configuration conf,
+  ContainerController controller,
   HddsVolume volume, long bytesPerSec) {
 this.controller = controller;
 this.volume = volume;
-this.throttler = new DataTransferThrottler(bytesPerSec);
+this.throttler = new HddsDataTransferThrottler(bytesPerSec);
 this.canceler = new Canceler();
+this.metrics = ContainerDataScrubberMetrics.create(conf,
+volume.toString());
 setName("ContainerDataScanner(" + volume + ")");
 setDaemon(true);
   }
@@ -65,26 +72,54 @@ public class ContainerDataScanner extends Thread {
 LOG.trace("{}: thread starting.", this);
 try {
   while (!stopping) {
-Iterator itr = controller.getContainers(volume);
-while (!stopping && itr.hasNext()) {
-  Container c = itr.next();
-  try {
-if (c.shouldScanData()) {
-  if(!c.scanData(throttler, canceler)) {
-controller.markContainerUnhealthy(
-c.getContainerData().getContainerID());
-  }
-}
-  } catch (IOException ex) {
-long containerId = c.getContainerData().getContainerID();
-LOG.warn("Unexpected exception while scanning container "
-+ containerId, ex);
-  }
-}
+runIterati
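
The truncated line above begins a refactored helper, but the loop being replaced already shows the scanner pattern: a daemon thread walks every container on a volume, scans each one, marks failures unhealthy, and keeps going past per-container errors. Here is a compact sketch of that pattern with illustrative stand-in types; none of these names are the real Ozone API.

```java
/** Sketch of the per-volume scanner-thread pattern (names illustrative). */
class VolumeScanner extends Thread {

  interface Store {                // stand-in for the container controller
    Iterable<Long> containers();
    boolean scanData(long id);     // false = corruption detected
    void markUnhealthy(long id);
  }

  private final Store store;
  private volatile boolean stopping = false;

  VolumeScanner(Store store) {
    this.store = store;
    setDaemon(true);               // never blocks JVM shutdown
  }

  @Override
  public void run() {
    while (!stopping) {
      for (long id : store.containers()) {
        if (stopping) {
          break;
        }
        try {
          if (!store.scanData(id)) {
            store.markUnhealthy(id);
          }
        } catch (RuntimeException ex) {
          // Keep scanning the remaining containers, as the real loop does.
          System.err.println("scan failed for container " + id + ": " + ex);
        }
      }
    }
  }

  void shutdown() {
    stopping = true;
    interrupt();
  }
}
```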

[hadoop] branch trunk updated: Add support for checksum verification in data scrubber

2019-09-04 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f347c34  Add support for checksum verification in data scrubber
f347c34 is described below

commit f347c348d8dd092181b7ce135968f4469f685841
Author: Hrishikesh Gadre 
AuthorDate: Thu Jul 11 15:08:56 2019 -0700

Add support for checksum verification in data scrubber

Signed-off-by: Anu Engineer 
---
 .../org/apache/hadoop/hdds/HddsConfigKeys.java |   5 +-
 .../apache/hadoop/ozone/common/ChecksumData.java   |   7 +-
 .../container/common/interfaces/Container.java |  26 +++-
 .../container/keyvalue/KeyValueContainer.java  |  57 +++-
 .../container/keyvalue/KeyValueContainerCheck.java |  99 -
 .../keyvalue/impl/ChunkManagerFactory.java |   6 +-
 .../container/ozoneimpl/ContainerDataScanner.java  | 108 ++
 .../ozoneimpl/ContainerMetadataScanner.java| 110 ++
 .../container/ozoneimpl/ContainerScrubber.java | 158 -
 .../ozoneimpl/ContainerScrubberConfiguration.java  |  74 ++
 .../ozone/container/ozoneimpl/OzoneContainer.java  |  41 --
 .../keyvalue/TestKeyValueContainerCheck.java   | 101 +++--
 .../hadoop/ozone/TestOzoneConfigurationFields.java |   4 +-
 .../hadoop/ozone/dn/scrubber/TestDataScrubber.java |   6 +-
 .../freon/TestDataValidateWithDummyContainers.java |   5 +-
 15 files changed, 542 insertions(+), 265 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 9e757c1..c541f9b 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -65,15 +65,12 @@ public final class HddsConfigKeys {
   public static final float HDDS_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f;
   public static final String HDDS_SCM_SAFEMODE_ENABLED =
   "hdds.scm.safemode.enabled";
-  public static final String HDDS_CONTAINERSCRUB_ENABLED =
-  "hdds.containerscrub.enabled";
-  public static final boolean HDDS_CONTAINERSCRUB_ENABLED_DEFAULT = false;
+
   public static final boolean HDDS_SCM_SAFEMODE_ENABLED_DEFAULT = true;
   public static final String HDDS_SCM_SAFEMODE_MIN_DATANODE =
   "hdds.scm.safemode.min.datanode";
   public static final int HDDS_SCM_SAFEMODE_MIN_DATANODE_DEFAULT = 1;
 
-
   public static final String
   HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT =
   "hdds.scm.wait.time.after.safemode.exit";
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java
index c0799bb..4a927fb 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java
@@ -40,9 +40,14 @@ public class ChecksumData {
   private List checksums;
 
   public ChecksumData(ChecksumType checksumType, int bytesPerChecksum) {
+this(checksumType, bytesPerChecksum, Lists.newArrayList());
+  }
+
+  public ChecksumData(ChecksumType checksumType, int bytesPerChecksum,
+  List checksums) {
 this.type = checksumType;
 this.bytesPerChecksum = bytesPerChecksum;
-this.checksums = Lists.newArrayList();
+this.checksums = checksums;
   }
 
   /**
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
index 10fec60..05ff93f 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.scm.container.common.helpers
 .StorageContainerException;
 
+import org.apache.hadoop.hdfs.util.Canceler;
+import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.hdfs.util.RwLock;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
@@ -153,9 +155,27 @@ public interface Container extends RwLock {
   void updateBlockCommitSequenceId(long blockCommitSequenceId);
 
   /**
-   * check and report the structural integrity of the container.
-   * @return true if the integrity checks pass
+   * Scan the container metadata to detect corruption.
+   */
+  boolean scanMetaData();
+
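
The new scanMetaData/scanData split separates a cheap structural check from the expensive pass that re-reads every chunk and recomputes its checksum. The toy illustration below shows the data pass, with CRC32 standing in for Ozone's configurable ChecksumData; all names are illustrative.

```java
import java.util.zip.CRC32;

/** Toy data scan: recompute each chunk checksum, compare to stored value. */
public class ChunkScanDemo {

  static long checksum(byte[] chunk) {
    CRC32 crc = new CRC32();
    crc.update(chunk, 0, chunk.length);
    return crc.getValue();
  }

  /** Returns false on the first mismatch; the caller then marks the
   *  container unhealthy, mirroring the scanData semantics above. */
  static boolean scan(byte[][] chunks, long[] storedChecksums) {
    for (int i = 0; i < chunks.length; i++) {
      if (checksum(chunks[i]) != storedChecksums[i]) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    byte[][] chunks = {{1, 2, 3}, {4, 5, 6}};
    long[] stored = {checksum(chunks[0]), checksum(chunks[1])};
    System.out.println("clean scan: " + scan(chunks, stored));
    chunks[1][0] = (byte) 9;  // simulate on-disk corruption
    System.out.println("after corruption: " + scan(chunks, stored));
  }
}
```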
+

[hadoop] branch trunk updated: HDDS-1413. Attempt to fix TestCloseContainerCommandHandler by adjusting timeouts

2019-08-30 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a2d083f  HDDS-1413. Attempt to fix TestCloseContainerCommandHandler by 
adjusting timeouts
a2d083f is described below

commit a2d083f2c546ef9e0a543ea287c2435c6440d9aa
Author: Doroszlai, Attila 
AuthorDate: Thu Aug 29 18:01:21 2019 +0200

HDDS-1413. Attempt to fix TestCloseContainerCommandHandler by adjusting 
timeouts

Signed-off-by: Anu Engineer 
---
 .../TestCloseContainerCommandHandler.java  | 410 +++--
 1 file changed, 143 insertions(+), 267 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
index 219b504..84a1e5d 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
@@ -16,306 +16,187 @@
  */
 package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
+import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.hadoop.ozone.container.common.statemachine
 .DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import 
org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.hdds.ratis.RatisHelper;
-import org.apache.ratis.client.RaftClient;
-import org.apache.ratis.protocol.RaftGroup;
-import org.apache.ratis.protocol.RaftGroupId;
-import org.apache.ratis.protocol.RaftPeer;
-import org.apache.ratis.retry.RetryPolicy;
-import org.apache.ratis.rpc.SupportedRpcType;
-import org.apache.ratis.util.TimeDuration;
-import org.junit.AfterClass;
-import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mockito;
 
-import java.io.File;
 import java.io.IOException;
-import java.util.Collections;
-import java.util.Random;
 import java.util.UUID;
-import java.util.concurrent.TimeUnit;
+
+import static java.util.Collections.singletonMap;
+import static org.apache.hadoop.ozone.OzoneConsts.GB;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 /**
  * Test cases to verify CloseContainerCommandHandler in datanode.
  */
 public class TestCloseContainerCommandHandler {
 
-  private final StateContext context = Mockito.mock(StateContext.class);
-  private final Random random = new Random();
-  private static File testDir;
+  private static final long CONTAINER_ID = 123L;
+
+  private OzoneContainer ozoneContainer;
+  private StateContext context;
+  private XceiverServerSpi writeChannel;
+  private Container container;
+  private Handler containerHandler;
+  private PipelineID pipelineID;
+  private PipelineID nonExistentPipelineID = PipelineID.randomId();
+
+  private CloseContainerCommandHandler subject =
+  new CloseContainerCommandHandler();
+
+  @Before
+  public void before() throws Exception {
+context = mock(StateContext.class);
+DatanodeStateMachine dnStateMachine = mock(DatanodeStateMachine.class);
+when(dnStateMachine.getDatanodeDetails())
+.thenReturn(randomDatanodeDetails());
+when(context.getParent()).thenReturn
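
As the new imports show, the rewritten test swaps the real mini-cluster (and its fragile timeouts) for Mockito mocks: collaborators are stubbed with when(...) and outcomes are asserted with verify(...). Below is a self-contained sketch of that testing style, using hypothetical stand-in types rather than the real handler.

```java
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

/** Sketch of mock-based testing; Container and Controller are stand-ins. */
public class CloseHandlerTestSketch {

  interface Container { void close(); }
  interface Controller { Container get(long id); }

  // The logic under test: close a container if the controller knows it.
  void closeIfPresent(Controller controller, long id) {
    Container container = controller.get(id);
    if (container != null) {
      container.close();
    }
  }

  void testClosesExistingContainer() {
    Controller controller = mock(Controller.class);
    Container container = mock(Container.class);
    when(controller.get(123L)).thenReturn(container);

    closeIfPresent(controller, 123L);

    verify(container).close();  // interaction asserted: no cluster, no sleeps
  }

  void testIgnoresMissingContainer() {
    Controller controller = mock(Controller.class);
    when(controller.get(456L)).thenReturn(null);

    closeIfPresent(controller, 456L);  // must simply not throw
  }

  public static void main(String[] args) {
    CloseHandlerTestSketch t = new CloseHandlerTestSketch();
    t.testClosesExistingContainer();
    t.testIgnoresMissingContainer();
    System.out.println("both sketch tests passed");
  }
}
```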

[hadoop] branch trunk updated: HDDS-2042. Avoid log on console with Ozone shell

2019-08-30 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c4411f7  HDDS-2042. Avoid log on console with Ozone shell
c4411f7 is described below

commit c4411f7fdf745eefac32749dad4388635a0a9aae
Author: Doroszlai, Attila 
AuthorDate: Tue Aug 27 15:55:47 2019 +0200

HDDS-2042. Avoid log on console with Ozone shell

Signed-off-by: Anu Engineer 
---
 hadoop-ozone/common/src/main/bin/ozone |  3 ++
 .../dist/dev-support/bin/dist-layout-stitching |  1 +
 .../src/main/conf/ozone-shell-log4j.properties | 33 ++
 .../src/main/smoketest/basic/ozone-shell.robot |  1 -
 .../dist/src/main/smoketest/createbucketenv.robot  |  1 -
 .../dist/src/main/smoketest/createmrenv.robot  |  1 -
 6 files changed, 37 insertions(+), 3 deletions(-)

diff --git a/hadoop-ozone/common/src/main/bin/ozone 
b/hadoop-ozone/common/src/main/bin/ozone
index e8cda82..47258d2 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -118,6 +118,8 @@ function ozonecmd_case
 ;;
 freon)
   HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.Freon
+  OZONE_FREON_OPTS="${OZONE_FREON_OPTS} -Dhadoop.log.file=ozone-freon.log 
-Dlog4j.configuration=file:${HADOOP_CONF_DIR}/ozone-shell-log4j.properties"
+  HADOOP_OPTS="${HADOOP_OPTS} ${OZONE_FREON_OPTS}"
   OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools"
 ;;
 genesis)
@@ -137,6 +139,7 @@ function ozonecmd_case
 ;;
 sh | shell)
   HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.OzoneShell
+  HDFS_OM_SH_OPTS="${HDFS_OM_SH_OPTS} -Dhadoop.log.file=ozone-shell.log 
-Dlog4j.configuration=file:${HADOOP_CONF_DIR}/ozone-shell-log4j.properties"
   HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_OM_SH_OPTS}"
   OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-ozone-manager"
 ;;
diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching 
b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
index 97acc54..00b1b9a 100755
--- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
+++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
@@ -90,6 +90,7 @@ run cp -r "${ROOT}/hadoop-hdds/common/src/main/conf/" 
"etc/hadoop"
 run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties" 
"etc/hadoop"
 run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/dn-audit-log4j2.properties" 
"etc/hadoop"
 run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/scm-audit-log4j2.properties" 
"etc/hadoop"
+run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/ozone-shell-log4j.properties" 
"etc/hadoop"
 run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/ozone-site.xml" "etc/hadoop"
 run cp -f "${ROOT}/hadoop-ozone/dist/src/main/conf/log4j.properties" 
"etc/hadoop"
 run cp 
"${ROOT}/hadoop-hdds/common/src/main/resources/network-topology-default.xml" 
"etc/hadoop"
diff --git a/hadoop-ozone/dist/src/main/conf/ozone-shell-log4j.properties 
b/hadoop-ozone/dist/src/main/conf/ozone-shell-log4j.properties
new file mode 100644
index 000..e8f5f2d
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/conf/ozone-shell-log4j.properties
@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.log.dir=.
+hadoop.log.file=ozone-shell.log
+
+log4j.rootLogger=INFO,FILE
+
+log4j.threshold=ALL
+
+log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.FILE.file=${hadoop.log.dir}/${hadoop.log.file}
+log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{1}:%L - 
%m%n
+
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
+log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
+log4j.logger.org.a

[hadoop] branch trunk updated: HDDS-1935. Improve the visibility with Ozone Insight tool (#1255)

2019-08-29 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4f5f46e  HDDS-1935. Improve the visibility with Ozone Insight tool 
(#1255)
4f5f46e is described below

commit 4f5f46eb4af721a5cef2543a78ba6b3812331e3b
Author: Elek, Márton 
AuthorDate: Fri Aug 30 02:07:55 2019 +0200

HDDS-1935. Improve the visibility with Ozone Insight tool (#1255)
---
 .../ozone/protocolPB/ProtocolMessageMetrics.java   | 105 
 ...lockLocationProtocolServerSideTranslatorPB.java | 108 +++-
 .../common/src/main/resources/ozone-default.xml|   2 +-
 .../hadoop/hdds/conf/ConfigFileGenerator.java  |  16 +-
 .../apache/hadoop/hdds/server/BaseHttpServer.java  |   4 +-
 .../hadoop/hdds/server/LogStreamServlet.java   |  58 +++
 .../hadoop/hdds/server/PrometheusMetricsSink.java  |   4 +
 .../hadoop/hdds/server/events/EventQueue.java  |  18 +-
 .../hdds/scm/container/ReplicationManager.java | 104 +---
 .../hadoop/hdds/scm/node/NodeReportHandler.java|   1 -
 .../hadoop/hdds/scm/node/SCMNodeManager.java   |  78 -
 .../hdds/scm/server/SCMBlockProtocolServer.java|  51 +++---
 .../scm/server/TestSCMBlockProtocolServer.java |   8 +-
 hadoop-ozone/common/src/main/bin/ozone |   6 +
 hadoop-ozone/dev-support/intellij/ozone-site.xml   |   4 +
 hadoop-ozone/dist/pom.xml  |  12 ++
 .../insight/dev-support/findbugsExcludeFile.xml|  19 +++
 hadoop-ozone/insight/pom.xml   | 132 +++
 .../hadoop/ozone/insight/BaseInsightPoint.java | 188 +
 .../ozone/insight/BaseInsightSubCommand.java   | 101 +++
 .../org/apache/hadoop/ozone/insight/Component.java | 116 +
 .../ozone/insight/ConfigurationSubCommand.java |  89 ++
 .../org/apache/hadoop/ozone/insight/Insight.java   |  41 +
 .../apache/hadoop/ozone/insight/InsightPoint.java  |  49 ++
 .../hadoop/ozone/insight/ListSubCommand.java   |  59 +++
 .../apache/hadoop/ozone/insight/LogSubcommand.java | 167 ++
 .../apache/hadoop/ozone/insight/LoggerSource.java  |  72 
 .../apache/hadoop/ozone/insight/MetricDisplay.java |  69 
 .../hadoop/ozone/insight/MetricGroupDisplay.java   |  69 
 .../hadoop/ozone/insight/MetricsSubCommand.java| 132 +++
 .../ozone/insight/datanode/RatisInsight.java   |  75 
 .../ozone/insight/datanode/package-info.java   |  23 +++
 .../hadoop/ozone/insight/om/KeyManagerInsight.java |  78 +
 .../hadoop/ozone/insight/om/OmProtocolInsight.java |  67 
 .../hadoop/ozone/insight/om/package-info.java  |  23 +++
 .../apache/hadoop/ozone/insight/package-info.java  |  24 +++
 .../ozone/insight/scm/EventQueueInsight.java   |  47 ++
 .../ozone/insight/scm/NodeManagerInsight.java  |  74 
 .../ozone/insight/scm/ReplicaManagerInsight.java   |  60 +++
 .../scm/ScmProtocolBlockLocationInsight.java   |  71 
 .../hadoop/ozone/insight/scm/package-info.java |  23 +++
 .../hadoop/ozone/insight/LogSubcommandTest.java|  41 +
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  25 ++-
 ...OzoneManagerProtocolServerSideTranslatorPB.java |  93 ++
 hadoop-ozone/pom.xml   |   6 +
 45 files changed, 2431 insertions(+), 181 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ProtocolMessageMetrics.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ProtocolMessageMetrics.java
new file mode 100644
index 000..96725f2
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ProtocolMessageMetrics.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.protocolPB;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.ap
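
[Sketch for orientation: the new ProtocolMessageMetrics source is cut off
above by the mail archive just after its imports. The code below is not the
committed class; it only illustrates the counting pattern those imports
(ConcurrentHashMap, AtomicLong) point to, and the class and method names are
invented for this note.]

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicLong;

    public final class MessageTypeCounters {

      // One counter per protocol message type, created lazily and safely
      // under concurrent access.
      private final Map<String, AtomicLong> counters = new ConcurrentHashMap<>();

      /** Count one occurrence of the given message type. */
      public void increment(String messageType) {
        counters.computeIfAbsent(messageType, k -> new AtomicLong())
            .incrementAndGet();
      }

      /** Snapshot the current counts, e.g. to publish via a metrics sink. */
      public Map<String, Long> snapshot() {
        Map<String, Long> copy = new ConcurrentHashMap<>();
        counters.forEach((type, count) -> copy.put(type, count.get()));
        return copy;
      }
    }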

[hadoop] branch HDDS-151 deleted (was 920d154)

2019-08-28 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch HDDS-151
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 was 920d154  HDDS-148. Remove ContainerReportManager and 
ContainerReportManagerImpl. Contributed by Nanda kumar.

The revisions that were on this branch are still contained in
other references; therefore, this change does not discard any commits
from the repository.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch HDDS-1582 deleted (was 887bdc4)

2019-08-28 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch HDDS-1582
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 was 887bdc4  HDDS-1582. Fix BindException due to address already in use in 
unit tests. Contributed by Mukul Kumar Singh.

The revisions that were on this branch are still contained in
other references; therefore, this change does not discard any commits
from the repository.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch HDDS-1550 deleted (was a8a1727)

2019-08-28 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch HDDS-1550
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 was a8a1727  HDDS-1550. MiniOzoneChaosCluster is not shutting down all the 
threads during shutdown. Contributed by Mukul Kumar Singh.

The revisions that were on this branch are still contained in
other references; therefore, this change does not discard any commits
from the repository.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-2051. Rat check failure in decommissioning.md (#1372)

2019-08-28 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3e6a016  HDDS-2051. Rat check failure in decommissioning.md (#1372)
3e6a016 is described below

commit 3e6a0166f4707ec433e2cdbc04c054b81722c073
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Wed Aug 28 22:28:07 2019 +0200

HDDS-2051. Rat check failure in decommissioning.md (#1372)
---
 hadoop-hdds/docs/content/design/decommissioning.md | 14 ++
 1 file changed, 14 insertions(+)

diff --git a/hadoop-hdds/docs/content/design/decommissioning.md 
b/hadoop-hdds/docs/content/design/decommissioning.md
index 5a7d844..8d620be 100644
--- a/hadoop-hdds/docs/content/design/decommissioning.md
+++ b/hadoop-hdds/docs/content/design/decommissioning.md
@@ -1,3 +1,17 @@
+[The 14 added lines are a license header inside an HTML comment; the mail
+archive strips HTML comments, leaving only bare "+" markers here.]
 ---
 title: Decommissioning in Ozone
 summary: Formal process to shut down machines in a safe way after the required 
replications.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1950. S3 MPU part-list call fails if there are no parts

2019-08-28 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new aef6a4f  HDDS-1950. S3 MPU part-list call fails if there are no parts
aef6a4f is described below

commit aef6a4fe0d04fe0d42fa36dc04cac2cc53ae8efd
Author: Márton Elek 
AuthorDate: Sun Aug 11 14:32:00 2019 +0200

HDDS-1950. S3 MPU part-list call fails if there are no parts

Signed-off-by: Anu Engineer 
---
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  24 -
 .../apache/hadoop/ozone/om/TestKeyManagerUnit.java | 111 +
 2 files changed, 133 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index b58095f..4f56160 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -1298,8 +1298,9 @@ public class KeyManagerImpl implements KeyManager {
 multipartKeyInfo.getPartKeyInfoMap();
 Iterator> partKeyInfoMapIterator =
 partKeyInfoMap.entrySet().iterator();
-HddsProtos.ReplicationType replicationType =
-partKeyInfoMap.firstEntry().getValue().getPartKeyInfo().getType();
+
+HddsProtos.ReplicationType replicationType = null;
+
 int count = 0;
 List omPartInfoList = new ArrayList<>();
 
@@ -1316,11 +1317,30 @@ public class KeyManagerImpl implements KeyManager {
 partKeyInfo.getPartKeyInfo().getModificationTime(),
 partKeyInfo.getPartKeyInfo().getDataSize());
 omPartInfoList.add(omPartInfo);
+
+//if there are parts, use replication type from one of the parts
 replicationType = partKeyInfo.getPartKeyInfo().getType();
 count++;
   }
 }
 
+if (replicationType == null) {
+  //if there are no parts, use the replicationType from the open key.
+
+  OmKeyInfo omKeyInfo =
+  metadataManager.getOpenKeyTable().get(multipartKey);
+
+  if (omKeyInfo == null) {
+throw new IllegalStateException(
+"Open key is missing for multipart upload " + multipartKey);
+  }
+
+  replicationType = omKeyInfo.getType();
+
+}
+Preconditions.checkNotNull(replicationType,
+"Replication type can't be identified");
+
 if (partKeyInfoMapIterator.hasNext()) {
   Map.Entry partKeyInfoEntry =
   partKeyInfoMapIterator.next();
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
new file mode 100644
index 000..a5a446c
--- /dev/null
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.hadoop.ozone.om;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.StorageType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs.Builder;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
+import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
+import org.apache.hadoop.test.GenericTestUtils;
+
+impo
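
[Sketch for orientation: the KeyManagerImpl hunk above carries the whole fix.
Take the replication type from any uploaded part, and only when no parts
exist fall back to the open key. Below is a condensed, self-contained sketch
of that decision; Part and OpenKey are hypothetical stand-ins for Ozone's
PartKeyInfo and OmKeyInfo helpers.]

    import java.util.List;

    final class ReplicationTypeResolver {

      // Hypothetical stand-ins for the Ozone helper classes.
      interface Part { String getType(); }
      interface OpenKey { String getType(); }

      /**
       * Prefer the type recorded on an uploaded part; with an empty part
       * list, fall back to the open key; fail loudly if neither exists,
       * mirroring the IllegalStateException and Preconditions check above.
       */
      static String resolve(List<Part> parts, OpenKey openKey) {
        if (!parts.isEmpty()) {
          return parts.get(parts.size() - 1).getType();
        }
        if (openKey == null) {
          throw new IllegalStateException(
              "Open key is missing for multipart upload");
        }
        return openKey.getType();
      }
    }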

[hadoop] branch trunk updated: HDDS-1942. Support copy during S3 multipart upload part creation

2019-08-28 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2fcd0da  HDDS-1942. Support copy during S3 multipart upload part 
creation
2fcd0da is described below

commit 2fcd0da7dcbc15793041efb079210e06272482a4
Author: Márton Elek 
AuthorDate: Sun Aug 11 14:45:02 2019 +0200

HDDS-1942. Support copy during S3 multipart upload part creation

Signed-off-by: Anu Engineer 
---
 .../src/main/smoketest/s3/MultipartUpload.robot|  52 +
 .../hadoop/ozone/s3/endpoint/CopyPartResult.java   |  69 ++
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   |  79 +--
 .../org/apache/hadoop/ozone/s3/util/S3Consts.java  |   2 +
 .../hadoop/ozone/client/OzoneBucketStub.java   |  15 +-
 .../s3/endpoint/TestMultipartUploadWithCopy.java   | 233 +
 .../ozone/s3/endpoint/TestObjectEndpoint.java  |  53 +
 7 files changed, 483 insertions(+), 20 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot 
b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
index 0133d50..df95f4d 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
@@ -200,3 +200,55 @@ Test Multipart Upload with the simplified aws s3 cp API
 Execute AWSS3Clicp s3://${BUCKET}/mpyawscli 
/tmp/part1.result
 Execute AWSS3Clirm s3://${BUCKET}/mpyawscli
 Compare files   /tmp/part1
/tmp/part1.result
+
+Test Multipart Upload Put With Copy
+Run Keyword Create Random file  5
+${result} = Execute AWSS3APICli put-object --bucket ${BUCKET} 
--key copytest/source --body /tmp/part1
+
+
+${result} = Execute AWSS3APICli create-multipart-upload 
--bucket ${BUCKET} --key copytest/destination
+
+${uploadID} =   Execute and checkrc  echo '${result}' | jq -r 
'.UploadId'0
+Should contain   ${result}${BUCKET}
+Should contain   ${result}UploadId
+
+${result} = Execute AWSS3APICli  upload-part-copy --bucket 
${BUCKET} --key copytest/destination --upload-id ${uploadID} --part-number 1 
--copy-source ${BUCKET}/copytest/source
+Should contain   ${result}${BUCKET}
+Should contain   ${result}ETag
+Should contain   ${result}LastModified
+${eTag1} =  Execute and checkrc  echo '${result}' | jq -r 
'.CopyPartResult.ETag'   0
+
+
+Execute AWSS3APICli complete-multipart-upload 
--upload-id ${uploadID} --bucket ${BUCKET} --key copytest/destination 
--multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1}]'
+Execute AWSS3APICli get-object --bucket ${BUCKET} 
--key copytest/destination /tmp/part-result
+
+Compare files   /tmp/part1
/tmp/part-result
+
+Test Multipart Upload Put With Copy and range
+Run Keyword Create Random file  10
+${result} = Execute AWSS3APICli put-object --bucket ${BUCKET} 
--key copyrange/source --body /tmp/part1
+
+
+${result} = Execute AWSS3APICli create-multipart-upload 
--bucket ${BUCKET} --key copyrange/destination
+
+${uploadID} =   Execute and checkrc  echo '${result}' | jq -r 
'.UploadId'0
+Should contain   ${result}${BUCKET}
+Should contain   ${result}UploadId
+
+${result} = Execute AWSS3APICli  upload-part-copy --bucket 
${BUCKET} --key copyrange/destination --upload-id ${uploadID} --part-number 1 
--copy-source ${BUCKET}/copyrange/source --copy-source-range bytes=0-10485758
+Should contain   ${result}${BUCKET}
+Should contain   ${result}ETag
+Should contain   ${result}LastModified
+${eTag1} =  Execute and checkrc  echo '${result}' | jq -r 
'.CopyPartResult.ETag'   0
+
+${result} = Execute AWSS3APICli  upload-part-copy --bucket 
${BUCKET} --key copyrange/destination --upload-id ${uploadID} --part-number 2 
--copy-source ${BUCKET}/copyrange/source --copy-source-range 
bytes=10485758-10485760
+Should contain   ${result}${BUCKET}
+Should contain   ${result}ETag
+Should contain   ${result}LastModified
+${eTag2} =  Execute and checkrc 
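
[Sketch for orientation: the Robot tests above drive upload-part-copy through
the AWS CLI. For readers using the AWS SDK for Java v1, the equivalent call
looks roughly like the snippet below. This is an assumption about the SDK
surface, not part of the patch; bucket names, keys, and the upload id are
placeholders.]

    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.AmazonS3ClientBuilder;
    import com.amazonaws.services.s3.model.CopyPartRequest;
    import com.amazonaws.services.s3.model.CopyPartResult;

    public class UploadPartCopyExample {
      public static void main(String[] args) {
        // Assumes endpoint and credentials are configured elsewhere
        // (e.g. pointing at an Ozone S3 gateway).
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        CopyPartRequest request = new CopyPartRequest()
            .withSourceBucketName("bucket1")
            .withSourceKey("copytest/source")
            .withDestinationBucketName("bucket1")
            .withDestinationKey("copytest/destination")
            .withUploadId("upload-id-from-create-multipart-upload")
            .withPartNumber(1)
            // Optional byte range, as in the second test above.
            .withFirstByte(0L)
            .withLastByte(10485758L);

        CopyPartResult part = s3.copyPart(request);
        System.out.println("Copied part ETag: " + part.getETag());
      }
    }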

[hadoop] branch trunk updated: HDDS-1937. Acceptance tests fail if scm webui shows invalid json

2019-08-28 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new addfb7f  HDDS-1937. Acceptance tests fail if scm webui shows invalid 
json
addfb7f is described below

commit addfb7ff7d4124db93d7713516f5890811cad9b2
Author: Márton Elek 
AuthorDate: Sat Aug 24 21:19:36 2019 +0200

HDDS-1937. Acceptance tests fail if scm webui shows invalid json

Signed-off-by: Anu Engineer 
---
 hadoop-ozone/dist/src/main/compose/testlib.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh 
b/hadoop-ozone/dist/src/main/compose/testlib.sh
index 462b9fa..ffc6da2 100755
--- a/hadoop-ozone/dist/src/main/compose/testlib.sh
+++ b/hadoop-ozone/dist/src/main/compose/testlib.sh
@@ -39,7 +39,7 @@ count_datanodes() {
   else
 docker-compose -f "${compose_file}" exec -T scm curl -s "${jmx_url}"
   fi \
-| jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value'
+| jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value' || true
 }
 
 ## @description wait until datanodes are up (or 30 seconds)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (1407414 -> c7d426d)

2019-08-28 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 1407414  HDDS-1094. Performance test infrastructure : skip writing 
user data on Datanode. Contributed by Supratim Deka (#1323)
 add c7d426d  HDDS-1881. Design doc: decommissioning in Ozone (#1196)

No new revisions were added by this update.

Summary of changes:
 hadoop-hdds/docs/content/design/decommissioning.md | 610 +
 1 file changed, 610 insertions(+)
 create mode 100644 hadoop-hdds/docs/content/design/decommissioning.md


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1596. Create service endpoint to download configuration from SCM.

2019-08-28 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c0499bd  HDDS-1596. Create service endpoint to download configuration 
from SCM.
c0499bd is described below

commit c0499bd70455e67bef9a1e00da73e25c9e2cc0ff
Author: Márton Elek 
AuthorDate: Mon Aug 12 13:00:38 2019 +0200

HDDS-1596. Create service endpoint to download configuration from SCM.

Signed-off-by: Anu Engineer 
---
 .../hadoop/hdds/conf/OzoneConfiguration.java   | 30 +++-
 .../hadoop/hdds/discovery/DiscoveryUtil.java   | 90 ++
 .../apache/hadoop/hdds/discovery/package-info.java | 22 ++
 .../apache/hadoop/ozone/HddsDatanodeService.java   |  8 +-
 .../apache/hadoop/hdds/server/BaseHttpServer.java  | 13 
 .../org/apache/hadoop/hdds/server/ServerUtils.java | 12 ++-
 .../apache/hadoop/hdds/server/TestServerUtils.java | 17 
 hadoop-hdds/pom.xml| 21 +
 hadoop-hdds/server-scm/pom.xml | 18 +
 .../hdds/discovery/ConfigurationEndpoint.java  | 60 +++
 .../hadoop/hdds/discovery/ConfigurationXml.java| 44 +++
 .../hdds/discovery/ConfigurationXmlEntry.java  | 56 ++
 .../hdds/discovery/DiscoveryApplication.java   | 35 +
 .../apache/hadoop/hdds/discovery/package-info.java | 22 ++
 .../hdds/scm/server/SCMBlockProtocolServer.java|  9 ++-
 .../server/StorageContainerManagerHttpServer.java  | 15 +++-
 .../scm/server/StorageContainerManagerStarter.java |  5 ++
 .../src/main/compose/ozone/docker-compose.yaml | 19 -
 .../dist/src/main/compose/ozone/docker-config  | 15 +---
 .../hadoop/ozone/om/OzoneManagerStarter.java   |  5 ++
 hadoop-ozone/ozonefs/pom.xml   |  6 ++
 .../java/org/apache/hadoop/ozone/s3/Gateway.java   |  5 ++
 22 files changed, 504 insertions(+), 23 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index b32ad63..dfcf320 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -47,19 +47,43 @@ public class OzoneConfiguration extends Configuration {
   }
 
   public OzoneConfiguration() {
+this(false);
+  }
+
+  private OzoneConfiguration(boolean justTheDefaults) {
 OzoneConfiguration.activate();
 loadDefaults();
+if (!justTheDefaults) {
+  loadConfigFiles();
+}
+  }
+
+  private void loadConfigFiles() {
+addResource("ozone-global.xml");
+addResource("ozone-site.xml");
   }
 
   public OzoneConfiguration(Configuration conf) {
+this(conf, false);
+  }
+
+  private OzoneConfiguration(Configuration conf, boolean justTheDefaults) {
 super(conf);
 //load the configuration from the classloader of the original conf.
 setClassLoader(conf.getClassLoader());
 if (!(conf instanceof OzoneConfiguration)) {
   loadDefaults();
+  //here we load the REAL configuration.
+  if (!justTheDefaults) {
+loadConfigFiles();
+  }
 }
   }
 
+  public static OzoneConfiguration createWithDefaultsOnly() {
+return new OzoneConfiguration(true);
+  }
+
   private void loadDefaults() {
 try {
   //there could be multiple ozone-default-generated.xml files on the
@@ -74,7 +98,6 @@ public class OzoneConfiguration extends Configuration {
 } catch (IOException e) {
   e.printStackTrace();
 }
-addResource("ozone-site.xml");
   }
 
   public List readPropertyFromXml(URL url) throws JAXBException {
@@ -316,4 +339,9 @@ public class OzoneConfiguration extends Configuration {
 }
 return props;
   }
+
+  @Override
+  public synchronized Properties getProps() {
+return super.getProps();
+  }
 }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/discovery/DiscoveryUtil.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/discovery/DiscoveryUtil.java
new file mode 100644
index 000..42adfc7
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/discovery/DiscoveryUtil.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required b
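
[Usage sketch: the OzoneConfiguration change above separates "defaults only"
loading from full config-file loading, which is what lets a discovery
endpoint tell overridden settings apart from shipped defaults.
createWithDefaultsOnly() and the public getProps() are in the patch; the
diffing loop below is illustrative only.]

    import java.util.Map;
    import java.util.Properties;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    public class PrintOverriddenKeys {
      public static void main(String[] args) {
        OzoneConfiguration effective = new OzoneConfiguration();
        OzoneConfiguration defaults = OzoneConfiguration.createWithDefaultsOnly();

        Properties defaultProps = defaults.getProps();
        for (Map.Entry<Object, Object> e : effective.getProps().entrySet()) {
          String key = (String) e.getKey();
          String value = (String) e.getValue();
          // Keep only keys whose value differs from the shipped default.
          if (!value.equals(defaultProps.getProperty(key))) {
            System.out.println(key + " = " + value);
          }
        }
      }
    }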

[hadoop] branch trunk updated: HDDS-2037. Fix hadoop version in pom.ozone.xml.

2019-08-27 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2b9cc7e  HDDS-2037. Fix hadoop version in pom.ozone.xml.
2b9cc7e is described below

commit 2b9cc7eb95a455ba927d395fac91010980d99707
Author: Nanda kumar 
AuthorDate: Mon Aug 26 20:30:56 2019 +0530

HDDS-2037. Fix hadoop version in pom.ozone.xml.

Signed-off-by: Anu Engineer 
---
 hadoop-hdds/pom.xml  | 6 ++
 hadoop-hdds/server-scm/pom.xml   | 4 
 hadoop-ozone/ozone-recon/pom.xml | 2 +-
 pom.ozone.xml| 2 +-
 4 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 7c01601..75cbe6a 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -43,6 +43,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
   
 
   
+3.2.0
 
 0.5.0-SNAPSHOT
 
@@ -209,14 +210,17 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   org.apache.hadoop
   hadoop-common
+  ${hadoop.version}
 
 
   org.apache.hadoop
   hadoop-hdfs
+  ${hadoop.version}
 
 
   org.apache.hadoop
   hadoop-hdfs-client
+  ${hadoop.version}
   
 
   com.squareup.okhttp
@@ -227,12 +231,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   org.apache.hadoop
   hadoop-common
+  ${hadoop.version}
   test
   test-jar
 
 
   org.apache.hadoop
   hadoop-hdfs
+  ${hadoop.version}
   test
   test-jar
 
diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml
index 60b1b44..99d5922 100644
--- a/hadoop-hdds/server-scm/pom.xml
+++ b/hadoop-hdds/server-scm/pom.xml
@@ -101,10 +101,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
   bcprov-jdk15on
 
 
-  io.dropwizard.metrics
-  metrics-core
-
-
   com.google.code.findbugs
   findbugs
   provided
diff --git a/hadoop-ozone/ozone-recon/pom.xml b/hadoop-ozone/ozone-recon/pom.xml
index 130ad35..4eed468 100644
--- a/hadoop-ozone/ozone-recon/pom.xml
+++ b/hadoop-ozone/ozone-recon/pom.xml
@@ -189,7 +189,7 @@
 
   org.apache.hadoop
   hadoop-ozone-reconcodegen
-  ${version}
+  ${ozone.version}
 
 
   org.apache.hadoop
diff --git a/pom.ozone.xml b/pom.ozone.xml
index ff841bd..5937c72 100644
--- a/pom.ozone.xml
+++ b/pom.ozone.xml
@@ -65,7 +65,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 
   
 
-3.3.0-SNAPSHOT
+3.2.0
 
 apache.snapshots.https
 Apache Development Snapshot 
Repository


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-2026. Overlapping chunk region cannot be read concurrently

2019-08-27 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0883ce1  HDDS-2026. Overlapping chunk region cannot be read 
concurrently
0883ce1 is described below

commit 0883ce102113cdc9527ab8aa548895a8418cb6bb
Author: Doroszlai, Attila 
AuthorDate: Mon Aug 26 12:59:47 2019 +0200

HDDS-2026. Overlapping chunk region cannot be read concurrently

Signed-off-by: Anu Engineer 
---
 .../container/keyvalue/helpers/ChunkUtils.java | 188 +++--
 .../container/keyvalue/helpers/TestChunkUtils.java | 164 ++
 2 files changed, 267 insertions(+), 85 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index 2993bbb..a043cdc 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.container.keyvalue.helpers;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerCommandRequestProto;
@@ -36,17 +37,20 @@ import 
org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl;
 import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;
 import org.apache.hadoop.util.Time;
+import org.apache.ratis.util.function.CheckedSupplier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.nio.channels.AsynchronousFileChannel;
 import java.nio.channels.FileChannel;
 import java.nio.channels.FileLock;
+import java.nio.file.Path;
 import java.nio.file.StandardOpenOption;
 import java.security.NoSuchAlgorithmException;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutionException;
 
 import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.*;
@@ -56,6 +60,8 @@ import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Res
  */
 public final class ChunkUtils {
 
+  private static final Set LOCKS = ConcurrentHashMap.newKeySet();
+
   /** Never constructed. **/
   private ChunkUtils() {
 
@@ -67,9 +73,8 @@ public final class ChunkUtils {
* @param chunkFile - File to write data to.
* @param chunkInfo - Data stream to write.
* @param data - The data buffer.
-   * @param volumeIOStats
+   * @param volumeIOStats statistics collector
* @param sync whether to do fsync or not
-   * @throws StorageContainerException
*/
   public static void writeData(File chunkFile, ChunkInfo chunkInfo,
   ByteBuffer data, VolumeIOStats volumeIOStats, boolean sync)
@@ -85,58 +90,43 @@ public final class ChunkUtils {
   throw new StorageContainerException(err, INVALID_WRITE_SIZE);
 }
 
-FileChannel file = null;
-FileLock lock = null;
+Path path = chunkFile.toPath();
+long startTime = Time.monotonicNow();
+processFileExclusively(path, () -> {
+  FileChannel file = null;
+  try {
+// skip SYNC and DSYNC to reduce contention on file.lock
+file = FileChannel.open(path,
+StandardOpenOption.CREATE,
+StandardOpenOption.WRITE,
+StandardOpenOption.SPARSE);
+
+int size;
+try (FileLock ignored = file.lock()) {
+  size = file.write(data, chunkInfo.getOffset());
+}
 
-try {
-  long writeTimeStart = Time.monotonicNow();
-
-  // skip SYNC and DSYNC to reduce contention on file.lock
-  file = FileChannel.open(chunkFile.toPath(),
-  StandardOpenOption.CREATE,
-  StandardOpenOption.WRITE,
-  StandardOpenOption.SPARSE);
-
-  lock = file.lock();
-  int size = file.write(data, chunkInfo.getOffset());
-  // Increment volumeIO stats here.
-  volumeIOStats.incWriteTime(Time.monotonicNow() - writeTimeStart);
-  volumeIOStats.incWriteOpCount();
-  volumeIOStats.incWriteBytes(size);
-  if (size != bufferSize) {
-log.error("Invalid write size found. Size:{}  Expected: {} ", size,
-bufferSize);
-throw new StorageContainerException("Invalid write size found. " +
-"Size: " + size + " Expected: " + bufferSize, INVALID_WRITE_SIZE);
+// Increment volumeIO stats here.
+volumeIOStats.i
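
[Sketch for orientation: the patch serializes chunk-file access per path in
process, because overlapping FileChannel region locks taken from the same
JVM throw OverlappingFileLockException. A minimal version of the
processFileExclusively idea, spinning on a concurrent set, is below; the
committed helper uses a checked-exception supplier and may differ in
signature and back-off.]

    import java.nio.file.Path;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Supplier;

    public final class PathLocks {

      // Membership in this set means "some thread is processing this path".
      private static final Set<Path> LOCKS = ConcurrentHashMap.newKeySet();

      /** Run op while holding an in-process exclusive claim on path. */
      public static <T> T processFileExclusively(Path path, Supplier<T> op) {
        while (!LOCKS.add(path)) {
          Thread.yield(); // another thread owns this path; retry
        }
        try {
          return op.get();
        } finally {
          LOCKS.remove(path); // always release, even on exception
        }
      }
    }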

[hadoop] branch trunk updated: HDDS-2002. Update documentation for 0.4.1 release.

2019-08-23 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b661dcf  HDDS-2002. Update documentation for 0.4.1 release.
b661dcf is described below

commit b661dcf563c0b3cb6fe6f22bb3a39f87e3ec1c57
Author: Nanda kumar 
AuthorDate: Wed Aug 21 22:47:41 2019 +0530

HDDS-2002. Update documentation for 0.4.1 release.

Signed-off-by: Anu Engineer 
---
 hadoop-hdds/docs/content/beyond/Containers.md  |  65 +---
 .../docs/content/beyond/DockerCheatSheet.md|   7 +-
 hadoop-hdds/docs/content/beyond/RunningWithHDFS.md |   2 +-
 hadoop-hdds/docs/content/concept/Datanodes.md  |   6 +-
 hadoop-hdds/docs/content/concept/Hdds.md   |   2 +-
 hadoop-hdds/docs/content/concept/Overview.md   |   6 +-
 hadoop-hdds/docs/content/concept/OzoneManager.md   |  20 +--
 hadoop-hdds/docs/content/interface/JavaApi.md  |   8 +-
 hadoop-hdds/docs/content/interface/OzoneFS.md  |   8 +-
 hadoop-hdds/docs/content/interface/S3.md   |  18 +--
 hadoop-hdds/docs/content/recipe/Prometheus.md  |  22 +--
 hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md |  38 +++--
 hadoop-hdds/docs/content/recipe/_index.md  |   3 +-
 .../docs/content/security/SecuityWithRanger.md |   4 +-
 hadoop-hdds/docs/content/security/SecureOzone.md   | 169 +++--
 .../docs/content/security/SecuringDatanodes.md |  13 +-
 hadoop-hdds/docs/content/security/SecuringS3.md|   6 +-
 hadoop-hdds/docs/content/security/SecuringTDE.md   |   5 +-
 hadoop-hdds/docs/content/security/SecurityAcls.md  |  35 ++---
 hadoop-hdds/docs/content/shell/BucketCommands.md   |  30 +---
 hadoop-hdds/docs/content/shell/KeyCommands.md  |  24 ++-
 hadoop-hdds/docs/content/shell/VolumeCommands.md   |  20 ++-
 hadoop-hdds/docs/content/start/Kubernetes.md   |   2 +-
 hadoop-hdds/docs/content/start/OnPrem.md   |   7 +-
 .../docs/content/start/StartFromDockerHub.md   |   7 +-
 25 files changed, 269 insertions(+), 258 deletions(-)

diff --git a/hadoop-hdds/docs/content/beyond/Containers.md 
b/hadoop-hdds/docs/content/beyond/Containers.md
index b4dc94f..ea7e3b1 100644
--- a/hadoop-hdds/docs/content/beyond/Containers.md
+++ b/hadoop-hdds/docs/content/beyond/Containers.md
@@ -25,8 +25,9 @@ Docker heavily is used at the ozone development with three 
principal use-cases:
 * __dev__:
  * We use docker to start local pseudo-clusters (docker provides unified 
environment, but no image creation is required)
 * __test__:
- * We create docker images from the dev branches to test ozone in 
kubernetes and other container orchestator system
- * We provide _apache/ozone_ images for each release to make it easier the 
evaluation of Ozone. These images are __not__ created __for production__ usage.
+ * We create docker images from the dev branches to test ozone in 
kubernetes and other container orchestrator system
+ * We provide _apache/ozone_ images for each release to make it easier for 
evaluation of Ozone.
+ These images are __not__ created __for production__ usage.
 
 
 We strongly recommend that you create your own custom images when you
@@ -36,7 +37,7 @@ shipped container images and k8s resources as examples and 
guides to help you
 
 
 * __production__:
- * We document how can you create your own docker image for your 
production cluster.
+ * We have documentation on how you can create your own docker image for 
your production cluster.
 
 Let's check out each of the use-cases in more detail:
 
@@ -46,38 +47,41 @@ Ozone artifact contains example docker-compose directories 
to make it easier to
 
 From distribution:
 
-```
+```bash
 cd compose/ozone
 docker-compose up -d
 ```
 
-After a local build
+After a local build:
 
-```
+```bash
 cd  hadoop-ozone/dist/target/ozone-*/compose
 docker-compose up -d
 ```
 
 These environments are very important tools to start different type of Ozone 
clusters at any time.
 
-To be sure that the compose files are up-to-date, we also provide acceptance 
test suites which start the cluster and check the basic behaviour.
+To be sure that the compose files are up-to-date, we also provide acceptance 
test suites which start
+the cluster and check the basic behaviour.
 
-The acceptance tests are part of the distribution, and you can find the test 
definitions in `./smoketest` directory.
+The acceptance tests are part of the distribution, and you can find the test 
definitions in `smoketest` directory.
 
 You can start the tests from any compose directory:
 
 For example:
 
-```
+```bash
 cd compose/ozone
 ./test.sh
 ```
 
 ### Implementation details
 
-`./compose` tests are based on the apache/hadoop-runner docker image. The 
image itself doesn't contain any Ozone jar file or binary just the helper 
scripts to start ozone.
+`compose` tests ar

[hadoop] branch trunk updated: HDDS-1927. Consolidate add/remove Acl into OzoneAclUtil class. Contributed by Xiaoyu Yao.

2019-08-21 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d58eba8  HDDS-1927. Consolidate add/remove Acl into OzoneAclUtil 
class. Contributed by Xiaoyu Yao.
d58eba8 is described below

commit d58eba867234eaac0e229feb990e9dab3912e063
Author: Xiaoyu Yao 
AuthorDate: Mon Aug 19 15:54:44 2019 -0700

HDDS-1927. Consolidate add/remove Acl into OzoneAclUtil class. Contributed 
by Xiaoyu Yao.

Signed-off-by: Anu Engineer 
---
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |   5 +-
 .../java/org/apache/hadoop/ozone/OzoneAcl.java |   5 +
 .../hadoop/ozone/om/helpers/OmBucketInfo.java  |  76 +-
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  | 147 ++-
 .../hadoop/ozone/om/helpers/OmOzoneAclMap.java |   7 +-
 .../hadoop/ozone/om/helpers/OmPrefixInfo.java  |  30 ++-
 .../hadoop/ozone/om/helpers/OzoneAclUtil.java  | 286 +
 .../apache/hadoop/ozone/web/utils/OzoneUtils.java  | 158 
 .../hadoop/ozone/om/helpers/TestOzoneAclUtil.java  | 191 ++
 .../client/rpc/TestOzoneRpcClientAbstract.java |  15 +-
 .../apache/hadoop/ozone/om/TestKeyManagerImpl.java |   8 +-
 .../apache/hadoop/ozone/om/TestOzoneManager.java   |   3 +-
 .../security/acl/TestOzoneNativeAuthorizer.java|   4 +-
 .../web/storage/DistributedStorageHandler.java |   3 +-
 .../apache/hadoop/ozone/om/BucketManagerImpl.java  |  91 +--
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 204 ---
 .../apache/hadoop/ozone/om/PrefixManagerImpl.java  | 186 --
 .../om/request/file/OMDirectoryCreateRequest.java  |  11 +-
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  |  13 +-
 .../om/request/key/acl/OMKeyAddAclRequest.java |   7 +-
 .../om/request/key/acl/OMKeyRemoveAclRequest.java  |   7 +-
 .../om/request/key/acl/OMKeySetAclRequest.java |   9 +-
 .../S3InitiateMultipartUploadRequest.java  |   3 +-
 .../S3MultipartUploadCompleteRequest.java  |   4 +-
 .../protocolPB/OzoneManagerRequestHandler.java |   5 +-
 25 files changed, 699 insertions(+), 779 deletions(-)

diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index fbb488e..003bcc4 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -64,9 +64,10 @@ import 
org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
 import org.apache.hadoop.ozone.om.helpers.OmPartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
 import org.apache.hadoop.ozone.om.protocolPB
 .OzoneManagerProtocolClientSideTranslatorPB;
@@ -440,7 +441,7 @@ public class RpcClient implements ClientProtocol {
* @return listOfAcls
* */
   private List getAclList() {
-return OzoneUtils.getAclList(ugi.getUserName(), ugi.getGroups(),
+return OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroups(),
 userRights, groupRights);
   }
 
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
index 1730a4f..6a74342 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
@@ -328,6 +328,11 @@ public class OzoneAcl {
 otherAcl.getAclScope().equals(this.getAclScope());
   }
 
+  public OzoneAcl setAclScope(AclScope scope) {
+this.aclScope = scope;
+return this;
+  }
+
   /**
* Scope of ozone acl.
* */
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
index 4d764a5..4207583 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.om.helpers;
 
 
-import java.util.BitSet;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.LinkedList;
@@ -37,8 +36,6 @@ import org.apache.hadoop.ozone.protocolPB.OMPBHelper
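
[Sketch for orientation: the patch consolidates ACL add/remove/set logic in a
single OzoneAclUtil helper instead of re-implementing it per entity type. The
helper's exact signatures are not visible in the truncated diff; as a rough
illustration of idempotent add/remove over an ACL list, with Acl as a
hypothetical minimal entry type:]

    import java.util.List;
    import java.util.Objects;

    final class AclListUtil {

      // Hypothetical minimal ACL entry; the committed OzoneAclUtil works on
      // OzoneAcl and additionally merges permission bits for a matching
      // subject instead of storing duplicates.
      static final class Acl {
        final String type;
        final String name;
        final String rights;

        Acl(String type, String name, String rights) {
          this.type = type;
          this.name = name;
          this.rights = rights;
        }

        @Override
        public boolean equals(Object o) {
          if (!(o instanceof Acl)) {
            return false;
          }
          Acl a = (Acl) o;
          return type.equals(a.type) && name.equals(a.name)
              && rights.equals(a.rights);
        }

        @Override
        public int hashCode() {
          return Objects.hash(type, name, rights);
        }
      }

      /** Add unless an identical entry exists; report whether we changed. */
      static boolean addAcl(List<Acl> acls, Acl acl) {
        return !acls.contains(acl) && acls.add(acl);
      }

      /** Remove if present; report whether the list changed. */
      static boolean removeAcl(List<Acl> acls, Acl acl) {
        return acls.remove(acl);
      }
    }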

[hadoop] branch trunk updated: HDDS-1871. Remove anti-affinity rules from k8s minkube example

2019-08-21 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 8fc6567  HDDS-1871. Remove anti-affinity rules from k8s minkube example
8fc6567 is described below

commit 8fc6567b946f1d536ffed4798b5403a365021464
Author: Márton Elek 
AuthorDate: Mon Jul 29 14:44:58 2019 +0200

HDDS-1871. Remove anti-affinity rules from k8s minkube example

Signed-off-by: Anu Engineer 
---
 .../main/k8s/definitions/ozone/datanode-ss.yaml|  6 +++---
 .../getting-started/datanode-statefulset.yaml  |  6 +++---
 .../examples/getting-started/om-statefulset.yaml   | 22 ++
 .../examples/getting-started/s3g-statefulset.yaml  |  5 +
 .../examples/getting-started/scm-statefulset.yaml  |  4 
 .../dist/src/main/k8s/examples/minikube/Flekszible |  9 +
 .../examples/minikube/datanode-statefulset.yaml| 22 ++
 .../main/k8s/examples/minikube/om-statefulset.yaml |  6 +++---
 .../k8s/examples/minikube/s3g-statefulset.yaml |  6 +++---
 .../k8s/examples/minikube/scm-statefulset.yaml | 12 ++--
 .../ozone-dev/csi/csi-ozone-clusterrole.yaml   |  2 +-
 .../csi/csi-ozone-clusterrolebinding.yaml  |  6 +++---
 .../ozone-dev/csi/csi-ozone-serviceaccount.yaml|  2 +-
 .../examples/ozone-dev/datanode-statefulset.yaml   |  6 +++---
 .../examples/ozone-dev/prometheus-clusterrole.yaml |  2 +-
 .../prometheus-operator-clusterrolebinding.yaml|  6 +++---
 .../examples/ozone/csi/csi-ozone-clusterrole.yaml  |  2 +-
 .../ozone/csi/csi-ozone-clusterrolebinding.yaml|  6 +++---
 .../ozone/csi/csi-ozone-serviceaccount.yaml|  2 +-
 .../k8s/examples/ozone/datanode-statefulset.yaml   |  6 +++---
 20 files changed, 68 insertions(+), 70 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml 
b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml
index 94dc570..88a4308 100644
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml
@@ -32,9 +32,9 @@ spec:
 app: ozone
 component: datanode
   annotations:
-prdatanodeetheus.io/scrape: "true"
-prdatanodeetheus.io/port: "9882"
-prdatanodeetheus.io/path: "/prom"
+prometheus.io/scrape: "true"
+prometheus.io/port: "9882"
+prometheus.io/path: "/prom"
 spec:
   affinity:
 podAntiAffinity:
diff --git 
a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml
 
b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml
index 6c8d1bf..c393ead 100644
--- 
a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml
+++ 
b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml
@@ -33,9 +33,9 @@ spec:
 app: ozone
 component: datanode
   annotations:
-prdatanodeetheus.io/scrape: "true"
-prdatanodeetheus.io/port: "9882"
-prdatanodeetheus.io/path: /prom
+prometheus.io/scrape: "true"
+prometheus.io/port: "9882"
+prometheus.io/path: /prom
 spec:
   affinity:
 podAntiAffinity:
diff --git 
a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml 
b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml
index c8ff81b..5de01f5 100644
--- 
a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml
+++ 
b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml
@@ -39,22 +39,6 @@ spec:
 spec:
   securityContext:
 fsGroup: 1000
-  initContainers:
-  - name: init
-image: '@docker.image@'
-args:
-- ozone
-- om
-- --init
-env:
-- name: WAITFOR
-  value: scm-0.scm:9876
-envFrom:
-- configMapRef:
-name: config
-volumeMounts:
-- name: data
-  mountPath: /data
   containers:
   - name: om
 image: '@docker.image@'
@@ -64,6 +48,12 @@ spec:
 env:
 - name: WAITFOR
   value: scm-0.scm:9876
+- name: ENSURE_OM_INITIALIZED
+  value: /data/metadata/om/current/VERSION
+livenessProbe:
+  tcpSocket:
+port: 9862
+  initialDelaySeconds: 30
 envFrom:
 - configMapRef:
 name: config
diff --git 
a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-statefulset.yaml 
b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-statefulset.yaml
index c7e13e5..2409583 100644
--- 
a/hadoop-ozone/dist/src/m

[hadoop] branch revert-792-HDDS-1474 deleted (was a16d627)

2019-08-21 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch revert-792-HDDS-1474
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 was a16d627  Revert "HDDS-1474. ozone.scm.datanode.id config should take 
path for a dir  (#792)"

The revisions that were on this branch are still contained in
other references; therefore, this change does not discard any commits
from the repository.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch revert-715-HDDS-1370 deleted (was 5b308cc)

2019-08-21 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch revert-715-HDDS-1370
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 was 5b308cc  Revert "HDDS-1370. Command Execution in Datanode fails 
because of NPE (#715)"

This change permanently discards the following revisions:

 discard 5b308cc  Revert "HDDS-1370. Command Execution in Datanode fails 
because of NPE (#715)"


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch revert-526-HDDS-1183 deleted (was 7d44223)

2019-08-21 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch revert-526-HDDS-1183
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 was 7d44223  Revert "HDDS-1183. Override getDelegationToken API for 
OzoneFileSystem. Contributed by Xiaoyu Yao."

The revisions that were on this branch are still contained in
other references; therefore, this change does not discard any commits
from the repository.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch revert-1164-HDDS-1829 deleted (was 45095e4)

2019-08-21 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch revert-1164-HDDS-1829
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 was 45095e4  Revert "HDDS-1829 On OM reload/restart OmMetrics#numKeys 
should be updated (#1164)"

The revisions that were on this branch are still contained in
other references; therefore, this change does not discard any commits
from the repository.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch revert-1156-HDDS-1830 deleted (was 74937b8)

2019-08-21 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch revert-1156-HDDS-1830
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 was 74937b8  Revert "HDDS-1830 OzoneManagerDoubleBuffer#stop should wait 
for daemon thread to die (#1156)"

This change permanently discards the following revisions:

 discard 74937b8  Revert "HDDS-1830 OzoneManagerDoubleBuffer#stop should wait 
for daemon thread to die (#1156)"


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-14759. HDFS cat logs an info message. Contributed by Eric Badger.

2019-08-20 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 8aaf5e1  HDFS-14759. HDFS cat logs an info message. Contributed by 
Eric Badger.
8aaf5e1 is described below

commit 8aaf5e1a14e577a7d8142bc7d49bb94014032afd
Author: Anu Engineer 
AuthorDate: Tue Aug 20 20:24:19 2019 -0700

HDFS-14759. HDFS cat logs an info message.
Contributed by Eric Badger.
---
 .../hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
index 6112c09..acd1e50 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
@@ -227,7 +227,7 @@ public class SaslDataTransferClient {
   throws IOException {
 boolean localTrusted = trustedChannelResolver.isTrusted();
 boolean remoteTrusted = trustedChannelResolver.isTrusted(addr);
-LOG.info("SASL encryption trust check: localHostTrusted = {}, "
+LOG.debug("SASL encryption trust check: localHostTrusted = {}, "
 + "remoteHostTrusted = {}", localTrusted, remoteTrusted);
 if (!localTrusted || !remoteTrusted) {
   // The encryption key factory only returns a key if encryption is 
enabled.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1910. Cannot build hadoop-hdds-config from scratch in IDEA

2019-08-15 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 77d102c  HDDS-1910. Cannot build hadoop-hdds-config from scratch in 
IDEA
77d102c is described below

commit 77d102ceba1aad2ae3e9172cdef35c6a42735ae2
Author: Doroszlai, Attila 
AuthorDate: Mon Aug 5 16:13:05 2019 +0200

HDDS-1910. Cannot build hadoop-hdds-config from scratch in IDEA

Signed-off-by: Anu Engineer 
---
 .../services/javax.annotation.processing.Processor  |  0
 hadoop-hdds/config/pom.xml  | 21 -
 .../services/javax.annotation.processing.Processor  |  0
 3 files changed, 21 deletions(-)

diff --git 
a/hadoop-hdds/config/src/main/resources/META-INF/services/javax.annotation.processing.Processor
 
b/hadoop-hdds/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor
similarity index 100%
copy from 
hadoop-hdds/config/src/main/resources/META-INF/services/javax.annotation.processing.Processor
copy to 
hadoop-hdds/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor
diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml
index bf62949..a595549 100644
--- a/hadoop-hdds/config/pom.xml
+++ b/hadoop-hdds/config/pom.xml
@@ -42,25 +42,4 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   
 
-  
-
-  
-maven-compiler-plugin
-3.1
-
-
-  default-compile
-  compile
-  
-compile
-  
-  
-
--proc:none
-  
-
-
-  
-
-  
 
diff --git 
a/hadoop-hdds/config/src/main/resources/META-INF/services/javax.annotation.processing.Processor
 
b/hadoop-hdds/config/src/test/resources/META-INF/services/javax.annotation.processing.Processor
similarity index 100%
rename from 
hadoop-hdds/config/src/main/resources/META-INF/services/javax.annotation.processing.Processor
rename to 
hadoop-hdds/config/src/test/resources/META-INF/services/javax.annotation.processing.Processor


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1954. StackOverflowError in OzoneClientInvocationHandler

2019-08-15 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 46d6191  HDDS-1954. StackOverflowError in OzoneClientInvocationHandler
46d6191 is described below

commit 46d61913ff2a4ed6b5c77f348ba71c2c677b61ef
Author: Doroszlai, Attila 
AuthorDate: Mon Aug 12 21:43:00 2019 +0200

HDDS-1954. StackOverflowError in OzoneClientInvocationHandler

Signed-off-by: Anu Engineer 
---
 .../org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java
index 3051e2d..cdc7702 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java
@@ -48,7 +48,7 @@ public class OzoneClientInvocationHandler implements 
InvocationHandler {
   @Override
   public Object invoke(Object proxy, Method method, Object[] args)
   throws Throwable {
-LOG.trace("Invoking method {} on proxy {}", method, proxy);
+LOG.trace("Invoking method {} on target {}", method, target);
 try {
   long startTime = Time.monotonicNow();
   Object result = method.invoke(target, args);
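
[Why the one-word change matters: formatting the proxy argument makes SLF4J
call proxy.toString(), and toString() on a dynamic proxy is dispatched back
through invoke(), which formats the proxy again, recursing until the stack
overflows (only once TRACE is enabled, since SLF4J formats arguments
lazily). A tiny standalone reproduction:]

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.Method;
    import java.lang.reflect.Proxy;

    public class ProxyRecursionDemo {
      public static void main(String[] args) {
        Runnable target = () -> { };

        InvocationHandler handler = new InvocationHandler() {
          @Override
          public Object invoke(Object proxy, Method method, Object[] a)
              throws Throwable {
            // BAD: "" + proxy calls proxy.toString(), which re-enters this
            // invoke() method, which stringifies proxy again, and so on
            // until a StackOverflowError. Logging 'target' is safe.
            System.out.println("Invoking " + method.getName() + " on " + proxy);
            return method.invoke(target, a);
          }
        };

        Runnable proxy = (Runnable) Proxy.newProxyInstance(
            Runnable.class.getClassLoader(),
            new Class<?>[] {Runnable.class},
            handler);
        proxy.run(); // throws StackOverflowError
      }
    }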


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1964. TestOzoneClientProducer fails with ConnectException

2019-08-14 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 8242085  HDDS-1964. TestOzoneClientProducer fails with ConnectException
8242085 is described below

commit 82420851645f1644f597e11e14a1d70bb8a7cc23
Author: Doroszlai, Attila 
AuthorDate: Wed Aug 14 11:40:25 2019 +0200

HDDS-1964. TestOzoneClientProducer fails with ConnectException

Signed-off-by: Anu Engineer 
---
 .../hadoop/ozone/s3/TestOzoneClientProducer.java|  2 ++
 .../s3gateway/src/test/resources/log4j.properties   | 21 +
 2 files changed, 23 insertions(+)

diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java
index 641b1e4..17cf7bc 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.s3;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -78,6 +79,7 @@ public class TestOzoneClientProducer {
 context = Mockito.mock(ContainerRequestContext.class);
 OzoneConfiguration config = new OzoneConfiguration();
 config.setBoolean(OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY, true);
+config.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "");
 setupContext();
 producer.setContext(context);
 producer.setOzoneConfiguration(config);
diff --git a/hadoop-ozone/s3gateway/src/test/resources/log4j.properties 
b/hadoop-ozone/s3gateway/src/test/resources/log4j.properties
new file mode 100644
index 000..b8ad21d
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/test/resources/log4j.properties
@@ -0,0 +1,21 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=info,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} 
(%F:%M(%L)) - %m%n
+
+log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1966. Wrong expected key ACL in acceptance test

2019-08-14 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 06d8ac9  HDDS-1966. Wrong expected key ACL in acceptance test
06d8ac9 is described below

commit 06d8ac95226ef45aa810668f175a70a0ce9b7cb1
Author: Doroszlai, Attila 
AuthorDate: Wed Aug 14 14:27:10 2019 +0200

HDDS-1966. Wrong expected key ACL in acceptance test

Signed-off-by: Anu Engineer 
---
 hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot  | 8 
 .../dist/src/main/smoketest/security/ozone-secure-fs.robot| 6 +++---
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot 
b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
index 60a3f04..9606567 100644
--- a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
@@ -122,11 +122,11 @@ Test key Acls
 Should Match Regexp ${result}   \"type\" : 
\"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : 
. \"ALL\" .
 ${result} = Execute ozone sh key addacl 
${protocol}${server}/${volume}/bb1/key2 -a user:superuser1:rwxy
 ${result} = Execute ozone sh key getacl 
${protocol}${server}/${volume}/bb1/key2
-Should Match Regexp ${result}   \"type\" : 
\"GROUP\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : 
\"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
+Should Match Regexp ${result}   \"type\" : 
\"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : 
\"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
 ${result} = Execute ozone sh key removeacl 
${protocol}${server}/${volume}/bb1/key2 -a user:superuser1:xy
 ${result} = Execute ozone sh key getacl 
${protocol}${server}/${volume}/bb1/key2
-Should Match Regexp ${result}   \"type\" : 
\"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : 
\"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\"
+Should Match Regexp ${result}   \"type\" : 
\"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : 
\"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\"
 ${result} = Execute ozone sh key setacl 
${protocol}${server}/${volume}/bb1/key2 -al 
user:superuser1:rwxy,group:superuser1:a,user:testuser/s...@example.com:rwxyc
 ${result} = Execute ozone sh key getacl 
${protocol}${server}/${volume}/bb1/key2
-Should Match Regexp ${result}   \"type\" : 
\"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : 
\"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
-Should Match Regexp ${result}   \"type\" : 
\"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : 
\"ACCESS\",\n.*\"aclList\" : . \"ALL\" .
\ No newline at end of file
+Should Match Regexp ${result}   \"type\" : 
\"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : 
\"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
+Should Match Regexp ${result}   \"type\" : 
\"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : 
\"ACCESS\",\n.*\"aclList\" : . \"ALL\" .
diff --git 
a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot 
b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot
index 20f9a4f..ee4688c 100644
--- a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot
@@ -87,10 +87,10 @@ Test key Acls
 Should Match Regexp ${result}   \"type\" : 
\"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\&quo

[hadoop] branch trunk updated: HDDS-1914. Ozonescript example docker-compose cluster can't be started

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 846848a  HDDS-1914. Ozonescript example docker-compose cluster can't 
be started
846848a is described below

commit 846848ac4c18175053987416e14533c2a7f76500
Author: Márton Elek 
AuthorDate: Tue Aug 6 10:04:02 2019 +0200

HDDS-1914. Ozonescript example docker-compose cluster can't be started

Signed-off-by: Anu Engineer 
---
 hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment | 2 +-
 hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile   | 5 +++--
 hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config| 3 +--
 hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh | 2 ++
 4 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment 
b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment
index 5685453..cbde0f2 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment
@@ -13,4 +13,4 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64/
+JAVA_HOME=/usr/lib/jvm/jre
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile 
b/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile
index 2741eb0..b078000 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile
@@ -14,8 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 FROM apache/ozone-runner
-RUN sudo apt-get update && sudo apt-get install -y openssh-server
+RUN sudo yum install -y openssh-clients openssh-server
 
+RUN sudo ssh-keygen -A
 RUN sudo mkdir -p /run/sshd
 RUN sudo sed -i "s/.*UsePrivilegeSeparation.*/UsePrivilegeSeparation no/g" 
/etc/ssh/sshd_config
 RUN sudo sed -i "s/.*PermitUserEnvironment.*/PermitUserEnvironment yes/g" 
/etc/ssh/sshd_config
@@ -29,5 +30,5 @@ RUN sudo chown hadoop /opt
 RUN sudo chmod 600 /opt/.ssh/*
 RUN sudo chmod 700 /opt/.ssh
 
-RUN sudo sh -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64/" 
>> /etc/profile'
+RUN sudo sh -c 'echo "export JAVA_HOME=/usr/lib/jvm/jre/" >> /etc/profile'
 CMD ["sudo","/usr/sbin/sshd","-D"]
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config
index 3c99cd7..4e67a04 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config
@@ -14,7 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64/
 CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
 OZONE-SITE.XML_ozone.ksm.address=ksm
 OZONE-SITE.XML_ozone.scm.names=scm
@@ -37,4 +36,4 @@ LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
 LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR, 
stdout
 LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{-MM-dd 
HH:mm:ss} %-5p %c{1}:%L - %m%n
\ No newline at end of file
+LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{-MM-dd 
HH:mm:ss} %-5p %c{1}:%L - %m%n
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh 
b/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh
index 9540eb9..49fc506 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh
@@ -14,7 +14,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+set -x
 docker-compose ps | grep datanode | awk '{print $1}' | xargs -n1  docker 
inspect --format '{{ .Config.Hostname }}' > ../../etc/hadoop/workers
+docker-compose ps | grep ozonescripts | awk '{print $1}' | xargs -I CONTAINER 
-n1 docker exec CONTAINER cp /opt/hadoop/etc/hadoop/workers /etc/hadoop/workers
 docker-compose exec scm /opt/hadoop/bin/ozone scm --init
 docker-compose exec scm /opt/hadoop/sbin/start-ozone.sh
 #We need a running SCM for om objectstore creation


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1929. OM started on recon host in ozonesecure compose

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 48dc441  HDDS-1929. OM started on recon host in ozonesecure compose
48dc441 is described below

commit 48dc4418644650fe44610db04dfa6974ba5d3c78
Author: Doroszlai, Attila 
AuthorDate: Wed Aug 7 21:55:25 2019 +0200

HDDS-1929. OM started on recon host in ozonesecure compose

Signed-off-by: Anu Engineer 
---
 hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml 
b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
index 6f59994..d202717 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
@@ -76,7 +76,6 @@ services:
   - ./docker-config
 environment:
   WAITFOR: om:9874
-  ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
 command: ["/opt/hadoop/bin/ozone","recon"]
   scm:
 image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1947. fix naming issue for ScmBlockLocationTestingClient. Contributed by star.

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5a248de  HDDS-1947. fix naming issue for 
ScmBlockLocationTestingClient. Contributed by star.
5a248de is described below

commit 5a248de51158de40561fc57802c029f686623069
Author: Anu Engineer 
AuthorDate: Tue Aug 13 23:07:02 2019 -0700

HDDS-1947. fix naming issue for ScmBlockLocationTestingClient.
Contributed by star.
---
 .../org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java   | 6 +++---
 ...ocationTestIngClient.java => ScmBlockLocationTestingClient.java} | 6 +++---
 .../java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java | 6 +++---
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
index b8534de..982e87e 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
@@ -62,9 +62,9 @@ import static org.apache.hadoop.hdds.protocol.proto
  * TODO: Move this class to a generic test utils so we can use this class in
  * other Ozone Manager tests.
  */
-public class ScmBlockLocationTestIngClient implements ScmBlockLocationProtocol 
{
+public class ScmBlockLocationTestingClient implements ScmBlockLocationProtocol 
{
   private static final Logger LOG =
-  LoggerFactory.getLogger(ScmBlockLocationTestIngClient.class);
+  LoggerFactory.getLogger(ScmBlockLocationTestingClient.class);
   private final String clusterID;
   private final String scmId;
 
@@ -81,7 +81,7 @@ public class ScmBlockLocationTestIngClient implements 
ScmBlockLocationProtocol {
* @param failCallsFrequency - Set to 0 for no failures, 1 for always to 
fail,
* a positive number for that frequency of failure.
*/
-  public ScmBlockLocationTestIngClient(String clusterID, String scmId,
+  public ScmBlockLocationTestingClient(String clusterID, String scmId,
   int failCallsFrequency) {
 this.clusterID = StringUtils.isNotBlank(clusterID) ? clusterID :
 UUID.randomUUID().toString();
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java
similarity index 97%
copy from 
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
copy to 
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java
index b8534de..982e87e 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java
@@ -62,9 +62,9 @@ import static org.apache.hadoop.hdds.protocol.proto
  * TODO: Move this class to a generic test utils so we can use this class in
  * other Ozone Manager tests.
  */
-public class ScmBlockLocationTestIngClient implements ScmBlockLocationProtocol 
{
+public class ScmBlockLocationTestingClient implements ScmBlockLocationProtocol 
{
   private static final Logger LOG =
-  LoggerFactory.getLogger(ScmBlockLocationTestIngClient.class);
+  LoggerFactory.getLogger(ScmBlockLocationTestingClient.class);
   private final String clusterID;
   private final String scmId;
 
@@ -81,7 +81,7 @@ public class ScmBlockLocationTestIngClient implements 
ScmBlockLocationProtocol {
* @param failCallsFrequency - Set to 0 for no failures, 1 for always to 
fail,
* a positive number for that frequency of failure.
*/
-  public ScmBlockLocationTestIngClient(String clusterID, String scmId,
+  public ScmBlockLocationTestingClient(String clusterID, String scmId,
   int failCallsFrequency) {
 this.clusterID = StringUtils.isNotBlank(clusterID) ? clusterID :
 UUID.randomUUID().toString();
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java
index 357feeb..4ed4207 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java
@@ -91,7 +91,7 @@ public class TestKeyDeletingService {
 OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(conf);
 KeyManager keyManager =
 new KeyManagerImpl(
-new ScmBlockLocationTestIngCli

[hadoop] 02/02: HDDS-1832 : Improve logging for PipelineActions handling in SCM and datanode. (Change to Error logging)

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit fc229b6490a152036b6424c7c0ac5c3df9525e57
Author: Aravindan Vijayan 
AuthorDate: Fri Aug 2 11:04:52 2019 -0700

HDDS-1832 : Improve logging for PipelineActions handling in SCM and 
datanode. (Change to Error logging)

Signed-off-by: Anu Engineer 
---
 .../container/common/transport/server/ratis/XceiverServerRatis.java | 2 +-
 .../java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 3a8b79b..54e8f3e 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -558,7 +558,7 @@ public final class XceiverServerRatis extends XceiverServer 
{
 if (triggerHB) {
   context.getParent().triggerHeartbeat();
 }
-LOG.info(
+LOG.error(
 "pipeline Action " + action.getAction() + "  on pipeline " + pipelineID
 + ".Reason : " + action.getClosePipeline().getDetailedReason());
   }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
index 8d040f1..8d497fa 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
@@ -57,7 +57,7 @@ public class PipelineActionHandler
   pipelineID = PipelineID.
   getFromProtobuf(action.getClosePipeline().getPipelineID());
   Pipeline pipeline = pipelineManager.getPipeline(pipelineID);
-  LOG.info("Received pipeline action {} for {} from datanode {}. " +
+  LOG.error("Received pipeline action {} for {} from datanode {}. " +
   "Reason : {}", action.getAction(), pipeline,
   report.getDatanodeDetails(),
   action.getClosePipeline().getDetailedReason());


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 01/02: HDDS-1915. Remove hadoop script from ozone distribution

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 15545c8bf1318e936fe2251bc2ef7522a36af7cd
Author: Márton Elek 
AuthorDate: Tue Aug 6 10:10:52 2019 +0200

HDDS-1915. Remove hadoop script from ozone distribution

Signed-off-by: Anu Engineer 
---
 hadoop-ozone/dist/dev-support/bin/dist-layout-stitching | 2 --
 1 file changed, 2 deletions(-)

diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching 
b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
index d95242e..5def094 100755
--- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
+++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
@@ -94,8 +94,6 @@ run cp 
"${ROOT}/hadoop-ozone/dist/src/main/conf/ozone-site.xml" "etc/hadoop"
 run cp -f "${ROOT}/hadoop-ozone/dist/src/main/conf/log4j.properties" 
"etc/hadoop"
 run cp 
"${ROOT}/hadoop-hdds/common/src/main/resources/network-topology-default.xml" 
"etc/hadoop"
 run cp 
"${ROOT}/hadoop-hdds/common/src/main/resources/network-topology-nodegroup.xml" 
"etc/hadoop"
-run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop" "bin/"
-run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd" 
"bin/"
 run cp "${ROOT}/hadoop-ozone/common/src/main/bin/ozone" "bin/"
 run cp -r "${ROOT}/hadoop-ozone/dist/src/main/dockerbin" "bin/docker"
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (78b714a -> fc229b6)

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 78b714a  HDDS-1956. Aged IO Thread exits on first read
 new 15545c8  HDDS-1915. Remove hadoop script from ozone distribution
 new fc229b6  HDDS-1832 : Improve logging for PipelineActions handling in 
SCM and datanode. (Change to Error logging)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../container/common/transport/server/ratis/XceiverServerRatis.java | 2 +-
 .../java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java | 2 +-
 hadoop-ozone/dist/dev-support/bin/dist-layout-stitching | 2 --
 3 files changed, 2 insertions(+), 4 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1956. Aged IO Thread exits on first read

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 78b714a  HDDS-1956. Aged IO Thread exits on first read
78b714a is described below

commit 78b714af9c0ef4cd1b6219eee884a43eb66d1574
Author: Doroszlai, Attila 
AuthorDate: Tue Aug 13 09:52:51 2019 +0200

HDDS-1956. Aged IO Thread exits on first read

Signed-off-by: Anu Engineer 
---
 .../apache/hadoop/ozone/MiniOzoneChaosCluster.java |  8 ++---
 .../hadoop/ozone/MiniOzoneLoadGenerator.java   | 38 ++
 .../src/test/resources/log4j.properties|  2 +-
 3 files changed, 30 insertions(+), 18 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
index 75911df..2eef206 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
@@ -68,7 +68,7 @@ public class MiniOzoneChaosCluster extends 
MiniOzoneClusterImpl {
 
 this.executorService =  Executors.newSingleThreadScheduledExecutor();
 this.numDatanodes = getHddsDatanodes().size();
-LOG.info("Starting MiniOzoneChaosCluster with:{} datanodes" + 
numDatanodes);
+LOG.info("Starting MiniOzoneChaosCluster with {} datanodes", numDatanodes);
 LogUtils.setLogLevel(GrpcClientProtocolClient.LOG, Level.WARN);
   }
 
@@ -108,7 +108,7 @@ public class MiniOzoneChaosCluster extends 
MiniOzoneClusterImpl {
 LOG.info("{} Completed restarting Datanode: {}", failString,
 dn.getUuid());
   } catch (Exception e) {
-LOG.error("Failed to restartNodes Datanode", dn.getUuid());
+LOG.error("Failed to restartNodes Datanode {}", dn.getUuid(), e);
   }
 }
   }
@@ -119,7 +119,7 @@ public class MiniOzoneChaosCluster extends 
MiniOzoneClusterImpl {
 for (int i = 0; i < numNodesToFail; i++) {
   boolean shouldStop = shouldStop();
   int failedNodeIndex = getNodeToFail();
-  String stopString = shouldStop ? "Stopping" : "Starting";
+  String stopString = shouldStop ? "Stopping" : "Restarting";
   DatanodeDetails dn =
   getHddsDatanodes().get(failedNodeIndex).getDatanodeDetails();
   try {
@@ -133,7 +133,7 @@ public class MiniOzoneChaosCluster extends 
MiniOzoneClusterImpl {
 LOG.info("Completed {} DataNode {}", stopString, dn.getUuid());
 
   } catch (Exception e) {
-LOG.error("Failed to shutdown Datanode", dn.getUuid());
+LOG.error("Failed {} Datanode {}", stopString, dn.getUuid(), e);
   }
 }
   }
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java
index b942447..6ced6d6 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java
@@ -35,6 +35,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
@@ -49,7 +50,7 @@ import java.util.concurrent.atomic.AtomicInteger;
  */
 public class MiniOzoneLoadGenerator {
 
-  static final Logger LOG =
+  private static final Logger LOG =
   LoggerFactory.getLogger(MiniOzoneLoadGenerator.class);
 
   private static String keyNameDelimiter = "_";
@@ -113,7 +114,7 @@ public class MiniOzoneLoadGenerator {
 int index = RandomUtils.nextInt();
 String keyName = writeData(index, bucket, threadName);
 
-readData(bucket, keyName);
+readData(bucket, keyName, index);
 
 deleteKey(bucket, keyName);
   } catch (Exception e) {
@@ -133,11 +134,13 @@ public class MiniOzoneLoadGenerator {
 ByteBuffer buffer = buffers.get(keyIndex % numBuffers);
 int bufferCapacity = buffer.capacity();
 
-String keyName = threadName + keyNameDelimiter + keyIndex;
+String keyName = getKeyName(keyIndex, threadName);
+LOG.trace("LOADGEN: Writing key {}", keyName);
 try (OzoneOutputStream stream = bucket.createKey(keyName,
 bufferCapacity, ReplicationType.RATIS, ReplicationFactor.THREE,
 new HashMap<>())) {
   stream.write(buffer.array());
+  LOG.trace("LOADGEN: Written key {}", keyNa

[hadoop] branch trunk updated: HDDS-1920. Place ozone.om.address config key default value in ozone-site.xml

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new bf45779  HDDS-1920. Place ozone.om.address config key default value in 
ozone-site.xml
bf45779 is described below

commit bf457797f607f3aeeb2292e63f440cb13e15a2d9
Author: Siyao Meng 
AuthorDate: Tue Aug 6 14:14:26 2019 -0700

HDDS-1920. Place ozone.om.address config key default value in ozone-site.xml

Change-Id: Ic5970b383357147b74a01680aedf40bed4d3e176
Signed-off-by: Anu Engineer 
---
 hadoop-hdds/common/src/main/resources/ozone-default.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 409cc72..d9440d7 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -550,7 +550,7 @@
   </property>
   <property>
     <name>ozone.om.address</name>
-    <value/>
+    <value>0.0.0.0:9862</value>
     <tag>OM, REQUIRED</tag>
     <description>
   The address of the Ozone OM service. This allows clients to discover
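
For clients, the practical effect is that ozone.om.address now resolves even when ozone-site.xml leaves it out. A minimal check, assuming hadoop-hdds is on the classpath (the lookup below is illustrative, not the code path OM clients actually take):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class OmAddressDefault {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // With this change, ozone-default.xml supplies 0.0.0.0:9862 whenever
    // ozone-site.xml does not set the key.
    System.out.println(conf.get("ozone.om.address"));
  }
}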


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4.1 updated: HDDS-1928. Cannot run ozone-recon compose due to syntax error

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new f4d8e1b  HDDS-1928. Cannot run ozone-recon compose due to syntax error
f4d8e1b is described below

commit f4d8e1bdd71d0003cdc3a588bb81fcc3a6f71a7e
Author: Doroszlai, Attila 
AuthorDate: Wed Aug 7 20:46:17 2019 +0200

HDDS-1928. Cannot run ozone-recon compose due to syntax error

Signed-off-by: Anu Engineer 
(cherry picked from commit e6d240dc91004c468533b523358849a2611ed757)
---
 .../main/compose/ozone-recon/docker-compose.yaml   |  2 +-
 .../dist/src/main/compose/ozone-recon/test.sh  | 30 ++
 2 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml 
b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml
index e6d25ea..4cec246 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml
@@ -17,7 +17,7 @@
 version: "3"
 services:
datanode:
-  image: apache/ozone-runner:
+  image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
   privileged: true #required by the profiler
   volumes:
 - ../..:/opt/hadoop
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/test.sh 
b/hadoop-ozone/dist/src/main/compose/ozone-recon/test.sh
new file mode 100755
index 000..f4bfcc3
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozone-recon/test.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+export COMPOSE_DIR
+
+# shellcheck source=/dev/null
+source "$COMPOSE_DIR/../testlib.sh"
+
+start_docker_env
+
+execute_robot_test scm basic/basic.robot
+
+stop_docker_env
+
+generate_report


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1928. Cannot run ozone-recon compose due to syntax error

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e6d240d  HDDS-1928. Cannot run ozone-recon compose due to syntax error
e6d240d is described below

commit e6d240dc91004c468533b523358849a2611ed757
Author: Doroszlai, Attila 
AuthorDate: Wed Aug 7 20:46:17 2019 +0200

HDDS-1928. Cannot run ozone-recon compose due to syntax error

Signed-off-by: Anu Engineer 
---
 .../main/compose/ozone-recon/docker-compose.yaml   |  2 +-
 .../dist/src/main/compose/ozone-recon/test.sh  | 30 ++
 2 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml 
b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml
index e6d25ea..4cec246 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml
@@ -17,7 +17,7 @@
 version: "3"
 services:
datanode:
-  image: apache/ozone-runner:
+  image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
   privileged: true #required by the profiler
   volumes:
 - ../..:/opt/hadoop
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/test.sh 
b/hadoop-ozone/dist/src/main/compose/ozone-recon/test.sh
new file mode 100755
index 000..f4bfcc3
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozone-recon/test.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+export COMPOSE_DIR
+
+# shellcheck source=/dev/null
+source "$COMPOSE_DIR/../testlib.sh"
+
+start_docker_env
+
+execute_robot_test scm basic/basic.robot
+
+stop_docker_env
+
+generate_report


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4.1 updated: HDDS-1916. Only contract tests are run in ozonefs module

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new a066698  HDDS-1916. Only contract tests are run in ozonefs module
a066698 is described below

commit a0666982fc5a9dc4bfeff2978b166535098fb75c
Author: Doroszlai, Attila 
AuthorDate: Tue Aug 6 10:52:32 2019 +0200

HDDS-1916. Only contract tests are run in ozonefs module

Signed-off-by: Anu Engineer 
(cherry picked from commit 9691117099d7914c6297b0e4ea3852341775fb15)
---
 hadoop-ozone/ozonefs/pom.xml | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml
index 8ef886f..3eedf48 100644
--- a/hadoop-ozone/ozonefs/pom.xml
+++ b/hadoop-ozone/ozonefs/pom.xml
@@ -68,7 +68,11 @@
 
   
 ITestOzoneContract*.java
-
+**/Test*.java
+**/*Test.java
+**/*Tests.java
+**/*TestCase.java
+  
 
   
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1659. Define the process to add proposal/design docs to the Ozone subproject (#950)

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 50a22b6  HDDS-1659. Define the process to add proposal/design docs to 
the Ozone subproject (#950)
50a22b6 is described below

commit 50a22b66c0292d37984460991a680d9d3e8c862c
Author: Elek, Márton 
AuthorDate: Wed Aug 14 02:10:36 2019 +0200

HDDS-1659. Define the process to add proposal/design docs to the Ozone 
subproject (#950)

* HDDS-1659. Define the process to add proposal/design docs to the Ozone 
subproject
 * Remove Site improvements to display proposals
 * adding license header
 * clarify the support of the existing method
---
 .../content/design/ozone-enhancement-proposals.md  | 197 +
 1 file changed, 197 insertions(+)

diff --git a/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md 
b/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md
new file mode 100644
index 000..cc7569e
--- /dev/null
+++ b/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md
@@ -0,0 +1,197 @@
+---
+title: Ozone Enhancement Proposals
+summary: Definition of the process to share new technical proposals with the 
Ozone community.
+date: 2019-06-07
+jira: HDDS-1659
+status: accepted
+author: Anu Engineer, Marton Elek
+---
+
+
+## Problem statement
+
+Some of the bigger features require well-defined plans before implementation. 
Until now this was managed by uploading PDF design docs to selected JIRAs. 
There are multiple problems with the current practice.
+
+ 1. There is no easy way to find existing up-to-date and outdated design docs.
+ 2. Design docs usually have a better description of the problem than the 
user docs.
+ 3. We need better tools to discuss the design docs during the development 
phase of the doc.
+
+We propose to follow the same process we have now, but instead of uploading 
a PDF to the JIRA, create a PR to merge the proposal document into the 
documentation project.
+
+## Non-goals
+
+ * Modify the existing workflow or approval process
+ * Migrate existing documents
+ * Make it harder to create design docs (it should be easy to support the 
creation of proposals for any kind of task)
+ * Define how the design docs are handled/created *before* the publication 
(this proposal is about the publishing process)
+
+## Proposed solution
+
+ * Open a dedicated Jira (`HDDS-*` but with specific component)
+ * Use a standard name prefix in the jira (easy to filter on the mailing 
list): `[OEP]`
+ * Create a PR to add the design doc to the current documentation
+   * The content of the design can be added to the documentation (Recommended)
+   * Or can be added as external reference
+ * The design doc (or the summary with the reference) will be merged into 
the design doc folder `hadoop-hdds/docs/content/design` (will be part of the 
docs)
+ * Discuss it as before (lazy consensus, except if somebody calls for a real 
vote)
+ * Design docs can be updated according to the changes during the 
implementation
+ * Only the implemented design docs will be visible as part of the design docs
+
+
+As a result all the design docs can be listed under the documentation page.
+
+A good design doc has the following properties:
+
+ 1. Publicly available for anybody (please try to avoid services which are 
available only with registration, e.g. Google Docs)
+ 2. Archived for the future (Commit it to the source OR use apache jira or 
wiki)
+ 3. Editable later (best format is markdown, RTF is also good; PDF has a 
limitation: it's very hard to reuse the text or create an updated design doc)
+ 4. Well structured to make it easy to comment on any part of the document 
(markdown files which are part of the pull request can be commented in the PR 
line by line)
+
+
+### Example 1: Design doc as a markdown file
+
+The easiest way to create a design doc is to create a new markdown file in a 
PR and merge it into `hadoop-hdds/docs/content/design`.
+
+ 1. Publicly available: YES, it can be linked from Apache git or github
+ 2. Archived: YES, and it's also versioned. All the change history can be 
tracked.
+ 3. Editable later: YES, as it's just a simple text file
+ 4. Commentable: YES, comments can be added to each line.
+
+### Example 2: Design doc as a PDF
+
+A very common practice today is to create the design doc on Google Docs and 
upload it to the JIRA.
+
+ 1. Publicly available: YES, anybody can download it from the Jira.
+ 2. Archived: YES, it's available from Apache infra.
+ 3. Editable: NO, it's harder to reuse the text to import into the docs or 
create a new design doc.
+ 4. Commentable: PARTIAL, not as easy as a text file or the original Google 
Docs, but a good structure with numbered sections may help
+
+
+### The format
+
+While the first version (markdown files) are the most powe

[hadoop] branch trunk updated: HDDS-1916. Only contract tests are run in ozonefs module

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9691117  HDDS-1916. Only contract tests are run in ozonefs module
9691117 is described below

commit 9691117099d7914c6297b0e4ea3852341775fb15
Author: Doroszlai, Attila 
AuthorDate: Tue Aug 6 10:52:32 2019 +0200

HDDS-1916. Only contract tests are run in ozonefs module

Signed-off-by: Anu Engineer 
---
 hadoop-ozone/ozonefs/pom.xml | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml
index 02a5640..fdd27b0 100644
--- a/hadoop-ozone/ozonefs/pom.xml
+++ b/hadoop-ozone/ozonefs/pom.xml
@@ -68,7 +68,11 @@
 
   
 ITestOzoneContract*.java
-
+**/Test*.java
+**/*Test.java
+**/*Tests.java
+**/*TestCase.java
+  
 
   
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 02/02: HDDS-1917. TestOzoneRpcClientAbstract is failing.

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit e6b744b8f8519342a4e4bdc15cb4088e13e855c6
Author: Nanda kumar 
AuthorDate: Tue Aug 6 14:32:13 2019 +0530

HDDS-1917. TestOzoneRpcClientAbstract is failing.

Signed-off-by: Anu Engineer 
(cherry picked from commit 3cff73aff47695f6a48a36878191409f050f)
---
 .../client/rpc/TestOzoneRpcClientAbstract.java | 52 +++---
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  7 ++-
 2 files changed, 31 insertions(+), 28 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 4e426ba..c203fec 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -26,6 +26,7 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.TreeMap;
 import java.util.UUID;
 import java.util.concurrent.CountDownLatch;
@@ -2533,31 +2534,30 @@ public abstract class TestOzoneRpcClientAbstract {
   ACLType.READ_ACL, ACCESS);
   // Verify that operation successful.
   assertTrue(store.addAcl(ozObj, newAcl));
-  List acls = store.getAcl(ozObj);
-
-  assertTrue(acls.size() == expectedAcls.size());
-  boolean aclVerified = false;
-  for(OzoneAcl acl: acls) {
-if(acl.getName().equals(newAcl.getName())) {
-  assertTrue(acl.getAclList().contains(ACLType.READ_ACL));
-  aclVerified = true;
-}
-  }
-  assertTrue("New acl expected but not found.", aclVerified);
-  aclVerified = false;
+
+  assertEquals(expectedAcls.size(), store.getAcl(ozObj).size());
+  final Optional readAcl = store.getAcl(ozObj).stream()
+  .filter(acl -> acl.getName().equals(newAcl.getName())
+  && acl.getType().equals(newAcl.getType()))
+  .findFirst();
+  assertTrue("New acl expected but not found.", readAcl.isPresent());
+  assertTrue("READ_ACL should exist in current acls:"
+  + readAcl.get(),
+  readAcl.get().getAclList().contains(ACLType.READ_ACL));
+
 
   // Case:2 Remove newly added acl permission.
   assertTrue(store.removeAcl(ozObj, newAcl));
-  acls = store.getAcl(ozObj);
-  assertTrue(acls.size() == expectedAcls.size());
-  for(OzoneAcl acl: acls) {
-if(acl.getName().equals(newAcl.getName())) {
-  assertFalse("READ_ACL should not exist in current acls:" +
-  acls, acl.getAclList().contains(ACLType.READ_ACL));
-  aclVerified = true;
-}
-  }
-  assertTrue("New acl expected but not found.", aclVerified);
+
+  assertEquals(expectedAcls.size(), store.getAcl(ozObj).size());
+  final Optional nonReadAcl = store.getAcl(ozObj).stream()
+  .filter(acl -> acl.getName().equals(newAcl.getName())
+  && acl.getType().equals(newAcl.getType()))
+  .findFirst();
+  assertTrue("New acl expected but not found.", nonReadAcl.isPresent());
+  assertFalse("READ_ACL should not exist in current acls:"
+  + nonReadAcl.get(),
+  nonReadAcl.get().getAclList().contains(ACLType.READ_ACL));
 } else {
   fail("Default acl should not be empty.");
 }
@@ -2570,17 +2570,17 @@ public abstract class TestOzoneRpcClientAbstract {
   store.removeAcl(ozObj, a);
 }
 List newAcls = store.getAcl(ozObj);
-assertTrue(newAcls.size() == 0);
+assertEquals(0, newAcls.size());
 
 // Add acl's and then call getAcl.
 int aclCount = 0;
 for (OzoneAcl a : expectedAcls) {
   aclCount++;
   assertTrue(store.addAcl(ozObj, a));
-  assertTrue(store.getAcl(ozObj).size() == aclCount);
+  assertEquals(aclCount, store.getAcl(ozObj).size());
 }
 newAcls = store.getAcl(ozObj);
-assertTrue(newAcls.size() == expectedAcls.size());
+assertEquals(expectedAcls.size(), newAcls.size());
 List finalNewAcls = newAcls;
 expectedAcls.forEach(a -> assertTrue(finalNewAcls.contains(a)));
 
@@ -2591,7 +2591,7 @@ public abstract class TestOzoneRpcClientAbstract {
 ACLType.ALL, ACCESS);
 store.setAcl(ozObj, Arrays.asList(ua, ug));
 newAcls = store.getAcl(ozObj);
-assertTrue(newAcls.size() == 2);
+assertEquals(2, newAcls.size());
 assertTrue(newAcls.contains(ua));
 assertTrue(newAcls.contains(ug));
   }
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apac

[hadoop] branch ozone-0.4.1 updated (237a208 -> e6b744b)

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 237a208  HDDS-1891. Ozone fs shell command should work with default 
port when port number is not specified
 new 3eec5e1  HDDS-1961. 
TestStorageContainerManager#testScmProcessDatanodeHeartbeat is flaky.
 new e6b744b  HDDS-1917. TestOzoneRpcClientAbstract is failing.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../hadoop/ozone/TestStorageContainerManager.java  | 25 +--
 .../client/rpc/TestOzoneRpcClientAbstract.java | 52 +++---
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  7 ++-
 3 files changed, 43 insertions(+), 41 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 01/02: HDDS-1961. TestStorageContainerManager#testScmProcessDatanodeHeartbeat is flaky.

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 3eec5e1d61918e83b1f94ebfa0d864826c03465f
Author: Nanda kumar 
AuthorDate: Tue Aug 13 22:04:03 2019 +0530

HDDS-1961. TestStorageContainerManager#testScmProcessDatanodeHeartbeat is 
flaky.

Signed-off-by: Anu Engineer 
(cherry picked from commit cb390dff87a86eae22c432576be90d39f84a6ee8)
---
 .../hadoop/ozone/TestStorageContainerManager.java  | 25 +++---
 1 file changed, 12 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 3ac5993..55b184a 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.net.DNSToSwitchMapping;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -500,7 +501,9 @@ public class TestStorageContainerManager {
 String scmId = UUID.randomUUID().toString();
 conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
 StaticMapping.class, DNSToSwitchMapping.class);
-StaticMapping.addNodeToRack(HddsUtils.getHostName(conf), "/rack1");
+StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
+Collections.singleton(HddsUtils.getHostName(conf))).get(0),
+"/rack1");
 
 final int datanodeNum = 3;
 MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
@@ -520,21 +523,17 @@ public class TestStorageContainerManager {
   Thread.sleep(heartbeatCheckerIntervalMs * 2);
 
   List allNodes = scm.getScmNodeManager().getAllNodes();
-  Assert.assertTrue(allNodes.size() == datanodeNum);
-  for (int i = 0; i < allNodes.size(); i++) {
+  Assert.assertEquals(datanodeNum, allNodes.size());
+  for (DatanodeDetails node : allNodes) {
 DatanodeInfo datanodeInfo = (DatanodeInfo) scm.getScmNodeManager()
-.getNodeByUuid(allNodes.get(i).getUuidString());
-Assert.assertTrue((datanodeInfo.getLastHeartbeatTime() - start)
->= heartbeatCheckerIntervalMs);
-Assert.assertTrue(datanodeInfo.getUuidString()
-.equals(datanodeInfo.getNetworkName()));
-Assert.assertTrue(datanodeInfo.getNetworkLocation()
-.equals("/rack1"));
+.getNodeByUuid(node.getUuidString());
+Assert.assertTrue(datanodeInfo.getLastHeartbeatTime() > start);
+Assert.assertEquals(datanodeInfo.getUuidString(),
+datanodeInfo.getNetworkName());
+Assert.assertEquals("/rack1", datanodeInfo.getNetworkLocation());
   }
 } finally {
-  if (cluster != null) {
-cluster.shutdown();
-  }
+  cluster.shutdown();
 }
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 01/02: HDDS-1961. TestStorageContainerManager#testScmProcessDatanodeHeartbeat is flaky.

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit cb390dff87a86eae22c432576be90d39f84a6ee8
Author: Nanda kumar 
AuthorDate: Tue Aug 13 22:04:03 2019 +0530

HDDS-1961. TestStorageContainerManager#testScmProcessDatanodeHeartbeat is 
flaky.

Signed-off-by: Anu Engineer 
---
 .../test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 8b0af2a..55b184a 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -527,8 +527,7 @@ public class TestStorageContainerManager {
   for (DatanodeDetails node : allNodes) {
 DatanodeInfo datanodeInfo = (DatanodeInfo) scm.getScmNodeManager()
 .getNodeByUuid(node.getUuidString());
-Assert.assertTrue((datanodeInfo.getLastHeartbeatTime() - start)
->= heartbeatCheckerIntervalMs);
+Assert.assertTrue(datanodeInfo.getLastHeartbeatTime() > start);
 Assert.assertEquals(datanodeInfo.getUuidString(),
 datanodeInfo.getNetworkName());
 Assert.assertEquals("/rack1", datanodeInfo.getNetworkLocation());


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 02/02: HDDS-1917. TestOzoneRpcClientAbstract is failing.

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 3cff73aff47695f6a48a36878191409f050f
Author: Nanda kumar 
AuthorDate: Tue Aug 6 14:32:13 2019 +0530

HDDS-1917. TestOzoneRpcClientAbstract is failing.

Signed-off-by: Anu Engineer 
---
 .../client/rpc/TestOzoneRpcClientAbstract.java | 52 +++---
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  7 ++-
 2 files changed, 31 insertions(+), 28 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index eb2d048..6ed4eae 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -26,6 +26,7 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.TreeMap;
 import java.util.UUID;
 import java.util.concurrent.CountDownLatch;
@@ -2456,31 +2457,30 @@ public abstract class TestOzoneRpcClientAbstract {
   ACLType.READ_ACL, ACCESS);
   // Verify that operation successful.
   assertTrue(store.addAcl(ozObj, newAcl));
-  List acls = store.getAcl(ozObj);
-
-  assertTrue(acls.size() == expectedAcls.size());
-  boolean aclVerified = false;
-  for(OzoneAcl acl: acls) {
-if(acl.getName().equals(newAcl.getName())) {
-  assertTrue(acl.getAclList().contains(ACLType.READ_ACL));
-  aclVerified = true;
-}
-  }
-  assertTrue("New acl expected but not found.", aclVerified);
-  aclVerified = false;
+
+  assertEquals(expectedAcls.size(), store.getAcl(ozObj).size());
+  final Optional readAcl = store.getAcl(ozObj).stream()
+  .filter(acl -> acl.getName().equals(newAcl.getName())
+  && acl.getType().equals(newAcl.getType()))
+  .findFirst();
+  assertTrue("New acl expected but not found.", readAcl.isPresent());
+  assertTrue("READ_ACL should exist in current acls:"
+  + readAcl.get(),
+  readAcl.get().getAclList().contains(ACLType.READ_ACL));
+
 
   // Case:2 Remove newly added acl permission.
   assertTrue(store.removeAcl(ozObj, newAcl));
-  acls = store.getAcl(ozObj);
-  assertTrue(acls.size() == expectedAcls.size());
-  for(OzoneAcl acl: acls) {
-if(acl.getName().equals(newAcl.getName())) {
-  assertFalse("READ_ACL should not exist in current acls:" +
-  acls, acl.getAclList().contains(ACLType.READ_ACL));
-  aclVerified = true;
-}
-  }
-  assertTrue("New acl expected but not found.", aclVerified);
+
+  assertEquals(expectedAcls.size(), store.getAcl(ozObj).size());
+  final Optional nonReadAcl = store.getAcl(ozObj).stream()
+  .filter(acl -> acl.getName().equals(newAcl.getName())
+  && acl.getType().equals(newAcl.getType()))
+  .findFirst();
+  assertTrue("New acl expected but not found.", nonReadAcl.isPresent());
+  assertFalse("READ_ACL should not exist in current acls:"
+  + nonReadAcl.get(),
+  nonReadAcl.get().getAclList().contains(ACLType.READ_ACL));
 } else {
   fail("Default acl should not be empty.");
 }
@@ -2493,17 +2493,17 @@ public abstract class TestOzoneRpcClientAbstract {
   store.removeAcl(ozObj, a);
 }
 List newAcls = store.getAcl(ozObj);
-assertTrue(newAcls.size() == 0);
+assertEquals(0, newAcls.size());
 
 // Add acl's and then call getAcl.
 int aclCount = 0;
 for (OzoneAcl a : expectedAcls) {
   aclCount++;
   assertTrue(store.addAcl(ozObj, a));
-  assertTrue(store.getAcl(ozObj).size() == aclCount);
+  assertEquals(aclCount, store.getAcl(ozObj).size());
 }
 newAcls = store.getAcl(ozObj);
-assertTrue(newAcls.size() == expectedAcls.size());
+assertEquals(expectedAcls.size(), newAcls.size());
 List finalNewAcls = newAcls;
 expectedAcls.forEach(a -> assertTrue(finalNewAcls.contains(a)));
 
@@ -2514,7 +2514,7 @@ public abstract class TestOzoneRpcClientAbstract {
 ACLType.ALL, ACCESS);
 store.setAcl(ozObj, Arrays.asList(ua, ug));
 newAcls = store.getAcl(ozObj);
-assertTrue(newAcls.size() == 2);
+assertEquals(2, newAcls.size());
 assertTrue(newAcls.contains(ua));
 assertTrue(newAcls.contains(ug));
   }
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/
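
The recurring edit in this patch replaces loops that tracked a mutable aclVerified flag with a Stream first-match lookup, so each assertion works from its own Optional. A standalone sketch of the pattern (the Acl class here is a stand-in for OzoneAcl):

import java.util.Arrays;
import java.util.List;
import java.util.Optional;

public class FindAclSketch {
  static final class Acl {
    final String name;
    final String type;
    Acl(String name, String type) { this.name = name; this.type = type; }
    @Override public String toString() { return type + ":" + name; }
  }

  public static void main(String[] args) {
    List<Acl> acls = Arrays.asList(
        new Acl("superuser1", "USER"), new Acl("superuser1", "GROUP"));

    // Declarative first-match lookup, as in the patched test.
    Optional<Acl> match = acls.stream()
        .filter(a -> a.name.equals("superuser1") && a.type.equals("USER"))
        .findFirst();

    if (!match.isPresent()) {
      throw new AssertionError("New acl expected but not found.");
    }
    System.out.println("Matched " + match.get()); // Matched USER:superuser1
  }
}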

[hadoop] branch trunk updated (68c8184 -> 3cff73a)

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 68c8184  HDDS-1891. Ozone fs shell command should work with default 
port when port number is not specified
 new cb390df  HDDS-1961. 
TestStorageContainerManager#testScmProcessDatanodeHeartbeat is flaky.
 new 3cff73a  HDDS-1917. TestOzoneRpcClientAbstract is failing.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../hadoop/ozone/TestStorageContainerManager.java  |  3 +-
 .../client/rpc/TestOzoneRpcClientAbstract.java | 52 +++---
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  7 ++-
 3 files changed, 32 insertions(+), 30 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4.1 updated: HDDS-1891. Ozone fs shell command should work with default port when port number is not specified

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 237a208  HDDS-1891. Ozone fs shell command should work with default 
port when port number is not specified
237a208 is described below

commit 237a20860734232af828c949c821fee7498e2d9f
Author: Siyao Meng 
AuthorDate: Fri Aug 2 12:54:04 2019 -0700

HDDS-1891. Ozone fs shell command should work with default port when port 
number is not specified

Signed-off-by: Anu Engineer 
(cherry picked from commit 68c818415aedf672e35b8ecd9dfd0cb33c43a91e)
---
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java  | 17 +++---
 .../fs/ozone/TestOzoneFileSystemWithMocks.java | 37 ++
 2 files changed, 50 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index 6a52746..27bc925 100644
--- 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -54,6 +55,7 @@ import static 
org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
 import static org.apache.hadoop.fs.ozone.Constants.OZONE_USER_DIR;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
+
 import org.apache.http.client.utils.URIBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -85,9 +87,10 @@ public class BasicOzoneFileSystem extends FileSystem {
   private static final Pattern URL_SCHEMA_PATTERN =
   Pattern.compile("([^\\.]+)\\.([^\\.]+)\\.{0,1}(.*)");
 
-  private static final String URI_EXCEPTION_TEXT = "Ozone file system url " +
-  "should be either one of the two forms: " +
+  private static final String URI_EXCEPTION_TEXT = "Ozone file system URL " +
+  "should be one of the following formats: " +
   "o3fs://bucket.volume/key  OR " +
+  "o3fs://bucket.volume.om-host.example.com/key  OR " +
   "o3fs://bucket.volume.om-host.example.com:5678/key";
 
   @Override
@@ -113,11 +116,17 @@ public class BasicOzoneFileSystem extends FileSystem {
 String omPort = String.valueOf(-1);
 if (!isEmpty(remaining)) {
   String[] parts = remaining.split(":");
-  if (parts.length != 2) {
+  // Array length should be either 1(host) or 2(host:port)
+  if (parts.length > 2) {
 throw new IllegalArgumentException(URI_EXCEPTION_TEXT);
   }
   omHost = parts[0];
-  omPort = parts[1];
+  if (parts.length == 2) {
+omPort = parts[1];
+  } else {
+// If port number is not specified, read it from config
+omPort = String.valueOf(OmUtils.getOmRpcPort(conf));
+  }
   if (!isNumber(omPort)) {
 throw new IllegalArgumentException(URI_EXCEPTION_TEXT);
   }
diff --git 
a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java
 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java
index 7109327..51fd3c8 100644
--- 
a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java
+++ 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java
@@ -27,6 +27,7 @@ import java.net.URI;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
@@ -79,6 +80,42 @@ public class TestOzoneFileSystemWithMocks {
   }
 
   @Test
+  public void testFSUriWithHostPortUnspecified() throws Exception {
+Configuration conf = new OzoneConfiguration();
+final int omPort = OmUtils.getOmRpcPort(conf);
+
+OzoneClient ozoneClient = mock(OzoneClient.class);
+ObjectStore objectStore = mock(ObjectStore.class);
+OzoneVolume volume = mock(OzoneVolume.class);
+OzoneBucket bucket = mock(OzoneBucket.class);
+
+when(ozoneClient.getObjectStore()).thenReturn(objectStore);
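
For readers following the change: the new parsing accepts one authority part (host) or two (host:port) and falls back to the configured OM RPC port when none is given. A self-contained sketch of the same logic, not the shipped code; DEFAULT_OM_PORT is an assumption standing in for whatever OmUtils.getOmRpcPort(conf) returns on a given cluster:

    // Sketch of the HDDS-1891 authority parsing.
    public final class OmAddressSketch {
      static final int DEFAULT_OM_PORT = 9862; // assumed OM RPC default

      static String[] parseOmAddress(String remaining) {
        String[] parts = remaining.split(":");
        if (parts.length > 2) {              // valid: host, or host:port
          throw new IllegalArgumentException("invalid o3fs authority");
        }
        String host = parts[0];
        String port = (parts.length == 2)
            ? parts[1]
            : String.valueOf(DEFAULT_OM_PORT); // fall back to configured port
        return new String[] {host, port};
      }

      public static void main(String[] args) {
        // host:port given -> used as-is; host only -> default port applied
        System.out.println(String.join(":", parseOmAddress("om-host.example.com:5678")));
        System.out.println(String.join(":", parseOmAddress("om-host.example.com")));
      }
    }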

[hadoop] branch trunk updated: HDDS-1891. Ozone fs shell command should work with default port when port number is not specified

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 68c8184  HDDS-1891. Ozone fs shell command should work with default 
port when port number is not specified
68c8184 is described below

commit 68c818415aedf672e35b8ecd9dfd0cb33c43a91e
Author: Siyao Meng 
AuthorDate: Fri Aug 2 12:54:04 2019 -0700

HDDS-1891. Ozone fs shell command should work with default port when port 
number is not specified

Signed-off-by: Anu Engineer 
---
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java  | 17 +++---
 .../fs/ozone/TestOzoneFileSystemWithMocks.java | 37 ++
 2 files changed, 50 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index 6a52746..27bc925 100644
--- 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -54,6 +55,7 @@ import static 
org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
 import static org.apache.hadoop.fs.ozone.Constants.OZONE_USER_DIR;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
+
 import org.apache.http.client.utils.URIBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -85,9 +87,10 @@ public class BasicOzoneFileSystem extends FileSystem {
   private static final Pattern URL_SCHEMA_PATTERN =
   Pattern.compile("([^\\.]+)\\.([^\\.]+)\\.{0,1}(.*)");
 
-  private static final String URI_EXCEPTION_TEXT = "Ozone file system url " +
-  "should be either one of the two forms: " +
+  private static final String URI_EXCEPTION_TEXT = "Ozone file system URL " +
+  "should be one of the following formats: " +
   "o3fs://bucket.volume/key  OR " +
+  "o3fs://bucket.volume.om-host.example.com/key  OR " +
   "o3fs://bucket.volume.om-host.example.com:5678/key";
 
   @Override
@@ -113,11 +116,17 @@ public class BasicOzoneFileSystem extends FileSystem {
 String omPort = String.valueOf(-1);
 if (!isEmpty(remaining)) {
   String[] parts = remaining.split(":");
-  if (parts.length != 2) {
+  // Array length should be either 1(host) or 2(host:port)
+  if (parts.length > 2) {
 throw new IllegalArgumentException(URI_EXCEPTION_TEXT);
   }
   omHost = parts[0];
-  omPort = parts[1];
+  if (parts.length == 2) {
+omPort = parts[1];
+  } else {
+// If port number is not specified, read it from config
+omPort = String.valueOf(OmUtils.getOmRpcPort(conf));
+  }
   if (!isNumber(omPort)) {
 throw new IllegalArgumentException(URI_EXCEPTION_TEXT);
   }
diff --git 
a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java
 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java
index 7109327..51fd3c8 100644
--- 
a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java
+++ 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java
@@ -27,6 +27,7 @@ import java.net.URI;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
@@ -79,6 +80,42 @@ public class TestOzoneFileSystemWithMocks {
   }
 
   @Test
+  public void testFSUriWithHostPortUnspecified() throws Exception {
+Configuration conf = new OzoneConfiguration();
+final int omPort = OmUtils.getOmRpcPort(conf);
+
+OzoneClient ozoneClient = mock(OzoneClient.class);
+ObjectStore objectStore = mock(ObjectStore.class);
+OzoneVolume volume = mock(OzoneVolume.class);
+OzoneBucket bucket = mock(OzoneBucket.class);
+
+when(ozoneClient.getObjectStore()).thenReturn(objectStore);
+when(objectStore.getVolume(eq("volume1"))).thenReturn(volume);
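
With this change in place, both of the following fs shell invocations should reach the OM, the first relying on the configured default RPC port (a usage sketch, assuming an OM reachable at om-host.example.com on its default port):

    ozone fs -ls o3fs://bucket.volume.om-host.example.com/
    ozone fs -ls o3fs://bucket.volume.om-host.example.com:9862/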

[hadoop] branch ozone-0.4.1 updated: HDDS-1488. Scm cli command to start/stop replication manager.

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 0299f8c  HDDS-1488. Scm cli command to start/stop replication manager.
0299f8c is described below

commit 0299f8c7c5d06d8ea36fd960e96d0606092dc4a6
Author: Nanda kumar 
AuthorDate: Sat Aug 3 19:01:29 2019 +0530

HDDS-1488. Scm cli command to start/stop replication manager.

Signed-off-by: Anu Engineer 
(cherry picked from commit 69b74e90167041f561bfcccf5a4e46ea208c467e)
---
 .../hdds/scm/client/ContainerOperationClient.java  | 17 ++
 .../apache/hadoop/hdds/scm/client/ScmClient.java   | 19 +++
 .../protocol/StorageContainerLocationProtocol.java | 18 +++
 ...inerLocationProtocolClientSideTranslatorPB.java | 39 ++
 .../org/apache/hadoop/ozone/audit/SCMAction.java   |  5 +-
 ...inerLocationProtocolServerSideTranslatorPB.java | 46 +
 .../proto/StorageContainerLocationProtocol.proto   | 31 +++
 .../hdds/scm/container/ReplicationManager.java | 36 -
 .../hdds/scm/server/SCMClientProtocolServer.java   | 21 
 .../hdds/scm/container/TestReplicationManager.java | 16 ++
 .../hdds/scm/cli/ReplicationManagerCommands.java   | 54 +++
 .../scm/cli/ReplicationManagerStartSubcommand.java | 53 +++
 .../cli/ReplicationManagerStatusSubcommand.java| 60 ++
 .../scm/cli/ReplicationManagerStopSubcommand.java  | 55 
 .../org/apache/hadoop/hdds/scm/cli/SCMCLI.java |  3 +-
 15 files changed, 457 insertions(+), 16 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
index 3077f9f..e2856d7 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
@@ -459,4 +459,21 @@ public class ContainerOperationClient implements ScmClient 
{
   public boolean forceExitSafeMode() throws IOException {
 return storageContainerLocationClient.forceExitSafeMode();
   }
+
+  @Override
+  public void startReplicationManager() throws IOException {
+storageContainerLocationClient.startReplicationManager();
+  }
+
+  @Override
+  public void stopReplicationManager() throws IOException {
+storageContainerLocationClient.stopReplicationManager();
+  }
+
+  @Override
+  public boolean getReplicationManagerStatus() throws IOException {
+return storageContainerLocationClient.getReplicationManagerStatus();
+  }
+
+
 }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index 85821ac..c2dd5f9 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -203,4 +203,23 @@ public interface ScmClient extends Closeable {
* @throws IOException
*/
   boolean forceExitSafeMode() throws IOException;
+
+  /**
+   * Start ReplicationManager.
+   */
+  void startReplicationManager() throws IOException;
+
+  /**
+   * Stop ReplicationManager.
+   */
+  void stopReplicationManager() throws IOException;
+
+  /**
+   * Returns ReplicationManager status.
+   *
+   * @return True if ReplicationManager is running, false otherwise.
+   */
+  boolean getReplicationManagerStatus() throws IOException;
+
+
 }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index cc220a5..565ce47 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -177,4 +177,22 @@ public interface StorageContainerLocationProtocol extends 
Closeable {
* @throws IOException
*/
   boolean forceExitSafeMode() throws IOException;
+
+  /**
+   * Start ReplicationManager.
+   */
+  void startReplicationManager() throws IOException;
+
+  /**
+   * Stop ReplicationManager.
+   */
+  void stopReplicationManager() throws IOException;
+
+  /**
+   * Returns ReplicationManager status.
+   *
+   * @return True if ReplicationManager is running, false otherwise.
+   */
+  boolean getReplicationManagerStatus() throws IOException;
+
 }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB
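
The three new ScmClient methods compose naturally. A minimal caller sketch, assuming client is an already-initialized ScmClient (for example a ContainerOperationClient) and eliding its construction and IOException handling:

    // Pause replication before maintenance, then restore the prior state.
    boolean wasRunning = client.getReplicationManagerStatus();
    client.stopReplicationManager();
    // ... maintenance work ...
    if (wasRunning) {
      client.startReplicationManager();
    }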

[hadoop] branch trunk updated: HDDS-1488. Scm cli command to start/stop replication manager.

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 69b74e9  HDDS-1488. Scm cli command to start/stop replication manager.
69b74e9 is described below

commit 69b74e90167041f561bfcccf5a4e46ea208c467e
Author: Nanda kumar 
AuthorDate: Sat Aug 3 19:01:29 2019 +0530

HDDS-1488. Scm cli command to start/stop replication manager.

Signed-off-by: Anu Engineer 
---
 .../hdds/scm/client/ContainerOperationClient.java  | 17 ++
 .../apache/hadoop/hdds/scm/client/ScmClient.java   | 19 +++
 .../protocol/StorageContainerLocationProtocol.java | 18 +++
 ...inerLocationProtocolClientSideTranslatorPB.java | 39 ++
 .../org/apache/hadoop/ozone/audit/SCMAction.java   |  5 +-
 ...inerLocationProtocolServerSideTranslatorPB.java | 46 +
 .../proto/StorageContainerLocationProtocol.proto   | 31 +++
 .../hdds/scm/container/ReplicationManager.java | 36 -
 .../hdds/scm/server/SCMClientProtocolServer.java   | 21 
 .../hdds/scm/container/TestReplicationManager.java | 16 ++
 .../hdds/scm/cli/ReplicationManagerCommands.java   | 54 +++
 .../scm/cli/ReplicationManagerStartSubcommand.java | 53 +++
 .../cli/ReplicationManagerStatusSubcommand.java| 60 ++
 .../scm/cli/ReplicationManagerStopSubcommand.java  | 55 
 .../org/apache/hadoop/hdds/scm/cli/SCMCLI.java |  3 +-
 15 files changed, 457 insertions(+), 16 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
index 3077f9f..e2856d7 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
@@ -459,4 +459,21 @@ public class ContainerOperationClient implements ScmClient 
{
   public boolean forceExitSafeMode() throws IOException {
 return storageContainerLocationClient.forceExitSafeMode();
   }
+
+  @Override
+  public void startReplicationManager() throws IOException {
+storageContainerLocationClient.startReplicationManager();
+  }
+
+  @Override
+  public void stopReplicationManager() throws IOException {
+storageContainerLocationClient.stopReplicationManager();
+  }
+
+  @Override
+  public boolean getReplicationManagerStatus() throws IOException {
+return storageContainerLocationClient.getReplicationManagerStatus();
+  }
+
+
 }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index 85821ac..c2dd5f9 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -203,4 +203,23 @@ public interface ScmClient extends Closeable {
* @throws IOException
*/
   boolean forceExitSafeMode() throws IOException;
+
+  /**
+   * Start ReplicationManager.
+   */
+  void startReplicationManager() throws IOException;
+
+  /**
+   * Stop ReplicationManager.
+   */
+  void stopReplicationManager() throws IOException;
+
+  /**
+   * Returns ReplicationManager status.
+   *
+   * @return True if ReplicationManager is running, false otherwise.
+   */
+  boolean getReplicationManagerStatus() throws IOException;
+
+
 }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index cc220a5..565ce47 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -177,4 +177,22 @@ public interface StorageContainerLocationProtocol extends 
Closeable {
* @throws IOException
*/
   boolean forceExitSafeMode() throws IOException;
+
+  /**
+   * Start ReplicationManager.
+   */
+  void startReplicationManager() throws IOException;
+
+  /**
+   * Stop ReplicationManager.
+   */
+  void stopReplicationManager() throws IOException;
+
+  /**
+   * Returns ReplicationManager status.
+   *
+   * @return True if ReplicationManager is running, false otherwise.
+   */
+  boolean getReplicationManagerStatus() throws IOException;
+
 }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
 
b/hadoop-hdds/common/src/main/java/org/apache
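
Judging from the subcommand classes added above (ReplicationManagerStartSubcommand and friends), the corresponding SCM CLI invocations presumably take the form:

    ozone scmcli replicationmanager start
    ozone scmcli replicationmanager stop
    ozone scmcli replicationmanager status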

[hadoop] branch trunk updated: HDDS-1886. Use ArrayList#clear to address audit failure scenario

2019-08-13 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 689a80d  HDDS-1886. Use ArrayList#clear to address audit failure 
scenario
689a80d is described below

commit 689a80d3ce310c3b617537550a529b9a1dc80f4b
Author: dchitlangia 
AuthorDate: Thu Aug 1 02:06:03 2019 -0400

HDDS-1886. Use ArrayList#clear to address audit failure scenario

Signed-off-by: Anu Engineer 
---
 .../test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
index 77a6c0b..518ddae 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
@@ -153,7 +153,7 @@ public class TestOzoneAuditLogger {
 assertTrue(lines.size() != 0);
 assertTrue(expected.equalsIgnoreCase(lines.get(0)));
 //empty the file
-lines.remove(0);
+lines.clear();
 FileUtils.writeLines(file, lines, false);
   }
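
The one-line fix matters because List#remove(0) only drops the first element; if the audit file ever accrues more than one line, the subsequent FileUtils.writeLines(file, lines, false) would rewrite the leftovers instead of truncating the file. A minimal illustration:

    List<String> lines = new ArrayList<>(Arrays.asList("first", "second"));
    lines.remove(0);   // leaves ["second"]; the file keeps one stale line
    lines.clear();     // leaves [];         the rewrite truly empties the file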
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4.1 updated: HDDS-1834. parent directories not found in secure setup due to ACL check. Contributed by Doroszlai, Attila.

2019-07-31 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new d3bf724  HDDS-1834. parent directories not found in secure setup due 
to ACL check. Contributed by Doroszlai, Attila.
d3bf724 is described below

commit d3bf7240a819704a11f4f1e31cc4b79449fd9aec
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Tue Jul 30 22:41:16 2019 +0200

HDDS-1834. parent directories not found in secure setup due to ACL check. 
Contributed by Doroszlai, Attila.

This closes #1171.

(cherry picked from commit e68d8446c42a883b9cd8a1fa47d870a47db37ad6)
---
 .../hadoop/ozone/security/acl/OzoneObjInfo.java|  9 
 .../apache/hadoop/ozone/om/TestKeyManagerImpl.java | 57 ++
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 36 +-
 3 files changed, 91 insertions(+), 11 deletions(-)

diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
index a45a156..cbae18c 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
@@ -17,6 +17,7 @@
 package org.apache.hadoop.ozone.security.acl;
 
 import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
@@ -158,6 +159,14 @@ public final class OzoneObjInfo extends OzoneObj {
   return new Builder();
 }
 
+public static Builder fromKeyArgs(OmKeyArgs args) {
+  return new Builder()
+  .setVolumeName(args.getVolumeName())
+  .setBucketName(args.getBucketName())
+  .setKeyName(args.getKeyName())
+  .setResType(ResourceType.KEY);
+}
+
 public Builder setResType(OzoneObj.ResourceType res) {
   this.resType = res;
   return this;
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
index 83eb78f..4c3dce9 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
@@ -57,6 +57,7 @@ import 
org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
 import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneTestUtils;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
@@ -73,6 +74,7 @@ import 
org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
+import org.apache.hadoop.ozone.security.acl.RequestContext;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -395,6 +397,53 @@ public class TestKeyManagerImpl {
 }
   }
 
+  @Test
+  public void testCheckAccessForFileKey() throws Exception {
+OmKeyArgs keyArgs = createBuilder()
+.setKeyName("testdir/deep/NOTICE.txt")
+.build();
+OpenKeySession keySession = keyManager.createFile(keyArgs, false, true);
+keyArgs.setLocationInfoList(
+keySession.getKeyInfo().getLatestVersionLocations().getLocationList());
+keyManager.commitKey(keyArgs, keySession.getId());
+
+OzoneObj fileKey = OzoneObjInfo.Builder.fromKeyArgs(keyArgs)
+.setStoreType(OzoneObj.StoreType.OZONE)
+.build();
+RequestContext context = currentUserReads();
+Assert.assertTrue(keyManager.checkAccess(fileKey, context));
+
+OzoneObj parentDirKey = OzoneObjInfo.Builder.fromKeyArgs(keyArgs)
+.setStoreType(OzoneObj.StoreType.OZONE)
+.setKeyName("testdir")
+.build();
+Assert.assertTrue(keyManager.checkAccess(parentDirKey, context));
+  }
+
+  @Test
+  public void testCheckAccessForNonExistentKey() throws Exception {
+OmKeyArgs keyArgs = createBuilder()
+.setKeyName("testdir/deep/NO_SUCH_FILE.txt")
+.build();
+OzoneObj nonExistentKey = OzoneObjInfo.Builder.fromKeyArgs(keyArgs)
