[hadoop] branch trunk updated: HDDS-1736. Cleanup 2phase old HA code for Key requests. (#1038)

2019-07-15 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 395cb3c  HDDS-1736. Cleanup 2phase old HA code for Key requests. (#1038)
395cb3c is described below

commit 395cb3cfd703320c96855325dadb37a19fbcfc54
Author: Bharat Viswanadham 
AuthorDate: Mon Jul 15 21:51:59 2019 -0700

HDDS-1736. Cleanup 2phase old HA code for Key requests. (#1038)
---
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |  1 -
 .../org/apache/hadoop/ozone/audit/OMAction.java|  2 -
 .../ozone/om/protocol/OzoneManagerHAProtocol.java  | 40 --
 .../src/main/proto/OzoneManagerProtocol.proto  |  7 --
 .../org/apache/hadoop/ozone/om/KeyManager.java | 32 
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 93 ++
 .../java/org/apache/hadoop/ozone/om/OMMetrics.java |  9 ---
 .../org/apache/hadoop/ozone/om/OzoneManager.java   | 60 --
 .../ozone/om/ratis/OzoneManagerStateMachine.java   | 13 ---
 .../om/request/key/OMAllocateBlockRequest.java |  2 +-
 .../protocolPB/OzoneManagerRequestHandler.java | 34 +---
 11 files changed, 10 insertions(+), 283 deletions(-)
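
For context: the two-phase flow being removed here split each key write between Ratis startTransaction (allocate) and applyTransaction (persist), as the deleted javadoc below describes; with the newer per-request OM HA classes (OMAllocateBlockRequest and friends in the diffstat) a single request object validates and applies the change at a given transaction log index. A minimal, self-contained Java sketch of that consolidated shape, using hypothetical stand-in types rather than the real OM classes:

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for the OM request classes named in the diffstat;
// this only illustrates the single-phase pattern, not the real implementation.
final class KeyCreateRequest {
  private final String keyName;

  KeyCreateRequest(String keyName) {
    this.keyName = keyName;
  }

  // Validate and apply in one step, keyed by the Ratis transaction log index,
  // instead of allocating in startTransaction and persisting in a separate
  // applyTransaction call.
  void validateAndApply(Map<String, String> openKeyTable, long txLogIndex) {
    if (keyName == null || keyName.isEmpty()) {
      throw new IllegalArgumentException("key name is required");
    }
    openKeyTable.put(keyName + "#" + txLogIndex, "OPEN");
  }
}

public class SinglePhaseDemo {
  public static void main(String[] args) {
    Map<String, String> openKeyTable = new HashMap<>();
    new KeyCreateRequest("vol/bucket/key").validateAndApply(openKeyTable, 42L);
    System.out.println(openKeyTable); // {vol/bucket/key#42=OPEN}
  }
}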

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index 3bd884e..74ec65d 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -221,7 +221,6 @@ public final class OmUtils {
 case GetDelegationToken:
 case RenewDelegationToken:
 case CancelDelegationToken:
-case ApplyCreateKey:
 case ApplyInitiateMultiPartUpload:
 case CreateDirectory:
 case CreateFile:
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
index 627880b..11abb21 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
@@ -24,9 +24,7 @@ public enum OMAction implements AuditAction {
 
   // WRITE Actions
   ALLOCATE_BLOCK,
-  ADD_ALLOCATE_BLOCK,
   ALLOCATE_KEY,
-  APPLY_ALLOCATE_KEY,
   COMMIT_KEY,
   CREATE_VOLUME,
   CREATE_BUCKET,
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java
index ad2bc31..949d6e4 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java
@@ -20,17 +20,10 @@ package org.apache.hadoop.ozone.om.protocol;
 
 import org.apache.hadoop.ozone.om.helpers.OmDeleteVolumeResponse;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeOwnerChangeResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-.KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-.KeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-.KeyLocation;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
 .VolumeList;
 
 import java.io.IOException;
@@ -50,39 +43,6 @@ public interface OzoneManagerHAProtocol {
   long saveRatisSnapshot() throws IOException;
 
   /**
-   * Add a allocate block, it is assumed that the client is having an open
-   * key session going on. This block will be appended to this open key session.
-   * This will be called only during HA enabled OM, as during HA we get an
-   * allocated Block information, and add that information to OM DB.
-   *
-   * In HA the flow for allocateBlock is in StartTransaction allocateBlock
-   * will be called which returns block information, and in the
-   * applyTransaction addAllocateBlock will be called to add the block
-   * information to DB.
-   *
-   * @param args the key to append
-   * @param clientID the client identification
-   * @param keyLocation key location given by allocateBlock
-   * @return an allocated block
-   * @throws IOException
-   */
-  OmKeyLocationInfo addAllocatedBlock(OmKeyArgs args, long clientID,
-  KeyLocation keyLocation) throws IOException;
-
-  /**
-   * Add the openKey entry with given keyInfo and clientID in to openKeyTable.
-   * This will be called only from applyTransaction, once after calling
-   * applyKey in startTransaction.
-   

[hadoop] branch trunk updated: HDFS-14642. processMisReplicatedBlocks does not return correct processed count. Contributed by Stephen O'Donnell.

2019-07-15 Thread ayushsaxena
This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f77d54c  HDFS-14642. processMisReplicatedBlocks does not return correct processed count. Contributed by Stephen O'Donnell.
f77d54c is described below

commit f77d54c24343e6ca7c438d9db431cef14c3ae77b
Author: Ayush Saxena 
AuthorDate: Tue Jul 16 08:14:27 2019 +0530

HDFS-14642. processMisReplicatedBlocks does not return correct processed count. Contributed by Stephen O'Donnell.
---
 .../org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java | 1 +
 .../hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java   | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 858b82f..74e3853 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3642,6 +3642,7 @@ public class BlockManager implements BlockStatsMXBean {
   while (iter.hasNext() && processed < limit) {
 BlockInfo blk = iter.next();
 MisReplicationResult r = processMisReplicatedBlock(blk);
+processed++;
 LOG.debug("BLOCK* processMisReplicatedBlocks: " +
 "Re-scanned block {}, result is {}", blk, r);
   }
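
The bug is visible in the loop above: processed guards the while condition but was never incremented, so the limit never took effect and the method's return value was wrong. A small, self-contained sketch of the corrected loop shape (simplified types, not the BlockManager code):

import java.util.Arrays;
import java.util.Iterator;

public class LimitedScanDemo {
  // Without processed++ the guard below never advances, the limit is
  // silently ignored, and the caller receives a wrong processed count.
  static int processUpTo(Iterator<String> iter, int limit) {
    int processed = 0;
    while (iter.hasNext() && processed < limit) {
      String blk = iter.next();
      processed++; // the one-line fix
      System.out.println("Re-scanned block " + blk);
    }
    return processed;
  }

  public static void main(String[] args) {
    Iterator<String> it = Arrays.asList("b1", "b2", "b3").iterator();
    System.out.println(processUpTo(it, 2)); // prints 2, not 3
  }
}
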
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
index 85f8e54..1704367 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
@@ -205,7 +205,7 @@ public class TestBlocksWithNotEnoughRacks {
   "/testFile:  Replica placement policy is violated"));
   assertTrue(fsckOp.contains(" Block should be additionally replicated" +
   " on 1 more rack(s). Total number of racks in the cluster: 2"));
-
+  assertTrue(fsckOp.contains(" Blocks queued for replication:\t1"));
   try {
 DFSTestUtil.waitForReplication(cluster, b, 2, replicationFactor, 0);
   } catch (TimeoutException e) {





[hadoop] branch trunk updated: HDDS-1666. Issue in openKey when allocating block. (#943)

2019-07-15 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ef66e49  HDDS-1666. Issue in openKey when allocating block. (#943)
ef66e49 is described below

commit ef66e4999f3cd5f0ea2fa018359facb776bf892f
Author: Bharat Viswanadham 
AuthorDate: Mon Jul 15 17:54:41 2019 -0700

HDDS-1666. Issue in openKey when allocating block. (#943)
---
 .../apache/hadoop/ozone/om/TestKeyManagerImpl.java | 25 --
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  4 ++--
 2 files changed, 25 insertions(+), 4 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
index 0aa301a..0c5ce2b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
@@ -24,6 +24,7 @@ import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.BitSet;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -61,6 +62,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
@@ -76,6 +78,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -213,11 +216,29 @@ public class TestKeyManagerImpl {
 OmKeyArgs keyArgs = createBuilder()
 .setKeyName(KEY_NAME)
 .build();
-OpenKeySession keySession = keyManager1.openKey(keyArgs);
+
+// As now openKey will allocate at least one block, even if the size
+// passed is 0. So adding an entry to openKeyTable manually to test
+// allocateBlock failure.
+OmKeyInfo omKeyInfo = new OmKeyInfo.Builder()
+.setVolumeName(keyArgs.getVolumeName())
+.setBucketName(keyArgs.getBucketName())
+.setKeyName(keyArgs.getKeyName())
+.setOmKeyLocationInfos(Collections.singletonList(
+new OmKeyLocationInfoGroup(0, new ArrayList<>())))
+.setCreationTime(Time.now())
+.setModificationTime(Time.now())
+.setDataSize(0)
+.setReplicationType(keyArgs.getType())
+.setReplicationFactor(keyArgs.getFactor())
+.setFileEncryptionInfo(null).build();
+metadataManager.getOpenKeyTable().put(
+metadataManager.getOpenKey(VOLUME_NAME, BUCKET_NAME, KEY_NAME, 1L),
+omKeyInfo);
 LambdaTestUtils.intercept(OMException.class,
 "SafeModePrecheck failed for allocateBlock", () -> {
   keyManager1
-  .allocateBlock(keyArgs, keySession.getId(), new ExcludeList());
+  .allocateBlock(keyArgs, 1L, new ExcludeList());
 });
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 9e29825..90f7e4a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -436,7 +436,7 @@ public class KeyManagerImpl implements KeyManager {
 // client should expect, in terms of current size of key. If client sets
 // a value, then this value is used, otherwise, we allocate a single
 // block which is the current size, if read by the client.
-final long size = args.getDataSize() >= 0 ?
+final long size = args.getDataSize() > 0 ?
 args.getDataSize() : scmBlockSize;
 final List<OmKeyLocationInfo> locations = new ArrayList<>();
 
@@ -477,7 +477,7 @@ public class KeyManagerImpl implements KeyManager {
 openVersion = keyInfo.getLatestVersionLocations().getVersion();
 LOG.debug("Key {} allocated in volume {} bucket {}",
 keyName, volumeName, bucketName);
-allocateBlockInKey(keyInfo, args.getDataSize(), currentTime);
+allocateBlockInKey(keyInfo, size, currentTime);
 return new OpenKeySession(currentTime, keyInfo, openVersion);
   }
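
The one-character change above matters because openKey now always allocates at least one block: with >= 0, a client passing a data size of 0 hit the client-supplied branch and got no usable preallocation, and the second hunk makes the precomputed size (rather than the raw args.getDataSize()) flow into allocateBlockInKey. A self-contained sketch of the corrected fallback (the block size constant below is an assumption, not taken from the patch):

public class OpenKeySizeDemo {
  // Assumed default; the real value comes from SCM configuration.
  static final long SCM_BLOCK_SIZE = 256L * 1024 * 1024;

  // A client that does not yet know the final size sends 0 and still gets
  // one block's worth of preallocation.
  static long effectiveSize(long requestedDataSize) {
    return requestedDataSize > 0 ? requestedDataSize : SCM_BLOCK_SIZE;
  }

  public static void main(String[] args) {
    System.out.println(effectiveSize(0));    // 268435456: one default block
    System.out.println(effectiveSize(1024)); // 1024: client-supplied size
  }
}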
 



[hadoop] branch trunk updated: HDDS-1761. Fix class hierarchy for KeyRequest and FileRequest classes. (#1052)

2019-07-15 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 585f4d5  HDDS-1761. Fix class hierarchy for KeyRequest and FileRequest classes. (#1052)
585f4d5 is described below

commit 585f4d5c6405c8f85c6ef11a43a14302c261be11
Author: Bharat Viswanadham 
AuthorDate: Mon Jul 15 17:53:19 2019 -0700

HDDS-1761. Fix class hierarchy for KeyRequest and FileRequest classes. (#1052)
---
 .../om/request/file/OMDirectoryCreateRequest.java  |  10 +-
 .../ozone/om/request/file/OMFileCreateRequest.java |  12 +-
 .../om/request/key/OMAllocateBlockRequest.java |   4 +-
 .../ozone/om/request/key/OMKeyCommitRequest.java   |   4 +-
 .../ozone/om/request/key/OMKeyCreateRequest.java   | 263 +--
 .../ozone/om/request/key/OMKeyDeleteRequest.java   |   8 +-
 .../ozone/om/request/key/OMKeyRenameRequest.java   |  15 +-
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  | 284 +++--
 8 files changed, 290 insertions(+), 310 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
index 41d99fe..72fdee0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.file.OMDirectoryCreateResponse;
@@ -73,8 +72,7 @@ import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryR
 /**
  * Handle create directory request.
  */
-public class OMDirectoryCreateRequest extends OMClientRequest
-implements OMKeyRequest {
+public class OMDirectoryCreateRequest extends OMKeyRequest {
 
   private static final Logger LOG =
   LoggerFactory.getLogger(OMDirectoryCreateRequest.class);
@@ -214,8 +212,8 @@ public class OMDirectoryCreateRequest extends OMClientRequest
   OmBucketInfo omBucketInfo, String volumeName, String bucketName,
   String keyName, KeyArgs keyArgs)
   throws IOException {
-FileEncryptionInfo encryptionInfo = getFileEncryptionInfo(ozoneManager,
-omBucketInfo);
+Optional<FileEncryptionInfo> encryptionInfo =
+getFileEncryptionInfo(ozoneManager, omBucketInfo);
 String dirName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
 
 return new OmKeyInfo.Builder()
@@ -229,7 +227,7 @@ public class OMDirectoryCreateRequest extends 
OMClientRequest
 .setDataSize(0)
 .setReplicationType(HddsProtos.ReplicationType.RATIS)
 .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
-.setFileEncryptionInfo(encryptionInfo)
+.setFileEncryptionInfo(encryptionInfo.orNull())
 .setAcls(keyArgs.getAclsList())
 .build();
   }
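
Besides flattening the hierarchy, the hunk above changes getFileEncryptionInfo to hand back a Guava Optional that callers unwrap with orNull(). A minimal sketch of that pattern (the helper and its strings are invented for illustration; only com.google.common.base.Optional is the real API):

import com.google.common.base.Optional;

public class OptionalOrNullDemo {
  // Invented helper: returns absent() for unencrypted buckets, so the
  // builder receives a plain null via orNull() instead of a bare null flag.
  static Optional<String> encryptionInfoFor(boolean encrypted) {
    return encrypted ? Optional.of("AES/CTR/NoPadding") : Optional.<String>absent();
  }

  public static void main(String[] args) {
    System.out.println(encryptionInfoFor(true).orNull());  // AES/CTR/NoPadding
    System.out.println(encryptionInfoFor(false).orNull()); // null
  }
}
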
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
index b4e572f..1c89015 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
@@ -28,6 +28,7 @@ import java.util.Map;
 import java.util.stream.Collectors;
 import javax.annotation.Nonnull;
 
+import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -43,7 +44,6 @@ import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -72,8 +72,7 @@ import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryR
 /**
  * Handles create file request.
  */
-public class OMFileCreateRequest extends OMKeyCreateRequest
-implements OMKeyRequest {
+public class OMFileCreateRequest extends OMKeyRequest {

[hadoop] branch HDDS-1802 deleted (was 73d58e9)

2019-07-15 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a change to branch HDDS-1802
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 was 73d58e9  HDDS-1802. Add Eviction policy for table cache.

The revisions that were on this branch are still contained in
other references; therefore, this change does not discard any commits
from the repository.





[hadoop] 01/01: HDDS-1802. Add Eviction policy for table cache.

2019-07-15 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch HDDS-1802
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 73d58e9cd33917313b5c1801e6b0a851fcd57b63
Author: Bharat Viswanadham 
AuthorDate: Mon Jul 15 17:20:01 2019 -0700

HDDS-1802. Add Eviction policy for table cache.
---
 .../java/org/apache/hadoop/utils/db/DBStore.java   | 14 +++-
 .../java/org/apache/hadoop/utils/db/RDBStore.java  |  9 +++
 .../org/apache/hadoop/utils/db/TypedTable.java | 75 +++---
 ...{PartialTableCache.java => TableCacheImpl.java} | 31 +++--
 ...tialTableCache.java => TestTableCacheImpl.java} | 71 ++--
 .../hadoop/ozone/om/OmMetadataManagerImpl.java |  7 +-
 6 files changed, 168 insertions(+), 39 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
index d01dfe4..95e57d9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
@@ -25,6 +25,7 @@ import java.util.ArrayList;
 import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.utils.db.cache.TableCacheImpl;
 
 /**
  * The DBStore interface provides the ability to create Tables, which store
@@ -47,7 +48,9 @@ public interface DBStore extends AutoCloseable {
 
 
   /**
-   * Gets an existing TableStore with implicit key/value conversion.
+   * Gets an existing TableStore with implicit key/value conversion and
+   * with default cleanup policy for cache. Default policy is cache cleanup
+   * after flush to DB is completed.
*
* @param name - Name of the TableStore to get
* @param keyType
@@ -59,6 +62,15 @@ public interface DBStore extends AutoCloseable {
  Class<KEY> keyType, Class<VALUE> valueType) throws IOException;
 
   /**
+   * Gets an existing TableStore with implicit key/value conversion and
+   * with specified cleanup policy for cache.
+   * @throws IOException
+   */
+  <KEY, VALUE> Table<KEY, VALUE> getTable(String name,
+  Class<KEY> keyType, Class<VALUE> valueType,
+  TableCacheImpl.CacheCleanupPolicy cleanupPolicy) throws IOException;
+
+  /**
* Lists the Known list of Tables in a DB.
*
* @return List of Tables, in case of Rocks DB and LevelDB we will return at
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java
index 27862c7..23c03f1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.utils.RocksDBStoreMBean;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.utils.db.cache.TableCacheImpl;
import org.apache.ratis.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.rocksdb.ColumnFamilyDescriptor;
 import org.rocksdb.ColumnFamilyHandle;
@@ -261,6 +262,14 @@ public class RDBStore implements DBStore {
   }
 
   @Override
+  public <KEY, VALUE> Table<KEY, VALUE> getTable(String name,
+  Class<KEY> keyType, Class<VALUE> valueType,
+  TableCacheImpl.CacheCleanupPolicy cleanupPolicy) throws IOException {
+return new TypedTable<>(getTable(name), codecRegistry, keyType,
+valueType, cleanupPolicy);
+  }
+
+  @Override
   public ArrayList<Table> listTables() throws IOException {
 ArrayList<Table> returnList = new ArrayList<>();
 for (ColumnFamilyHandle handle : handleTable.values()) {
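
The new overload lets each table choose how its cache is cleaned up; per the javadoc above, the default policy evicts entries once a flush to the DB completes. A self-contained sketch of the idea (the enum constants and Cache type here are illustrative, not the actual TableCacheImpl names):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class TableCacheDemo {
  enum CleanupPolicy { CLEANUP_AFTER_FLUSH, NEVER } // illustrative names

  static final class Cache {
    private final Map<String, String> entries = new HashMap<>();
    private final CleanupPolicy policy;

    Cache(CleanupPolicy policy) {
      this.policy = policy;
    }

    void put(String key, String value) {
      entries.put(key, value);
    }

    // Called once a batch is durably written to the DB.
    void onFlush(Iterable<String> flushedKeys) {
      if (policy == CleanupPolicy.CLEANUP_AFTER_FLUSH) {
        for (String key : flushedKeys) {
          entries.remove(key); // evict; later reads fall through to the DB
        }
      } // NEVER: keep the full table cached
    }

    int size() {
      return entries.size();
    }
  }

  public static void main(String[] args) {
    Cache cache = new Cache(CleanupPolicy.CLEANUP_AFTER_FLUSH);
    cache.put("k1", "v1");
    cache.onFlush(Collections.singletonList("k1"));
    System.out.println(cache.size()); // 0: evicted after flush
  }
}
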
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java
index 05f73b8..3af498c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/TypedTable.java
@@ -23,9 +23,10 @@ import java.util.Iterator;
 import java.util.Map;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Optional;
 import org.apache.hadoop.utils.db.cache.CacheKey;
 import org.apache.hadoop.utils.db.cache.CacheValue;
-import org.apache.hadoop.utils.db.cache.PartialTableCache;
+import org.apache.hadoop.utils.db.cache.TableCacheImpl;
 import org.apache.hadoop.utils.db.cache.TableCache;
 
 /**
@@ -49,7 +50,17 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
 
  private final TableCache<CacheKey<KEY>, CacheValue<VALUE>> cache;
 
+  private final TableCacheImpl.CacheCleanupPolicy cacheCleanupPolicy;
 
+
+  /**
+   * Create an TypedTable from the raw table.
+   * Default cleanup policy used for the table is cleanup after flush.
+   * @param rawTable
+   * @param codecRegistry
+   * @param keyType
+   * @param valueType
+   */
   public TypedTable(
   Table rawTable,
   CodecRegistry codecR

[hadoop] branch HDDS-1802 created (now 73d58e9)

2019-07-15 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a change to branch HDDS-1802
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at 73d58e9  HDDS-1802. Add Eviction policy for table cache.

This branch includes the following new commits:

 new 73d58e9  HDDS-1802. Add Eviction policy for table cache.

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.






[hadoop] branch ozone-0.4.1 updated (3c41bc7 -> 336b18f)

2019-07-15 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 3c41bc7  HDDS-1800. Result of author check is inverted
 new ae30bb8  Merge pull request #1058 from elek/HDDS-1763
 new 336b18f  Merge pull request #944 from elek/HDDS-1668

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 hadoop-hdds/docs/static/ozone-usage.png| Bin 108376 -> 104961 bytes
 .../dist/src/main/k8s/definitions/ozone/om-ss.yaml |  13 +-
 .../src/main/k8s/definitions/ozone/s3g-ss.yaml |   5 
 .../src/main/k8s/definitions/ozone/scm-ss.yaml |   4 +++
 .../examples/minikube/datanode-statefulset.yaml|   6 ++---
 .../main/k8s/examples/minikube/om-statefulset.yaml |  26 ++-
 .../k8s/examples/minikube/s3g-statefulset.yaml |  11 +---
 .../k8s/examples/minikube/scm-statefulset.yaml |  16 +++-
 .../src/main/k8s/examples/ozone-csi/Flekszible |   1 +
 .../k8s/examples/ozone-csi/datanode-daemonset.yaml |  10 +---
 .../{ozone => ozone-csi}/datanode-service.yaml |   0
 .../datanode-statefulset.yaml  |   6 ++---
 .../k8s/examples/ozone-csi/om-statefulset.yaml |  28 -
 .../k8s/examples/ozone-csi/s3g-statefulset.yaml|  12 ++---
 .../k8s/examples/ozone-csi/scm-statefulset.yaml|  18 -
 .../examples/ozone-dev/datanode-statefulset.yaml   |   6 ++---
 .../k8s/examples/ozone-dev/om-statefulset.yaml |  28 +++--
 .../k8s/examples/ozone-dev/s3g-statefulset.yaml|  11 +---
 .../k8s/examples/ozone-dev/scm-statefulset.yaml|  16 +++-
 .../k8s/examples/ozone/datanode-statefulset.yaml   |   6 ++---
 .../main/k8s/examples/ozone/om-statefulset.yaml|  26 ++-
 .../main/k8s/examples/ozone/s3g-statefulset.yaml   |  11 +---
 .../main/k8s/examples/ozone/scm-statefulset.yaml   |  16 +++-
 23 files changed, 144 insertions(+), 132 deletions(-)
 copy hadoop-ozone/dist/src/main/k8s/examples/{ozone => ozone-csi}/datanode-service.yaml (100%)
 copy hadoop-ozone/dist/src/main/k8s/examples/{getting-started => ozone-csi}/datanode-statefulset.yaml (100%)





[hadoop] 02/02: Merge pull request #944 from elek/HDDS-1668

2019-07-15 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 336b18f2663fefebd7459441741adef87fe3f138
Author: Anu Engineer 
AuthorDate: Mon Jul 15 13:19:32 2019 -0700

Merge pull request #944 from elek/HDDS-1668

HDDS-1668. Add liveness probe to the example k8s resources files

(cherry picked from commit 141151325b3b14b84d66efba53c83e480a875333)
---
 .../dist/src/main/k8s/definitions/ozone/om-ss.yaml | 13 +-
 .../src/main/k8s/definitions/ozone/s3g-ss.yaml |  5 
 .../src/main/k8s/definitions/ozone/scm-ss.yaml |  4 
 .../examples/minikube/datanode-statefulset.yaml|  6 ++---
 .../main/k8s/examples/minikube/om-statefulset.yaml | 26 +++-
 .../k8s/examples/minikube/s3g-statefulset.yaml | 11 ++---
 .../k8s/examples/minikube/scm-statefulset.yaml | 16 -
 .../src/main/k8s/examples/ozone-csi/Flekszible |  1 +
 .../k8s/examples/ozone-csi/datanode-daemonset.yaml | 10 +---
 .../{Flekszible => datanode-service.yaml}  | 21 +---
 .../datanode-statefulset.yaml  |  6 ++---
 .../k8s/examples/ozone-csi/om-statefulset.yaml | 28 --
 .../k8s/examples/ozone-csi/s3g-statefulset.yaml| 12 ++
 .../k8s/examples/ozone-csi/scm-statefulset.yaml| 18 +-
 .../examples/ozone-dev/datanode-statefulset.yaml   |  6 ++---
 .../k8s/examples/ozone-dev/om-statefulset.yaml | 28 +++---
 .../k8s/examples/ozone-dev/s3g-statefulset.yaml| 11 ++---
 .../k8s/examples/ozone-dev/scm-statefulset.yaml| 16 -
 .../k8s/examples/ozone/datanode-statefulset.yaml   |  6 ++---
 .../main/k8s/examples/ozone/om-statefulset.yaml| 26 +++-
 .../main/k8s/examples/ozone/s3g-statefulset.yaml   | 11 ++---
 .../main/k8s/examples/ozone/scm-statefulset.yaml   | 16 -
 22 files changed, 156 insertions(+), 141 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml
index 5c2f7cc..befc21e 100644
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml
@@ -38,13 +38,6 @@ spec:
 spec:
   securityContext:
 fsGroup: 1000
-  initContainers:
-  - name: init
-image: "@docker.image@"
-args: ["ozone","om","--init"]
-env:
-- name: WAITFOR
-  value: scm-0.scm:9876
   containers:
   - name: om
 image: "@docker.image@"
@@ -52,4 +45,10 @@ spec:
 env:
 - name: WAITFOR
   value: scm-0.scm:9876
+- name: ENSURE_OM_INITIALIZED
+  value: /data/metadata/om/current/VERSION
+livenessProbe:
+  tcpSocket:
+port: 9862
+  initialDelaySeconds: 30
   volumes: []
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss.yaml
index 43044c9..fc8ff9a 100644
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss.yaml
@@ -36,3 +36,8 @@ spec:
   - name: s3g
 image: "@docker.image@"
 args: ["ozone","s3g"]
+livenessProbe:
+  httpGet:
+path: /
+port: 9878
+  initialDelaySeconds: 30
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml
index b3c36b7..d386afc 100644
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml
@@ -46,3 +46,7 @@ spec:
   - name: scm
 image: "@docker.image@"
 args: ["ozone","scm"]
+livenessProbe:
+  tcpSocket:
+port: 9861
+  initialDelaySeconds: 30
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml
index 6c8d1bf..452e258 100644
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml
@@ -55,12 +55,12 @@ spec:
 args:
 - ozone
 - datanode
-envFrom:
-- configMapRef:
-name: config
 volumeMounts:
 - name: data
   mountPath: /data
+envFrom:
+- configMapRef:
+name: config
   volumes:
   - name: data
 emptyDir: {}
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-statefulset.yaml
index c8ff81b..172df34 100644
--- a/hadoop-ozone/dist/src/main/k8s/examples

[hadoop] 01/02: Merge pull request #1058 from elek/HDDS-1763

2019-07-15 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit ae30bb8b5d262d2989911baf4e008ebc82867323
Author: Anu Engineer 
AuthorDate: Mon Jul 15 13:17:21 2019 -0700

Merge pull request #1058 from elek/HDDS-1763

HDDS-1763. Use vendor neutral s3 logo in ozone doc

(cherry picked from commit 47345e598eed517c9a03f72b80c35cf25ae55dd7)
---
 hadoop-hdds/docs/static/ozone-usage.png | Bin 108376 -> 104961 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)

diff --git a/hadoop-hdds/docs/static/ozone-usage.png b/hadoop-hdds/docs/static/ozone-usage.png
index 538227d..adcbdcf 100644
Binary files a/hadoop-hdds/docs/static/ozone-usage.png and b/hadoop-hdds/docs/static/ozone-usage.png differ





[hadoop] branch trunk updated: HDDS-1668. Add liveness probe to the example k8s resources files

2019-07-15 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d3a5abe  HDDS-1668. Add liveness probe to the example k8s resources files
 new 1411513  Merge pull request #944 from elek/HDDS-1668
d3a5abe is described below

commit d3a5abec71f2ddefc4fe9c8cd9bb0aaad8b5db80
Author: Márton Elek 
AuthorDate: Sat Jun 29 01:18:28 2019 +0200

HDDS-1668. Add liveness probe to the example k8s resources files
---
 .../dist/src/main/k8s/definitions/ozone/om-ss.yaml | 13 +-
 .../src/main/k8s/definitions/ozone/s3g-ss.yaml |  5 
 .../src/main/k8s/definitions/ozone/scm-ss.yaml |  4 
 .../examples/minikube/datanode-statefulset.yaml|  6 ++---
 .../main/k8s/examples/minikube/om-statefulset.yaml | 26 +++-
 .../k8s/examples/minikube/s3g-statefulset.yaml | 11 ++---
 .../k8s/examples/minikube/scm-statefulset.yaml | 16 -
 .../src/main/k8s/examples/ozone-csi/Flekszible |  1 +
 .../k8s/examples/ozone-csi/datanode-daemonset.yaml | 10 +---
 .../{Flekszible => datanode-service.yaml}  | 21 +---
 .../datanode-statefulset.yaml  |  6 ++---
 .../k8s/examples/ozone-csi/om-statefulset.yaml | 28 --
 .../k8s/examples/ozone-csi/s3g-statefulset.yaml| 12 ++
 .../k8s/examples/ozone-csi/scm-statefulset.yaml| 18 +-
 .../examples/ozone-dev/datanode-statefulset.yaml   |  6 ++---
 .../k8s/examples/ozone-dev/om-statefulset.yaml | 28 +++---
 .../k8s/examples/ozone-dev/s3g-statefulset.yaml| 11 ++---
 .../k8s/examples/ozone-dev/scm-statefulset.yaml| 16 -
 .../k8s/examples/ozone/datanode-statefulset.yaml   |  6 ++---
 .../main/k8s/examples/ozone/om-statefulset.yaml| 26 +++-
 .../main/k8s/examples/ozone/s3g-statefulset.yaml   | 11 ++---
 .../main/k8s/examples/ozone/scm-statefulset.yaml   | 16 -
 22 files changed, 156 insertions(+), 141 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml
index 5c2f7cc..befc21e 100644
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml
@@ -38,13 +38,6 @@ spec:
 spec:
   securityContext:
 fsGroup: 1000
-  initContainers:
-  - name: init
-image: "@docker.image@"
-args: ["ozone","om","--init"]
-env:
-- name: WAITFOR
-  value: scm-0.scm:9876
   containers:
   - name: om
 image: "@docker.image@"
@@ -52,4 +45,10 @@ spec:
 env:
 - name: WAITFOR
   value: scm-0.scm:9876
+- name: ENSURE_OM_INITIALIZED
+  value: /data/metadata/om/current/VERSION
+livenessProbe:
+  tcpSocket:
+port: 9862
+  initialDelaySeconds: 30
   volumes: []
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss.yaml
index 43044c9..fc8ff9a 100644
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss.yaml
@@ -36,3 +36,8 @@ spec:
   - name: s3g
 image: "@docker.image@"
 args: ["ozone","s3g"]
+livenessProbe:
+  httpGet:
+path: /
+port: 9878
+  initialDelaySeconds: 30
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml
index b3c36b7..d386afc 100644
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml
@@ -46,3 +46,7 @@ spec:
   - name: scm
 image: "@docker.image@"
 args: ["ozone","scm"]
+livenessProbe:
+  tcpSocket:
+port: 9861
+  initialDelaySeconds: 30
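
The probes added above are plain kubelet TCP/HTTP checks: after the initial delay, the container is restarted when the port stops accepting connections. A minimal Java sketch of what a tcpSocket liveness probe amounts to (host, port, and timeout are example values):

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

public class TcpProbeDemo {
  // Equivalent of a tcpSocket livenessProbe: healthy iff the port accepts
  // a TCP connection within the timeout.
  static boolean probe(String host, int port, int timeoutMillis) {
    try (Socket socket = new Socket()) {
      socket.connect(new InetSocketAddress(host, port), timeoutMillis);
      return true;
    } catch (IOException e) {
      return false;
    }
  }

  public static void main(String[] args) {
    System.out.println(probe("localhost", 9862, 1000)); // e.g. the OM RPC port
  }
}
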
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml
index 6c8d1bf..452e258 100644
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml
@@ -55,12 +55,12 @@ spec:
 args:
 - ozone
 - datanode
-envFrom:
-- configMapRef:
-name: config
 volumeMounts:
 - name: data
   mountPath: /data
+envFrom:
+- configMapRef:
+name: config
   volumes:
   - name: data
 emptyDir: {}
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/example

[hadoop] branch trunk updated: HDDS-1763. Use vendor neutral s3 logo in ozone doc. Contributed by Elek, Marton.

2019-07-15 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 68f53f9  HDDS-1763. Use vendor neutral s3 logo in ozone doc. Contributed by Elek, Marton.
 new 47345e5  Merge pull request #1058 from elek/HDDS-1763
68f53f9 is described below

commit 68f53f95add1cae7c4f63ffc5deeab05cbc847a0
Author: Márton Elek 
AuthorDate: Thu Jul 4 17:14:19 2019 +0200

HDDS-1763. Use vendor neutral s3 logo in ozone doc. Contributed by Elek, Marton.
---
 hadoop-hdds/docs/static/ozone-usage.png | Bin 108376 -> 104961 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)

diff --git a/hadoop-hdds/docs/static/ozone-usage.png b/hadoop-hdds/docs/static/ozone-usage.png
index 538227d..adcbdcf 100644
Binary files a/hadoop-hdds/docs/static/ozone-usage.png and b/hadoop-hdds/docs/static/ozone-usage.png differ





[hadoop] branch trunk updated: HDFS-14593. RBF: Implement deletion feature for expired records in State Store. Contributed by Takanobu Asanuma.

2019-07-15 Thread ayushsaxena
This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 64d4abf  HDFS-14593. RBF: Implement deletion feature for expired records in State Store. Contributed by Takanobu Asanuma.
64d4abf is described below

commit 64d4abf489a0267a265591026f8e6c84bc78591e
Author: Ayush Saxena 
AuthorDate: Mon Jul 15 22:38:00 2019 +0530

HDFS-14593. RBF: Implement deletion feature for expired records in State Store. Contributed by Takanobu Asanuma.
---
 .../server/federation/router/RBFConfigKeys.java|   8 ++
 .../server/federation/store/CachedRecordStore.java |  22 -
 .../server/federation/store/StateStoreService.java |  16 +++-
 .../federation/store/records/BaseRecord.java   |  46 +-
 .../federation/store/records/MembershipState.java  |  21 +
 .../federation/store/records/QueryResult.java  |   3 +-
 .../federation/store/records/RouterState.java  |  17 
 .../records/impl/pb/MembershipStatePBImpl.java |   4 +-
 .../store/records/impl/pb/RouterStatePBImpl.java   |   4 +-
 .../src/main/resources/hdfs-rbf-default.xml|  20 +
 .../store/TestStateStoreMembershipState.java   | 100 ++---
 .../store/TestStateStoreRouterState.java   |  61 ++---
 .../store/driver/TestStateStoreDriverBase.java |   5 +-
 13 files changed, 289 insertions(+), 38 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
index 1daebdc..a2bec12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
@@ -201,10 +201,18 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic {
   FEDERATION_STORE_PREFIX + "membership.expiration";
   public static final long FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS_DEFAULT =
   TimeUnit.MINUTES.toMillis(5);
+  public static final String FEDERATION_STORE_MEMBERSHIP_EXPIRATION_DELETION_MS
+  = FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS + ".deletion";
+  public static final long
+  FEDERATION_STORE_MEMBERSHIP_EXPIRATION_DELETION_MS_DEFAULT = -1;
   public static final String FEDERATION_STORE_ROUTER_EXPIRATION_MS =
   FEDERATION_STORE_PREFIX + "router.expiration";
   public static final long FEDERATION_STORE_ROUTER_EXPIRATION_MS_DEFAULT =
   TimeUnit.MINUTES.toMillis(5);
+  public static final String FEDERATION_STORE_ROUTER_EXPIRATION_DELETION_MS =
+  FEDERATION_STORE_ROUTER_EXPIRATION_MS + ".deletion";
+  public static final long
+  FEDERATION_STORE_ROUTER_EXPIRATION_DELETION_MS_DEFAULT = -1;
 
   // HDFS Router safe mode
   public static final String DFS_ROUTER_SAFEMODE_ENABLE =
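
The two new .deletion keys compose with the existing expiration intervals: a record first becomes expired after the expiration window, and is physically removed from the State Store once it has stayed expired for the additional deletion window; the -1 default keeps deletion disabled. An illustrative check in the spirit of the patch (exact semantics live in BaseRecord.shouldBeDeleted; this only shows how the two windows add up):

public class ExpirationDemo {
  // deletionMs < 0 disables physical deletion, matching the -1 defaults.
  static boolean shouldBeDeleted(long lastModified, long now,
      long expirationMs, long deletionMs) {
    if (deletionMs < 0) {
      return false;
    }
    return now - lastModified > expirationMs + deletionMs;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    long fiveMin = 5 * 60 * 1000L;
    long elevenMin = 11 * 60 * 1000L;
    // Last modified 11 minutes ago with 5+5 minute windows: delete.
    System.out.println(shouldBeDeleted(now - elevenMin, now, fiveMin, fiveMin)); // true
    // Deletion disabled: the record is only flagged expired, never removed.
    System.out.println(shouldBeDeleted(now - elevenMin, now, fiveMin, -1));      // false
  }
}
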
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java
index 5cfb521..7b28c03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.federation.store;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.concurrent.locks.Lock;
@@ -164,13 +163,15 @@ public abstract class CachedRecordStore<R extends BaseRecord>
 
   /**
* Updates the state store with any record overrides we detected, such as an
-   * expired state.
+   * expired state. If an expired record exists beyond deletion time, it is
+   * removed.
*
* @param query RecordQueryResult containing the data to be inspected.
* @throws IOException If the values cannot be updated.
*/
   public void overrideExpiredRecords(QueryResult<R> query) throws IOException {
 List<R> commitRecords = new ArrayList<>();
+List<R> deleteRecords = new ArrayList<>();
 List<R> newRecords = query.getRecords();
 long currentDriverTime = query.getTimestamp();
 if (newRecords == null || currentDriverTime <= 0) {
@@ -178,7 +179,16 @@ public abstract class CachedRecordStore<R extends BaseRecord>
   return;
 }
 for (R record : newRecords) {
-  if (record.checkExpired(currentDriverTime)) {
+  if (record.shouldBeDeleted(currentDriverTime)) {
+String recordName = StateStoreUtils.getRecordName(record.getClass());
+if (get

[hadoop] branch ozone-0.4.1 updated (eaec8e2 -> 3c41bc7)

2019-07-15 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a change to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 from eaec8e2  Revert "HDDS-1735. Create separate unit and integration test executor dev-support script"
  new 97897b6  HDDS-1735. Create separate unit and integration test executor dev-support script
 new 3c41bc7  HDDS-1800. Result of author check is inverted

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 hadoop-ozone/dev-support/checks/acceptance.sh   |  6 +-
 hadoop-ozone/dev-support/checks/author.sh   | 15 ++-
 hadoop-ozone/dev-support/checks/build.sh|  5 -
 hadoop-ozone/dev-support/checks/checkstyle.sh   | 10 --
 hadoop-ozone/dev-support/checks/findbugs.sh | 12 +++-
 .../dev-support/checks/{unit.sh => integration.sh}  | 12 
 hadoop-ozone/dev-support/checks/isolation.sh|  7 +--
 hadoop-ozone/dev-support/checks/rat.sh  | 17 +
 .../dev-support/checks/{acceptance.sh => shellcheck.sh} | 15 ---
 hadoop-ozone/dev-support/checks/unit.sh |  8 
 10 files changed, 76 insertions(+), 31 deletions(-)
 copy hadoop-ozone/dev-support/checks/{unit.sh => integration.sh} (72%)
 copy hadoop-ozone/dev-support/checks/{acceptance.sh => shellcheck.sh} (64%)





[hadoop] 01/02: HDDS-1735. Create separate unit and integration test executor dev-support script

2019-07-15 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 97897b6ab747e973fd05feac70e4c89bce3daa07
Author: Márton Elek 
AuthorDate: Sat Jun 29 01:59:44 2019 +0200

HDDS-1735. Create separate unit and integration test executor dev-support script

(cherry picked from commit 62a057b8d647730498ea9a04d57f18b4520d09cf)
---
 hadoop-ozone/dev-support/checks/acceptance.sh   |  6 +-
 hadoop-ozone/dev-support/checks/author.sh   | 14 ++
 hadoop-ozone/dev-support/checks/build.sh|  5 -
 hadoop-ozone/dev-support/checks/checkstyle.sh   | 10 --
 hadoop-ozone/dev-support/checks/findbugs.sh | 12 +++-
 .../dev-support/checks/{unit.sh => integration.sh}  | 12 
 hadoop-ozone/dev-support/checks/isolation.sh|  7 +--
 hadoop-ozone/dev-support/checks/rat.sh  | 17 +
 .../dev-support/checks/{acceptance.sh => shellcheck.sh} | 15 ---
 hadoop-ozone/dev-support/checks/unit.sh |  8 
 10 files changed, 76 insertions(+), 30 deletions(-)

diff --git a/hadoop-ozone/dev-support/checks/acceptance.sh b/hadoop-ozone/dev-support/checks/acceptance.sh
index 8de920f..4a50e08 100755
--- a/hadoop-ozone/dev-support/checks/acceptance.sh
+++ b/hadoop-ozone/dev-support/checks/acceptance.sh
@@ -14,6 +14,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+cd "$DIR/../../.." || exit 1
+
 export HADOOP_VERSION=3
-"$DIR/../../../hadoop-ozone/dist/target/ozone-*-SNAPSHOT/compose/test-all.sh"
+OZONE_VERSION=$(grep "<ozone.version>" "$DIR/../../pom.xml" | sed 's/<[^>]*>//g'|  sed 's/^[ \t]*//')
+cd "$DIR/../../dist/target/ozone-$OZONE_VERSION/compose" || exit 1
+./test-all.sh
 exit $?
diff --git a/hadoop-ozone/dev-support/checks/author.sh b/hadoop-ozone/dev-support/checks/author.sh
index 43caa70..d5a469c 100755
--- a/hadoop-ozone/dev-support/checks/author.sh
+++ b/hadoop-ozone/dev-support/checks/author.sh
@@ -13,10 +13,16 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-mkdir -p ./target
-grep -r --include="*.java" "@author" .
-if [ $? -gt 0 ]; then
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+cd "$DIR/../../.." || exit 1
+
+#hide this string to not confuse yetus
+AUTHOR="uthor"
+AUTHOR="@a${AUTHOR}"
+
+grep -r --include="*.java" "$AUTHOR" .
+if grep -r --include="*.java" "$AUTHOR" .; then
   exit 0
 else
-  exit -1
+  exit 1
 fi
diff --git a/hadoop-ozone/dev-support/checks/build.sh b/hadoop-ozone/dev-support/checks/build.sh
index 6a7811e..1197330 100755
--- a/hadoop-ozone/dev-support/checks/build.sh
+++ b/hadoop-ozone/dev-support/checks/build.sh
@@ -13,6 +13,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+cd "$DIR/../../.." || exit 1
+
 export MAVEN_OPTS="-Xmx4096m"
-mvn -am -pl :hadoop-ozone-dist -P hdds -Dmaven.javadoc.skip=true -DskipTests clean install
+mvn -B -f pom.ozone.xml -Dmaven.javadoc.skip=true -DskipTests clean install
 exit $?
diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh
index 0d80fbc..c4de528 100755
--- a/hadoop-ozone/dev-support/checks/checkstyle.sh
+++ b/hadoop-ozone/dev-support/checks/checkstyle.sh
@@ -13,11 +13,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-mvn -fn checkstyle:check -am -pl :hadoop-ozone-dist -Phdds
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+cd "$DIR/../../.." || exit 1
+
+mvn -B -fn checkstyle:check -f pom.ozone.xml
+
+#Print out the exact violations with parsing XML results with sed
+find "." -name checkstyle-errors.xml -print0  | xargs -0 sed  '$!N; 
//d'
 
 violations=$(grep -r error --include checkstyle-errors.xml .| wc -l)
 if [[ $violations -gt 0 ]]; then
 echo "There are $violations checkstyle violations"
-exit -1
+exit 1
 fi
 exit 0
diff --git a/hadoop-ozone/dev-support/checks/findbugs.sh b/hadoop-ozone/dev-support/checks/findbugs.sh
index 1328492..545ad9f 100755
--- a/hadoop-ozone/dev-support/checks/findbugs.sh
+++ b/hadoop-ozone/dev-support/checks/findbugs.sh
@@ -13,6 +13,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+DIR="$( 

[hadoop] 02/02: HDDS-1800. Result of author check is inverted

2019-07-15 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 3c41bc7f918e8e850966ef65a68cb7caa3dc009b
Author: Doroszlai, Attila 
AuthorDate: Mon Jul 15 18:00:10 2019 +0200

HDDS-1800. Result of author check is inverted

Closes #1092
---
 hadoop-ozone/dev-support/checks/author.sh | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/hadoop-ozone/dev-support/checks/author.sh b/hadoop-ozone/dev-support/checks/author.sh
index d5a469c..f50a396 100755
--- a/hadoop-ozone/dev-support/checks/author.sh
+++ b/hadoop-ozone/dev-support/checks/author.sh
@@ -20,9 +20,8 @@ cd "$DIR/../../.." || exit 1
 AUTHOR="uthor"
 AUTHOR="@a${AUTHOR}"
 
-grep -r --include="*.java" "$AUTHOR" .
 if grep -r --include="*.java" "$AUTHOR" .; then
-  exit 0
-else
   exit 1
+else
+  exit 0
 fi





[hadoop] branch ozone-0.4.1 updated: Revert "HDDS-1735. Create separate unit and integration test executor dev-support script"

2019-07-15 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new eaec8e2  Revert "HDDS-1735. Create separate unit and integration test executor dev-support script"
eaec8e2 is described below

commit eaec8e20d94c5c06fd87804458f0ed65a40ae7e5
Author: Márton Elek 
AuthorDate: Mon Jul 15 18:13:20 2019 +0200

Revert "HDDS-1735. Create separate unit and integration test executor 
dev-support script"

This reverts commit e8ea4dda04abbf02d426f5df882f1ac89b89f05b.
---
 hadoop-ozone/dev-support/checks/acceptance.sh  |  3 +--
 hadoop-ozone/dev-support/checks/author.sh  |  1 +
 hadoop-ozone/dev-support/checks/build.sh   |  2 +-
 hadoop-ozone/dev-support/checks/checkstyle.sh  |  5 +
 hadoop-ozone/dev-support/checks/findbugs.sh|  2 +-
 hadoop-ozone/dev-support/checks/integration.sh | 25 -
 hadoop-ozone/dev-support/checks/rat.sh |  5 +
 hadoop-ozone/dev-support/checks/unit.sh|  2 +-
 8 files changed, 7 insertions(+), 38 deletions(-)

diff --git a/hadoop-ozone/dev-support/checks/acceptance.sh b/hadoop-ozone/dev-support/checks/acceptance.sh
index 258c4e2..8de920f 100755
--- a/hadoop-ozone/dev-support/checks/acceptance.sh
+++ b/hadoop-ozone/dev-support/checks/acceptance.sh
@@ -15,6 +15,5 @@
 # limitations under the License.
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 export HADOOP_VERSION=3
-OZONE_VERSION=$(cat $DIR/../../pom.xml  | grep "<ozone.version>" | sed 's/<[^>]*>//g'|  sed 's/^[ \t]*//')
-"$DIR/../../dist/target/ozone-$OZONE_VERSION/compose/test-all.sh"
+"$DIR/../../../hadoop-ozone/dist/target/ozone-*-SNAPSHOT/compose/test-all.sh"
 exit $?
diff --git a/hadoop-ozone/dev-support/checks/author.sh b/hadoop-ozone/dev-support/checks/author.sh
index 56d15a5..43caa70 100755
--- a/hadoop-ozone/dev-support/checks/author.sh
+++ b/hadoop-ozone/dev-support/checks/author.sh
@@ -13,6 +13,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+mkdir -p ./target
 grep -r --include="*.java" "@author" .
 if [ $? -gt 0 ]; then
   exit 0
diff --git a/hadoop-ozone/dev-support/checks/build.sh b/hadoop-ozone/dev-support/checks/build.sh
index 71bf778..6a7811e 100755
--- a/hadoop-ozone/dev-support/checks/build.sh
+++ b/hadoop-ozone/dev-support/checks/build.sh
@@ -14,5 +14,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 export MAVEN_OPTS="-Xmx4096m"
-mvn -B -f pom.ozone.xml -Dmaven.javadoc.skip=true -DskipTests clean install
+mvn -am -pl :hadoop-ozone-dist -P hdds -Dmaven.javadoc.skip=true -DskipTests clean install
 exit $?
diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh
index 323cbc8..0d80fbc 100755
--- a/hadoop-ozone/dev-support/checks/checkstyle.sh
+++ b/hadoop-ozone/dev-support/checks/checkstyle.sh
@@ -13,10 +13,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-mvn -B -fn checkstyle:check -f pom.ozone.xml
-
-#Print out the exact violations with parsing XML results with sed
-find -name checkstyle-errors.xml | xargs sed  '$!N; //d'
+mvn -fn checkstyle:check -am -pl :hadoop-ozone-dist -Phdds
 
 violations=$(grep -r error --include checkstyle-errors.xml .| wc -l)
 if [[ $violations -gt 0 ]]; then
diff --git a/hadoop-ozone/dev-support/checks/findbugs.sh b/hadoop-ozone/dev-support/checks/findbugs.sh
index c8bd40b..1328492 100755
--- a/hadoop-ozone/dev-support/checks/findbugs.sh
+++ b/hadoop-ozone/dev-support/checks/findbugs.sh
@@ -20,7 +20,7 @@ mkdir -p ./target
 rm "$FINDBUGS_ALL_FILE" || true
 touch "$FINDBUGS_ALL_FILE"
 
-mvn -B compile -fn findbugs:check -Dfindbugs.failOnError=false  -f pom.ozone.xml
+mvn -fn findbugs:check -Dfindbugs.failOnError=false  -am -pl :hadoop-ozone-dist -Phdds
 
 find hadoop-ozone -name findbugsXml.xml | xargs -n1 convertXmlToText | tee -a "${FINDBUGS_ALL_FILE}"
 find hadoop-hdds -name findbugsXml.xml | xargs -n1 convertXmlToText | tee -a "${FINDBUGS_ALL_FILE}"
diff --git a/hadoop-ozone/dev-support/checks/integration.sh b/hadoop-ozone/dev-support/checks/integration.sh
deleted file mode 100755
index 8270d4f..000
--- a/hadoop-ozone/dev-support/checks/integration.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this f

[hadoop] branch trunk updated: HDDS-1800. Result of author check is inverted

2019-07-15 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 61bbdee  HDDS-1800. Result of author check is inverted
61bbdee is described below

commit 61bbdeee193d8bdcbadbc2823a3e63aab0c83422
Author: Doroszlai, Attila 
AuthorDate: Mon Jul 15 18:00:10 2019 +0200

HDDS-1800. Result of author check is inverted

Closes #1092
---
 hadoop-ozone/dev-support/checks/author.sh | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/hadoop-ozone/dev-support/checks/author.sh b/hadoop-ozone/dev-support/checks/author.sh
index d5a469c..f50a396 100755
--- a/hadoop-ozone/dev-support/checks/author.sh
+++ b/hadoop-ozone/dev-support/checks/author.sh
@@ -20,9 +20,8 @@ cd "$DIR/../../.." || exit 1
 AUTHOR="uthor"
 AUTHOR="@a${AUTHOR}"
 
-grep -r --include="*.java" "$AUTHOR" .
 if grep -r --include="*.java" "$AUTHOR" .; then
-  exit 0
-else
   exit 1
+else
+  exit 0
 fi





[hadoop] branch ozone-0.4.1 updated: HDDS-1735. Create separate unit and integration test executor dev-support script

2019-07-15 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new e8ea4dd  HDDS-1735. Create separate unit and integration test executor 
dev-support script
e8ea4dd is described below

commit e8ea4dda04abbf02d426f5df882f1ac89b89f05b
Author: Márton Elek 
AuthorDate: Sat Jun 29 01:59:44 2019 +0200

HDDS-1735. Create separate unit and integration test executor dev-support 
script

(cherry picked from commit 0bae9e8ec8b53a3b484eaa01a3fa3f177d56b3e4)
---
 hadoop-ozone/dev-support/checks/acceptance.sh   | 3 ++-
 hadoop-ozone/dev-support/checks/author.sh   | 1 -
 hadoop-ozone/dev-support/checks/build.sh| 2 +-
 hadoop-ozone/dev-support/checks/checkstyle.sh   | 5 -
 hadoop-ozone/dev-support/checks/findbugs.sh | 2 +-
 hadoop-ozone/dev-support/checks/{unit.sh => integration.sh} | 3 ++-
 hadoop-ozone/dev-support/checks/rat.sh  | 5 -
 hadoop-ozone/dev-support/checks/unit.sh | 2 +-
 8 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/hadoop-ozone/dev-support/checks/acceptance.sh 
b/hadoop-ozone/dev-support/checks/acceptance.sh
index 8de920f..258c4e2 100755
--- a/hadoop-ozone/dev-support/checks/acceptance.sh
+++ b/hadoop-ozone/dev-support/checks/acceptance.sh
@@ -15,5 +15,6 @@
 # limitations under the License.
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 export HADOOP_VERSION=3
-"$DIR/../../../hadoop-ozone/dist/target/ozone-*-SNAPSHOT/compose/test-all.sh"
+OZONE_VERSION=$(cat $DIR/../../pom.xml  | grep "<ozone.version>" | sed 
's/<[^>]*>//g'|  sed 's/^[ \t]*//')
+"$DIR/../../dist/target/ozone-$OZONE_VERSION/compose/test-all.sh"
 exit $?
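
One detail of the acceptance.sh change is worth spelling out: a quoted glob
is never expanded by the shell, so the old quoted "ozone-*-SNAPSHOT" path was
searched for literally and could not resolve; the new code instead computes
the concrete version from the parent pom.xml and builds an exact path. A
quick demonstration of the quoting pitfall, with a made-up directory:

# Quoting suppresses globbing: the first ls looks for a literal '*'.
mkdir -p /tmp/globdemo/ozone-0.4.1-SNAPSHOT
ls "/tmp/globdemo/ozone-*-SNAPSHOT"   # fails: No such file or directory
ls /tmp/globdemo/ozone-*-SNAPSHOT     # works: the unquoted glob expands
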
diff --git a/hadoop-ozone/dev-support/checks/author.sh 
b/hadoop-ozone/dev-support/checks/author.sh
index 43caa70..56d15a5 100755
--- a/hadoop-ozone/dev-support/checks/author.sh
+++ b/hadoop-ozone/dev-support/checks/author.sh
@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-mkdir -p ./target
 grep -r --include="*.java" "@author" .
 if [ $? -gt 0 ]; then
   exit 0
diff --git a/hadoop-ozone/dev-support/checks/build.sh 
b/hadoop-ozone/dev-support/checks/build.sh
index 6a7811e..71bf778 100755
--- a/hadoop-ozone/dev-support/checks/build.sh
+++ b/hadoop-ozone/dev-support/checks/build.sh
@@ -14,5 +14,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 export MAVEN_OPTS="-Xmx4096m"
-mvn -am -pl :hadoop-ozone-dist -P hdds -Dmaven.javadoc.skip=true -DskipTests 
clean install
+mvn -B -f pom.ozone.xml -Dmaven.javadoc.skip=true -DskipTests clean install
 exit $?
diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh 
b/hadoop-ozone/dev-support/checks/checkstyle.sh
index 0d80fbc..323cbc8 100755
--- a/hadoop-ozone/dev-support/checks/checkstyle.sh
+++ b/hadoop-ozone/dev-support/checks/checkstyle.sh
@@ -13,7 +13,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-mvn -fn checkstyle:check -am -pl :hadoop-ozone-dist -Phdds
+mvn -B -fn checkstyle:check -f pom.ozone.xml
+
+#Print out the exact violations with parsing XML results with sed
+find -name checkstyle-errors.xml | xargs sed  '$!N; //d'
 
 violations=$(grep -r error --include checkstyle-errors.xml .| wc -l)
 if [[ $violations -gt 0 ]]; then
diff --git a/hadoop-ozone/dev-support/checks/findbugs.sh 
b/hadoop-ozone/dev-support/checks/findbugs.sh
index 1328492..c8bd40b 100755
--- a/hadoop-ozone/dev-support/checks/findbugs.sh
+++ b/hadoop-ozone/dev-support/checks/findbugs.sh
@@ -20,7 +20,7 @@ mkdir -p ./target
 rm "$FINDBUGS_ALL_FILE" || true
 touch "$FINDBUGS_ALL_FILE"
 
-mvn -fn findbugs:check -Dfindbugs.failOnError=false  -am -pl 
:hadoop-ozone-dist -Phdds
+mvn -B compile -fn findbugs:check -Dfindbugs.failOnError=false  -f 
pom.ozone.xml
 
 find hadoop-ozone -name findbugsXml.xml | xargs -n1 convertXmlToText | tee -a 
"${FINDBUGS_ALL_FILE}"
 find hadoop-hdds -name findbugsXml.xml | xargs -n1 convertXmlToText | tee -a 
"${FINDBUGS_ALL_FILE}"
diff --git a/hadoop-ozone/dev-support/checks/unit.sh 
b/hadoop-ozone/dev-support/checks/integration.sh
similarity index 88%
copy from hadoop-ozone/dev-support/checks/unit.sh
copy to hadoop-ozone/dev-support/checks/integration.sh
index d839f22..8270d4f 100755
--- a/hadoop-ozone/dev-support/checks/unit.sh
+++ b/hadoop-ozone/dev-support/checks/integration.sh
@@ -14,7 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 export MAVEN_OPTS="-Xmx4096m"
-mvn -fn test -am -
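
With the split in place each concern lives in its own script and signals
failure through its exit code, so the checks can be run one at a time from a
checkout. A hypothetical local invocation (script paths from the diffstat
above; the echo labels are invented):

# Run the separated checks individually from the repository root.
cd hadoop   # your checkout
hadoop-ozone/dev-support/checks/unit.sh && echo "unit: OK"
hadoop-ozone/dev-support/checks/integration.sh && echo "integration: OK"
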

[hadoop] branch branch-3.2 updated: YARN-9326. Fair Scheduler configuration defaults are not documented in case of min and maxResources. Contributed by Adam Antal

2019-07-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 7c9cfc0  YARN-9326. Fair Scheduler configuration defaults are not 
documented in case of min and maxResources. Contributed by Adam Antal
7c9cfc0 is described below

commit 7c9cfc0996316c08f23a4fb3a53e10fd20521d7b
Author: Szilard Nemeth 
AuthorDate: Mon Jul 15 13:28:01 2019 +0200

YARN-9326. Fair Scheduler configuration defaults are not documented in case 
of min and maxResources. Contributed by Adam Antal

(cherry picked from commit 5446308360f57cb98c54c416231788ba9ae332f8)
---
 .../hadoop-yarn-site/src/site/markdown/FairScheduler.md| 14 ++
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index 5f9e779..991796a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -86,13 +86,19 @@ The allocation file must be in XML format. The format 
contains five types of ele
 
 * **Queue elements**: which represent queues. Queue elements can take an 
optional attribute 'type', which when set to 'parent' makes it a parent queue. 
This is useful when we want to create a parent queue without configuring any 
leaf queues. Each queue element may contain the following properties:
 
-* **minResources**: minimum resources the queue is entitled to, in the 
form of "X mb, Y vcores" or "vcores=X, memory-mb=Y". The latter form is 
required when specifying resources other than memory and CPU. For the 
single-resource fairness policy, the vcores value is ignored. If a queue's 
minimum share is not satisfied, it will be offered available resources before 
any other queue under the same parent. Under the single-resource fairness 
policy, a queue is considered unsatisfied if its [...]
+* **minResources**: minimum resources the queue is entitled to. For the 
single-resource fairness policy, only the memory is used; other resources are 
ignored. If a queue's minimum share is not satisfied, it will be offered 
available resources before any other queue under the same parent. Under the 
single-resource fairness policy, a queue is considered unsatisfied if its 
memory usage is below its minimum memory share. Under dominant resource 
fairness, a queue is considered unsatisfied [...]
 
-* **maxResources**: maximum resources a queue will allocated, expressed in 
the form of "X%", "X% cpu, Y% memory", "X mb, Y vcores", or "vcores=X, 
memory-mb=Y". The last form is required when specifying resources other than 
memory and CPU. In the last form, X and Y can either be a percentage or an 
integer resource value without units. In the latter case the units will be 
inferred from the default units configured for that resource. A queue will not 
be assigned a container that would p [...]
+* **maxResources**: maximum resources a queue can be allocated. A queue 
will not be assigned a container that would put its aggregate usage over this 
limit. This limit is enforced recursively: the queue will not be assigned a 
container if that assignment would put the queue or its parent(s) over the 
maximum resources.
 
-* **maxContainerAllocation**: maximum resources a queue can allocate for a 
single container, expressed in the form of "X mb, Y vcores" or "vcores=X, 
memory-mb=Y". The latter form is required when specifying resources other than 
memory and CPU. If the property is not set it's value is inherited from a 
parent queue. It's default value is **yarn.scheduler.maximum-allocation-mb**. 
Cannot be higher than **maxResources**. This property is invalid for root queue.
+* **maxContainerAllocation**: maximum resources a queue can allocate for a 
single container. If the property is not set, its value is inherited from the 
parent queue. The default values are **yarn.scheduler.maximum-allocation-mb** 
and **yarn.scheduler.maximum-allocation-vcores**. Cannot be higher than 
**maxResources**. This property is invalid for the root queue.
 
-* **maxChildResources**: maximum resources an ad hoc child queue will 
allocated, expressed in the form of "X%", "X% cpu, Y% memory", "X mb, Y 
vcores", or "vcores=X, memory-mb=Y". The last form is required when specifying 
resources other than memory and CPU. In the last form, X and Y can either be a 
percentage or an integer resource value without units. In the latter case the 
units will be inferred from the default units configured for that resource. An 
ad hoc child queue will not be  [...]
+* **maxChildResources**: maximum resources an ad hoc child queue can be 
allocated. A child queue limi

[hadoop] branch trunk updated: YARN-9326. Fair Scheduler configuration defaults are not documented in case of min and maxResources. Contributed by Adam Antal

2019-07-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5446308  YARN-9326. Fair Scheduler configuration defaults are not 
documented in case of min and maxResources. Contributed by Adam Antal
5446308 is described below

commit 5446308360f57cb98c54c416231788ba9ae332f8
Author: Szilard Nemeth 
AuthorDate: Mon Jul 15 13:28:01 2019 +0200

YARN-9326. Fair Scheduler configuration defaults are not documented in case 
of min and maxResources. Contributed by Adam Antal
---
 .../hadoop-yarn-site/src/site/markdown/FairScheduler.md| 14 ++
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index 5f9e779..991796a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -86,13 +86,19 @@ The allocation file must be in XML format. The format 
contains five types of ele
 
 * **Queue elements**: which represent queues. Queue elements can take an 
optional attribute 'type', which when set to 'parent' makes it a parent queue. 
This is useful when we want to create a parent queue without configuring any 
leaf queues. Each queue element may contain the following properties:
 
-* **minResources**: minimum resources the queue is entitled to, in the 
form of "X mb, Y vcores" or "vcores=X, memory-mb=Y". The latter form is 
required when specifying resources other than memory and CPU. For the 
single-resource fairness policy, the vcores value is ignored. If a queue's 
minimum share is not satisfied, it will be offered available resources before 
any other queue under the same parent. Under the single-resource fairness 
policy, a queue is considered unsatisfied if its [...]
+* **minResources**: minimum resources the queue is entitled to. For the 
single-resource fairness policy, only the memory is used; other resources are 
ignored. If a queue's minimum share is not satisfied, it will be offered 
available resources before any other queue under the same parent. Under the 
single-resource fairness policy, a queue is considered unsatisfied if its 
memory usage is below its minimum memory share. Under dominant resource 
fairness, a queue is considered unsatisfied [...]
 
-* **maxResources**: maximum resources a queue will allocated, expressed in 
the form of "X%", "X% cpu, Y% memory", "X mb, Y vcores", or "vcores=X, 
memory-mb=Y". The last form is required when specifying resources other than 
memory and CPU. In the last form, X and Y can either be a percentage or an 
integer resource value without units. In the latter case the units will be 
inferred from the default units configured for that resource. A queue will not 
be assigned a container that would p [...]
+* **maxResources**: maximum resources a queue can be allocated. A queue 
will not be assigned a container that would put its aggregate usage over this 
limit. This limit is enforced recursively: the queue will not be assigned a 
container if that assignment would put the queue or its parent(s) over the 
maximum resources.
 
-* **maxContainerAllocation**: maximum resources a queue can allocate for a 
single container, expressed in the form of "X mb, Y vcores" or "vcores=X, 
memory-mb=Y". The latter form is required when specifying resources other than 
memory and CPU. If the property is not set it's value is inherited from a 
parent queue. It's default value is **yarn.scheduler.maximum-allocation-mb**. 
Cannot be higher than **maxResources**. This property is invalid for root queue.
+* **maxContainerAllocation**: maximum resources a queue can allocate for a 
single container. If the property is not set, its value is inherited from the 
parent queue. The default values are **yarn.scheduler.maximum-allocation-mb** 
and **yarn.scheduler.maximum-allocation-vcores**. Cannot be higher than 
**maxResources**. This property is invalid for the root queue.
 
-* **maxChildResources**: maximum resources an ad hoc child queue will 
allocated, expressed in the form of "X%", "X% cpu, Y% memory", "X mb, Y 
vcores", or "vcores=X, memory-mb=Y". The last form is required when specifying 
resources other than memory and CPU. In the last form, X and Y can either be a 
percentage or an integer resource value without units. In the latter case the 
units will be inferred from the default units configured for that resource. An 
ad hoc child queue will not be  [...]
+* **maxChildResources**: maximum resources an ad hoc child queue can be 
allocated. A child queue limit is enforced recursively and thus will not be 
assigned a container if that assignment 
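
For reference, the properties described above are set per queue in the Fair
Scheduler allocation file. A hypothetical minimal example (the queue name and
all values are invented for illustration; maxContainerAllocation stays below
maxResources, as the documentation requires):

# Write a sample allocation file; names and numbers are illustrative only.
cat > fair-scheduler.xml <<'EOF'
<?xml version="1.0"?>
<allocations>
  <queue name="analytics">
    <minResources>10000 mb, 2 vcores</minResources>
    <maxResources>40000 mb, 8 vcores</maxResources>
    <maxContainerAllocation>4096 mb, 2 vcores</maxContainerAllocation>
  </queue>
</allocations>
EOF
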

[hadoop] 08/08: HDDS-1766. ContainerStateMachine is unable to increment lastAppliedTermIndex. Contributed by Mukul Kumar Singh. (#1072)

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 9b3c034695d55c1edd7b5874b6929781fe35318e
Author: Mukul Kumar Singh 
AuthorDate: Sun Jul 14 10:53:51 2019 +0530

HDDS-1766. ContainerStateMachine is unable to increment 
lastAppliedTermIndex. Contributed by  Mukul Kumar Singh. (#1072)

(cherry picked from commit 0976f6fc30ed8bb774d823f09c58cea54be05ae7)
---
 .../server/ratis/ContainerStateMachine.java| 40 --
 1 file changed, 21 insertions(+), 19 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index f4a8008..87826e6 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -27,6 +27,7 @@ import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.proto.RaftProtos.RaftPeerRole;
 import org.apache.ratis.protocol.RaftGroupId;
 import org.apache.ratis.server.RaftServer;
@@ -195,17 +196,16 @@ public class ContainerStateMachine extends 
BaseStateMachine {
 if (snapshot == null) {
   TermIndex empty =
   TermIndex.newTermIndex(0, RaftLog.INVALID_LOG_INDEX);
-  LOG.info(
-  "The snapshot info is null." + "Setting the last applied index to:"
-  + empty);
+  LOG.info("{}: The snapshot info is null. Setting the last applied index" +
+  "to:{}", gid, empty);
   setLastAppliedTermIndex(empty);
-  return RaftLog.INVALID_LOG_INDEX;
+  return empty.getIndex();
 }
 
 final File snapshotFile = snapshot.getFile().getPath().toFile();
 final TermIndex last =
 SimpleStateMachineStorage.getTermIndexFromSnapshotFile(snapshotFile);
-LOG.info("Setting the last applied index to " + last);
+LOG.info("{}: Setting the last applied index to {}", gid, last);
 setLastAppliedTermIndex(last);
 
 // initialize the dispatcher with snapshot so that it build the missing
@@ -241,18 +241,20 @@ public class ContainerStateMachine extends 
BaseStateMachine {
   @Override
   public long takeSnapshot() throws IOException {
 TermIndex ti = getLastAppliedTermIndex();
-LOG.info("Taking snapshot at termIndex:" + ti);
+long startTime = Time.monotonicNow();
 if (ti != null && ti.getIndex() != RaftLog.INVALID_LOG_INDEX) {
   final File snapshotFile =
   storage.getSnapshotFile(ti.getTerm(), ti.getIndex());
-  LOG.info("Taking a snapshot to file {}", snapshotFile);
+  LOG.info("{}: Taking a snapshot at:{} file {}", gid, ti, snapshotFile);
   try (FileOutputStream fos = new FileOutputStream(snapshotFile)) {
 persistContainerSet(fos);
   } catch (IOException ioe) {
-LOG.warn("Failed to write snapshot file \"" + snapshotFile
-+ "\", last applied index=" + ti);
+LOG.info("{}: Failed to write snapshot at:{} file {}", gid, ti,
+snapshotFile);
 throw ioe;
   }
+  LOG.info("{}: Finished taking a snapshot at:{} file:{} time:{}",
+  gid, ti, snapshotFile, (Time.monotonicNow() - startTime));
   return ti.getIndex();
 }
 return -1;
@@ -326,7 +328,7 @@ public class ContainerStateMachine extends BaseStateMachine 
{
 
   private ContainerCommandResponseProto dispatchCommand(
   ContainerCommandRequestProto requestProto, DispatcherContext context) {
-LOG.trace("dispatch {} containerID={} pipelineID={} traceID={}",
+LOG.trace("{}: dispatch {} containerID={} pipelineID={} traceID={}", gid,
 requestProto.getCmdType(), requestProto.getContainerID(),
 requestProto.getPipelineID(), requestProto.getTraceID());
 if (isBlockTokenEnabled) {
@@ -344,7 +346,7 @@ public class ContainerStateMachine extends BaseStateMachine 
{
 }
 ContainerCommandResponseProto response =
 dispatcher.dispatch(requestProto, context);
-LOG.trace("response {}", response);
+LOG.trace("{}: response {}", gid, response);
 return response;
   }
 
@@ -384,18 +386,18 @@ public class ContainerStateMachine extends 
BaseStateMachine {
 .supplyAsync(() -> runCommand(requestProto, context), chunkExecutor);
 
 writeChunkFutureMap.put(entryIndex, writeChunkFuture);
-LOG.debug("writeChunk writeStateMachineData : blockId " + 
write.getBlockID()

[hadoop] 01/08: HDDS-1705. Recon: Add estimatedTotalCount to the response of containers and containers/{id} endpoints. Contributed by Vivek Ratnavel Subramanian.

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 758d756a8e08c8928df83093439ba9e434ac162b
Author: Vivek Ratnavel Subramanian 
AuthorDate: Mon Jul 8 21:06:50 2019 -0700

HDDS-1705. Recon: Add estimatedTotalCount to the response of containers and 
containers/{id} endpoints. Contributed by Vivek Ratnavel Subramanian.

(cherry picked from commit 82d88a8d30790c5841fc4f71ea39cc12b470c41f)
---
 .../org/apache/hadoop/ozone/common/Storage.java|   6 +-
 .../common/src/main/resources/ozone-default.xml|   6 +-
 .../hadoop/ozone/om/OzoneManagerStarter.java   |   2 +-
 .../recon/codegen/ReconSchemaGenerationModule.java |   2 +
 .../ozone/recon/schema/StatsSchemaDefinition.java  |  61 
 .../apache/hadoop/ozone/recon/ReconConstants.java  |   8 +-
 .../org/apache/hadoop/ozone/recon/ReconServer.java |  22 ++
 .../ozone/recon/api/ContainerKeyService.java   |  22 +-
 .../ozone/recon/api/types/ContainersResponse.java  |  94 ++
 .../hadoop/ozone/recon/api/types/KeysResponse.java |  93 ++
 .../recon/spi/ContainerDBServiceProvider.java  |  58 +++-
 .../spi/impl/ContainerDBServiceProviderImpl.java   | 137 -
 .../recon/spi/impl/ReconContainerDBProvider.java   |   4 +
 .../ozone/recon/tasks/ContainerKeyMapperTask.java  |  57 +++-
 .../recon/GuiceInjectorUtilsForTestsImpl.java} |  25 +-
 .../ozone/recon/api/TestContainerKeyService.java   | 186 +++-
 .../recon/persistence/AbstractSqlDatabaseTest.java |  12 +-
 .../persistence/TestStatsSchemaDefinition.java | 147 ++
 .../impl/TestContainerDBServiceProviderImpl.java   | 326 +
 .../impl/TestOzoneManagerServiceProviderImpl.java  |  53 +---
 .../recon/tasks/TestContainerKeyMapperTask.java| 127 
 .../recon/types/GuiceInjectorUtilsForTests.java| 117 
 22 files changed, 1209 insertions(+), 356 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
index f393ed9..7992dad 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
@@ -81,7 +81,7 @@ public abstract class Storage {
 
   /**
* Gets the path of the Storage dir.
-   * @return Stoarge dir path
+   * @return Storage dir path
*/
   public String getStorageDir() {
 return storageDir.getAbsoluteFile().toString();
@@ -117,7 +117,7 @@ public abstract class Storage {
   }
 
   /**
-   * Retreives the storageInfo instance to read/write the common
+   * Retrieves the storageInfo instance to read/write the common
* version file properties.
* @return the instance of the storageInfo class
*/
@@ -128,7 +128,7 @@ public abstract class Storage {
   abstract protected Properties getNodeProperties();
 
   /**
-   * Sets the Node properties spaecific to OM/SCM.
+   * Sets the Node properties specific to OM/SCM.
*/
   private void setNodeProperties() {
 Properties nodeProperties = getNodeProperties();
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index c10aa33..219bd29 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -659,7 +659,7 @@
     <value/>
     <tag>OZONE, OM, SCM, CONTAINER, STORAGE, REQUIRED</tag>
     <description>
-      This setting is the fallback location for SCM, OM and DataNodes
+      This setting is the fallback location for SCM, OM, Recon and DataNodes
       to store their metadata. This setting may be used only in test/PoC
       clusters to simplify configuration.
     </description>
@@ -2457,7 +2457,7 @@
     <value/>
     <tag>OZONE, RECON</tag>
     <description>
-      Ozone Recon datbase password.
+      Ozone Recon database password.
     </description>
   </property>
   <property>
@@ -2484,7 +2484,7 @@
     <description>
       The max active connections to the SQL database. The default SQLite
       database only allows single active connection, set this to a
-      resonable value like 10, for external production database.
+      reasonable value like 10, for external production database.
     </description>
   </property>
   <property>
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
index 8a0c317..fa229aa 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
@@ -60,7 +60,7 @@ public class OzoneManagerStarter extends GenericCli {
   public Void call() throws Exception {
 /**
  * This method is invoked only when a sub-command is not called. Therefore
- * if someone runs "ozone om" with

[hadoop] 03/08: HDDS-1611. Evaluate ACL on volume bucket key and prefix to authorize access. Contributed by Ajay Kumar. (#973)

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 84cdacbb2aabd915dc708322d9978b631f58bf0a
Author: Ajay Yadav <7813154+ajay...@users.noreply.github.com>
AuthorDate: Wed Jul 10 11:03:58 2019 -0700

HDDS-1611. Evaluate ACL on volume bucket key and prefix to authorize 
access. Contributed by Ajay Kumar. (#973)

(cherry picked from commit cdb20adfcce22beb4f232f91822b190119d098ce)
---
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java   |   6 +
 .../hdfs/server/diskbalancer/TestDiskBalancer.java |   1 -
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |   1 +
 .../java/org/apache/hadoop/ozone/OzoneAcl.java |  46 +-
 .../hadoop/ozone/om/helpers/OmOzoneAclMap.java |  80 +++-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |   2 +
 .../apache/hadoop/ozone/protocolPB/OMPBHelper.java |  15 +-
 .../ozone/security/acl/IAccessAuthorizer.java  |  15 +-
 .../apache/hadoop/ozone/web/utils/OzoneUtils.java  | 102 -
 .../src/main/proto/OzoneManagerProtocol.proto  |  13 +-
 .../org/apache/hadoop/ozone/TestOzoneAcls.java |   8 +-
 .../src/main/compose/ozonesecure-mr/docker-config  |   1 +
 .../src/main/compose/ozonesecure/docker-config |   5 +-
 .../dist/src/main/smoketest/__init__.robot |   2 +-
 .../src/main/smoketest/basic/ozone-shell.robot |  21 +-
 .../dist/src/main/smoketest/commonlib.robot|   5 +-
 .../dist/src/main/smoketest/createbucketenv.robot  |   2 +-
 .../dist/src/main/smoketest/createmrenv.robot  |   2 +-
 hadoop-ozone/dist/src/main/smoketest/kinit.robot   |   2 +-
 .../dist/src/main/smoketest/ozonefs/ozonefs.robot  |  12 +-
 .../dist/src/main/smoketest/s3/awss3.robot |   2 +-
 .../main/smoketest/security/ozone-secure-fs.robot  |  50 ++-
 .../hadoop/ozone/TestOzoneConfigurationFields.java |   3 +
 .../client/rpc/TestOzoneRpcClientAbstract.java |   5 +-
 .../org/apache/hadoop/ozone/om/TestOmAcls.java |  12 +-
 .../hadoop/ozone/om/TestOmBlockVersioning.java |   6 +-
 .../apache/hadoop/ozone/om/TestOzoneManager.java   |  13 +-
 .../security/acl/TestOzoneNativeAuthorizer.java| 464 +
 .../apache/hadoop/ozone/web/client/TestVolume.java |   4 +-
 .../apache/hadoop/ozone/om/BucketManagerImpl.java  |  46 ++
 .../java/org/apache/hadoop/ozone/om/IOzoneAcl.java |  13 +
 .../org/apache/hadoop/ozone/om/KeyManager.java |   2 +-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  98 -
 .../org/apache/hadoop/ozone/om/OzoneManager.java   | 141 +--
 .../apache/hadoop/ozone/om/PrefixManagerImpl.java  |  39 ++
 .../hadoop/ozone/om/S3BucketManagerImpl.java   |  24 +-
 .../apache/hadoop/ozone/om/VolumeManagerImpl.java  |  66 ++-
 .../protocolPB/OzoneManagerRequestHandler.java |   6 +-
 .../ozone/security/acl/OzoneNativeAuthorizer.java  | 120 ++
 .../hadoop/ozone/security/acl/package-info.java|  22 +
 .../web/ozShell/volume/ListVolumeHandler.java  |   2 +-
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java  |  19 +-
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java|   2 +-
 43 files changed, 1315 insertions(+), 185 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 98b3b56..1c82a7a 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -120,6 +120,10 @@ public final class OzoneConfigKeys {
* */
   public static final String OZONE_ADMINISTRATORS =
   "ozone.administrators";
+  /**
+   * Used only for testing purpose. Results in making every user an admin.
+   * */
+  public static final String OZONE_ADMINISTRATORS_WILDCARD = "*";
 
   public static final String OZONE_CLIENT_PROTOCOL =
   "ozone.client.protocol";
@@ -390,6 +394,8 @@ public final class OzoneConfigKeys {
   "ozone.acl.authorizer.class";
   public static final String OZONE_ACL_AUTHORIZER_CLASS_DEFAULT =
   "org.apache.hadoop.ozone.security.acl.OzoneAccessAuthorizer";
+  public static final String OZONE_ACL_AUTHORIZER_CLASS_NATIVE =
+  "org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer";
   public static final String OZONE_ACL_ENABLED =
   "ozone.acl.enabled";
   public static final boolean OZONE_ACL_ENABLED_DEFAULT =
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index b400391..931bdb4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestD

[hadoop] 04/08: HDDS-1611.[Addendum] Evaluate ACL on volume bucket key and prefix to authorize access. Contributed by Ajay Kumar. (#973)

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 70c42faca9bc9f2247f95affdc1ab133d76fb850
Author: Anu Engineer 
AuthorDate: Wed Jul 10 11:28:18 2019 -0700

HDDS-1611.[Addendum] Evaluate ACL on volume bucket key and prefix to 
authorize access. Contributed by Ajay Kumar. (#973)

Fixes a build break in ozone.

(cherry picked from commit 6872efcabfd8fad5658642baa26df0e74399348b)
---
 .../main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java | 7 ++-
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index d8afb91..bd90b2d 100644
--- 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -43,10 +43,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
-
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
@@ -632,8 +629,8 @@ public class BasicOzoneFileSystem extends FileSystem {
 String key = pathToKey(qualifiedPath);
 FileStatus fileStatus = null;
 try {
-  fileStatus = adapter.getFileStatus(key)
-.makeQualified(uri, qualifiedPath, getUsername(), getUsername());
+  fileStatus = convertFileStatus(
+  adapter.getFileStatus(key, uri, qualifiedPath, getUsername()));
 } catch (OMException ex) {
   if (ex.getResult().equals(OMException.ResultCodes.KEY_NOT_FOUND)) {
 throw new FileNotFoundException("File not found. path:" + f);





[hadoop] 07/08: HDDS-1384. TestBlockOutputStreamWithFailures is failing

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 9be196740cd23ce5123895f7523b7dd0ec4dedcd
Author: Márton Elek 
AuthorDate: Thu Jul 11 12:46:39 2019 +0200

HDDS-1384. TestBlockOutputStreamWithFailures is failing

Closes #1029

(cherry picked from commit 9119ed07ff32143b548316bf69c49695196f8422)
---
 .../common/transport/server/XceiverServerGrpc.java | 37 +++---
 .../transport/server/ratis/XceiverServerRatis.java | 38 ---
 .../apache/hadoop/ozone/TestMiniOzoneCluster.java  | 56 --
 3 files changed, 83 insertions(+), 48 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index 6fe8fd4..e224045 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -21,6 +21,7 @@ package 
org.apache.hadoop.ozone.container.common.transport.server;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerCommandRequestProto;
@@ -51,9 +52,6 @@ import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.ServerSocket;
-import java.net.SocketAddress;
 import java.util.Collections;
 import java.util.List;
 import java.util.UUID;
@@ -70,6 +68,8 @@ public final class XceiverServerGrpc extends XceiverServer {
   private Server server;
   private final ContainerDispatcher storageContainer;
   private boolean isStarted;
+  private DatanodeDetails datanodeDetails;
+
 
   /**
* Constructs a Grpc server class.
@@ -83,25 +83,15 @@ public final class XceiverServerGrpc extends XceiverServer {
 Preconditions.checkNotNull(conf);
 
 this.id = datanodeDetails.getUuid();
+this.datanodeDetails = datanodeDetails;
 this.port = conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
 OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
-// Get an available port on current node and
-// use that as the container port
+
 if (conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT,
 OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) {
-  try (ServerSocket socket = new ServerSocket()) {
-socket.setReuseAddress(true);
-SocketAddress address = new InetSocketAddress(0);
-socket.bind(address);
-this.port = socket.getLocalPort();
-LOG.info("Found a free port for the server : {}", this.port);
-  } catch (IOException e) {
-LOG.error("Unable find a random free port for the server, "
-+ "fallback to use default port {}", this.port, e);
-  }
+  this.port = 0;
 }
-datanodeDetails.setPort(
-DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, port));
+
 NettyServerBuilder nettyServerBuilder =
 ((NettyServerBuilder) ServerBuilder.forPort(port))
 .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE);
@@ -164,6 +154,19 @@ public final class XceiverServerGrpc extends XceiverServer 
{
   public void start() throws IOException {
 if (!isStarted) {
   server.start();
+  int realPort = server.getPort();
+
+  if (port == 0) {
+LOG.info("{} {} is started using port {}", getClass().getSimpleName(),
+this.id, realPort);
+port = realPort;
+  }
+
+  //register the real port to the datanode details.
+  datanodeDetails.setPort(DatanodeDetails
+  .newPort(Name.STANDALONE,
+  realPort));
+
   isStarted = true;
 }
   }
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 246d58a..23c4ea5 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -66,6 +66,7 @@ import org.apache.ratis.proto.RaftProtos;
 import org.apache.ratis.proto.RaftProtos.RoleInfoProt

[hadoop] 05/08: HDDS-1784. Missing HostName and IpAddress in the response of register command.

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit f6135a5e1928bbfacaa7b7e1020b8f2a33edebf3
Author: Nanda kumar 
AuthorDate: Thu Jul 11 19:01:06 2019 +0530

HDDS-1784. Missing HostName and IpAddress in the response of register 
command.

(cherry picked from commit 0f399b0d57875c64f49df3942743111905fd2198)
---
 .../org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java| 2 ++
 1 file changed, 2 insertions(+)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index f07db62..cd78d3d 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -271,6 +271,8 @@ public class SCMDatanodeProtocolServer implements
 .setErrorCode(cmd.getError())
 .setClusterID(cmd.getClusterID())
 .setDatanodeUUID(cmd.getDatanodeUUID())
+.setIpAddress(cmd.getIpAddress())
+.setHostname(cmd.getHostName())
 .build();
   }
 





[hadoop] 02/08: HDDS-1718. Increase Ratis Leader election timeout default. Contributed by Aravindan Vijayan & Siddharth Wagle. (#1062)

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 31556ffd202d4fca85ddbf8800333ee1f6163f47
Author: Mukul Kumar Singh 
AuthorDate: Tue Jul 9 23:17:50 2019 +0530

HDDS-1718. Increase Ratis Leader election timeout default. Contributed by 
Aravindan Vijayan & Siddharth Wagle. (#1062)

(cherry picked from commit 96d05559132630288126d9e66a66ac31617334a4)
---
 .../src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java   | 2 +-
 hadoop-hdds/common/src/main/resources/ozone-default.xml   | 4 ++--
 .../statemachine/commandhandler/TestCloseContainerCommandHandler.java | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index a987399..1213dee 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -147,7 +147,7 @@ public final class ScmConfigKeys {
   "dfs.ratis.leader.election.minimum.timeout.duration";
   public static final TimeDuration
   DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
-  TimeDuration.valueOf(1, TimeUnit.SECONDS);
+  TimeDuration.valueOf(5, TimeUnit.SECONDS);
 
   public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY =
   "dfs.ratis.snapshot.threshold";
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 219bd29..27b02e6 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -270,10 +270,10 @@
   </property>
   <property>
     <name>dfs.ratis.leader.election.minimum.timeout.duration</name>
-    <value>1s</value>
+    <value>5s</value>
     <tag>OZONE, RATIS, MANAGEMENT</tag>
     <description>The minimum timeout duration for ratis leader election.
-      Default is 1s.
+      Default is 5s.
     </description>
   </property>
   <property>
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
index 1f6ed86..f802470 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
@@ -298,7 +298,7 @@ public class TestCloseContainerCommandHandler {
 maxOutstandingRequests,
 TimeDuration.valueOf(3, TimeUnit.SECONDS));
 Assert.assertTrue(client.groupAdd(group, peer.getId()).isSuccess());
-Thread.sleep(2000);
+Thread.sleep(1);
 final ContainerID containerId = ContainerID.valueof(
 random.nextLong() & Long.MAX_VALUE);
 ContainerProtos.ContainerCommandRequestProto.Builder request =
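
Operators who relied on the old 1s default can pin their own value in
ozone-site.xml; a sketch that emits the property element (the key and the
duration format come from the diff above, the 10s value is invented):

# Print a property element to merge into ozone-site.xml by hand.
cat <<'EOF'
<property>
  <name>dfs.ratis.leader.election.minimum.timeout.duration</name>
  <value>10s</value>
</property>
EOF
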





[hadoop] 06/08: HDDS-1754. getContainerWithPipeline fails with PipelineNotFoundException. Contributed by Supratim Deka (#1081)

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 75af39398e914960aafb09e2b7ea05486a1b40eb
Author: supratimdeka <46919641+supratimd...@users.noreply.github.com>
AuthorDate: Fri Jul 12 10:31:48 2019 +0530

HDDS-1754. getContainerWithPipeline fails with PipelineNotFoundException. 
Contributed by Supratim Deka (#1081)

(cherry picked from commit 738fab3bff04ab0128146b401b4978d3d60ec97f)
---
 .../java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java  | 8 
 .../apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java| 2 +-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
index 7b5c467..0ecfdac 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
@@ -467,4 +467,12 @@ public class ContainerInfo implements 
Comparator,
 return state == HddsProtos.LifeCycleState.OPEN
 || state == HddsProtos.LifeCycleState.CLOSING;
   }
+
+  /**
+   * Check if a container is in Open state, but Close has not been initiated.
+   * @return true if Open, false otherwise.
+   */
+  public boolean isOpenNotClosing() {
+return state == HddsProtos.LifeCycleState.OPEN;
+  }
 }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 6a875e6..769f3ef 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -238,7 +238,7 @@ public class SCMClientProtocolServer implements
   getContainer(id);
   final Pipeline pipeline;
 
-  if (container.isOpen()) {
+  if (container.isOpenNotClosing()) {
 // Ratis pipeline
 pipeline = scm.getPipelineManager()
 .getPipeline(container.getPipelineID());





[hadoop] branch ozone-0.4.1 updated (5b99872 -> 9b3c034)

2019-07-15 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a change to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 5b99872  HDDS-1791. Update network-tests/src/test/blockade/README.md 
file
 new 758d756  HDDS-1705. Recon: Add estimatedTotalCount to the response of 
containers and containers/{id} endpoints. Contributed by Vivek Ratnavel 
Subramanian.
 new 31556ff  HDDS-1718. Increase Ratis Leader election timeout default. 
Contributed by Aravindan Vijayan & Siddharth Wagle. (#1062)
 new 84cdacb  HDDS-1611. Evaluate ACL on volume bucket key and prefix to 
authorize access. Contributed by Ajay Kumar. (#973)
 new 70c42fa  HDDS-1611.[Addendum] Evaluate ACL on volume bucket key and 
prefix to authorize access. Contributed by Ajay Kumar. (#973)
 new f6135a5  HDDS-1784. Missing HostName and IpAddress in the response of 
register command.
 new 75af393  HDDS-1754. getContainerWithPipeline fails with 
PipelineNotFoundException. Contributed by Supratim Deka (#1081)
 new 9be1967  HDDS-1384. TestBlockOutputStreamWithFailures is failing
 new 9b3c034  HDDS-1766. ContainerStateMachine is unable to increment 
lastAppliedTermIndex. Contributed by  Mukul Kumar Singh. (#1072)

The 8 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |   2 +-
 .../hadoop/hdds/scm/container/ContainerInfo.java   |   8 +
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java   |   6 +
 .../org/apache/hadoop/ozone/common/Storage.java|   6 +-
 .../common/src/main/resources/ozone-default.xml|  10 +-
 .../common/transport/server/XceiverServerGrpc.java |  37 +-
 .../server/ratis/ContainerStateMachine.java|  40 +-
 .../transport/server/ratis/XceiverServerRatis.java |  38 +-
 .../TestCloseContainerCommandHandler.java  |   2 +-
 .../hdds/scm/server/SCMClientProtocolServer.java   |   2 +-
 .../hdds/scm/server/SCMDatanodeProtocolServer.java |   2 +
 .../hdfs/server/diskbalancer/TestDiskBalancer.java |   1 -
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |   1 +
 .../java/org/apache/hadoop/ozone/OzoneAcl.java |  46 +-
 .../hadoop/ozone/om/helpers/OmOzoneAclMap.java |  80 +++-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |   2 +
 .../apache/hadoop/ozone/protocolPB/OMPBHelper.java |  15 +-
 .../ozone/security/acl/IAccessAuthorizer.java  |  15 +-
 .../apache/hadoop/ozone/web/utils/OzoneUtils.java  | 102 -
 .../src/main/proto/OzoneManagerProtocol.proto  |  13 +-
 .../org/apache/hadoop/ozone/TestOzoneAcls.java |   8 +-
 .../src/main/compose/ozonesecure-mr/docker-config  |   1 +
 .../src/main/compose/ozonesecure/docker-config |   5 +-
 .../dist/src/main/smoketest/__init__.robot |   2 +-
 .../src/main/smoketest/basic/ozone-shell.robot |  21 +-
 .../dist/src/main/smoketest/commonlib.robot|   5 +-
 .../dist/src/main/smoketest/createbucketenv.robot  |   2 +-
 .../dist/src/main/smoketest/createmrenv.robot  |   2 +-
 hadoop-ozone/dist/src/main/smoketest/kinit.robot   |   2 +-
 .../dist/src/main/smoketest/ozonefs/ozonefs.robot  |  12 +-
 .../dist/src/main/smoketest/s3/awss3.robot |   2 +-
 .../main/smoketest/security/ozone-secure-fs.robot  |  50 ++-
 .../apache/hadoop/ozone/TestMiniOzoneCluster.java  |  56 ++-
 .../hadoop/ozone/TestOzoneConfigurationFields.java |   3 +
 .../client/rpc/TestOzoneRpcClientAbstract.java |   5 +-
 .../org/apache/hadoop/ozone/om/TestOmAcls.java |  12 +-
 .../hadoop/ozone/om/TestOmBlockVersioning.java |   6 +-
 .../apache/hadoop/ozone/om/TestOzoneManager.java   |  13 +-
 .../security/acl/TestOzoneNativeAuthorizer.java| 464 +
 .../apache/hadoop/ozone/web/client/TestVolume.java |   4 +-
 .../apache/hadoop/ozone/om/BucketManagerImpl.java  |  46 ++
 .../java/org/apache/hadoop/ozone/om/IOzoneAcl.java |  13 +
 .../org/apache/hadoop/ozone/om/KeyManager.java |   2 +-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  98 -
 .../org/apache/hadoop/ozone/om/OzoneManager.java   | 141 +--
 .../hadoop/ozone/om/OzoneManagerStarter.java   |   2 +-
 .../apache/hadoop/ozone/om/PrefixManagerImpl.java  |  39 ++
 .../hadoop/ozone/om/S3BucketManagerImpl.java   |  24 +-
 .../apache/hadoop/ozone/om/VolumeManagerImpl.java  |  66 ++-
 .../protocolPB/OzoneManagerRequestHandler.java |   6 +-
 .../ozone/security/acl/OzoneNativeAuthorizer.java  | 120 ++
 .../hadoop/ozone/security/acl/package-info.java|   2 +-
 .../web/ozShell/volume/ListVolumeHandler.java  |   2 +-
 .../recon/codegen/ReconSchemaGenerationModule.java |   2 +
 ...aDefinition.java => StatsSchemaDefinition.java} |  38 +-
 .../apache/hadoop/ozone/recon/ReconConstants.java  |   8 +-
 .../org/apache/hadoop/o

[hadoop] branch submarine-0.2 updated: SUBMARINE-62. PS_LAUNCH_CMD CLI description is wrong in RunJobCli. Contributed by Adam Antal

2019-07-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch submarine-0.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/submarine-0.2 by this push:
 new cc382eb  SUBMARINE-62. PS_LAUNCH_CMD CLI description is wrong in 
RunJobCli. Contributed by Adam Antal
cc382eb is described below

commit cc382eb163af3a2621c39682e1d6c91ff556fde9
Author: Szilard Nemeth 
AuthorDate: Mon Jul 15 11:17:16 2019 +0200

SUBMARINE-62. PS_LAUNCH_CMD CLI description is wrong in RunJobCli. 
Contributed by Adam Antal

(cherry picked from commit be784de2d4c8d7ae2724cf348925a0fbdbe0c503)
---
 .../submarine/client/cli/runjob/RunJobCli.java | 149 -
 1 file changed, 88 insertions(+), 61 deletions(-)

diff --git 
a/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/runjob/RunJobCli.java
 
b/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/runjob/RunJobCli.java
index 7b544c1..dfd951f 100644
--- 
a/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/runjob/RunJobCli.java
+++ 
b/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/runjob/RunJobCli.java
@@ -71,13 +71,22 @@ import java.util.Map;
 public class RunJobCli extends AbstractCli {
   private static final Logger LOG =
   LoggerFactory.getLogger(RunJobCli.class);
+
+  private static final String TENSORFLOW = "TensorFlow";
+  private static final String PYTORCH = "PyTorch";
+  private static final String PS = "PS";
+  private static final String WORKER = "worker";
+  private static final String TENSORBOARD = "TensorBoard";
+
   private static final String CAN_BE_USED_WITH_TF_PYTORCH =
-  "Can be used with TensorFlow or PyTorch frameworks.";
-  private static final String CAN_BE_USED_WITH_TF_ONLY =
-  "Can only be used with TensorFlow framework.";
+  String.format("Can be used with %s or %s frameworks.",
+  TENSORFLOW, PYTORCH);
+  private static final String TENSORFLOW_ONLY =
+  String.format("Can only be used with %s framework.", TENSORFLOW);
   public static final String YAML_PARSE_FAILED = "Failed to parse " +
   "YAML config";
-
+  private static final String LOCAL_OR_ANY_FS_DIRECTORY = "Could be a local " +
+  "directory or any other directory on the file system.";
 
   private Options options;
   private JobSubmitter jobSubmitter;
@@ -112,50 +121,55 @@ public class RunJobCli extends AbstractCli {
 Framework.getValues()));
 options.addOption(CliConstants.NAME, true, "Name of the job");
 options.addOption(CliConstants.INPUT_PATH, true,
-"Input of the job, could be local or other FS directory");
+"Input of the job. " + LOCAL_OR_ANY_FS_DIRECTORY);
 options.addOption(CliConstants.CHECKPOINT_PATH, true,
-"Training output directory of the job, "
-+ "could be local or other FS directory. This typically includes "
-+ "checkpoint files and exported model ");
+"Training output directory of the job. " + LOCAL_OR_ANY_FS_DIRECTORY +
+"This typically includes checkpoint files and exported model");
 options.addOption(CliConstants.SAVED_MODEL_PATH, true,
-"Model exported path (savedmodel) of the job, which is needed when "
-+ "exported model is not placed under ${checkpoint_path}"
-+ "could be local or other FS directory. " +
-"This will be used to serve.");
+"Model exported path (saved model) of the job, which is needed when " +
+"exported model is not placed under ${checkpoint_path}. " +
+LOCAL_OR_ANY_FS_DIRECTORY + "This will be used to serve");
 options.addOption(CliConstants.DOCKER_IMAGE, true, "Docker image 
name/tag");
+options.addOption(CliConstants.PS_DOCKER_IMAGE, true,
+getDockerImageMessage(PS));
+options.addOption(CliConstants.WORKER_DOCKER_IMAGE, true,
+getDockerImageMessage(WORKER));
 options.addOption(CliConstants.QUEUE, true,
-"Name of queue to run the job, by default it uses default queue");
+"Name of queue to run the job. By default, the default queue is used");
 
 addWorkerOptions(options);
 addPSOptions(options);
 addTensorboardOptions(options);
 
 options.addOption(CliConstants.ENV, true,
-"Common environment variable of worker/ps");
+"Common environment variable passed to worker / PS");
 options.addOption(CliConstants.VERBOSE, false,
 "Print verbose log for troubleshooting");
 options.addOption(CliConstants.WAIT_JOB_FINISH, false,
-"Specified when user want to wait the job finish");
-options.addOption(CliConstants.QUICKLINK, true, "Specify quicklink so YARN"
-+ "web UI shows link to given role instance and port. When "
-+ "--

[hadoop] branch branch-3.1 updated: YARN-9127. Create more tests to verify GpuDeviceInformationParser. Contributed by Peter Bacsko

2019-07-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 30c7b43  YARN-9127. Create more tests to verify 
GpuDeviceInformationParser. Contributed by Peter Bacsko
30c7b43 is described below

commit 30c7b432276bf66cc6b8a88305cbb0cee87f62e9
Author: Szilard Nemeth 
AuthorDate: Mon Jul 15 11:59:11 2019 +0200

YARN-9127. Create more tests to verify GpuDeviceInformationParser. 
Contributed by Peter Bacsko

(cherry picked from commit 18ee1092b471c5337f05809f8f01dae415e51a3a)
---
 .../resource-types-error-redefine-fpga-unit.xml|  45 ++
 .../resource-types-error-redefine-gpu-unit.xml |  45 ++
 .../resourceplugin/gpu/GpuDiscoverer.java  |   2 +-
 .../webapp/dao/gpu/GpuDeviceInformation.java   |  18 +-
 .../webapp/dao/gpu/GpuDeviceInformationParser.java |  48 +-
 .../webapp/dao/gpu/PerGpuDeviceInformation.java|   1 -
 .../webapp/dao/gpu/PerGpuMemoryUsage.java  |   7 +-
 .../dao/gpu/TestGpuDeviceInformationParser.java| 157 +-
 .../test/resources/nvidia-smi-output-excerpt.xml   |  71 +++
 .../resources/nvidia-smi-output-missing-tags.xml   |  28 ++
 .../resources/nvidia-smi-output-missing-tags2.xml  |  61 +++
 .../test/resources/nvidia-smi-sample-output.xml| 547 +
 .../test/resources/nvidia-smi-sample-xml-output| 547 -
 13 files changed, 977 insertions(+), 600 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
new file mode 100644
index 000..72cfb98
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
@@ -0,0 +1,45 @@
+
+
+
+
+
+
+
+<configuration>
+ <property>
+   <name>yarn.resource-types</name>
+   <value>yarn.io/fpga</value>
+ </property>
+
+ <property>
+   <name>yarn.resource-types.yarn.io/fpga.units</name>
+   <value>G</value>
+ </property>
+
+</configuration>
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
new file mode 100644
index 000..aa61b5f
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<configuration>
+ <property>
+   <name>yarn.resource-types</name>
+   <value>yarn.io/gpu</value>
+ </property>
+
+ <property>
+   <name>yarn.resource-types.yarn.io/gpu.units</name>
+   <value>G</value>
+ </property>
+</configuration>
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
index 27a4ea1..ce76722 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
@@ -64,7 +64,6 @@ public class GpuDiscoverer {
   private Configuration conf = null;
   private String pathOfGpuBinary = null;
   private Map<String, String> environment = new HashMap<>();
-  private GpuDeviceInformationParser parser = new GpuDeviceInformationParser();
 
   private int numOfErrorExecutionSinceLastSucceed = 0;
   private GpuDeviceInformation lastDiscoveredGpuInformation = null;
@@ -125,6 +124,7 @@ public class GpuDiscoverer {
 try {
   output = Shell.execCommand(environment,
   new String[] { pathOfGpuBinary, "-x", "-q" }, MAX_EXEC_TIMEOUT_MS);
+  GpuDeviceInformationParser parser = new GpuDeviceInformationParser();
   lastDiscoveredGpuInformation = parser.parseXml(output);
   numOfErrorExecutionSinceLastSucceed = 0;
   return lastDiscoveredGpuInformation;
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/GpuDeviceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/GpuDeviceInformation.java
index 837d5cc..c830d43 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/

[hadoop] branch branch-3.2 updated: YARN-9127. Create more tests to verify GpuDeviceInformationParser. Contributed by Peter Bacsko

2019-07-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 28d6a45  YARN-9127. Create more tests to verify 
GpuDeviceInformationParser. Contributed by Peter Bacsko
28d6a45 is described below

commit 28d6a453a9d5b1ec12a1b5ec4f21cf275f01d3d4
Author: Szilard Nemeth 
AuthorDate: Mon Jul 15 11:59:11 2019 +0200

YARN-9127. Create more tests to verify GpuDeviceInformationParser. 
Contributed by Peter Bacsko

(cherry picked from commit 18ee1092b471c5337f05809f8f01dae415e51a3a)
---
 .../resource-types-error-redefine-fpga-unit.xml|  45 ++
 .../resource-types-error-redefine-gpu-unit.xml |  45 ++
 .../resourceplugin/gpu/GpuDiscoverer.java  |   2 +-
 .../webapp/dao/gpu/GpuDeviceInformation.java   |  18 +-
 .../webapp/dao/gpu/GpuDeviceInformationParser.java |  48 +-
 .../webapp/dao/gpu/PerGpuDeviceInformation.java|   1 -
 .../webapp/dao/gpu/PerGpuMemoryUsage.java  |   7 +-
 .../dao/gpu/TestGpuDeviceInformationParser.java| 157 +-
 .../test/resources/nvidia-smi-output-excerpt.xml   |  71 +++
 .../resources/nvidia-smi-output-missing-tags.xml   |  28 ++
 .../resources/nvidia-smi-output-missing-tags2.xml  |  61 +++
 .../test/resources/nvidia-smi-sample-output.xml| 547 +
 .../test/resources/nvidia-smi-sample-xml-output| 547 -
 13 files changed, 977 insertions(+), 600 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
new file mode 100644
index 000..72cfb98
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<configuration>
+ <property>
+   <name>yarn.resource-types</name>
+   <value>yarn.io/fpga</value>
+ </property>
+
+ <property>
+   <name>yarn.resource-types.yarn.io/fpga.units</name>
+   <value>G</value>
+ </property>
+</configuration>
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
new file mode 100644
index 000..aa61b5f
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<configuration>
+ <property>
+   <name>yarn.resource-types</name>
+   <value>yarn.io/gpu</value>
+ </property>
+
+ <property>
+   <name>yarn.resource-types.yarn.io/gpu.units</name>
+   <value>G</value>
+ </property>
+</configuration>
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
index 27a4ea1..ce76722 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
@@ -64,7 +64,6 @@ public class GpuDiscoverer {
   private Configuration conf = null;
   private String pathOfGpuBinary = null;
   private Map<String, String> environment = new HashMap<>();
-  private GpuDeviceInformationParser parser = new GpuDeviceInformationParser();
 
   private int numOfErrorExecutionSinceLastSucceed = 0;
   private GpuDeviceInformation lastDiscoveredGpuInformation = null;
@@ -125,6 +124,7 @@ public class GpuDiscoverer {
 try {
   output = Shell.execCommand(environment,
   new String[] { pathOfGpuBinary, "-x", "-q" }, MAX_EXEC_TIMEOUT_MS);
+  GpuDeviceInformationParser parser = new GpuDeviceInformationParser();
   lastDiscoveredGpuInformation = parser.parseXml(output);
   numOfErrorExecutionSinceLastSucceed = 0;
   return lastDiscoveredGpuInformation;
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/GpuDeviceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/GpuDeviceInformation.java
index 837d5cc..c830d43 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/

[hadoop] branch trunk updated: YARN-9127. Create more tests to verify GpuDeviceInformationParser. Contributed by Peter Bacsko

2019-07-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 18ee109  YARN-9127. Create more tests to verify 
GpuDeviceInformationParser. Contributed by Peter Bacsko
18ee109 is described below

commit 18ee1092b471c5337f05809f8f01dae415e51a3a
Author: Szilard Nemeth 
AuthorDate: Mon Jul 15 11:59:11 2019 +0200

YARN-9127. Create more tests to verify GpuDeviceInformationParser. 
Contributed by Peter Bacsko
---
 .../resource-types-error-redefine-fpga-unit.xml|  45 ++
 .../resource-types-error-redefine-gpu-unit.xml |  45 ++
 .../resourceplugin/gpu/GpuDiscoverer.java  |   2 +-
 .../webapp/dao/gpu/GpuDeviceInformation.java   |  18 +-
 .../webapp/dao/gpu/GpuDeviceInformationParser.java |  48 +-
 .../webapp/dao/gpu/PerGpuDeviceInformation.java|   1 -
 .../webapp/dao/gpu/PerGpuMemoryUsage.java  |   7 +-
 .../dao/gpu/TestGpuDeviceInformationParser.java| 157 +-
 .../test/resources/nvidia-smi-output-excerpt.xml   |  71 +++
 .../resources/nvidia-smi-output-missing-tags.xml   |  28 ++
 .../resources/nvidia-smi-output-missing-tags2.xml  |  61 +++
 .../test/resources/nvidia-smi-sample-output.xml| 547 +
 .../test/resources/nvidia-smi-sample-xml-output| 547 -
 13 files changed, 977 insertions(+), 600 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
new file mode 100644
index 000..72cfb98
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<configuration>
+ <property>
+   <name>yarn.resource-types</name>
+   <value>yarn.io/fpga</value>
+ </property>
+
+ <property>
+   <name>yarn.resource-types.yarn.io/fpga.units</name>
+   <value>G</value>
+ </property>
+</configuration>
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
new file mode 100644
index 000..aa61b5f
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<configuration>
+ <property>
+   <name>yarn.resource-types</name>
+   <value>yarn.io/gpu</value>
+ </property>
+
+ <property>
+   <name>yarn.resource-types.yarn.io/gpu.units</name>
+   <value>G</value>
+ </property>
+</configuration>
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
index b52d767..986f84a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
@@ -64,7 +64,6 @@ public class GpuDiscoverer {
   private Configuration conf = null;
   private String pathOfGpuBinary = null;
   private Map<String, String> environment = new HashMap<>();
-  private GpuDeviceInformationParser parser = new GpuDeviceInformationParser();
 
   private int numOfErrorExecutionSinceLastSucceed = 0;
   private GpuDeviceInformation lastDiscoveredGpuInformation = null;
@@ -125,6 +124,7 @@ public class GpuDiscoverer {
 try {
   output = Shell.execCommand(environment,
   new String[] { pathOfGpuBinary, "-x", "-q" }, MAX_EXEC_TIMEOUT_MS);
+  GpuDeviceInformationParser parser = new GpuDeviceInformationParser();
   lastDiscoveredGpuInformation = parser.parseXml(output);
   numOfErrorExecutionSinceLastSucceed = 0;
   return lastDiscoveredGpuInformation;
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/GpuDeviceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/GpuDeviceInformation.java
index 837d5cc..c830d43 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/GpuDeviceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hado

[hadoop] branch trunk updated: SUBMARINE-62. PS_LAUNCH_CMD CLI description is wrong in RunJobCli. Contributed by Adam Antal

2019-07-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new be784de  SUBMARINE-62. PS_LAUNCH_CMD CLI description is wrong in 
RunJobCli. Contributed by Adam Antal
be784de is described below

commit be784de2d4c8d7ae2724cf348925a0fbdbe0c503
Author: Szilard Nemeth 
AuthorDate: Mon Jul 15 11:17:16 2019 +0200

SUBMARINE-62. PS_LAUNCH_CMD CLI description is wrong in RunJobCli. 
Contributed by Adam Antal
---
 .../submarine/client/cli/runjob/RunJobCli.java | 149 -
 1 file changed, 88 insertions(+), 61 deletions(-)

diff --git 
a/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/runjob/RunJobCli.java
 
b/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/runjob/RunJobCli.java
index 7b544c1..dfd951f 100644
--- 
a/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/runjob/RunJobCli.java
+++ 
b/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/runjob/RunJobCli.java
@@ -71,13 +71,22 @@ import java.util.Map;
 public class RunJobCli extends AbstractCli {
   private static final Logger LOG =
   LoggerFactory.getLogger(RunJobCli.class);
+
+  private static final String TENSORFLOW = "TensorFlow";
+  private static final String PYTORCH = "PyTorch";
+  private static final String PS = "PS";
+  private static final String WORKER = "worker";
+  private static final String TENSORBOARD = "TensorBoard";
+
   private static final String CAN_BE_USED_WITH_TF_PYTORCH =
-  "Can be used with TensorFlow or PyTorch frameworks.";
-  private static final String CAN_BE_USED_WITH_TF_ONLY =
-  "Can only be used with TensorFlow framework.";
+  String.format("Can be used with %s or %s frameworks.",
+  TENSORFLOW, PYTORCH);
+  private static final String TENSORFLOW_ONLY =
+  String.format("Can only be used with %s framework.", TENSORFLOW);
   public static final String YAML_PARSE_FAILED = "Failed to parse " +
   "YAML config";
-
+  private static final String LOCAL_OR_ANY_FS_DIRECTORY = "Could be a local " +
+  "directory or any other directory on the file system.";
 
   private Options options;
   private JobSubmitter jobSubmitter;
@@ -112,50 +121,55 @@ public class RunJobCli extends AbstractCli {
 Framework.getValues()));
 options.addOption(CliConstants.NAME, true, "Name of the job");
 options.addOption(CliConstants.INPUT_PATH, true,
-"Input of the job, could be local or other FS directory");
+"Input of the job. " + LOCAL_OR_ANY_FS_DIRECTORY);
 options.addOption(CliConstants.CHECKPOINT_PATH, true,
-"Training output directory of the job, "
-+ "could be local or other FS directory. This typically includes "
-+ "checkpoint files and exported model ");
+"Training output directory of the job. " + LOCAL_OR_ANY_FS_DIRECTORY +
+"This typically includes checkpoint files and exported model");
 options.addOption(CliConstants.SAVED_MODEL_PATH, true,
-"Model exported path (savedmodel) of the job, which is needed when "
-+ "exported model is not placed under ${checkpoint_path}"
-+ "could be local or other FS directory. " +
-"This will be used to serve.");
+"Model exported path (saved model) of the job, which is needed when " +
+"exported model is not placed under ${checkpoint_path}. " +
+LOCAL_OR_ANY_FS_DIRECTORY + " This will be used to serve");
 options.addOption(CliConstants.DOCKER_IMAGE, true, "Docker image 
name/tag");
+options.addOption(CliConstants.PS_DOCKER_IMAGE, true,
+getDockerImageMessage(PS));
+options.addOption(CliConstants.WORKER_DOCKER_IMAGE, true,
+getDockerImageMessage(WORKER));
 options.addOption(CliConstants.QUEUE, true,
-"Name of queue to run the job, by default it uses default queue");
+"Name of queue to run the job. By default, the default queue is used");
 
 addWorkerOptions(options);
 addPSOptions(options);
 addTensorboardOptions(options);
 
 options.addOption(CliConstants.ENV, true,
-"Common environment variable of worker/ps");
+"Common environment variable passed to worker / PS");
 options.addOption(CliConstants.VERBOSE, false,
 "Print verbose log for troubleshooting");
 options.addOption(CliConstants.WAIT_JOB_FINISH, false,
-"Specified when user want to wait the job finish");
-options.addOption(CliConstants.QUICKLINK, true, "Specify quicklink so YARN"
-+ "web UI shows link to given role instance and port. When "
-+ "--tensorboard is specified, quicklink to tensorboard instance will "
-+ "be added automa

[hadoop] branch trunk updated: YARN-9360. Do not expose innards of QueueMetrics object into FSLeafQueue#computeMaxAMResource. Contributed by Peter Bacsko

2019-07-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 91ce09e  YARN-9360. Do not expose innards of QueueMetrics object into 
FSLeafQueue#computeMaxAMResource. Contributed by Peter Bacsko
91ce09e is described below

commit 91ce09e7065bacd7b4f09696fff35b789c52bcd7
Author: Szilard Nemeth 
AuthorDate: Mon Jul 15 10:47:10 2019 +0200

YARN-9360. Do not expose innards of QueueMetrics object into 
FSLeafQueue#computeMaxAMResource. Contributed by Peter Bacsko
---
 .../resourcemanager/scheduler/QueueMetrics.java| 34 --
 .../scheduler/fair/FSLeafQueue.java| 33 -
 2 files changed, 38 insertions(+), 29 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index d126f09..c126338 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -833,7 +833,37 @@ public class QueueMetrics implements MetricsSource {
 return aggregateContainersPreempted.value();
   }
 
-  public QueueMetricsForCustomResources getQueueMetricsForCustomResources() {
-return queueMetricsForCustomResources;
+  /**
+   * Fills in Resource values from available metrics values of custom resources
+   * to {@code targetResource}, only if the corresponding value of
+   * {@code targetResource} is zero. If {@code fromResource} has a smaller
+   * value than the available metrics value for a particular resource, the
+   * smaller value is stored in {@code targetResource} instead.
+   *
+   * @param fromResource The resource to compare available resource values with.
+   * @param targetResource The resource to save the values into.
+   */
+  public void fillInValuesFromAvailableResources(Resource fromResource,
+  Resource targetResource) {
+if (queueMetricsForCustomResources != null) {
+  QueueMetricsCustomResource availableResources =
+  queueMetricsForCustomResources.getAvailable();
+
+  // We expect all custom resources to be contained in availableResources,
+  // so we will loop through all of them.
+  for (Map.Entry<String, Long> availableEntry : availableResources
+  .getValues().entrySet()) {
+String resourceName = availableEntry.getKey();
+
+// We only update the value if fairshare is 0 for that resource.
+if (targetResource.getResourceValue(resourceName) == 0) {
+  Long availableValue = availableEntry.getValue();
+  long value = Math.min(availableValue,
+  fromResource.getResourceValue(resourceName));
+  targetResource.setResourceValue(resourceName, value);
+}
+  }
+}
   }
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 361355b..afea3d5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -23,7 +23,6 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
@@ -43,8 +42,6 @@ import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetricsCustomResource;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetricsForCustomResources;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppUtils;
 import 
org.apache.had
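
The FSLeafQueue side of this change is truncated above. The following
self-contained sketch models what fillInValuesFromAvailableResources does,
with plain maps standing in for the Resource and QueueMetrics types (all
names and values here are illustrative assumptions):

import java.util.HashMap;
import java.util.Map;

public class FillInSketch {
  public static void main(String[] args) {
    Map<String, Long> available = new HashMap<>();
    available.put("yarn.io/gpu", 8L);

    Map<String, Long> from = new HashMap<>();
    from.put("yarn.io/gpu", 4L);

    Map<String, Long> target = new HashMap<>();
    target.put("yarn.io/gpu", 0L);

    // For each custom resource, a zero value in target is replaced by
    // min(available, from), mirroring the loop in the method above.
    for (Map.Entry<String, Long> e : available.entrySet()) {
      String name = e.getKey();
      if (target.get(name) == 0L) {
        target.put(name, Math.min(e.getValue(), from.get(name)));
      }
    }
    System.out.println(target); // prints {yarn.io/gpu=4}
  }
}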

[hadoop] branch trunk updated: HDFS-14357. Update documentation for HDFS cache on SCM support. Contributed by Feilong He.

2019-07-15 Thread rakeshr
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 30a8f84  HDFS-14357. Update documentation for HDFS cache on SCM 
support. Contributed by Feilong He.
30a8f84 is described below

commit 30a8f840f1572129fe7d02f8a784c47ab57ce89a
Author: Rakesh Radhakrishnan 
AuthorDate: Mon Jul 15 13:18:23 2019 +0530

HDFS-14357. Update documentation for HDFS cache on SCM support. Contributed 
by Feilong He.
---
 .../src/site/markdown/CentralizedCacheManagement.md| 18 +-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index 7568949..8880ea5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -32,6 +32,8 @@ Centralized cache management in HDFS has many significant 
advantages.
 
 4.  Centralized caching can improve overall cluster memory utilization. When 
relying on the OS buffer cache at each DataNode, repeated reads of a block will 
result in all *n* replicas of the block being pulled into buffer cache. With 
centralized cache management, a user can explicitly pin only *m* of the *n* 
replicas, saving *n-m* memory.
 
+5.  HDFS supports a non-volatile storage class memory (SCM, also known as 
persistent memory) cache on the Linux platform. Users can enable either the 
memory cache or the SCM cache for a DataNode, and the two kinds of cache can 
coexist across DataNodes. In the current implementation, cached data in SCM is 
cleaned up when the DataNode restarts. Persistent HDFS cache support on SCM 
will be considered in the future.
+
 Use Cases
 -
 
@@ -200,11 +202,21 @@ Configuration
 
 In order to lock block files into memory, the DataNode relies on native JNI 
code found in `libhadoop.so` or `hadoop.dll` on Windows. Be sure to [enable 
JNI](../hadoop-common/NativeLibraries.html) if you are using HDFS centralized 
cache management.
 
+Currently, there are two implementations of the persistent memory cache. The 
default is a pure Java-based implementation; the other is a native 
implementation that leverages the PMDK library to improve the performance of 
cache writes and cache reads.
+
+To enable the PMDK-based implementation, follow the steps below.
+
+1. Install the PMDK library. Refer to the official site http://pmem.io/ for 
detailed information.
+
+2. Build Hadoop with PMDK support. Refer to the "PMDK library build 
options" section in `BUILDING.txt` in the source code.
+
+To verify that PMDK has been correctly detected by Hadoop, run the `hadoop 
checknative` command.
+
 ### Configuration Properties
 
  Required
 
-Be sure to configure the following:
+Be sure to configure one of the following properties, depending on whether you 
use the DRAM cache or the persistent memory cache. Note that the two caches 
cannot coexist on a single DataNode.
 
 *   dfs.datanode.max.locked.memory
 
@@ -212,6 +224,10 @@ Be sure to configure the following:
 
 This setting is shared with the [Lazy Persist Writes 
feature](./MemoryStorage.html). The Data Node will ensure that the combined 
memory used by Lazy Persist Writes and Centralized Cache Management does not 
exceed the amount configured in `dfs.datanode.max.locked.memory`.
 
+*   dfs.datanode.cache.pmem.dirs
+
+This property specifies the persistent memory volumes used for caching. 
Multiple volumes should be separated by ",", e.g. "/mnt/pmem0, 
/mnt/pmem1". The default value is empty. If this property is configured, the 
volume capacity is detected automatically and there is no need to configure 
`dfs.datanode.max.locked.memory`.
+
  Optional
 
 The following properties are not required, but may be specified for tuning:
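
As a concrete illustration of the required dfs.datanode.cache.pmem.dirs 
property described above, here is a minimal sketch that sets the pmem cache 
volumes programmatically (the mount points /mnt/pmem0 and /mnt/pmem1 are 
assumptions; in a real deployment the property normally goes into 
hdfs-site.xml):

import org.apache.hadoop.conf.Configuration;

public class PmemCacheConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // List the persistent memory volumes; per the documentation above, the
    // volume capacity is then detected automatically and
    // dfs.datanode.max.locked.memory need not be set.
    conf.set("dfs.datanode.cache.pmem.dirs", "/mnt/pmem0,/mnt/pmem1");
    System.out.println(conf.get("dfs.datanode.cache.pmem.dirs"));
  }
}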





[hadoop] branch trunk updated: HDFS-14458. Report pmem stats to namenode. Contributed by Feilong He.

2019-07-15 Thread rakeshr
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e98adb0  HDFS-14458. Report pmem stats to namenode. Contributed by 
Feilong He.
e98adb0 is described below

commit e98adb00b7da8fa913b86ecf2049444b1d8617d4
Author: Rakesh Radhakrishnan 
AuthorDate: Mon Jul 15 13:02:37 2019 +0530

HDFS-14458. Report pmem stats to namenode. Contributed by Feilong He.
---
 .../{MemoryCacheStats.java => CacheStats.java} |  6 +--
 .../datanode/fsdataset/impl/FsDatasetCache.java| 54 ++
 .../datanode/fsdataset/impl/FsDatasetImpl.java |  4 +-
 .../fsdataset/impl/MappableBlockLoader.java|  3 +-
 .../fsdataset/impl/MemoryMappableBlockLoader.java  |  8 ++--
 .../impl/NativePmemMappableBlockLoader.java|  5 +-
 .../fsdataset/impl/PmemMappableBlockLoader.java|  9 +++-
 .../impl/TestCacheByPmemMappableBlockLoader.java   | 14 +++---
 .../fsdataset/impl/TestFsDatasetCache.java |  2 +-
 9 files changed, 45 insertions(+), 60 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryCacheStats.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/CacheStats.java
similarity index 97%
rename from 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryCacheStats.java
rename to 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/CacheStats.java
index d276c27..f79b7c7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryCacheStats.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/CacheStats.java
@@ -27,7 +27,7 @@ import com.google.common.annotations.VisibleForTesting;
 /**
  * Keeps statistics for the memory cache.
  */
-class MemoryCacheStats {
+class CacheStats {
 
   /**
* The approximate amount of cache space in use.
@@ -47,7 +47,7 @@ class MemoryCacheStats {
*/
   private final long maxBytes;
 
-  MemoryCacheStats(long maxBytes) {
+  CacheStats(long maxBytes) {
 this.usedBytesCount = new UsedBytesCount();
 this.maxBytes = maxBytes;
   }
@@ -81,7 +81,7 @@ class MemoryCacheStats {
   private class UsedBytesCount {
 private final AtomicLong usedBytes = new AtomicLong(0);
 
-private MemoryCacheStats.PageRounder rounder = new PageRounder();
+private CacheStats.PageRounder rounder = new PageRounder();
 
 /**
  * Try to reserve more bytes.
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
index 37e548e..1514927 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
@@ -23,7 +23,6 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS_DEFAULT;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
@@ -137,7 +136,7 @@ public class FsDatasetCache {
*/
   private final MappableBlockLoader cacheLoader;
 
-  private final MemoryCacheStats memCacheStats;
+  private final CacheStats memCacheStats;
 
   /**
* Number of cache commands that could not be completed successfully
@@ -178,30 +177,17 @@ public class FsDatasetCache {
   ".  Reconfigure this to " + minRevocationPollingMs);
 }
 this.revocationPollingMs = confRevocationPollingMs;
-// Both lazy writer and read cache are sharing this statistics.
-this.memCacheStats = new MemoryCacheStats(
-dataset.datanode.getDnConf().getMaxLockedMemory());
 
 this.cacheLoader = MappableBlockLoaderFactory.createCacheLoader(
 this.getDnConf());
-cacheLoader.initialize(this);
-  }
-
-  /**
-   * Check if pmem cache is enabled.
-   */
-  private boolean isPmemCacheEnabled() {
-return !cacheLoader.isTransientCache();
+// Both the lazy writer and the read cache share these statistics.
+this.memCacheStats = cacheLoader.initialize(this.getDnConf());
   }
 
   DNConf getDnConf() {
 return this.dataset.datanode.getDnConf();
   }
 
-  MemoryCacheStats getMemCacheStats() {
-r
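
The mail is truncated here, but the fields shown above (an AtomicLong of used
bytes, a page rounder, and a maxBytes cap) imply the usual compare-and-set
reservation loop. A minimal self-contained sketch of that pattern follows;
the 4096-byte page size and the exact method shapes are assumptions for
illustration, not the committed code:

import java.util.concurrent.atomic.AtomicLong;

public class ReserveSketch {
  private static final long PAGE_SIZE = 4096;  // assumed OS page size
  private final AtomicLong usedBytes = new AtomicLong(0);
  private final long maxBytes;

  public ReserveSketch(long maxBytes) {
    this.maxBytes = maxBytes;
  }

  // Round a byte count up to a whole number of pages.
  private static long roundUpToPage(long count) {
    return ((count + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
  }

  // Try to reserve more bytes; return the new total, or -1 if the cap
  // would be exceeded. The CAS loop keeps the update lock-free.
  long reserve(long count) {
    long rounded = roundUpToPage(count);
    while (true) {
      long cur = usedBytes.get();
      long next = cur + rounded;
      if (next > maxBytes) {
        return -1;
      }
      if (usedBytes.compareAndSet(cur, next)) {
        return next;
      }
    }
  }

  public static void main(String[] args) {
    ReserveSketch stats = new ReserveSketch(8192);
    System.out.println(stats.reserve(100));   // 4096
    System.out.println(stats.reserve(5000));  // -1 (5000 rounds to 8192)
  }
}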