This is an automated email from the ASF dual-hosted git repository.

yqlin pushed a commit to branch HDFS-13891
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/HDFS-13891 by this push:
     new c9a6545  HDFS-14206. RBF: Cleanup quota modules. Contributed by Inigo 
Goiri.
c9a6545 is described below

commit c9a65456fc64b16289d62d72a22cf7890605f024
Author: Yiqun Lin <yqlin@apache.org>
AuthorDate: Tue Jan 15 14:21:33 2019 +0800

    HDFS-14206. RBF: Cleanup quota modules. Contributed by Inigo Goiri.
---
 .../hdfs/server/federation/router/Quota.java       |  6 ++--
 .../federation/router/RouterClientProtocol.java    | 22 +++++++-------
 .../federation/router/RouterQuotaManager.java      |  2 +-
 .../router/RouterQuotaUpdateService.java           |  6 ++--
 .../server/federation/router/RouterQuotaUsage.java | 35 ++++++++++++----------
 5 files changed, 38 insertions(+), 33 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
index 5d0309f..cfb538f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
@@ -163,7 +163,7 @@ public class Quota {
     long ssCount = 0;
     long nsQuota = HdfsConstants.QUOTA_RESET;
     long ssQuota = HdfsConstants.QUOTA_RESET;
-    boolean hasQuotaUnSet = false;
+    boolean hasQuotaUnset = false;
 
     for (Map.Entry<RemoteLocation, QuotaUsage> entry : results.entrySet()) {
       RemoteLocation loc = entry.getKey();
@@ -172,7 +172,7 @@ public class Quota {
         // If quota is not set in real FileSystem, the usage
         // value will return -1.
         if (usage.getQuota() == -1 && usage.getSpaceQuota() == -1) {
-          hasQuotaUnSet = true;
+          hasQuotaUnset = true;
         }
         nsQuota = usage.getQuota();
         ssQuota = usage.getSpaceQuota();
@@ -189,7 +189,7 @@ public class Quota {
 
     QuotaUsage.Builder builder = new QuotaUsage.Builder()
         .fileAndDirectoryCount(nsCount).spaceConsumed(ssCount);
-    if (hasQuotaUnSet) {
+    if (hasQuotaUnset) {
       builder.quota(HdfsConstants.QUOTA_RESET)
           .spaceQuota(HdfsConstants.QUOTA_RESET);
     } else {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index 894f5be..0962c0d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.federation.router;
 import static 
org.apache.hadoop.hdfs.server.federation.router.FederationUtil.updateMountPointStatus;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
-import org.apache.hadoop.fs.BatchedRemoteIterator;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
@@ -1140,7 +1140,7 @@ public class RouterClientProtocol implements 
ClientProtocol {
   }
 
   @Override
-  public BatchedRemoteIterator.BatchedEntries<CacheDirectiveEntry> 
listCacheDirectives(
+  public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(
       long prevId, CacheDirectiveInfo filter) throws IOException {
     rpcServer.checkOperation(NameNode.OperationCategory.READ, false);
     return null;
@@ -1162,7 +1162,7 @@ public class RouterClientProtocol implements 
ClientProtocol {
   }
 
   @Override
-  public BatchedRemoteIterator.BatchedEntries<CachePoolEntry> 
listCachePools(String prevKey)
+  public BatchedEntries<CachePoolEntry> listCachePools(String prevKey)
       throws IOException {
     rpcServer.checkOperation(NameNode.OperationCategory.READ, false);
     return null;
@@ -1273,7 +1273,7 @@ public class RouterClientProtocol implements 
ClientProtocol {
   }
 
   @Override
-  public BatchedRemoteIterator.BatchedEntries<EncryptionZone> 
listEncryptionZones(long prevId)
+  public BatchedEntries<EncryptionZone> listEncryptionZones(long prevId)
       throws IOException {
     rpcServer.checkOperation(NameNode.OperationCategory.READ, false);
     return null;
@@ -1286,7 +1286,7 @@ public class RouterClientProtocol implements 
ClientProtocol {
   }
 
   @Override
-  public BatchedRemoteIterator.BatchedEntries<ZoneReencryptionStatus> 
listReencryptionStatus(
+  public BatchedEntries<ZoneReencryptionStatus> listReencryptionStatus(
       long prevId) throws IOException {
     rpcServer.checkOperation(NameNode.OperationCategory.READ, false);
     return null;
@@ -1522,15 +1522,17 @@ public class RouterClientProtocol implements 
ClientProtocol {
 
   @Deprecated
   @Override
-  public BatchedRemoteIterator.BatchedEntries<OpenFileEntry> 
listOpenFiles(long prevId)
+  public BatchedEntries<OpenFileEntry> listOpenFiles(long prevId)
       throws IOException {
-    return listOpenFiles(prevId, 
EnumSet.of(OpenFilesIterator.OpenFilesType.ALL_OPEN_FILES),
+    return listOpenFiles(prevId,
+        EnumSet.of(OpenFilesIterator.OpenFilesType.ALL_OPEN_FILES),
         OpenFilesIterator.FILTER_PATH_DEFAULT);
   }
 
   @Override
-  public BatchedRemoteIterator.BatchedEntries<OpenFileEntry> 
listOpenFiles(long prevId,
-      EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes, String path) 
throws IOException {
+  public BatchedEntries<OpenFileEntry> listOpenFiles(long prevId,
+      EnumSet<OpenFilesIterator.OpenFilesType> openFilesTypes, String path)
+          throws IOException {
     rpcServer.checkOperation(NameNode.OperationCategory.READ, false);
     return null;
   }
@@ -1662,7 +1664,7 @@ public class RouterClientProtocol implements 
ClientProtocol {
     // Get the file info from everybody
     Map<RemoteLocation, HdfsFileStatus> results =
         rpcClient.invokeConcurrent(locations, method, HdfsFileStatus.class);
-    int children=0;
+    int children = 0;
     // We return the first file
     HdfsFileStatus dirStatus = null;
     for (RemoteLocation loc : locations) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java
index fa2a6e4..e818f5a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java
@@ -88,7 +88,7 @@ public class RouterQuotaManager {
   }
 
   /**
-   * Get children paths (can including itself) under specified federation path.
+   * Get children paths (can include itself) under specified federation path.
    * @param parentPath Federated path.
    * @return Set of children paths.
    */
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
index 9bfd705..dd21e1a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
@@ -186,10 +186,8 @@ public class RouterQuotaUpdateService extends 
PeriodicService {
    */
   private List<MountTable> getQuotaSetMountTables() throws IOException {
     List<MountTable> mountTables = getMountTableEntries();
-    Set<String> stalePaths = new HashSet<>();
-    for (String path : this.quotaManager.getAll()) {
-      stalePaths.add(path);
-    }
+    Set<String> allPaths = this.quotaManager.getAll();
+    Set<String> stalePaths = new HashSet<>(allPaths);
 
     List<MountTable> neededMountTables = new LinkedList<>();
     for (MountTable entry : mountTables) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java
index e4728f5..34d9487 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java
@@ -75,9 +75,10 @@ public final class RouterQuotaUsage extends QuotaUsage {
    * @throws NSQuotaExceededException If the quota is exceeded.
    */
   public void verifyNamespaceQuota() throws NSQuotaExceededException {
-    if (Quota.isViolated(getQuota(), getFileAndDirectoryCount())) {
-      throw new NSQuotaExceededException(getQuota(),
-          getFileAndDirectoryCount());
+    long quota = getQuota();
+    long fileAndDirectoryCount = getFileAndDirectoryCount();
+    if (Quota.isViolated(quota, fileAndDirectoryCount)) {
+      throw new NSQuotaExceededException(quota, fileAndDirectoryCount);
     }
   }
 
@@ -87,25 +88,29 @@ public final class RouterQuotaUsage extends QuotaUsage {
    * @throws DSQuotaExceededException If the quota is exceeded.
    */
   public void verifyStoragespaceQuota() throws DSQuotaExceededException {
-    if (Quota.isViolated(getSpaceQuota(), getSpaceConsumed())) {
-      throw new DSQuotaExceededException(getSpaceQuota(), getSpaceConsumed());
+    long spaceQuota = getSpaceQuota();
+    long spaceConsumed = getSpaceConsumed();
+    if (Quota.isViolated(spaceQuota, spaceConsumed)) {
+      throw new DSQuotaExceededException(spaceQuota, spaceConsumed);
     }
   }
 
   @Override
   public String toString() {
-    String nsQuota = String.valueOf(getQuota());
-    String nsCount = String.valueOf(getFileAndDirectoryCount());
-    if (getQuota() == HdfsConstants.QUOTA_RESET) {
-      nsQuota = "-";
-      nsCount = "-";
+    String nsQuota = "-";
+    String nsCount = "-";
+    long quota = getQuota();
+    if (quota != HdfsConstants.QUOTA_RESET) {
+      nsQuota = String.valueOf(quota);
+      nsCount = String.valueOf(getFileAndDirectoryCount());
     }
 
-    String ssQuota = StringUtils.byteDesc(getSpaceQuota());
-    String ssCount = StringUtils.byteDesc(getSpaceConsumed());
-    if (getSpaceQuota() == HdfsConstants.QUOTA_RESET) {
-      ssQuota = "-";
-      ssCount = "-";
+    String ssQuota = "-";
+    String ssCount = "-";
+    long spaceQuota = getSpaceQuota();
+    if (spaceQuota != HdfsConstants.QUOTA_RESET) {
+      ssQuota = StringUtils.byteDesc(spaceQuota);
+      ssCount = StringUtils.byteDesc(getSpaceConsumed());
     }
 
     StringBuilder str = new StringBuilder();


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org

Reply via email to