Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10285 de86f100d -> 44b0b08b1


HDFS-12310: [SPS]: Provide an option to track the status of in-progress requests. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44b0b08b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44b0b08b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44b0b08b

Branch: refs/heads/HDFS-10285
Commit: 44b0b08b19c007fc2e2570ca3d1a6a8ebed60347
Parents: de86f10
Author: Rakesh Radhakrishnan <rake...@apache.org>
Authored: Fri Nov 3 08:18:14 2017 +0530
Committer: Rakesh Radhakrishnan <rake...@apache.org>
Committed: Fri Nov 3 08:18:14 2017 +0530

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  22 ++++
 .../hadoop/hdfs/protocol/ClientProtocol.java    |  21 ++++
 .../hadoop/hdfs/protocol/HdfsConstants.java     |  27 +++++
 .../ClientNamenodeProtocolTranslatorPB.java     |  20 ++++
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  33 ++++++
 .../src/main/proto/ClientNamenodeProtocol.proto |  17 ++-
 ...tNamenodeProtocolServerSideTranslatorPB.java |  23 +++-
 .../server/blockmanagement/BlockManager.java    |  12 ++
 .../namenode/BlockStorageMovementNeeded.java    | 109 +++++++++++++++++++
 .../hdfs/server/namenode/NameNodeRpcServer.java |  13 ++-
 .../server/namenode/StoragePolicySatisfier.java |   8 ++
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  35 +++++-
 .../src/site/markdown/ArchivalStorage.md        |   3 +-
 .../TestPersistentStoragePolicySatisfier.java   |   2 +-
 .../namenode/TestStoragePolicySatisfier.java    |  67 ++++++++++++
 .../hdfs/tools/TestStoragePolicyCommands.java   |  18 +++
 16 files changed, 424 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
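
Before the per-file diffs, a note on the client-facing surface: the patch adds a single new call, checkStoragePolicySatisfyPathStatus(path), which reports PENDING, IN_PROGRESS, SUCCESS, or NOT_AVAILABLE for a path previously passed to satisfyStoragePolicy(path). A minimal polling sketch against the new API (the configuration, path, and class name are illustrative, not part of the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;

    public class SpsStatusPoller {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // assumes fs.defaultFS points at an HDFS cluster
        Path path = new Path("/data/cold");       // illustrative path
        try (FileSystem fs = FileSystem.get(conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          dfs.satisfyStoragePolicy(path);
          while (true) {
            StoragePolicySatisfyPathStatus status = dfs.getClient()
                .checkStoragePolicySatisfyPathStatus(path.toString());
            System.out.println(path + ": " + status);
            if (status == StoragePolicySatisfyPathStatus.SUCCESS
                || status == StoragePolicySatisfyPathStatus.NOT_AVAILABLE) {
              break; // terminal states: satisfied, or the status already aged out
            }
            Thread.sleep(10000); // the new -w option polls on the same 10s cadence
          }
        }
      }
    }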


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 8ea0407..859e693 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -123,6 +123,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
@@ -3115,4 +3116,25 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     checkOpen();
     return new OpenFilesIterator(namenode, tracer);
   }
+
+  /**
+   * Check the storage policy satisfaction status of the path for which
+   * {@link DFSClient#satisfyStoragePolicy(String)} was called.
+   *
+   * @return Storage policy satisfy status.
+   *         <ul>
+   *         <li>PENDING if the path is queued and not yet processed for
+   *         satisfying the policy.</li>
+   *         <li>IN_PROGRESS if the storage policy is being satisfied for the
+   *         path.</li>
+   *         <li>SUCCESS if the storage policy has been satisfied for the
+   *         path.</li>
+   *         <li>NOT_AVAILABLE if
+   *         {@link DFSClient#satisfyStoragePolicy(String)} was not called for
+   *         the path, or the SPS work has already finished.</li>
+   *         </ul>
+   * @throws IOException if the status cannot be retrieved.
+   */
+  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
+      String path) throws IOException {
+    return namenode.checkStoragePolicySatisfyPathStatus(path);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 66989f7..81ab932 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
@@ -1692,4 +1693,24 @@ public interface ClientProtocol {
    */
   @Idempotent
   boolean isStoragePolicySatisfierRunning() throws IOException;
+
+  /**
+   * Check the storage policy satisfaction status of the path for which
+   * {@link ClientProtocol#satisfyStoragePolicy(String)} was called.
+   *
+   * @return Storage policy satisfy status.
+   *         <ul>
+   *         <li>PENDING if the path is queued and not yet processed for
+   *         satisfying the policy.</li>
+   *         <li>IN_PROGRESS if the storage policy is being satisfied for the
+   *         path.</li>
+   *         <li>SUCCESS if the storage policy has been satisfied for the
+   *         path.</li>
+   *         <li>NOT_AVAILABLE if
+   *         {@link ClientProtocol#satisfyStoragePolicy(String)} was not
+   *         called for the path, or the SPS work has already finished.</li>
+   *         </ul>
+   * @throws IOException if the status cannot be retrieved.
+   */
+  @Idempotent
+  StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
+      String path) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 8c44293..11fe292 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -121,6 +121,33 @@ public final class HdfsConstants {
     SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET, SAFEMODE_FORCE_EXIT
   }
 
+  /**
+   * Storage policy satisfy path status.
+   */
+  public enum StoragePolicySatisfyPathStatus {
+    /**
+     * Scheduled but not yet processed. This status applies only to
+     * directories: a directory is first added to the
+     * "pendingWorkForDirectory" queue and later processed recursively.
+     */
+    PENDING,
+
+    /**
+     * Satisfying the storage policy for path.
+     */
+    IN_PROGRESS,
+
+    /**
+     * Storage policy satisfied for the path.
+     */
+    SUCCESS,
+
+    /**
+     * Status not available.
+     */
+    NOT_AVAILABLE
+  }
+
   public enum RollingUpgradeAction {
     QUERY, PREPARE, FINALIZE;
 

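The four values above are the full lifecycle a caller can observe. As a small illustration, a hypothetical helper (not part of the patch) mapping each value to the wording used by the admin tool and documentation later in this commit:

    import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;

    final class SpsStatusMessages {
      // Hypothetical helper: descriptions mirror the -w option's help text below.
      static String describe(StoragePolicySatisfyPathStatus status) {
        switch (status) {
        case PENDING:
          return "Path is queued and not yet processed for satisfying the policy.";
        case IN_PROGRESS:
          return "Satisfying the storage policy for the path.";
        case SUCCESS:
          return "Storage policy satisfied for the path.";
        case NOT_AVAILABLE:
        default:
          return "Status not available.";
        }
      }
    }
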
http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index cf22be2..b2f5da9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -95,6 +96,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Append
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckStoragePolicySatisfyPathStatusRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckStoragePolicySatisfyPathStatusResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
@@ -230,6 +233,7 @@ import org.apache.hadoop.security.token.Token;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.Message;
 import com.google.protobuf.ServiceException;
+
 import org.apache.hadoop.util.concurrent.AsyncGet;
 
 /**
@@ -1891,4 +1895,20 @@ public class ClientNamenodeProtocolTranslatorPB implements
       throw ProtobufHelper.getRemoteException(e);
     }
   }
+
+  @Override
+  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
+      String path) throws IOException {
+    try {
+      CheckStoragePolicySatisfyPathStatusRequestProto request =
+          CheckStoragePolicySatisfyPathStatusRequestProto.newBuilder()
+          .setSrc(path)
+          .build();
+      CheckStoragePolicySatisfyPathStatusResponseProto response = rpcProxy
+          .checkStoragePolicySatisfyPathStatus(null, request);
+      return PBHelperClient.convert(response.getStatus());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 30a3108..bc7e103 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -117,6 +117,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheF
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckStoragePolicySatisfyPathStatusResponseProto.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
@@ -3023,4 +3024,36 @@ public class PBHelperClient {
     }
     return ret;
   }
+
+  public static StoragePolicySatisfyPathStatus convert(
+      HdfsConstants.StoragePolicySatisfyPathStatus status) {
+    switch (status) {
+    case PENDING:
+      return StoragePolicySatisfyPathStatus.PENDING;
+    case IN_PROGRESS:
+      return StoragePolicySatisfyPathStatus.IN_PROGRESS;
+    case SUCCESS:
+      return StoragePolicySatisfyPathStatus.SUCCESS;
+    case NOT_AVAILABLE:
+      return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
+    default:
+      throw new IllegalArgumentException("Unexpected SPS status: " + status);
+    }
+  }
+
+  public static HdfsConstants.StoragePolicySatisfyPathStatus convert(
+      StoragePolicySatisfyPathStatus status) {
+    switch (status) {
+    case PENDING:
+      return HdfsConstants.StoragePolicySatisfyPathStatus.PENDING;
+    case IN_PROGRESS:
+      return HdfsConstants.StoragePolicySatisfyPathStatus.IN_PROGRESS;
+    case SUCCESS:
+      return HdfsConstants.StoragePolicySatisfyPathStatus.SUCCESS;
+    case NOT_AVAILABLE:
+      return HdfsConstants.StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
+    default:
+      throw new IllegalArgumentException("Unexpected SPS status: " + status);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index 4157540..204184e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -460,7 +460,6 @@ message RollingUpgradeInfoProto {
 message RollingUpgradeResponseProto {
   optional RollingUpgradeInfoProto rollingUpgradeInfo= 1;
 }
-
 message ListCorruptFileBlocksRequestProto {
   required string path = 1;
   optional string cookie = 2;
@@ -808,6 +807,20 @@ message IsStoragePolicySatisfierRunningResponseProto {
   required bool running = 1;
 }
 
+message CheckStoragePolicySatisfyPathStatusRequestProto {
+  required string src = 1;
+}
+
+message CheckStoragePolicySatisfyPathStatusResponseProto {
+  enum StoragePolicySatisfyPathStatus {
+    PENDING = 0;
+    IN_PROGRESS = 1;
+    SUCCESS = 2;
+    NOT_AVAILABLE = 3;
+  }
+  required StoragePolicySatisfyPathStatus status = 1;
+}
+
 service ClientNamenodeProtocol {
   rpc getBlockLocations(GetBlockLocationsRequestProto)
       returns(GetBlockLocationsResponseProto);
@@ -992,4 +1005,6 @@ service ClientNamenodeProtocol {
       returns(SatisfyStoragePolicyResponseProto);
  rpc isStoragePolicySatisfierRunning(IsStoragePolicySatisfierRunningRequestProto)
      returns(IsStoragePolicySatisfierRunningResponseProto);
+  rpc checkStoragePolicySatisfyPathStatus(CheckStoragePolicySatisfyPathStatusRequestProto)
+      returns(CheckStoragePolicySatisfyPathStatusResponseProto);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 7892476..87c2717 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -82,6 +83,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Append
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckStoragePolicySatisfyPathStatusRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckStoragePolicySatisfyPathStatusResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
@@ -248,7 +251,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCod
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
+import org.apache.hadoop.hdfs.protocol.proto.*;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
@@ -1852,4 +1855,22 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     return VOID_SATISFYSTORAGEPOLICY_RESPONSE;
   }
+
+  @Override
+  public CheckStoragePolicySatisfyPathStatusResponseProto
+      checkStoragePolicySatisfyPathStatus(RpcController controller,
+      CheckStoragePolicySatisfyPathStatusRequestProto request)
+      throws ServiceException {
+    try {
+      StoragePolicySatisfyPathStatus status = server
+          .checkStoragePolicySatisfyPathStatus(request.getSrc());
+      CheckStoragePolicySatisfyPathStatusResponseProto.Builder builder =
+          CheckStoragePolicySatisfyPathStatusResponseProto
+          .newBuilder();
+      builder.setStatus(PBHelperClient.convert(status));
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 419ca80..16b0ece 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -47,6 +47,7 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.FutureTask;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
+
 import javax.management.ObjectName;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -68,6 +69,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
@@ -4955,4 +4957,14 @@ public class BlockManager implements BlockStatsMXBean {
   public boolean isStoragePolicySatisfierRunning() {
     return sps.isRunning();
   }
+
+  /**
+   * @return Storage policy satisfy status of the path.
+   * @throws IOException
+   */
+  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
+      String path) throws IOException {
+    return sps.checkStoragePolicySatisfyPathStatus(path);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java
index 788a98b..8f7487c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java
@@ -26,13 +26,17 @@ import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Queue;
+import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser.TraverseInfo;
 import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier.ItemInfo;
 import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -62,6 +66,9 @@ public class BlockStorageMovementNeeded {
   private final Map<Long, DirPendingWorkInfo> pendingWorkForDirectory =
       new HashMap<Long, DirPendingWorkInfo>();
 
+  private final Map<Long, StoragePolicySatisfyPathStatusInfo> spsStatus =
+      new ConcurrentHashMap<>();
+
   private final Namesystem namesystem;
 
   // List of pending dir to satisfy the policy
@@ -73,6 +80,10 @@ public class BlockStorageMovementNeeded {
 
   private final int maxQueuedItem;
 
+  // Amount of time to cache the SUCCESS status of path before turning it to
+  // NOT_AVAILABLE.
+  private static long statusClearanceElapsedTimeMs = 300000;
+
   public BlockStorageMovementNeeded(Namesystem namesystem,
       StoragePolicySatisfier sps, int queueLimit) {
     this.namesystem = namesystem;
@@ -88,6 +99,9 @@ public class BlockStorageMovementNeeded {
    *          - track info for satisfy the policy
    */
   public synchronized void add(ItemInfo trackInfo) {
+    spsStatus.put(trackInfo.getStartId(),
+        new StoragePolicySatisfyPathStatusInfo(
+            StoragePolicySatisfyPathStatus.IN_PROGRESS));
     storageMovementNeeded.add(trackInfo);
   }
 
@@ -125,6 +139,8 @@ public class BlockStorageMovementNeeded {
   }
 
   public synchronized void addToPendingDirQueue(long id) {
+    spsStatus.put(id, new StoragePolicySatisfyPathStatusInfo(
+        StoragePolicySatisfyPathStatus.PENDING));
     spsDirsToBeTraveresed.add(id);
     // Notify waiting FileInodeIdCollector thread about the newly
     // added SPS path.
@@ -172,6 +188,7 @@ public class BlockStorageMovementNeeded {
       if (inode == null) {
         // directory deleted just remove it.
         this.pendingWorkForDirectory.remove(startId);
+        markSuccess(startId);
       } else {
         DirPendingWorkInfo pendingWork = pendingWorkForDirectory.get(startId);
         if (pendingWork != null) {
@@ -179,6 +196,7 @@ public class BlockStorageMovementNeeded {
           if (pendingWork.isDirWorkDone()) {
             namesystem.removeXattr(startId, XATTR_SATISFY_STORAGE_POLICY);
             pendingWorkForDirectory.remove(startId);
+            markSuccess(startId);
           }
         }
       }
@@ -187,6 +205,7 @@ public class BlockStorageMovementNeeded {
       // storageMovementAttemptedItems or file policy satisfied.
       namesystem.removeXattr(trackInfo.getTrackId(),
           XATTR_SATISFY_STORAGE_POLICY);
+      markSuccess(trackInfo.getStartId());
     }
   }
 
@@ -203,6 +222,19 @@ public class BlockStorageMovementNeeded {
   }
 
   /**
+   * Mark inode status as SUCCESS in map.
+   */
+  private void markSuccess(long startId) {
+    StoragePolicySatisfyPathStatusInfo spsStatusInfo =
+        spsStatus.get(startId);
+    if (spsStatusInfo == null) {
+      spsStatusInfo = new StoragePolicySatisfyPathStatusInfo();
+      spsStatus.put(startId, spsStatusInfo);
+    }
+    spsStatusInfo.setSuccess();
+  }
+
+  /**
    * Clean all the movements in spsDirsToBeTraveresed/storageMovementNeeded
    * and notify to clean up required resources.
    * @throws IOException
@@ -256,6 +288,7 @@ public class BlockStorageMovementNeeded {
     @Override
     public void run() {
       LOG.info("Starting FileInodeIdCollector!.");
+      long lastStatusCleanTime = 0;
       while (namesystem.isRunning() && sps.isRunning()) {
         try {
           if (!namesystem.isInSafeMode()) {
@@ -271,6 +304,9 @@ public class BlockStorageMovementNeeded {
               if (startInode != null) {
                 try {
                   remainingCapacity = remainingCapacity();
+                  spsStatus.put(startINodeId,
+                      new StoragePolicySatisfyPathStatusInfo(
+                          StoragePolicySatisfyPathStatus.IN_PROGRESS));
                   readLock();
                   traverseDir(startInode.asDirectory(), startINodeId,
                       HdfsFileStatus.EMPTY_NAME,
@@ -289,9 +325,16 @@ public class BlockStorageMovementNeeded {
                   namesystem.removeXattr(startInode.getId(),
                       XATTR_SATISFY_STORAGE_POLICY);
                   pendingWorkForDirectory.remove(startInode.getId());
+                  markSuccess(startInode.getId());
                 }
               }
             }
+            // Clear SPS statuses kept in SUCCESS longer than the clearance interval.
+            if (Time.monotonicNow()
+                - lastStatusCleanTime > statusClearanceElapsedTimeMs) {
+              lastStatusCleanTime = Time.monotonicNow();
+              cleanSpsStatus();
+            }
           }
         } catch (Throwable t) {
           LOG.warn("Exception while loading inodes to satisfy the policy", t);
@@ -299,6 +342,16 @@ public class BlockStorageMovementNeeded {
       }
     }
 
+    private synchronized void cleanSpsStatus() {
+      for (Iterator<Entry<Long, StoragePolicySatisfyPathStatusInfo>> it =
+          spsStatus.entrySet().iterator(); it.hasNext();) {
+        Entry<Long, StoragePolicySatisfyPathStatusInfo> entry = it.next();
+        if (entry.getValue().canRemove()) {
+          it.remove();
+        }
+      }
+    }
+
     @Override
     protected void checkPauseForTesting() throws InterruptedException {
       // TODO implement if needed
@@ -434,4 +487,60 @@ public class BlockStorageMovementNeeded {
       return startId;
     }
   }
+
+  /**
+   * Represent the file/directory block movement status.
+   */
+  static class StoragePolicySatisfyPathStatusInfo {
+    private StoragePolicySatisfyPathStatus status =
+        StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
+    private long lastStatusUpdateTime;
+
+    StoragePolicySatisfyPathStatusInfo() {
+      this.lastStatusUpdateTime = 0;
+    }
+
+    StoragePolicySatisfyPathStatusInfo(StoragePolicySatisfyPathStatus status) {
+      this.status = status;
+      this.lastStatusUpdateTime = 0;
+    }
+
+    private void setSuccess() {
+      this.status = StoragePolicySatisfyPathStatus.SUCCESS;
+      this.lastStatusUpdateTime = Time.monotonicNow();
+    }
+
+    private StoragePolicySatisfyPathStatus getStatus() {
+      return status;
+    }
+
+    /**
+     * Return true if the SUCCESS status has been cached longer than statusClearanceElapsedTimeMs.
+     */
+    private boolean canRemove() {
+      return StoragePolicySatisfyPathStatus.SUCCESS == status
+          && (Time.monotonicNow()
+              - lastStatusUpdateTime) > statusClearanceElapsedTimeMs;
+    }
+  }
+
+  public StoragePolicySatisfyPathStatus getStatus(long id) {
+    StoragePolicySatisfyPathStatusInfo spsStatusInfo = spsStatus.get(id);
+    if (spsStatusInfo == null) {
+      return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
+    }
+    return spsStatusInfo.getStatus();
+  }
+
+  @VisibleForTesting
+  public static void setStatusClearanceElapsedTimeMs(
+      long statusClearanceElapsedTimeMs) {
+    BlockStorageMovementNeeded.statusClearanceElapsedTimeMs =
+        statusClearanceElapsedTimeMs;
+  }
+
+  @VisibleForTesting
+  public static long getStatusClearanceElapsedTimeMs() {
+    return statusClearanceElapsedTimeMs;
+  }
 }

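The status bookkeeping in this file follows a simple pattern: a ConcurrentHashMap keyed by inode id, a monotonic timestamp stamped on the SUCCESS transition, and a periodic sweep from the collector thread that evicts entries older than statusClearanceElapsedTimeMs (300000 ms, i.e. 5 minutes, by default). A minimal self-contained sketch of that pattern, with generic names rather than the patch's exact classes:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    final class CompletionStatusCache {
      private static final long CLEARANCE_MS = 5 * 60 * 1000; // mirrors the default above

      // Inode id -> monotonic completion time in ms.
      private final Map<Long, Long> successTimeById = new ConcurrentHashMap<>();

      void markSuccess(long id) {
        successTimeById.put(id, monotonicNowMs());
      }

      boolean isSuccessKnown(long id) {
        return successTimeById.containsKey(id);
      }

      // Called periodically, e.g. from the collector thread's main loop.
      void sweep() {
        long now = monotonicNowMs();
        successTimeById.entrySet()
            .removeIf(e -> now - e.getValue() > CLEARANCE_MS);
      }

      private static long monotonicNowMs() {
        // Monotonic clock, unlike System.currentTimeMillis(); same idea as
        // Hadoop's Time.monotonicNow().
        return System.nanoTime() / 1_000_000;
      }
    }
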
http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 8009990..353ae76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -28,7 +28,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.MAX_PATH_DEPTH;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.MAX_PATH_LENGTH;
-
 import static org.apache.hadoop.util.Time.now;
 
 import java.io.FileNotFoundException;
@@ -108,6 +107,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -2415,4 +2415,15 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     }
     return namesystem.getBlockManager().isStoragePolicySatisfierRunning();
   }
+
+  @Override
+  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
+      String path) throws IOException {
+    checkNNStartup();
+    if (nn.isStandbyState()) {
+      throw new StandbyException("Not supported by Standby Namenode.");
+    }
+    return namesystem.getBlockManager().checkStoragePolicySatisfyPathStatus(
+        path);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index cbfba44..2382d36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.util.Time.monotonicNow;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -36,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.server.balancer.Matcher;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -934,4 +936,10 @@ public class StoragePolicySatisfier implements Runnable {
     }
 
   }
+
+  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
+      String path) throws IOException {
+    INode inode = namesystem.getFSDirectory().getINode(path);
+    return storageMovementNeeded.getStatus(inode.getId());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index 9b57874..14152d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.util.StringUtils;
@@ -242,7 +243,7 @@ public class StoragePolicyAdmin extends Configured implements Tool {
 
     @Override
     public String getShortUsage() {
-      return "[" + getName() + " -path <path>]\n";
+      return "[" + getName() + " [-w] -path <path>]\n";
     }
 
     @Override
@@ -250,6 +251,14 @@ public class StoragePolicyAdmin extends Configured implements Tool {
       TableListing listing = AdminHelper.getOptionDescriptionListing();
       listing.addRow("<path>", "The path of the file/directory to satisfy"
           + " storage policy");
+      listing.addRow("-w",
+          "Instructs the command to wait till all the files in the given"
+              + " path satisfy the policy. It prints the current status of"
+              + " the path every 10 seconds. The possible statuses are:\n"
+              + "PENDING : Path is queued and not yet processed for"
+              + " satisfying the policy.\nIN_PROGRESS : Satisfying the"
+              + " storage policy for the path.\nSUCCESS : Storage policy"
+              + " satisfied for the path.\nNOT_AVAILABLE : Status not"
+              + " available.");
       return getShortUsage() + "\n" +
           "Schedule blocks to move based on file/directory policy.\n\n" +
           listing.toString();
@@ -269,12 +278,36 @@ public class StoragePolicyAdmin extends Configured implements Tool {
         dfs.satisfyStoragePolicy(new Path(path));
         System.out.println("Scheduled blocks to move based on the current"
             + " storage policy on " + path);
+        boolean waitOpt = StringUtils.popOption("-w", args);
+        if (waitOpt) {
+          waitForSatisfyPolicy(dfs, path);
+        }
       } catch (Exception e) {
         System.err.println(AdminHelper.prettifyException(e));
         return 2;
       }
       return 0;
     }
+
+
+    private void waitForSatisfyPolicy(DistributedFileSystem dfs, String path)
+        throws IOException {
+      System.out.println("Waiting for the policy to be satisfied ...");
+      while (true) {
+        StoragePolicySatisfyPathStatus status = dfs.getClient()
+            .checkStoragePolicySatisfyPathStatus(path);
+        System.out.println(status);
+        if (StoragePolicySatisfyPathStatus.SUCCESS.equals(status)) {
+          break;
+        }
+        try {
+          Thread.sleep(10000);
+        } catch (InterruptedException e) {
+          Thread.currentThread().interrupt();
+          break;
+        }
+      }
+      System.out.println(" done");
+    }
   }
 
   /** Command to check storage policy satisfier status. */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index f69a3ce..93fcb1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -215,13 +215,14 @@ Schedule blocks to move based on file's/directory's current storage policy.
 
 * Command:
 
-        hdfs storagepolicies -satisfyStoragePolicy -path <path>
+        hdfs storagepolicies -satisfyStoragePolicy [-w] -path <path>
 
 * Arguments:
 
 | | |
 |:---- |:---- |
 | `-path <path>` | The path referring to either a directory or a file. |
+| `-w` | Instructs the command to wait till all the files in the given path satisfy the policy. It prints the current status of the path every 10 seconds. The possible statuses are:<br/>PENDING - Path is queued and not yet processed for satisfying the policy.<br/>IN_PROGRESS - Satisfying the storage policy for the path.<br/>SUCCESS - Storage policy satisfied for the path.<br/>NOT_AVAILABLE - Status not available. |
 
 ### SPS Running Status
 

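An illustrative run of the new wait option, following the command style above (the path and the printed statuses are made up for illustration, not captured from a real cluster):

        hdfs storagepolicies -satisfyStoragePolicy -w -path /data/cold
        Scheduled blocks to move based on the current storage policy on /data/cold
        Waiting for the policy to be satisfied ...
        IN_PROGRESS
        SUCCESS
         done
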
http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index 7165d06..c301b8a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -479,9 +479,9 @@ public class TestPersistentStoragePolicySatisfier {
       clusterSetUp();
       fs.setStoragePolicy(parentDir, "COLD");
       fs.satisfyStoragePolicy(childDir);
-      fs.satisfyStoragePolicy(parentDir);
       DFSTestUtil.waitExpectedStorageType(childFileName, StorageType.ARCHIVE,
           3, 30000, cluster.getFileSystem());
+      fs.satisfyStoragePolicy(parentDir);
       DFSTestUtil.waitExpectedStorageType(parentFileName, StorageType.ARCHIVE,
           3, 30000, cluster.getFileSystem());
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 70219f6..f42d911 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -1463,6 +1464,72 @@ public class TestStoragePolicySatisfier {
     }
   }
 
+  @Test(timeout = 300000)
+  public void testStoragePolicySatisfyPathStatus() throws Exception {
+    try {
+      config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+          true);
+      config.set(DFSConfigKeys
+          .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
+          "3000");
+      config.setBoolean(DFSConfigKeys
+          .DFS_STORAGE_POLICY_SATISFIER_SHARE_EQUAL_REPLICA_MAX_STREAMS_KEY,
+          true);
+
+      StorageType[][] storagetypes = new StorageType[][] {
+          {StorageType.ARCHIVE, StorageType.DISK},
+          {StorageType.ARCHIVE, StorageType.DISK}};
+      hdfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(2)
+          .storageTypes(storagetypes).build();
+      hdfsCluster.waitActive();
+      BlockStorageMovementNeeded.setStatusClearanceElapsedTimeMs(20000);
+      dfs = hdfsCluster.getFileSystem();
+      Path filePath = new Path("/file");
+      DFSTestUtil.createFile(dfs, filePath, 1024, (short) 2,
+            0);
+      dfs.setStoragePolicy(filePath, "COLD");
+      dfs.satisfyStoragePolicy(filePath);
+      StoragePolicySatisfyPathStatus status = dfs.getClient()
+          .checkStoragePolicySatisfyPathStatus(filePath.toString());
+      Assert.assertTrue("Status should be IN_PROGRESS",
+          StoragePolicySatisfyPathStatus.IN_PROGRESS.equals(status));
+      DFSTestUtil.waitExpectedStorageType(filePath.toString(),
+          StorageType.ARCHIVE, 2, 30000, dfs);
+
+      // wait till status is SUCCESS
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          try {
+            StoragePolicySatisfyPathStatus status = dfs.getClient()
+                .checkStoragePolicySatisfyPathStatus(filePath.toString());
+            return StoragePolicySatisfyPathStatus.SUCCESS.equals(status);
+          } catch (IOException e) {
+            Assert.fail("Fail to get path status for sps");
+          }
+          return false;
+        }
+      }, 100, 60000);
+
+      // wait till status is NOT_AVAILABLE
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          try {
+            StoragePolicySatisfyPathStatus status = dfs.getClient()
+                .checkStoragePolicySatisfyPathStatus(filePath.toString());
+            return StoragePolicySatisfyPathStatus.NOT_AVAILABLE.equals(status);
+          } catch (IOException e) {
+            Assert.fail("Fail to get path status for sps");
+          }
+          return false;
+        }
+      }, 100, 60000);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
   private static void createDirectoryTree(DistributedFileSystem dfs)
       throws Exception {
     // tree structure

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
index e6a0356..51498ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
@@ -203,4 +203,22 @@ public class TestStoragePolicyCommands {
     DFSTestUtil.toolRun(admin, "-isSatisfierRunning status", 1,
         "Can't understand arguments: ");
   }
+
+  @Test(timeout = 90000)
+  public void testSatisfyStoragePolicyCommandWithWaitOption()
+      throws Exception {
+    final String file = "/testSatisfyStoragePolicyCommandWithWaitOption";
+    DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0);
+
+    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
+
+    DFSTestUtil.toolRun(admin, "-setStoragePolicy -path " + file
+        + " -policy COLD", 0, "Set storage policy COLD on " + file.toString());
+
+    DFSTestUtil.toolRun(admin, "-satisfyStoragePolicy -w -path " + file, 0,
+        "Waiting for satisfy the policy");
+
+    DFSTestUtil
+        .waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000, fs);
+  }
 }

