Repository: hadoop
Updated Branches:
  refs/heads/trunk 699a6918a -> bc6d9d4c7


HDFS-13589: Add dfsAdmin command to query if upgrade is finalized. Contributed by Hanisha Koneru


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc6d9d4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc6d9d4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc6d9d4c

Branch: refs/heads/trunk
Commit: bc6d9d4c796d3c9d27dbbe3266031bf2adecde4f
Parents: 699a691
Author: Bharat Viswanadham <bha...@apache.org>
Authored: Wed May 23 10:15:40 2018 -0700
Committer: Bharat Viswanadham <bha...@apache.org>
Committed: Wed May 23 10:15:40 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 10 ++
 .../hadoop/hdfs/DistributedFileSystem.java      | 10 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java    |  9 ++
 .../hadoop/hdfs/protocol/HdfsConstants.java     |  7 ++
 .../ClientNamenodeProtocolTranslatorPB.java     | 17 ++++
 .../src/main/proto/ClientNamenodeProtocol.proto |  9 ++
 .../federation/router/RouterRpcServer.java      |  7 ++
 ...tNamenodeProtocolServerSideTranslatorPB.java | 17 ++++
 .../hdfs/server/namenode/NameNodeRpcServer.java |  6 ++
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 97 ++++++++++++++++++++
 .../src/site/markdown/HDFSCommands.md           |  2 +
 .../markdown/HDFSHighAvailabilityWithQJM.md     |  2 +
 .../hadoop/hdfs/tools/TestDFSAdminWithHA.java   | 67 ++++++++++++++
 13 files changed, 260 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 09154d0..5f1b2bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2341,6 +2341,16 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
+  /**
+   * @see ClientProtocol#upgradeStatus()
+   */
+  public boolean upgradeStatus() throws IOException {
+    checkOpen();
+    try (TraceScope ignored = tracer.newScope("isUpgradeFinalized")) {
+      return namenode.upgradeStatus();
+    }
+  }
+
   RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
       throws IOException {
     checkOpen();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 1e9ed09..82cdd8c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1534,6 +1534,16 @@ public class DistributedFileSystem extends FileSystem
   }
 
   /**
+   * Get status of upgrade - finalized or not.
+   * @return true if upgrade is finalized or if no upgrade is in progress and
+   * false otherwise.
+   * @throws IOException
+   */
+  public boolean upgradeStatus() throws IOException {
+    return dfs.upgradeStatus();
+  }
+
+  /**
    * Rolling upgrade: prepare/finalize/query.
    */
   public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)

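For context, a minimal client-side sketch of how the new query API could be exercised (illustrative only; the driver class is hypothetical and it assumes fs.defaultFS points at an HDFS cluster):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class UpgradeStatusCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
          if (fs instanceof DistributedFileSystem) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            // true if the upgrade is finalized or no upgrade is in progress
            System.out.println("Upgrade finalized: " + dfs.upgradeStatus());
          }
        }
      }
    }

The call forwards to DFSClient#upgradeStatus(), which issues the new ClientProtocol RPC shown below.
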
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index f5d5e82..7729e10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -941,6 +941,15 @@ public interface ClientProtocol {
   void finalizeUpgrade() throws IOException;
 
   /**
+   * Get status of upgrade - finalized or not.
+   * @return true if upgrade is finalized or if no upgrade is in progress and
+   * false otherwise.
+   * @throws IOException
+   */
+  @Idempotent
+  boolean upgradeStatus() throws IOException;
+
+  /**
    * Rolling upgrade operations.
    * @param action either query, prepare or finalize.
    * @return rolling upgrade information. On query, if no upgrade is in

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index fd7f9e0..74efcd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -146,6 +146,13 @@ public final class HdfsConstants {
     }
   }
 
+  /**
+   * Upgrade actions.
+   */
+  public enum UpgradeAction {
+    QUERY, FINALIZE;
+  }
+
   // type of the datanode report
   public enum DatanodeReportType {
     ALL, LIVE, DEAD, DECOMMISSIONING, ENTERING_MAINTENANCE, IN_MAINTENANCE

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 4a22da9..e7ae6fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -186,6 +186,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Trunca
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.*;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto;
@@ -273,6 +275,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
       VOID_FINALIZE_UPGRADE_REQUEST =
       FinalizeUpgradeRequestProto.newBuilder().build();
 
+  private final static UpgradeStatusRequestProto
+      VOID_UPGRADE_STATUS_REQUEST =
+      UpgradeStatusRequestProto.newBuilder().build();
+
   private final static GetDataEncryptionKeyRequestProto
       VOID_GET_DATA_ENCRYPTIONKEY_REQUEST =
       GetDataEncryptionKeyRequestProto.newBuilder().build();
@@ -831,6 +837,17 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
+  public boolean upgradeStatus() throws IOException {
+    try {
+      final UpgradeStatusResponseProto proto = rpcProxy.upgradeStatus(
+          null, VOID_UPGRADE_STATUS_REQUEST);
+      return proto.getUpgradeFinalized();
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
   public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
       throws IOException {
     final RollingUpgradeRequestProto r = RollingUpgradeRequestProto.newBuilder()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index db31e22..0f5ce94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -450,6 +450,13 @@ message FinalizeUpgradeRequestProto { // no parameters
 message FinalizeUpgradeResponseProto { // void response
 }
 
+message UpgradeStatusRequestProto { // no parameters
+}
+
+message UpgradeStatusResponseProto {
+ required bool upgradeFinalized = 1;
+}
+
 enum RollingUpgradeActionProto {
   QUERY = 1;
   START = 2;
@@ -879,6 +886,8 @@ service ClientNamenodeProtocol {
  rpc refreshNodes(RefreshNodesRequestProto) returns(RefreshNodesResponseProto);
   rpc finalizeUpgrade(FinalizeUpgradeRequestProto)
       returns(FinalizeUpgradeResponseProto);
+  rpc upgradeStatus(UpgradeStatusRequestProto)
+      returns(UpgradeStatusResponseProto);
   rpc rollingUpgrade(RollingUpgradeRequestProto)
       returns(RollingUpgradeResponseProto);
   rpc listCorruptFileBlocks(ListCorruptFileBlocksRequestProto)

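To illustrate the new wire messages, here is a hedged sketch that builds the request and response using only the generated protobuf builders and getters relied on by the translators in this patch (the example class is hypothetical, not part of the commit):

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto;
    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto;

    public class UpgradeStatusProtoSketch {
      public static void main(String[] args) {
        // The request carries no parameters, mirroring FinalizeUpgradeRequestProto.
        UpgradeStatusRequestProto request =
            UpgradeStatusRequestProto.newBuilder().build();
        System.out.println(request.getSerializedSize()); // 0: empty request

        // The response carries a single required flag.
        UpgradeStatusResponseProto response =
            UpgradeStatusResponseProto.newBuilder()
                .setUpgradeFinalized(true)
                .build();
        System.out.println(response.getUpgradeFinalized()); // prints "true"
      }
    }

Because upgradeFinalized is a required field, building a response without calling setUpgradeFinalized(...) would fail at build() time.
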
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 6b466b8..716ebee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -1482,6 +1482,13 @@ public class RouterRpcServer extends AbstractService
   }
 
   @Override // ClientProtocol
+  public boolean upgradeStatus() throws IOException {
+    String methodName = getMethodName();
+    throw new UnsupportedOperationException(
+        "Operation \"" + methodName + "\" is not supported");
+  }
+
+  @Override // ClientProtocol
   public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
       throws IOException {
     checkOperation(OperationCategory.READ);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index d68669f..ac46d52 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -157,6 +157,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSto
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
@@ -900,6 +902,21 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
 
   @Override
+  public UpgradeStatusResponseProto upgradeStatus(
+      RpcController controller, UpgradeStatusRequestProto req)
+      throws ServiceException {
+    try {
+      final boolean isUpgradeFinalized = server.upgradeStatus();
+      UpgradeStatusResponseProto.Builder b =
+          UpgradeStatusResponseProto.newBuilder();
+      b.setUpgradeFinalized(isUpgradeFinalized);
+      return b.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public RollingUpgradeResponseProto rollingUpgrade(RpcController controller,
       RollingUpgradeRequestProto req) throws ServiceException {
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 1b7a636..c5b9d5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1322,6 +1322,12 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override // ClientProtocol
+  public boolean upgradeStatus() throws IOException {
+    checkNNStartup();
+    return namesystem.isUpgradeFinalized();
+  }
+
+  @Override // ClientProtocol
   public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action) throws IOException {
     checkNNStartup();
     LOG.info("rollingUpgrade " + action);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 023fea9..f793557 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -78,6 +78,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol;
@@ -443,6 +444,7 @@ public class DFSAdmin extends FsShell {
     "\t[" + ClearSpaceQuotaCommand.USAGE +"]\n" +
     "\t[-finalizeUpgrade]\n" +
     "\t[" + RollingUpgradeCommand.USAGE +"]\n" +
+    "\t[-upgrade <query | finalize>]\n" +
     "\t[-refreshServiceAcl]\n" +
     "\t[-refreshUserToGroupsMappings]\n" +
     "\t[-refreshSuperUserGroupsConfiguration]\n" +
@@ -1147,6 +1149,11 @@ public class DFSAdmin extends FsShell {
       "\t\tfollowed by Namenode doing the same.\n" + 
       "\t\tThis completes the upgrade process.\n";
 
+    String upgrade = "-upgrade <query | finalize>:\n"
+        + "     query: query the current upgrade status.\n"
+        + "  finalize: finalize the upgrade of HDFS (equivalent to " +
+        "-finalizeUpgrade).";
+
    String metaSave = "-metasave <filename>: \tSave Namenode's primary data structures\n" +
      "\t\tto <filename> in the directory specified by hadoop.log.dir property.\n" +
       "\t\t<filename> is overwritten if it exists.\n" +
@@ -1278,6 +1285,8 @@ public class DFSAdmin extends FsShell {
       System.out.println(finalizeUpgrade);
     } else if (RollingUpgradeCommand.matches("-"+cmd)) {
       System.out.println(RollingUpgradeCommand.DESCRIPTION);
+    } else if ("upgrade".equals(cmd)) {
+      System.out.println(upgrade);
     } else if ("metasave".equals(cmd)) {
       System.out.println(metaSave);
     } else if (SetQuotaCommand.matches("-"+cmd)) {
@@ -1338,6 +1347,7 @@ public class DFSAdmin extends FsShell {
       System.out.println(refreshNodes);
       System.out.println(finalizeUpgrade);
       System.out.println(RollingUpgradeCommand.DESCRIPTION);
+      System.out.println(upgrade);
       System.out.println(metaSave);
       System.out.println(SetQuotaCommand.DESCRIPTION);
       System.out.println(ClearQuotaCommand.DESCRIPTION);
@@ -1417,6 +1427,83 @@ public class DFSAdmin extends FsShell {
   }
 
   /**
+   * Command to get the upgrade status of each namenode in the nameservice.
+   * Usage: hdfs dfsadmin -upgrade query
+   * @exception IOException
+   */
+  public int getUpgradeStatus() throws IOException {
+    DistributedFileSystem dfs = getDFS();
+
+    Configuration dfsConf = dfs.getConf();
+    URI dfsUri = dfs.getUri();
+
+    boolean isHaAndLogicalUri = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
+    if (isHaAndLogicalUri) {
+      // In the case of HA and logical URI, run upgrade query for all
+      // NNs in this nameservice.
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<ClientProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+              nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
+      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+        try {
+          boolean upgradeFinalized = proxy.getProxy().upgradeStatus();
+          if (upgradeFinalized) {
+            System.out.println("Upgrade finalized for " + proxy.getAddress());
+          } else {
+            System.out.println("Upgrade not finalized for " +
+                proxy.getAddress());
+          }
+        } catch (IOException ioe){
+          System.err.println("Getting upgrade status failed for " +
+              proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if (!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
+      }
+    } else {
+      if (dfs.upgradeStatus()) {
+        System.out.println("Upgrade finalized");
+      } else {
+        System.out.println("Upgrade not finalized");
+      }
+    }
+
+    return 0;
+  }
+
+  /**
+   * Upgrade command to get the status of upgrade or ask NameNode to finalize
+   * the previously performed upgrade.
+   * Usage: hdfs dfsadmin -upgrade [query | finalize]
+   * @exception IOException
+   */
+  public int upgrade(String arg) throws IOException {
+    UpgradeAction action;
+    if ("query".equalsIgnoreCase(arg)) {
+      action = UpgradeAction.QUERY;
+    } else if ("finalize".equalsIgnoreCase(arg)) {
+      action = UpgradeAction.FINALIZE;
+    } else {
+      printUsage("-upgrade");
+      return -1;
+    }
+
+    switch (action) {
+    case QUERY:
+      return getUpgradeStatus();
+    case FINALIZE:
+      return finalizeUpgrade();
+    default:
+      printUsage("-upgrade");
+      return -1;
+    }
+  }
+
+  /**
    * Dumps DFS data structures into specified file.
    * Usage: hdfs dfsadmin -metasave filename
    * @param argv List of of command line parameters.
@@ -1997,6 +2084,9 @@ public class DFSAdmin extends FsShell {
     } else if (RollingUpgradeCommand.matches(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
           + " [" + RollingUpgradeCommand.USAGE+"]");
+    } else if ("-upgrade".equals(cmd)) {
+      System.err.println("Usage: hdfs dfsadmin"
+          + " [-upgrade query | finalize]");
     } else if ("-metasave".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
           + " [-metasave filename]");
@@ -2146,6 +2236,11 @@ public class DFSAdmin extends FsShell {
         printUsage(cmd);
         return exitCode;
       }
+    } else if ("-upgrade".equals(cmd)) {
+      if (argv.length != 2) {
+        printUsage(cmd);
+        return exitCode;
+      }
     } else if ("-metasave".equals(cmd)) {
       if (argv.length != 2) {
         printUsage(cmd);
@@ -2263,6 +2358,8 @@ public class DFSAdmin extends FsShell {
         exitCode = finalizeUpgrade();
       } else if (RollingUpgradeCommand.matches(cmd)) {
         exitCode = RollingUpgradeCommand.run(getDFS(), argv, i);
+      } else if ("-upgrade".equals(cmd)) {
+        exitCode = upgrade(argv[i]);
       } else if ("-metasave".equals(cmd)) {
         exitCode = metaSave(argv, i);
       } else if (ClearQuotaCommand.matches(cmd)) {

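For completeness, a hedged sketch of driving the new subcommand programmatically, equivalent to running "hdfs dfsadmin -upgrade query" from the shell (the driver class is hypothetical; it only uses the Tool/ToolRunner plumbing that DFSAdmin already implements):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.DFSAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class UpgradeQueryDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // "-upgrade query" reports the finalization state of the NameNode(s);
        // "-upgrade finalize" is equivalent to the existing -finalizeUpgrade.
        int exitCode = ToolRunner.run(new DFSAdmin(conf),
            new String[] {"-upgrade", "query"});
        System.exit(exitCode);
      }
    }

Against an HA logical URI the query is issued to every NN in the nameservice and the exit code is non-zero if any NN cannot be reached; otherwise a single "Upgrade finalized" / "Upgrade not finalized" line is printed.
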
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 63d8b20..9ed69bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -352,6 +352,7 @@ Usage:
        hdfs dfsadmin [-clrSpaceQuota [-storageType <storagetype>] <dirname>...<dirname>]
         hdfs dfsadmin [-finalizeUpgrade]
         hdfs dfsadmin [-rollingUpgrade [<query> |<prepare> |<finalize>]]
+        hdfs dfsadmin [-upgrade [query | finalize]]
         hdfs dfsadmin [-refreshServiceAcl]
         hdfs dfsadmin [-refreshUserToGroupsMappings]
         hdfs dfsadmin [-refreshSuperUserGroupsConfiguration]
@@ -389,6 +390,7 @@ Usage:
 | `-clrSpaceQuota` `[-storageType <storagetype>]` \<dirname\>...\<dirname\> | See [HDFS Quotas Guide](../hadoop-hdfs/HdfsQuotaAdminGuide.html#Administrative_Commands) for the detail. |
 | `-finalizeUpgrade` | Finalize upgrade of HDFS. Datanodes delete their previous version working directories, followed by Namenode doing the same. This completes the upgrade process. |
 | `-rollingUpgrade` [\<query\>\|\<prepare\>\|\<finalize\>] | See [Rolling Upgrade document](../hadoop-hdfs/HdfsRollingUpgrade.html#dfsadmin_-rollingUpgrade) for the detail. |
+| `-upgrade` query\|finalize | Query the current upgrade status.<br/>Finalize upgrade of HDFS (equivalent to -finalizeUpgrade). |
 | `-refreshServiceAcl` | Reload the service-level authorization policy file. |
 | `-refreshUserToGroupsMappings` | Refresh user-to-groups mappings. |
 | `-refreshSuperUserGroupsConfiguration` | Refresh superuser proxy groups mappings |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
index 2ac7767..f32868a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
@@ -632,6 +632,8 @@ When moving between versions of HDFS, sometimes the newer software can simply be
 
 Note that if at any time you want to restart the NameNodes before finalizing or rolling back the upgrade, you should start the NNs as normal, i.e. without any special startup flag.
 
+**To query the status of an upgrade**, the operator will use the `` `hdfs dfsadmin -upgrade query' `` command while at least one of the NNs is running. The command will return, for each NN, whether the upgrade process has been finalized.
+
 **To finalize an HA upgrade**, the operator will use the `` `hdfs dfsadmin -finalizeUpgrade' `` command while the NNs are running and one of them is active. The active NN at the time this happens will perform the finalization of the shared log, and the NN whose local storage directories contain the previous FS state will delete its local state.
 
 **To perform a rollback** of an upgrade, both NNs should first be shut down. The operator should run the roll back command on the NN where they initiated the upgrade procedure, which will perform the rollback on the local dirs there, as well as on the shared log, either NFS or on the JNs. Afterward, this NN should be started and the operator should run `` `-bootstrapStandby' `` on the other NN to bring the two NNs in sync with this rolled-back file system state.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc6d9d4c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
index 97daf09..b21084e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
@@ -28,7 +28,10 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
 import org.junit.After;
 import org.junit.Test;
 
@@ -676,6 +679,70 @@ public class TestDFSAdminWithHA {
     assertOutputMatches(message + newLine);
   }
 
+  @Test (timeout = 300000)
+  public void testUpgradeCommand() throws Exception {
+    final String finalizedMsg = "Upgrade finalized for.*";
+    final String notFinalizedMsg = "Upgrade not finalized for.*";
+    final String failMsg = "Getting upgrade status failed for.*" + newLine +
+        "upgrade: .*";
+    final String finalizeSuccessMsg = "Finalize upgrade successful for.*";
+
+    setUpHaCluster(false);
+    MiniDFSCluster dfsCluster = cluster.getDfsCluster();
+
+    // Before upgrade is initialized, the query should return upgrade
+    // finalized (as no upgrade is in progress)
+    String message = finalizedMsg + newLine + finalizedMsg + newLine;
+    verifyUpgradeQueryOutput(message, 0);
+
+    // Shutdown the NNs
+    dfsCluster.shutdownNameNode(0);
+    dfsCluster.shutdownNameNode(1);
+
+    // Start NN1 with -upgrade option
+    dfsCluster.getNameNodeInfos()[0].setStartOpt(
+        HdfsServerConstants.StartupOption.UPGRADE);
+    dfsCluster.restartNameNode(0, true);
+
+    // Running -upgrade query should return "not finalized" for NN1 and
+    // connection exception for NN2 (as NN2 is down)
+    message = notFinalizedMsg + newLine;
+    verifyUpgradeQueryOutput(message, -1);
+    String errorMsg =  failMsg + newLine;
+    verifyUpgradeQueryOutput(errorMsg, -1);
+
+    // Bootstrap the standby (NN2) with the upgraded info.
+    int rc = BootstrapStandby.run(
+        new String[]{"-force"},
+        dfsCluster.getConfiguration(1));
+    assertEquals(0, rc);
+    out.reset();
+
+    // Restart NN2.
+    dfsCluster.restartNameNode(1);
+
+    // Both NNs should return "not finalized" msg for -upgrade query
+    message = notFinalizedMsg + newLine + notFinalizedMsg + newLine;
+    verifyUpgradeQueryOutput(message, 0);
+
+    // Finalize the upgrade
+    int exitCode = admin.run(new String[] {"-upgrade", "finalize"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    message = finalizeSuccessMsg + newLine + finalizeSuccessMsg + newLine;
+    assertOutputMatches(message);
+
+    // NNs should return "upgrade finalized" msg
+    message = finalizedMsg + newLine + finalizedMsg + newLine;
+    verifyUpgradeQueryOutput(message, 0);
+  }
+
+  private void verifyUpgradeQueryOutput(String message, int expected) throws
+      Exception {
+    int exitCode = admin.run(new String[] {"-upgrade", "query"});
+    assertEquals(err.toString().trim(), expected, exitCode);
+    assertOutputMatches(message);
+  }
+
   @Test (timeout = 30000)
   public void testListOpenFilesNN1UpNN2Down() throws Exception{
     setUpHaCluster(false);

