[ 
https://issues.apache.org/jira/browse/HDDS-1371?focusedWorklogId=225324&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-225324
 ]

ASF GitHub Bot logged work on HDDS-1371:
----------------------------------------

                Author: ASF GitHub Bot
            Created on: 09/Apr/19 23:02
            Start Date: 09/Apr/19 23:02
    Worklog Time Spent: 10m 
      Work Description: arp7 commented on pull request #703: HDDS-1371. 
Download RocksDB checkpoint from OM Leader to Follower.
URL: https://github.com/apache/hadoop/pull/703#discussion_r273735579
 
 

 ##########
 File path: 
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
 ##########
 @@ -361,4 +373,149 @@ private static void addFilesToArchive(String source, 
File file,
     }
   }
 
+  /**
+   * If a OM conf is only set with key suffixed with OM Node ID, return the
+   * set value.
+   * @return null if base conf key is set, otherwise the value set for
+   * key suffixed with Node ID.
+   */
+  public static String getConfSuffixedWithOMNodeId(Configuration conf,
+      String confKey, String omNodeId) {
+    String confValue = conf.getTrimmed(confKey);
+    if (StringUtils.isNotEmpty(confValue)) {
+      return null;
+    }
+    String suffixedConfKey = OmUtils.addKeySuffixes(
+        confKey, omNodeId);
+    confValue = conf.getTrimmed(suffixedConfKey);
+    if (StringUtils.isNotEmpty(confValue)) {
+      return confValue;
+    }
+    return null;
+  }
+
+  public static String getHttpAddressForOMPeerNode(Configuration conf,
+      String omNodeId, String omNodeHostAddr) {
+    final Optional<String> bindHost = getHostNameFromConfigKeys(
+        conf, addKeySuffixes(OZONE_OM_HTTP_BIND_HOST_KEY, omNodeId));
+
+    final Optional<Integer> addressPort = getPortNumberFromConfigKeys(
+        conf, addKeySuffixes(OZONE_OM_HTTP_ADDRESS_KEY, omNodeId));
+
+    final Optional<String> addressHost = getHostNameFromConfigKeys(
+        conf, addKeySuffixes(OZONE_OM_HTTP_ADDRESS_KEY, omNodeId));
+
+    String hostName = bindHost.orElse(addressHost.orElse(omNodeHostAddr));
+
+    return hostName + ":" + 
addressPort.orElse(OZONE_OM_HTTP_BIND_PORT_DEFAULT);
+  }
+
+  public static String getHttpsAddressForOMPeerNode(Configuration conf,
+      String omNodeId, String omNodeHostAddr) {
+    final Optional<String> bindHost = getHostNameFromConfigKeys(
+        conf, addKeySuffixes(OZONE_OM_HTTPS_BIND_HOST_KEY, omNodeId));
+
+    final Optional<Integer> addressPort = getPortNumberFromConfigKeys(
+        conf, addKeySuffixes(OZONE_OM_HTTPS_ADDRESS_KEY, omNodeId));
+
+    final Optional<String> addressHost = getHostNameFromConfigKeys(
+        conf, addKeySuffixes(OZONE_OM_HTTPS_ADDRESS_KEY, omNodeId));
+
+    String hostName = bindHost.orElse(addressHost.orElse(omNodeHostAddr));
+
+    return hostName + ":" + 
addressPort.orElse(OZONE_OM_HTTPS_BIND_PORT_DEFAULT);
+  }
+
+  /**
+   * Get the local directory where ratis logs will be stored.
+   */
+  public static String getOMRatisDirectory(Configuration conf) {
+    String storageDir = conf.get(OMConfigKeys.OZONE_OM_RATIS_STORAGE_DIR);
+
+    if (Strings.isNullOrEmpty(storageDir)) {
+      storageDir = HddsServerUtil.getDefaultRatisDirectory(conf);
+    }
+    return storageDir;
+  }
+
+  public static String getOMRatisSnapshotDirectory(Configuration conf) {
+    String snapshotDir = conf.get(OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_DIR);
+
+    if (Strings.isNullOrEmpty(snapshotDir)) {
+      snapshotDir = getOMRatisDirectory(conf) + "/snapshot";
+    }
+    return snapshotDir;
+  }
+
+  public static File createOMDir(String dirPath) {
+    File dirFile = new File(dirPath);
+    if (!dirFile.exists() && !dirFile.mkdirs()) {
+      throw new IllegalArgumentException("Unable to create path: " + dirFile);
+    }
+    return dirFile;
+  }
+
+  /**
+   * Untar tar file into destination directory.
+   *
+   * @param tarFile  source tar file
+   * @param destPath destination path to untar to.
+   * @throws IOException ioException
+   */
+  public static void untarCheckpointFile(File tarFile, Path destPath)
+      throws IOException {
+
+    FileInputStream fileInputStream = null;
+    BufferedInputStream buffIn = null;
+    GzipCompressorInputStream gzIn = null;
+    try {
+      fileInputStream = new FileInputStream(tarFile);
+      buffIn = new BufferedInputStream(fileInputStream);
+      gzIn = new GzipCompressorInputStream(buffIn);
+
+      //Create Destination directory if it does not exist.
 
 Review comment:
   Nitpick: Need a space after //.
   
   Also in other places in the same patch.
 
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Issue Time Tracking
-------------------

    Worklog Id:     (was: 225324)
    Time Spent: 1h  (was: 50m)

> Download RocksDB checkpoint from OM Leader to Follower
> ------------------------------------------------------
>
>                 Key: HDDS-1371
>                 URL: https://issues.apache.org/jira/browse/HDDS-1371
>             Project: Hadoop Distributed Data Store
>          Issue Type: Sub-task
>            Reporter: Hanisha Koneru
>            Assignee: Hanisha Koneru
>            Priority: Major
>              Labels: pull-request-available
>          Time Spent: 1h
>  Remaining Estimate: 0h
>
> If a follower OM is lagging way behind the leader OM or in case of a restart 
> or bootstrapping, a follower OM might need RocksDB checkpoint from the leader 
> to catch up with it. This is because the leader might have purged its logs 
> after taking a snapshot.
>  This Jira aims to add support to download a RocksDB checkpoint from the leader 
> OM to a follower OM through an HTTP servlet. We reuse the DBCheckpoint servlet 
> used by the Recon server. 



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

---------------------------------------------------------------------
To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org
For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org

Reply via email to