[hadoop] branch branch-2.10 updated: HADOOP-16208. Do Not Log InterruptedException in Client.

2020-12-22 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 16fd00d  HADOOP-16208. Do Not Log InterruptedException in Client.
16fd00d is described below

commit 16fd00d62de5ee42fc2abeafdcb186bbfbdff7af
Author: David Mollitor 
AuthorDate: Thu Apr 4 21:13:09 2019 +0100

HADOOP-16208. Do Not Log InterruptedException in Client.

Contributed by David Mollitor.

(cherry picked from commit c90736350ba158c7872a39426e7a29c5e5e0bb48)
---
 .../hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index bcabf00..ed9def1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1456,10 +1456,12 @@ public class Client implements AutoCloseable {
 connection.sendRpcRequest(call); // send the rpc 
request
   } catch (RejectedExecutionException e) {
 throw new IOException("connection has been closed", e);
-  } catch (InterruptedException e) {
+  } catch (InterruptedException ie) {
 Thread.currentThread().interrupt();
-LOG.warn("interrupted waiting to send rpc request to server", e);
-throw new IOException(e);
+IOException ioe = new InterruptedIOException(
+"Interrupted waiting to send RPC request to server");
+ioe.initCause(ie);
+throw ioe;
   }
 } catch(Exception e) {
   if (isAsynchronousMode()) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-15569. Speed up the Storage#doRecover during datanode rolling upgrade. Contributed by Hemanth Boyina.

2020-12-22 Thread hemanthboyina
This is an automated email from the ASF dual-hosted git repository.

hemanthboyina pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 16a2050  HDFS-15569. Speed up the Storage#doRecover during datanode 
rolling upgrade. Contributed by Hemanth Boyina.
16a2050 is described below

commit 16a20503cacf12c3d8e27ba90820384f58bed06f
Author: hemanthboyina 
AuthorDate: Tue Dec 22 17:27:31 2020 +0530

HDFS-15569. Speed up the Storage#doRecover during datanode rolling upgrade. 
Contributed by Hemanth Boyina.
---
 .../apache/hadoop/hdfs/server/common/Storage.java  | 36 +-
 1 file changed, 28 insertions(+), 8 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index ea10f01..83a8256 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -801,8 +801,7 @@ public abstract class Storage extends StorageInfo {
   case RECOVER_UPGRADE:   // mv previous.tmp -> current
 LOG.info("Recovering storage directory {} from previous upgrade",
 rootPath);
-if (curDir.exists())
-  deleteDir(curDir);
+deleteAsync(curDir);
 rename(getPreviousTmp(), curDir);
 return;
   case COMPLETE_ROLLBACK: // rm removed.tmp
@@ -818,21 +817,19 @@ public abstract class Storage extends StorageInfo {
   case COMPLETE_FINALIZE: // rm finalized.tmp
 LOG.info("Completing previous finalize for storage directory {}",
 rootPath);
-deleteDir(getFinalizedTmp());
+deleteAsync(getFinalizedTmp());
 return;
   case COMPLETE_CHECKPOINT: // mv lastcheckpoint.tmp -> previous.checkpoint
 LOG.info("Completing previous checkpoint for storage directory {}",
 rootPath);
 File prevCkptDir = getPreviousCheckpoint();
-if (prevCkptDir.exists())
-  deleteDir(prevCkptDir);
+deleteAsync(prevCkptDir);
 rename(getLastCheckpointTmp(), prevCkptDir);
 return;
   case RECOVER_CHECKPOINT:  // mv lastcheckpoint.tmp -> current
 LOG.info("Recovering storage directory {} from failed checkpoint",
 rootPath);
-if (curDir.exists())
-  deleteDir(curDir);
+deleteAsync(curDir);
 rename(getLastCheckpointTmp(), curDir);
 return;
   default:
@@ -840,7 +837,30 @@ public abstract class Storage extends StorageInfo {
 + " for storage directory: " + rootPath);
   }
 }
-
+
+/**
+ * Rename curDir to curDir.tmp and delete curDir.tmp asynchronously.
+ * @throws IOException
+ */
+private void deleteAsync(File curDir) throws IOException {
+  if (curDir.exists()) {
+File curTmp = new File(curDir.getParent(), curDir.getName() + ".tmp");
+if (curTmp.exists()) {
+  deleteDir(curTmp);
+}
+rename(curDir, curTmp);
+new Thread("Async Delete Current.tmp") {
+  public void run() {
+try {
+  deleteDir(curTmp);
+} catch (IOException e) {
+  LOG.warn("Deleting storage directory {} failed", curTmp);
+}
+  }
+}.start();
+  }
+}
+
 /**
  * @return true if the storage directory should prompt the user prior
  * to formatting (i.e if the directory appears to contain some data)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org