hadoop git commit: HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed by Brahma Reddy Battula.

2016-10-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk fdce51509 -> 332a61fd7


HDFS-10987. Make Decommission less expensive when lot of blocks present. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/332a61fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/332a61fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/332a61fd

Branch: refs/heads/trunk
Commit: 332a61fd74fd2a9874319232c583ab5d2c53ff03
Parents: fdce515
Author: Kihwal Lee 
Authored: Thu Oct 13 13:52:49 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Oct 13 13:52:49 2016 -0500

--
 .../blockmanagement/DecommissionManager.java| 29 +++-
 1 file changed, 28 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/332a61fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 6436fab..87b36da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -389,6 +389,10 @@ public class DecommissionManager {
  */
 private int numBlocksChecked = 0;
 /**
+ * The number of blocks checked after (re)holding lock.
+ */
+private int numBlocksCheckedPerLock = 0;
+/**
  * The number of nodes that have been checked on this tick. Used for 
  * statistics.
  */
@@ -418,6 +422,7 @@ public class DecommissionManager {
   }
   // Reset the checked count at beginning of each iteration
   numBlocksChecked = 0;
+  numBlocksCheckedPerLock = 0;
   numNodesChecked = 0;
   // Check decom progress
   namesystem.writeLock();
@@ -451,7 +456,8 @@ public class DecommissionManager {
   iterkey).iterator();
   final LinkedList toRemove = new LinkedList<>();
 
-  while (it.hasNext() && !exceededNumBlocksPerCheck()) {
+  while (it.hasNext() && !exceededNumBlocksPerCheck() && namesystem
+  .isRunning()) {
 numNodesChecked++;
 final Map.Entry>
 entry = it.next();
@@ -577,7 +583,28 @@ public class DecommissionManager {
   int decommissionOnlyReplicas = 0;
   int lowRedundancyInOpenFiles = 0;
   while (it.hasNext()) {
+if (insufficientList == null
+&& numBlocksCheckedPerLock >= numBlocksPerCheck) {
+  // During fullscan insufficientlyReplicated will NOT be null, iterator
+  // will be DN's iterator. So should not yield lock, otherwise
+  // ConcurrentModificationException could occur.
+  // Once the fullscan done, iterator will be a copy. So can yield the
+  // lock.
+  // Yielding is required in case of block number is greater than the
+  // configured per-iteration-limit.
+  namesystem.writeUnlock();
+  try {
+LOG.debug("Yielded lock during decommission check");
+Thread.sleep(0, 500);
+  } catch (InterruptedException ignored) {
+return;
+  }
+  // reset
+  numBlocksCheckedPerLock = 0;
+  namesystem.writeLock();
+}
 numBlocksChecked++;
+numBlocksCheckedPerLock++;
 final BlockInfo block = it.next();
 // Remove the block from the list if it's no longer in the block map,
 // e.g. the containing file has been deleted
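
The shape of the change is the same on every branch: the Monitor still walks each node's block list under the namesystem write lock, but once it has checked numBlocksPerCheck blocks it releases and re-takes the lock, so a node with a very large number of blocks no longer pins the lock for the whole scan. The full scan over the DataNode's own iterator is deliberately exempt, since yielding there could allow a concurrent modification of the underlying collection. Below is a minimal, self-contained sketch of that yield-and-reacquire pattern; it uses a plain ReentrantReadWriteLock and made-up names (WorkQueueScanner, blocksPerLock) rather than the real DecommissionManager/Namesystem API.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/**
 * Illustrative only: yield-and-reacquire scanning, in the spirit of the
 * HDFS-10987 change, but against a plain ReentrantReadWriteLock instead of
 * the namesystem lock. WorkQueueScanner and blocksPerLock are made up.
 */
public class WorkQueueScanner {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final int blocksPerLock;
  private int checkedSinceLockAcquired = 0;

  public WorkQueueScanner(int blocksPerLock) {
    this.blocksPerLock = blocksPerLock;
  }

  /** Walks a snapshot (a copy) of the pending blocks, yielding periodically. */
  public void scan(List<String> pendingBlocksSnapshot) {
    lock.writeLock().lock();
    try {
      Iterator<String> it = pendingBlocksSnapshot.iterator();
      while (it.hasNext()) {
        if (checkedSinceLockAcquired >= blocksPerLock) {
          // Yield: release the lock briefly so other work can get in, then
          // re-acquire it and resume from the same position in the snapshot.
          lock.writeLock().unlock();
          try {
            Thread.sleep(0, 500);   // sub-millisecond pause, as in the patch
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return;                 // abandon the scan, like the Monitor does
          }
          checkedSinceLockAcquired = 0;
          lock.writeLock().lock();
        }
        checkedSinceLockAcquired++;
        String block = it.next();
        // ... per-block bookkeeping would happen here, under the lock ...
      }
    } finally {
      if (lock.writeLock().isHeldByCurrentThread()) {
        lock.writeLock().unlock();
      }
    }
  }

  public static void main(String[] args) {
    List<String> blocks = new ArrayList<>();
    for (int i = 0; i < 10_000; i++) {
      blocks.add("blk_" + i);
    }
    new WorkQueueScanner(1_000).scan(blocks);
    System.out.println("scan finished");
  }
}

The Thread.sleep(0, 500) mirrors the patch: a sub-millisecond pause, presumably just long enough for queued operations to grab the freed lock before the scan re-acquires it.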


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed by Brahma Reddy Battula.

2016-10-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2e153bc8a -> c5a130370


HDFS-10987. Make Decommission less expensive when lot of blocks present. 
Contributed by Brahma Reddy Battula.

(cherry picked from commit 332a61fd74fd2a9874319232c583ab5d2c53ff03)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5a13037
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5a13037
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5a13037

Branch: refs/heads/branch-2
Commit: c5a13037048eb1e3b5a500aeec0e2e953e7d509a
Parents: 2e153bc
Author: Kihwal Lee 
Authored: Thu Oct 13 14:55:22 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Oct 13 14:55:22 2016 -0500

--
 .../blockmanagement/DecommissionManager.java| 34 
 1 file changed, 28 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5a13037/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 78b6a20..10e4c96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -388,17 +388,12 @@ public class DecommissionManager {
  */
 private final int numBlocksPerCheck;
 /**
-<<< HEAD
  * The maximum number of nodes to check per tick.
  */
 private final int numNodesPerCheck;
 /**
  * The maximum number of nodes to track in decomNodeBlocks. A value of 0
  * means no limit.
-===
- * The maximum number of nodes to track in outOfServiceNodeBlocks.
- * A value of 0 means no limit.
->>> 9dcbdbd... HDFS-9392. Admins support for maintenance state. Contributed by Ming Ma.
  */
 private final int maxConcurrentTrackedNodes;
 /**
@@ -406,6 +401,10 @@ public class DecommissionManager {
  */
 private int numBlocksChecked = 0;
 /**
+ * The number of blocks checked after (re)holding lock.
+ */
+private int numBlocksCheckedPerLock = 0;
+/**
  * The number of nodes that have been checked on this tick. Used for 
  * testing.
  */
@@ -443,6 +442,7 @@ public class DecommissionManager {
   }
   // Reset the checked count at beginning of each iteration
   numBlocksChecked = 0;
+  numBlocksCheckedPerLock = 0;
   numNodesChecked = 0;
   // Check decom progress
   namesystem.writeLock();
@@ -478,7 +478,8 @@ public class DecommissionManager {
 
   while (it.hasNext()
   && !exceededNumBlocksPerCheck()
-  && !exceededNumNodesPerCheck()) {
+  && !exceededNumNodesPerCheck()
+  && namesystem.isRunning()) {
 numNodesChecked++;
 final Map.Entry>
 entry = it.next();
@@ -608,7 +609,28 @@ public class DecommissionManager {
   int decommissionOnlyReplicas = 0;
   int underReplicatedInOpenFiles = 0;
   while (it.hasNext()) {
+if (insufficientlyReplicated == null
+&& numBlocksCheckedPerLock >= numBlocksPerCheck) {
+  // During fullscan insufficientlyReplicated will NOT be null, iterator
+  // will be DN's iterator. So should not yield lock, otherwise
+  // ConcurrentModificationException could occur.
+  // Once the fullscan done, iterator will be a copy. So can yield the
+  // lock.
+  // Yielding is required in case of block number is greater than the
+  // configured per-iteration-limit.
+  namesystem.writeUnlock();
+  try {
+LOG.debug("Yielded lock during decommission check");
+Thread.sleep(0, 500);
+  } catch (InterruptedException ignored) {
+return;
+  }
+  // reset
+  numBlocksCheckedPerLock = 0;
+  namesystem.writeLock();
+}
 numBlocksChecked++;
+numBlocksCheckedPerLock++;
 final BlockInfo block = it.next();
 // Remove the block from the list if it's no longer in the block map,
 // e.g. the containing file has been deleted
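
The ConcurrentModificationException note in the new comment is the crux of why yielding is limited to the pruning pass: during the first full scan the iterator comes straight from the DataNode's live block collection, and Java's fail-fast iterators abort if that collection is structurally modified while iteration is in flight, which is exactly what releasing the namesystem lock would permit; once the scan runs over a copied list, dropping the lock is harmless. A small standalone illustration of that behaviour (generic Java, nothing HDFS-specific) follows.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class FailFastIteratorDemo {
  public static void main(String[] args) {
    List<String> live = new ArrayList<>();
    live.add("blk_1");
    live.add("blk_2");
    live.add("blk_3");

    // Iterating the live list while it is structurally modified (simulated
    // here in one thread; in HDFS it would be another thread slipping in
    // once the lock is dropped) trips the fail-fast check on the next call.
    try {
      Iterator<String> it = live.iterator();
      while (it.hasNext()) {
        it.next();
        live.add("blk_new");   // structural modification during iteration
      }
    } catch (java.util.ConcurrentModificationException e) {
      System.out.println("live iterator failed: " + e);
    }

    // Iterating a copy is safe: later changes to the live list cannot
    // invalidate the snapshot being walked.
    List<String> copy = new ArrayList<>(live);
    for (String blk : copy) {
      live.add("seen_" + blk);
    }
    System.out.println("copy walk finished; live now has " + live.size() + " blocks");
  }
}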


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

hadoop git commit: HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed by Brahma Reddy Battula.

2016-10-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 7a5aaa789 -> ded91992a


HDFS-10987. Make Decommission less expensive when lot of blocks present. 
Contributed by Brahma Reddy Battula.

(cherry picked from commit c5a13037048eb1e3b5a500aeec0e2e953e7d509a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ded91992
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ded91992
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ded91992

Branch: refs/heads/branch-2.8
Commit: ded91992adc08c5ac5cff00abcb9f05c148d8daa
Parents: 7a5aaa7
Author: Kihwal Lee 
Authored: Thu Oct 13 14:57:44 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Oct 13 14:57:44 2016 -0500

--
 .../blockmanagement/DecommissionManager.java| 29 +++-
 1 file changed, 28 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ded91992/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 073332b..be4771d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -355,6 +355,10 @@ public class DecommissionManager {
  */
 private int numBlocksChecked = 0;
 /**
+ * The number of blocks checked after (re)holding lock.
+ */
+private int numBlocksCheckedPerLock = 0;
+/**
  * The number of nodes that have been checked on this tick. Used for 
  * testing.
  */
@@ -392,6 +396,7 @@ public class DecommissionManager {
   }
   // Reset the checked count at beginning of each iteration
   numBlocksChecked = 0;
+  numBlocksCheckedPerLock = 0;
   numNodesChecked = 0;
   // Check decom progress
   namesystem.writeLock();
@@ -426,7 +431,8 @@ public class DecommissionManager {
 
   while (it.hasNext()
   && !exceededNumBlocksPerCheck()
-  && !exceededNumNodesPerCheck()) {
+  && !exceededNumNodesPerCheck()
+  && namesystem.isRunning()) {
 numNodesChecked++;
 final Map.Entry>
 entry = it.next();
@@ -544,7 +550,28 @@ public class DecommissionManager {
   int decommissionOnlyReplicas = 0;
   int underReplicatedInOpenFiles = 0;
   while (it.hasNext()) {
+if (insufficientlyReplicated == null
+&& numBlocksCheckedPerLock >= numBlocksPerCheck) {
+  // During fullscan insufficientlyReplicated will NOT be null, iterator
+  // will be DN's iterator. So should not yield lock, otherwise
+  // ConcurrentModificationException could occur.
+  // Once the fullscan done, iterator will be a copy. So can yield the
+  // lock.
+  // Yielding is required in case of block number is greater than the
+  // configured per-iteration-limit.
+  namesystem.writeUnlock();
+  try {
+LOG.debug("Yielded lock during decommission check");
+Thread.sleep(0, 500);
+  } catch (InterruptedException ignored) {
+return;
+  }
+  // reset
+  numBlocksCheckedPerLock = 0;
+  namesystem.writeLock();
+}
 numBlocksChecked++;
+numBlocksCheckedPerLock++;
 final BlockInfo block = it.next();
 // Remove the block from the list if it's no longer in the block map,
 // e.g. the containing file has been deleted


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed By Brahma Reddy Battula.

2017-05-15 Thread brahma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 3252064ea -> b95d63cda


HDFS-10987. Make Decommission less expensive when lot of blocks present. 
Contributed By Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b95d63cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b95d63cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b95d63cd

Branch: refs/heads/branch-2.7
Commit: b95d63cdac4b35b2b47152121b18cde8b3b92eaa
Parents: 3252064
Author: Brahma Reddy Battula 
Authored: Tue May 16 10:50:51 2017 +0530
Committer: Brahma Reddy Battula 
Committed: Tue May 16 10:50:51 2017 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../blockmanagement/DecommissionManager.java| 31 ++--
 2 files changed, 31 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b95d63cd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index da64e6d..beecb58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -280,6 +280,9 @@ Release 2.7.4 - UNRELEASED
 
HDFS-11674. reserveSpaceForReplicas is not released if append request failed
due to mirror down and replica recovered (vinayakumarb)
+   
+HDFS-10987. Make Decommission less expensive when lot of blocks present. 
+(Brahma Reddy Battula)
 
 Release 2.7.3 - 2016-08-25
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b95d63cd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 437d99a..a4715ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -348,6 +348,10 @@ public class DecommissionManager {
  */
 private int numBlocksChecked = 0;
 /**
+ * The number of blocks checked after (re)holding lock.
+ */
+private int numBlocksCheckedPerLock = 0;
+/**
  * The number of nodes that have been checked on this tick. Used for 
  * testing.
  */
@@ -385,6 +389,7 @@ public class DecommissionManager {
   }
   // Reset the checked count at beginning of each iteration
   numBlocksChecked = 0;
+  numBlocksCheckedPerLock = 0;
   numNodesChecked = 0;
   // Check decom progress
   namesystem.writeLock();
@@ -417,9 +422,8 @@ public class DecommissionManager {
   it = new CyclicIteration<>(decomNodeBlocks, iterkey).iterator();
   final LinkedList toRemove = new LinkedList<>();
 
-  while (it.hasNext()
-  && !exceededNumBlocksPerCheck()
-  && !exceededNumNodesPerCheck()) {
+  while (it.hasNext() && !exceededNumBlocksPerCheck()
+  && !exceededNumNodesPerCheck() && namesystem.isRunning()) {
 numNodesChecked++;
 final Map.Entry>
 entry = it.next();
@@ -544,7 +548,28 @@ public class DecommissionManager {
   int decommissionOnlyReplicas = 0;
   int underReplicatedInOpenFiles = 0;
   while (it.hasNext()) {
+if (insufficientlyReplicated == null
+&& numBlocksCheckedPerLock >= numBlocksPerCheck) {
+  // During fullscan insufficientlyReplicated will NOT be null, iterator
+  // will be DN's iterator. So should not yield lock, otherwise
+  // ConcurrentModificationException could occur.
+  // Once the fullscan done, iterator will be a copy. So can yield the
+  // lock.
+  // Yielding is required in case of block number is greater than the
+  // configured per-iteration-limit.
+  namesystem.writeUnlock();
+  try {
+LOG.debug("Yielded lock during decommission check");
+Thread.sleep(0, 500);
+  } catch (InterruptedException ignored) {
+return;
+  }
+  //reset
+  numBlocksCheckedPerLock = 0;
+  namesystem.writeLock();
+}
 numBlocksChecked++;
+numBlocksCheckedPerLock++;
 final BlockInfoContiguous block = it.next();
 // Remove the block from the list if it's no longer in the block map,
 // e.g. the containing file has been deleted

[21/50] hadoop git commit: HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed by Brahma Reddy Battula.

2016-10-17 Thread umamahesh
HDFS-10987. Make Decommission less expensive when lot of blocks present. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/332a61fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/332a61fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/332a61fd

Branch: refs/heads/HDFS-10285
Commit: 332a61fd74fd2a9874319232c583ab5d2c53ff03
Parents: fdce515
Author: Kihwal Lee 
Authored: Thu Oct 13 13:52:49 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Oct 13 13:52:49 2016 -0500



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[09/50] [abbrv] hadoop git commit: HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed by Brahma Reddy Battula.

2016-10-18 Thread sjlee
HDFS-10987. Make Decommission less expensive when lot of blocks present. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/332a61fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/332a61fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/332a61fd

Branch: refs/heads/HADOOP-13070
Commit: 332a61fd74fd2a9874319232c583ab5d2c53ff03
Parents: fdce515
Author: Kihwal Lee 
Authored: Thu Oct 13 13:52:49 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Oct 13 13:52:49 2016 -0500



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[47/51] [abbrv] hadoop git commit: HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed by Brahma Reddy Battula.

2016-10-13 Thread aengineer
HDFS-10987. Make Decommission less expensive when lot of blocks present. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/332a61fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/332a61fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/332a61fd

Branch: refs/heads/HDFS-7240
Commit: 332a61fd74fd2a9874319232c583ab5d2c53ff03
Parents: fdce515
Author: Kihwal Lee 
Authored: Thu Oct 13 13:52:49 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Oct 13 13:52:49 2016 -0500



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org