[28/30] hadoop git commit: HDFS-9279. Decommissioned capacity should not be considered for configured/used capacity. Contributed by Kuhu Shukla.

2015-10-30 Thread jing9
HDFS-9279. Decommissioned capacity should not be considered for configured/used
capacity. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19a77f54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19a77f54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19a77f54

Branch: refs/heads/HDFS-8966
Commit: 19a77f546657b086af8f41fa631099bdde7e010c
Parents: 2d10cb8
Author: Kihwal Lee 
Authored: Wed Oct 28 11:57:56 2015 -0500
Committer: Kihwal Lee 
Committed: Wed Oct 28 11:58:51 2015 -0500

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/blockmanagement/DatanodeStats.java   | 26 ++-
 .../apache/hadoop/hdfs/TestDecommission.java| 47 +---
 3 files changed, 58 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19a77f54/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 184b743..7f903b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2176,6 +2176,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9302. WebHDFS throws NullPointerException if newLength is not
     provided. (Jagadesh Kiran N via yliu)
 
+    HDFS-9279. Decommissioned capacity should not be considered for
+    configured/used capacity (Contributed by Kuhu Shukla)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19a77f54/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
index 3ab0d5c..4c39c41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
@@ -45,19 +45,20 @@ class DatanodeStats {
   private int expiredHeartbeats = 0;
 
   synchronized void add(final DatanodeDescriptor node) {
-    capacityUsed += node.getDfsUsed();
-    blockPoolUsed += node.getBlockPoolUsed();
     xceiverCount += node.getXceiverCount();
     if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+      capacityUsed += node.getDfsUsed();
+      blockPoolUsed += node.getBlockPoolUsed();
       nodesInService++;
       nodesInServiceXceiverCount += node.getXceiverCount();
       capacityTotal += node.getCapacity();
       capacityRemaining += node.getRemaining();
-    } else {
-      capacityTotal += node.getDfsUsed();
+      cacheCapacity += node.getCacheCapacity();
+      cacheUsed += node.getCacheUsed();
+    } else if (!node.isDecommissioned()) {
+      cacheCapacity += node.getCacheCapacity();
+      cacheUsed += node.getCacheUsed();
     }
-    cacheCapacity += node.getCacheCapacity();
-    cacheUsed += node.getCacheUsed();
     Set<StorageType> storageTypes = new HashSet<>();
     for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
       statsMap.addStorage(storageInfo, node);
@@ -69,19 +70,20 @@ class DatanodeStats {
   }
 
   synchronized void subtract(final DatanodeDescriptor node) {
-    capacityUsed -= node.getDfsUsed();
-    blockPoolUsed -= node.getBlockPoolUsed();
     xceiverCount -= node.getXceiverCount();
     if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+      capacityUsed -= node.getDfsUsed();
+      blockPoolUsed -= node.getBlockPoolUsed();
       nodesInService--;
       nodesInServiceXceiverCount -= node.getXceiverCount();
       capacityTotal -= node.getCapacity();
       capacityRemaining -= node.getRemaining();
-    } else {
-      capacityTotal -= node.getDfsUsed();
+      cacheCapacity -= node.getCacheCapacity();
+      cacheUsed -= node.getCacheUsed();
+    } else if (!node.isDecommissioned()) {
+      cacheCapacity -= node.getCacheCapacity();
+      cacheUsed -= node.getCacheUsed();
     }
-    cacheCapacity -= node.getCacheCapacity();
-    cacheUsed -= node.getCacheUsed();
     Set<StorageType> storageTypes = new HashSet<>();
     for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
       statsMap.subtractStorage(storageInfo, node);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19a77f54/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
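(The TestDecommission hunk is truncated above.) For readers skimming the patch, a minimal self-contained sketch of the revised accounting follows. This is illustrative Java only, not the actual Hadoop class: Node stands in for DatanodeDescriptor, and only the fields this patch touches are modeled. The point of the change is that used and configured capacity are now accumulated only for nodes that are neither decommissioning nor decommissioned, while a decommissioning node still contributes its cache statistics.

    // Illustrative sketch of DatanodeStats.add() after HDFS-9279; not the real class.
    class DatanodeStatsSketch {
      private long capacityTotal, capacityRemaining, capacityUsed, blockPoolUsed;
      private long cacheCapacity, cacheUsed;
      private int nodesInService;

      synchronized void add(final Node node) {
        if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
          // In-service node: counts toward configured/used capacity and cache.
          capacityUsed += node.getDfsUsed();
          blockPoolUsed += node.getBlockPoolUsed();
          nodesInService++;
          capacityTotal += node.getCapacity();
          capacityRemaining += node.getRemaining();
          cacheCapacity += node.getCacheCapacity();
          cacheUsed += node.getCacheUsed();
        } else if (!node.isDecommissioned()) {
          // Decommissioning node: still serves cached blocks, so cache stats
          // are kept, but its disk capacity is no longer reported.
          cacheCapacity += node.getCacheCapacity();
          cacheUsed += node.getCacheUsed();
        }
        // A fully decommissioned node contributes nothing to any aggregate.
      }

      synchronized long getCapacityTotal() { return capacityTotal; }
      synchronized long getCapacityUsed() { return capacityUsed; }

      // Stand-in for the subset of DatanodeDescriptor used above.
      interface Node {
        boolean isDecommissionInProgress();
        boolean isDecommissioned();
        long getDfsUsed();
        long getBlockPoolUsed();
        long getCapacity();
        long getRemaining();
        long getCacheCapacity();
        long getCacheUsed();
      }
    }

The subtract() path in the real patch mirrors this with -= in place of +=, keeping the two symmetric.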

hadoop git commit: HDFS-9279. Decommissioned capacity should not be considered for configured/used capacity. Contributed by Kuhu Shukla. (cherry picked from commit 19a77f546657b086af8f41fa631099bdde7e010c)

2015-10-28 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c7f87dc2d -> 75bc53a86


HDFS-9279. Decommissioned capacity should not be considered for configured/used
capacity. Contributed by Kuhu Shukla.
(cherry picked from commit 19a77f546657b086af8f41fa631099bdde7e010c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75bc53a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75bc53a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75bc53a8

Branch: refs/heads/branch-2
Commit: 75bc53a86a846b3c528164105b91604a9da9c543
Parents: c7f87dc
Author: Kihwal Lee 
Authored: Wed Oct 28 11:59:36 2015 -0500
Committer: Kihwal Lee 
Committed: Wed Oct 28 11:59:36 2015 -0500

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/blockmanagement/DatanodeStats.java   | 26 ++-
 .../apache/hadoop/hdfs/TestDecommission.java| 47 +---
 3 files changed, 58 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75bc53a8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bfba5d4..cdb10f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1344,6 +1344,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9302. WebHDFS throws NullPointerException if newLength is not
     provided. (Jagadesh Kiran N via yliu)
 
+    HDFS-9279. Decommissioned capacity should not be considered for
+    configured/used capacity (Contributed by Kuhu Shukla)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75bc53a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
index 3ab0d5c..4c39c41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
@@ -45,19 +45,20 @@ class DatanodeStats {
   private int expiredHeartbeats = 0;
 
   synchronized void add(final DatanodeDescriptor node) {
-    capacityUsed += node.getDfsUsed();
-    blockPoolUsed += node.getBlockPoolUsed();
     xceiverCount += node.getXceiverCount();
     if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+      capacityUsed += node.getDfsUsed();
+      blockPoolUsed += node.getBlockPoolUsed();
       nodesInService++;
       nodesInServiceXceiverCount += node.getXceiverCount();
       capacityTotal += node.getCapacity();
       capacityRemaining += node.getRemaining();
-    } else {
-      capacityTotal += node.getDfsUsed();
+      cacheCapacity += node.getCacheCapacity();
+      cacheUsed += node.getCacheUsed();
+    } else if (!node.isDecommissioned()) {
+      cacheCapacity += node.getCacheCapacity();
+      cacheUsed += node.getCacheUsed();
     }
-    cacheCapacity += node.getCacheCapacity();
-    cacheUsed += node.getCacheUsed();
     Set<StorageType> storageTypes = new HashSet<>();
     for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
       statsMap.addStorage(storageInfo, node);
@@ -69,19 +70,20 @@ class DatanodeStats {
   }
 
   synchronized void subtract(final DatanodeDescriptor node) {
-    capacityUsed -= node.getDfsUsed();
-    blockPoolUsed -= node.getBlockPoolUsed();
     xceiverCount -= node.getXceiverCount();
     if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+      capacityUsed -= node.getDfsUsed();
+      blockPoolUsed -= node.getBlockPoolUsed();
       nodesInService--;
       nodesInServiceXceiverCount -= node.getXceiverCount();
       capacityTotal -= node.getCapacity();
       capacityRemaining -= node.getRemaining();
-    } else {
-      capacityTotal -= node.getDfsUsed();
+      cacheCapacity -= node.getCacheCapacity();
+      cacheUsed -= node.getCacheUsed();
+    } else if (!node.isDecommissioned()) {
+      cacheCapacity -= node.getCacheCapacity();
+      cacheUsed -= node.getCacheUsed();
     }
-    cacheCapacity -= node.getCacheCapacity();
-    cacheUsed -= node.getCacheUsed();
     Set<StorageType> storageTypes = new HashSet<>();
     for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
       statsMap.subtractStorage(storageInfo, node);
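Since the TestDecommission changes are elided in these messages, here is a hypothetical JUnit check against the DatanodeStatsSketch shown after the first message, illustrating the behavior the patch is after. None of these names or values come from the patch itself; the per-node figures (100 capacity, 40 used, 10 cache) are arbitrary test data.

    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class DatanodeStatsSketchTest {

      // Stub node reporting fixed stats in the given admin state.
      private static DatanodeStatsSketch.Node node(final boolean decommissioning,
          final boolean decommissioned) {
        return new DatanodeStatsSketch.Node() {
          @Override public boolean isDecommissionInProgress() { return decommissioning; }
          @Override public boolean isDecommissioned() { return decommissioned; }
          @Override public long getDfsUsed() { return 40; }
          @Override public long getBlockPoolUsed() { return 40; }
          @Override public long getCapacity() { return 100; }
          @Override public long getRemaining() { return 60; }
          @Override public long getCacheCapacity() { return 10; }
          @Override public long getCacheUsed() { return 5; }
        };
      }

      @Test
      public void capacityCountsOnlyInServiceNodes() {
        DatanodeStatsSketch stats = new DatanodeStatsSketch();
        stats.add(node(false, false)); // in service
        stats.add(node(true, false));  // decommissioning
        stats.add(node(false, true));  // decommissioned
        // Only the in-service node contributes to configured and used capacity,
        // even though three nodes were registered.
        assertEquals(100L, stats.getCapacityTotal());
        assertEquals(40L, stats.getCapacityUsed());
      }
    }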


hadoop git commit: HDFS-9279. Decommissioned capacity should not be considered for configured/used capacity. Contributed by Kuhu Shukla.

2015-10-28 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2d10cb8e0 -> 19a77f546


HDFS-9279. Decommissioned capacity should not be considered for configured/used
capacity. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19a77f54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19a77f54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19a77f54

Branch: refs/heads/trunk
Commit: 19a77f546657b086af8f41fa631099bdde7e010c
Parents: 2d10cb8
Author: Kihwal Lee 
Authored: Wed Oct 28 11:57:56 2015 -0500
Committer: Kihwal Lee 
Committed: Wed Oct 28 11:58:51 2015 -0500

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/blockmanagement/DatanodeStats.java   | 26 ++-
 .../apache/hadoop/hdfs/TestDecommission.java| 47 +---
 3 files changed, 58 insertions(+), 18 deletions(-)
----------------------------------------------------------------------

