[12/19] git commit: HDFS-6940. Refactoring to allow ConsensusNode implementation. Contributed by Konstantin Shvachko.

2014-09-07 Thread szetszwo
HDFS-6940. Refactoring to allow ConsensusNode implementation.
Contributed by Konstantin Shvachko.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88209ce1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88209ce1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88209ce1

Branch: refs/heads/HDFS-6584
Commit: 88209ce181b5ecc55c0ae2bceff4893ab4817e88
Parents: 3b35f81
Author: Konstantin V Shvachko s...@apache.org
Authored: Sat Sep 6 12:07:52 2014 -0700
Committer: Konstantin V Shvachko s...@apache.org
Committed: Sat Sep 6 12:07:52 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../server/blockmanagement/BlockManager.java| 23 --
 .../server/blockmanagement/DatanodeManager.java |  6 ++-
 .../server/blockmanagement/HostFileManager.java |  4 ++
 .../hdfs/server/namenode/FSNamesystem.java  | 46 +++-
 .../hdfs/server/namenode/NameNodeAdapter.java   |  2 +-
 6 files changed, 57 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 333bdce..4412b30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -444,6 +444,8 @@ Release 2.6.0 - UNRELEASED
 HDFS-6376. Distcp data between two HA clusters requires another 
configuration.
 (Dave Marion and Haohui Mai via jing9)
 
+HDFS-6940. Refactoring to allow ConsensusNode implementation. (shv)
+
   OPTIMIZATIONS
 
 HDFS-6690. Deduplicate xattr names in memory. (wang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8470680..6176188 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -164,7 +164,7 @@ public class BlockManager {
   final BlocksMap blocksMap;
 
   /** Replication thread. */
-  final Daemon replicationThread = new Daemon(new ReplicationMonitor());
+  Daemon replicationThread;
   
   /** Store blocks - datanodedescriptor(s) map of corrupt replicas */
   final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
@@ -263,6 +263,7 @@ public class BlockManager {
 this.namesystem = namesystem;
 datanodeManager = new DatanodeManager(this, namesystem, conf);
 heartbeatManager = datanodeManager.getHeartbeatManager();
+setReplicationMonitor(new ReplicationMonitor());
 
 final long pendingPeriod = conf.getLong(
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
@@ -394,7 +395,23 @@ public class BlockManager {
   lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
 }
   }
-  
+
+  public long getReplicationRecheckInterval() {
+return replicationRecheckInterval;
+  }
+
+  public AtomicLong excessBlocksCount() {
+return excessBlocksCount;
+  }
+
+  public void clearInvalidateBlocks() {
+invalidateBlocks.clear();
+  }
+
+  void setReplicationMonitor(Runnable replicationMonitor) {
+replicationThread = new Daemon(replicationMonitor);
+  }
+
   public void setBlockPoolId(String blockPoolId) {
 if (isBlockTokenEnabled()) {
   blockTokenSecretManager.setBlockPoolId(blockPoolId);
@@ -1616,7 +1633,7 @@ public class BlockManager {
* If there were any replication requests that timed out, reap them
* and put them back into the neededReplication queue
*/
-  private void processPendingReplications() {
+  void processPendingReplications() {
 Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
 if (timedOutItems != null) {
   namesystem.writeLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 

git commit: HDFS-6940. Refactoring to allow ConsensusNode implementation. Contributed by Konstantin Shvachko.

2014-09-06 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3b35f8160 -> 88209ce18


HDFS-6940. Refactoring to allow ConsensusNode implementation.
Contributed by Konstantin Shvachko.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88209ce1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88209ce1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88209ce1

Branch: refs/heads/trunk
Commit: 88209ce181b5ecc55c0ae2bceff4893ab4817e88
Parents: 3b35f81
Author: Konstantin V Shvachko s...@apache.org
Authored: Sat Sep 6 12:07:52 2014 -0700
Committer: Konstantin V Shvachko s...@apache.org
Committed: Sat Sep 6 12:07:52 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../server/blockmanagement/BlockManager.java| 23 --
 .../server/blockmanagement/DatanodeManager.java |  6 ++-
 .../server/blockmanagement/HostFileManager.java |  4 ++
 .../hdfs/server/namenode/FSNamesystem.java  | 46 +++-
 .../hdfs/server/namenode/NameNodeAdapter.java   |  2 +-
 6 files changed, 57 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 333bdce..4412b30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -444,6 +444,8 @@ Release 2.6.0 - UNRELEASED
 HDFS-6376. Distcp data between two HA clusters requires another 
configuration.
 (Dave Marion and Haohui Mai via jing9)
 
+HDFS-6940. Refactoring to allow ConsensusNode implementation. (shv)
+
   OPTIMIZATIONS
 
 HDFS-6690. Deduplicate xattr names in memory. (wang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8470680..6176188 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -164,7 +164,7 @@ public class BlockManager {
   final BlocksMap blocksMap;
 
   /** Replication thread. */
-  final Daemon replicationThread = new Daemon(new ReplicationMonitor());
+  Daemon replicationThread;
   
   /** Store blocks - datanodedescriptor(s) map of corrupt replicas */
   final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
@@ -263,6 +263,7 @@ public class BlockManager {
 this.namesystem = namesystem;
 datanodeManager = new DatanodeManager(this, namesystem, conf);
 heartbeatManager = datanodeManager.getHeartbeatManager();
+setReplicationMonitor(new ReplicationMonitor());
 
 final long pendingPeriod = conf.getLong(
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
@@ -394,7 +395,23 @@ public class BlockManager {
   lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
 }
   }
-  
+
+  public long getReplicationRecheckInterval() {
+return replicationRecheckInterval;
+  }
+
+  public AtomicLong excessBlocksCount() {
+return excessBlocksCount;
+  }
+
+  public void clearInvalidateBlocks() {
+invalidateBlocks.clear();
+  }
+
+  void setReplicationMonitor(Runnable replicationMonitor) {
+replicationThread = new Daemon(replicationMonitor);
+  }
+
   public void setBlockPoolId(String blockPoolId) {
 if (isBlockTokenEnabled()) {
   blockTokenSecretManager.setBlockPoolId(blockPoolId);
@@ -1616,7 +1633,7 @@ public class BlockManager {
* If there were any replication requests that timed out, reap them
* and put them back into the neededReplication queue
*/
-  private void processPendingReplications() {
+  void processPendingReplications() {
 Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
 if (timedOutItems != null) {
   namesystem.writeLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 

git commit: HDFS-6940. Refactoring to allow ConsensusNode implementation. Contributed by Konstantin Shvachko.

2014-09-06 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 035112f25 -> 1ea388355


HDFS-6940. Refactoring to allow ConsensusNode implementation.
Contributed by Konstantin Shvachko.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ea38835
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ea38835
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ea38835

Branch: refs/heads/branch-2
Commit: 1ea388355376342e38a2819291a22e83f845a1ef
Parents: 035112f
Author: Konstantin V Shvachko s...@apache.org
Authored: Sat Sep 6 12:04:49 2014 -0700
Committer: Konstantin V Shvachko s...@apache.org
Committed: Sat Sep 6 12:04:49 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../server/blockmanagement/BlockManager.java| 23 --
 .../server/blockmanagement/DatanodeManager.java |  6 ++-
 .../server/blockmanagement/HostFileManager.java |  4 ++
 .../hdfs/server/namenode/FSNamesystem.java  | 46 +++-
 .../hdfs/server/namenode/NameNodeAdapter.java   |  2 +-
 6 files changed, 57 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea38835/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5b74293..6a44347 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -188,6 +188,8 @@ Release 2.6.0 - UNRELEASED
 HDFS-6376. Distcp data between two HA clusters requires another 
configuration.
 (Dave Marion and Haohui Mai via jing9)
 
+HDFS-6940. Refactoring to allow ConsensusNode implementation. (shv)
+
   OPTIMIZATIONS
 
 HDFS-6690. Deduplicate xattr names in memory. (wang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea38835/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 5d23c1f..de02de1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -164,7 +164,7 @@ public class BlockManager {
   final BlocksMap blocksMap;
 
   /** Replication thread. */
-  final Daemon replicationThread = new Daemon(new ReplicationMonitor());
+  Daemon replicationThread;
   
   /** Store blocks - datanodedescriptor(s) map of corrupt replicas */
   final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
@@ -263,6 +263,7 @@ public class BlockManager {
 this.namesystem = namesystem;
 datanodeManager = new DatanodeManager(this, namesystem, conf);
 heartbeatManager = datanodeManager.getHeartbeatManager();
+setReplicationMonitor(new ReplicationMonitor());
 
 final long pendingPeriod = conf.getLong(
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
@@ -394,7 +395,23 @@ public class BlockManager {
   lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
 }
   }
-  
+
+  public long getReplicationRecheckInterval() {
+return replicationRecheckInterval;
+  }
+
+  public AtomicLong excessBlocksCount() {
+return excessBlocksCount;
+  }
+
+  public void clearInvalidateBlocks() {
+invalidateBlocks.clear();
+  }
+
+  void setReplicationMonitor(Runnable replicationMonitor) {
+replicationThread = new Daemon(replicationMonitor);
+  }
+
   public void setBlockPoolId(String blockPoolId) {
 if (isBlockTokenEnabled()) {
   blockTokenSecretManager.setBlockPoolId(blockPoolId);
@@ -1619,7 +1636,7 @@ public class BlockManager {
* If there were any replication requests that timed out, reap them
* and put them back into the neededReplication queue
*/
-  private void processPendingReplications() {
+  void processPendingReplications() {
 Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
 if (timedOutItems != null) {
   namesystem.writeLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea38835/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 

[4/5] git commit: HDFS-6940. Refactoring to allow ConsensusNode implementation. Contributed by Konstantin Shvachko.

2014-09-06 Thread arp
HDFS-6940. Refactoring to allow ConsensusNode implementation.
Contributed by Konstantin Shvachko.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88209ce1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88209ce1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88209ce1

Branch: refs/heads/HDFS-6581
Commit: 88209ce181b5ecc55c0ae2bceff4893ab4817e88
Parents: 3b35f81
Author: Konstantin V Shvachko s...@apache.org
Authored: Sat Sep 6 12:07:52 2014 -0700
Committer: Konstantin V Shvachko s...@apache.org
Committed: Sat Sep 6 12:07:52 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../server/blockmanagement/BlockManager.java| 23 --
 .../server/blockmanagement/DatanodeManager.java |  6 ++-
 .../server/blockmanagement/HostFileManager.java |  4 ++
 .../hdfs/server/namenode/FSNamesystem.java  | 46 +++-
 .../hdfs/server/namenode/NameNodeAdapter.java   |  2 +-
 6 files changed, 57 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 333bdce..4412b30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -444,6 +444,8 @@ Release 2.6.0 - UNRELEASED
 HDFS-6376. Distcp data between two HA clusters requires another 
configuration.
 (Dave Marion and Haohui Mai via jing9)
 
+HDFS-6940. Refactoring to allow ConsensusNode implementation. (shv)
+
   OPTIMIZATIONS
 
 HDFS-6690. Deduplicate xattr names in memory. (wang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8470680..6176188 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -164,7 +164,7 @@ public class BlockManager {
   final BlocksMap blocksMap;
 
   /** Replication thread. */
-  final Daemon replicationThread = new Daemon(new ReplicationMonitor());
+  Daemon replicationThread;
   
   /** Store blocks - datanodedescriptor(s) map of corrupt replicas */
   final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
@@ -263,6 +263,7 @@ public class BlockManager {
 this.namesystem = namesystem;
 datanodeManager = new DatanodeManager(this, namesystem, conf);
 heartbeatManager = datanodeManager.getHeartbeatManager();
+setReplicationMonitor(new ReplicationMonitor());
 
 final long pendingPeriod = conf.getLong(
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
@@ -394,7 +395,23 @@ public class BlockManager {
   lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
 }
   }
-  
+
+  public long getReplicationRecheckInterval() {
+return replicationRecheckInterval;
+  }
+
+  public AtomicLong excessBlocksCount() {
+return excessBlocksCount;
+  }
+
+  public void clearInvalidateBlocks() {
+invalidateBlocks.clear();
+  }
+
+  void setReplicationMonitor(Runnable replicationMonitor) {
+replicationThread = new Daemon(replicationMonitor);
+  }
+
   public void setBlockPoolId(String blockPoolId) {
 if (isBlockTokenEnabled()) {
   blockTokenSecretManager.setBlockPoolId(blockPoolId);
@@ -1616,7 +1633,7 @@ public class BlockManager {
* If there were any replication requests that timed out, reap them
* and put them back into the neededReplication queue
*/
-  private void processPendingReplications() {
+  void processPendingReplications() {
 Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
 if (timedOutItems != null) {
   namesystem.writeLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 

[13/16] git commit: HDFS-6940. Refactoring to allow ConsensusNode implementation. Contributed by Konstantin Shvachko.

2014-09-06 Thread seanzhong
HDFS-6940. Refactoring to allow ConsensusNode implementation.
Contributed by Konstantin Shvachko.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88209ce1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88209ce1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88209ce1

Branch: refs/heads/MR-2841
Commit: 88209ce181b5ecc55c0ae2bceff4893ab4817e88
Parents: 3b35f81
Author: Konstantin V Shvachko s...@apache.org
Authored: Sat Sep 6 12:07:52 2014 -0700
Committer: Konstantin V Shvachko s...@apache.org
Committed: Sat Sep 6 12:07:52 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../server/blockmanagement/BlockManager.java| 23 --
 .../server/blockmanagement/DatanodeManager.java |  6 ++-
 .../server/blockmanagement/HostFileManager.java |  4 ++
 .../hdfs/server/namenode/FSNamesystem.java  | 46 +++-
 .../hdfs/server/namenode/NameNodeAdapter.java   |  2 +-
 6 files changed, 57 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 333bdce..4412b30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -444,6 +444,8 @@ Release 2.6.0 - UNRELEASED
 HDFS-6376. Distcp data between two HA clusters requires another 
configuration.
 (Dave Marion and Haohui Mai via jing9)
 
+HDFS-6940. Refactoring to allow ConsensusNode implementation. (shv)
+
   OPTIMIZATIONS
 
 HDFS-6690. Deduplicate xattr names in memory. (wang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8470680..6176188 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -164,7 +164,7 @@ public class BlockManager {
   final BlocksMap blocksMap;
 
   /** Replication thread. */
-  final Daemon replicationThread = new Daemon(new ReplicationMonitor());
+  Daemon replicationThread;
   
   /** Store blocks - datanodedescriptor(s) map of corrupt replicas */
   final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
@@ -263,6 +263,7 @@ public class BlockManager {
 this.namesystem = namesystem;
 datanodeManager = new DatanodeManager(this, namesystem, conf);
 heartbeatManager = datanodeManager.getHeartbeatManager();
+setReplicationMonitor(new ReplicationMonitor());
 
 final long pendingPeriod = conf.getLong(
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
@@ -394,7 +395,23 @@ public class BlockManager {
   lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
 }
   }
-  
+
+  public long getReplicationRecheckInterval() {
+return replicationRecheckInterval;
+  }
+
+  public AtomicLong excessBlocksCount() {
+return excessBlocksCount;
+  }
+
+  public void clearInvalidateBlocks() {
+invalidateBlocks.clear();
+  }
+
+  void setReplicationMonitor(Runnable replicationMonitor) {
+replicationThread = new Daemon(replicationMonitor);
+  }
+
   public void setBlockPoolId(String blockPoolId) {
 if (isBlockTokenEnabled()) {
   blockTokenSecretManager.setBlockPoolId(blockPoolId);
@@ -1616,7 +1633,7 @@ public class BlockManager {
* If there were any replication requests that timed out, reap them
* and put them back into the neededReplication queue
*/
-  private void processPendingReplications() {
+  void processPendingReplications() {
 Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
 if (timedOutItems != null) {
   namesystem.writeLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index