HDFS-7080. Fix finalize and upgrade unit test failures. (Arpit Agarwal)
Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5940dfe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5940dfe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5940dfe
Branch: refs/heads/branch-2
Commit: d5940dfe4bfd3429c6f5b1a8f5c3e326cdacf9f8
Parents: e4ef002
Author: arp <[email protected]>
Authored: Wed Sep 17 15:25:04 2014 -0700
Committer: Jitendra Pandey <[email protected]>
Committed: Fri Oct 17 13:42:02 2014 -0700
----------------------------------------------------------------------
.../main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 5 +++++
.../server/datanode/fsdataset/impl/BlockPoolSlice.java | 10 ++++++++++
.../test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java | 3 +++
.../test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java | 3 +++
4 files changed, 21 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5940dfe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f6c0180..f5bba86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -133,6 +133,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT_DEFAULT = 10;
   public static final String DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS = "dfs.datanode.ram.disk.low.watermark.replicas";
   public static final int DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS_DEFAULT = 3;
+
+  // This setting is for testing/internal use only.
+  public static final String DFS_DATANODE_DUPLICATE_REPLICA_DELETION = "dfs.datanode.duplicate.replica.deletion";
+  public static final boolean DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT = true;
+
   public static final String DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT =
     "dfs.namenode.path.based.cache.block.map.allocation.percent";
   public static final float DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT = 0.25f;
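The new key is read like any other Configuration boolean: left unset, getBoolean() falls back to the compiled-in default of true, so production DataNodes keep deleting duplicate replicas and only tests that explicitly set it to false opt out. A minimal sketch of reading the flag (the demo class below is hypothetical, not part of this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    // Hypothetical demo class, not part of this patch.
    public class DuplicateReplicaFlagDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Key unset: getBoolean() returns the default (true).
        boolean enabled = conf.getBoolean(
            DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
            DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);
        System.out.println("duplicate replica deletion: " + enabled);  // true

        // Tests opt out explicitly, as TestDFSFinalize/TestDFSUpgrade do below.
        conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
      }
    }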
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5940dfe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index c370f3d..06d60b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -68,6 +68,7 @@ class BlockPoolSlice {
   private static final String DU_CACHE_FILE = "dfsUsed";
   private volatile boolean dfsUsedSaved = false;
   private static final int SHUTDOWN_HOOK_PRIORITY = 30;
+  private final boolean deleteDuplicateReplicas;
 
   // TODO:FEDERATION scalability issue - a thread per DU is needed
   private final DU dfsUsage;
@@ -94,6 +95,10 @@ class BlockPoolSlice {
       }
     }
 
+    this.deleteDuplicateReplicas = conf.getBoolean(
+        DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
+        DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);
+
     // Files that were being written when the datanode was last shutdown
     // are now moved back to the data directory. It is possible that
     // in the future, we might want to do some sort of datanode-local
@@ -515,6 +520,11 @@
       final ReplicaInfo replica1, final ReplicaInfo replica2,
       final ReplicaMap volumeMap) throws IOException {
+    if (!deleteDuplicateReplicas) {
+      // Leave both block replicas in place.
+      return replica1;
+    }
+
     ReplicaInfo replicaToKeep;
     ReplicaInfo replicaToDelete;
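The guard short-circuits resolveDuplicateReplicas() before either replica is chosen for deletion, so with the flag off both on-disk copies survive a directory scan. A simplified, self-contained model of that control flow (the Replica type and the generation-stamp tie-break are illustrative stand-ins, not the real ReplicaInfo selection logic):

    // Simplified stand-in; the real code lives in BlockPoolSlice and
    // operates on ReplicaInfo with richer selection criteria.
    class DuplicateReplicaResolver {
      /** Hypothetical replica type; carries only what this sketch needs. */
      static final class Replica {
        final long genStamp;
        Replica(long genStamp) { this.genStamp = genStamp; }
      }

      private final boolean deleteDuplicateReplicas;

      DuplicateReplicaResolver(boolean deleteDuplicateReplicas) {
        this.deleteDuplicateReplicas = deleteDuplicateReplicas;
      }

      Replica resolve(Replica replica1, Replica replica2) {
        if (!deleteDuplicateReplicas) {
          // Mirrors the patch: bail out before picking a loser, so both
          // replicas stay in the volume map and on disk.
          return replica1;
        }
        // Illustrative tie-break only: keep the newer generation stamp;
        // in the real method the losing replica is deleted from disk.
        return replica1.genStamp >= replica2.genStamp ? replica1 : replica2;
      }
    }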
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5940dfe/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
index 01bfb0d..39d3c96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
@@ -115,9 +115,12 @@ public class TestDFSFinalize {
      * the upgrade. Actually it is ok for those contents to change.
      * For now disabling block verification so that the contents are
      * not changed.
+     * Disable duplicate replica deletion as the test intentionally
+     * mirrors the contents of storage directories.
      */
     conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
+    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
     conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
     String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
     String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
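Both test classes pre-seed DataNode storage directories with identical replica copies via UpgradeUtilities; without the new flag, the DataNode would detect those copies as duplicates and delete one, changing directory contents mid-test. A sketch of how the two conf knobs might be wired into a cluster-based setup (the harness class is hypothetical; the real tests drive MiniDFSCluster through UpgradeUtilities scaffolding and startup options not shown here):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Hypothetical harness, not part of this patch.
    public class FinalizeTestSetupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Same knobs as the tests: disable the block scanner and duplicate
        // replica deletion so pre-seeded storage dirs stay byte-for-byte
        // identical across the upgrade/finalize cycle.
        conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
        conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
            false);
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          // ... exercise upgrade/finalize against the cluster ...
        } finally {
          cluster.shutdown();
        }
      }
    }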
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5940dfe/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
index 104b043..bb00144 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
@@ -229,6 +229,7 @@ public class TestDFSUpgrade {
     conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
     String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
     String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
+    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
 
     log("Normal NameNode upgrade", numDirs);
     UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
@@ -370,6 +371,7 @@ public class TestDFSUpgrade {
   {
     conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
+    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
     conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
     String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
     String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
@@ -405,6 +407,7 @@ public class TestDFSUpgrade {
     int numDirs = 4;
     conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
+    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
     conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
     String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
     String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);