git commit: HDFS-6948. DN rejects blocks if it has older UC block. Contributed by Eric Payne.

2014-09-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6fe5c6b74 -> f02d934fe


HDFS-6948. DN rejects blocks if it has older UC block. Contributed by
Eric Payne.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f02d934f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f02d934f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f02d934f

Branch: refs/heads/trunk
Commit: f02d934fedf00f0ce43d6f3f9b06d89ccc6851a5
Parents: 6fe5c6b
Author: Kihwal Lee kih...@apache.org
Authored: Fri Sep 19 08:50:43 2014 -0500
Committer: Kihwal Lee kih...@apache.org
Committed: Fri Sep 19 08:50:43 2014 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 14 ---
 .../fsdataset/impl/TestWriteToReplica.java  | 25 +++-
 3 files changed, 38 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f02d934f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 436d2f0..9d76c37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -564,6 +564,9 @@ Release 2.6.0 - UNRELEASED
 
 HDFS-6970. Move startFile EDEK retries to the DFSClient. (wang)
 
+HDFS-6948. DN rejects blocks if it has older UC block
+(Eric Payne via kihwal)
+
   OPTIMIZATIONS
 
 HDFS-6690. Deduplicate xattr names in memory. (wang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f02d934f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index d28d616..a2179dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1090,9 +1090,17 @@ class FsDatasetImpl implements 
FsDatasetSpi<FsVolumeImpl> {
   ExtendedBlock b) throws IOException {
 ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
b.getBlockId());
 if (replicaInfo != null) {
-      throw new ReplicaAlreadyExistsException("Block " + b +
-          " already exists in state " + replicaInfo.getState() +
-          " and thus cannot be created.");
+      if (replicaInfo.getGenerationStamp() < b.getGenerationStamp()
+          && replicaInfo instanceof ReplicaInPipeline) {
+        // Stop the previous writer
+        ((ReplicaInPipeline)replicaInfo)
+              .stopWriter(datanode.getDnConf().getXceiverStopTimeout());
+        invalidate(b.getBlockPoolId(), new Block[]{replicaInfo});
+      } else {
+        throw new ReplicaAlreadyExistsException("Block " + b +
+            " already exists in state " + replicaInfo.getState() +
+            " and thus cannot be created.");
+      }
 }
 
 FsVolumeImpl v = volumes.getNextVolume(storageType, b.getNumBytes());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f02d934f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
index e6a03d2..a870aa9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
@@ -111,7 +111,7 @@ public class TestWriteToReplica {
   
   // test writeToTemporary
   @Test
-  public void testWriteToTempoary() throws Exception {
+  public void testWriteToTemporary() throws Exception {
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(new 
HdfsConfiguration()).build();
 try {
   cluster.waitActive();
@@ -475,5 +475,28 @@ public class TestWriteToReplica {
 }
 
 dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+
+try {
+  dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+  

git commit: HDFS-6948. DN rejects blocks if it has older UC block. Contributed by Eric Payne. (cherry picked from commit f02d934fedf00f0ce43d6f3f9b06d89ccc6851a5)

2014-09-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9333ee306 -> 2a6c9f072


HDFS-6948. DN rejects blocks if it has older UC block. Contributed by
Eric Payne.
(cherry picked from commit f02d934fedf00f0ce43d6f3f9b06d89ccc6851a5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a6c9f07
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a6c9f07
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a6c9f07

Branch: refs/heads/branch-2
Commit: 2a6c9f0725891f62291c472aeba6b1c3eea21a50
Parents: 9333ee3
Author: Kihwal Lee kih...@apache.org
Authored: Fri Sep 19 08:52:16 2014 -0500
Committer: Kihwal Lee kih...@apache.org
Committed: Fri Sep 19 08:52:16 2014 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 14 ---
 .../fsdataset/impl/TestWriteToReplica.java  | 25 +++-
 3 files changed, 38 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a6c9f07/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9bfac36..aeef95a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -227,6 +227,9 @@ Release 2.6.0 - UNRELEASED
 
 HDFS-6970. Move startFile EDEK retries to the DFSClient. (wang)
 
+HDFS-6948. DN rejects blocks if it has older UC block
+(Eric Payne via kihwal)
+
   OPTIMIZATIONS
 
 HDFS-6690. Deduplicate xattr names in memory. (wang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a6c9f07/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 1584a96..b87552d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1090,9 +1090,17 @@ class FsDatasetImpl implements 
FsDatasetSpi<FsVolumeImpl> {
   ExtendedBlock b) throws IOException {
 ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
b.getBlockId());
 if (replicaInfo != null) {
-      throw new ReplicaAlreadyExistsException("Block " + b +
-          " already exists in state " + replicaInfo.getState() +
-          " and thus cannot be created.");
+      if (replicaInfo.getGenerationStamp() < b.getGenerationStamp()
+          && replicaInfo instanceof ReplicaInPipeline) {
+        // Stop the previous writer
+        ((ReplicaInPipeline)replicaInfo)
+              .stopWriter(datanode.getDnConf().getXceiverStopTimeout());
+        invalidate(b.getBlockPoolId(), new Block[]{replicaInfo});
+      } else {
+        throw new ReplicaAlreadyExistsException("Block " + b +
+            " already exists in state " + replicaInfo.getState() +
+            " and thus cannot be created.");
+      }
 }
 
 FsVolumeImpl v = volumes.getNextVolume(storageType, b.getNumBytes());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a6c9f07/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
index e6a03d2..a870aa9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
@@ -111,7 +111,7 @@ public class TestWriteToReplica {
   
   // test writeToTemporary
   @Test
-  public void testWriteToTempoary() throws Exception {
+  public void testWriteToTemporary() throws Exception {
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(new 
HdfsConfiguration()).build();
 try {
   cluster.waitActive();
@@ -475,5 +475,28 @@ public class TestWriteToReplica {
 }
 
 dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+
+try {
+  

[1/4] git commit: HDFS-6948. DN rejects blocks if it has older UC block. Contributed by Eric Payne.

2014-09-19 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-6581 e4d29fda7 -> 222bf0fe6


HDFS-6948. DN rejects blocks if it has older UC block. Contributed by
Eric Payne.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f02d934f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f02d934f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f02d934f

Branch: refs/heads/HDFS-6581
Commit: f02d934fedf00f0ce43d6f3f9b06d89ccc6851a5
Parents: 6fe5c6b
Author: Kihwal Lee kih...@apache.org
Authored: Fri Sep 19 08:50:43 2014 -0500
Committer: Kihwal Lee kih...@apache.org
Committed: Fri Sep 19 08:50:43 2014 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 14 ---
 .../fsdataset/impl/TestWriteToReplica.java  | 25 +++-
 3 files changed, 38 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f02d934f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 436d2f0..9d76c37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -564,6 +564,9 @@ Release 2.6.0 - UNRELEASED
 
 HDFS-6970. Move startFile EDEK retries to the DFSClient. (wang)
 
+HDFS-6948. DN rejects blocks if it has older UC block
+(Eric Payne via kihwal)
+
   OPTIMIZATIONS
 
 HDFS-6690. Deduplicate xattr names in memory. (wang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f02d934f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index d28d616..a2179dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1090,9 +1090,17 @@ class FsDatasetImpl implements 
FsDatasetSpi<FsVolumeImpl> {
   ExtendedBlock b) throws IOException {
 ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
b.getBlockId());
 if (replicaInfo != null) {
-      throw new ReplicaAlreadyExistsException("Block " + b +
-          " already exists in state " + replicaInfo.getState() +
-          " and thus cannot be created.");
+      if (replicaInfo.getGenerationStamp() < b.getGenerationStamp()
+          && replicaInfo instanceof ReplicaInPipeline) {
+        // Stop the previous writer
+        ((ReplicaInPipeline)replicaInfo)
+              .stopWriter(datanode.getDnConf().getXceiverStopTimeout());
+        invalidate(b.getBlockPoolId(), new Block[]{replicaInfo});
+      } else {
+        throw new ReplicaAlreadyExistsException("Block " + b +
+            " already exists in state " + replicaInfo.getState() +
+            " and thus cannot be created.");
+      }
 }
 
 FsVolumeImpl v = volumes.getNextVolume(storageType, b.getNumBytes());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f02d934f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
index e6a03d2..a870aa9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
@@ -111,7 +111,7 @@ public class TestWriteToReplica {
   
   // test writeToTemporary
   @Test
-  public void testWriteToTempoary() throws Exception {
+  public void testWriteToTemporary() throws Exception {
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(new 
HdfsConfiguration()).build();
 try {
   cluster.waitActive();
@@ -475,5 +475,28 @@ public class TestWriteToReplica {
 }
 
 dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+
+try {
+  dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+