This is an automated email from the ASF dual-hosted git repository. surendralilhore pushed a commit to branch branch-3.1 in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-3.1 by this push: new a3224ff HDFS-15211. EC: File write hangs during close in case of Exception during updatePipeline. Contributed by Ayush Saxena. a3224ff is described below commit a3224ff799b59e75dc50890c4b49b7113e57a53c Author: Surendra Singh Lilhore <surendralilh...@apache.org> AuthorDate: Sun Mar 15 20:44:32 2020 +0530 HDFS-15211. EC: File write hangs during close in case of Exception during updatePipeline. Contributed by Ayush Saxena. --- .../apache/hadoop/hdfs/DFSStripedOutputStream.java | 54 +++++++++++++--------- 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java index c94c9da..8fc7eaa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java @@ -393,6 +393,7 @@ public class DFSStripedOutputStream extends DFSOutputStream LOG.debug("newly failed streamers: " + newFailed); } if (failCount > (numAllBlocks - numDataBlocks)) { + closeAllStreamers(); throw new IOException("Failed: the number of failed blocks = " + failCount + " > the number of parity blocks = " + (numAllBlocks - numDataBlocks)); @@ -400,6 +401,13 @@ public class DFSStripedOutputStream extends DFSOutputStream return newFailed; } + private void closeAllStreamers() { + // The write has failed; close all the streamers. 
+ for (StripedDataStreamer streamer : streamers) { + streamer.close(true); + } + } + private void handleCurrentStreamerFailure(String err, Exception e) throws IOException { currentPacket = null; @@ -654,6 +662,8 @@ public class DFSStripedOutputStream extends DFSOutputStream newFailed = waitCreatingStreamers(healthySet); if (newFailed.size() + failedStreamers.size() > numAllBlocks - numDataBlocks) { + // The write has failed; close all the streamers. + closeAllStreamers(); throw new IOException( "Data streamers failed while creating new block streams: " + newFailed + ". There are not enough healthy streamers."); @@ -1153,32 +1163,32 @@ public class DFSStripedOutputStream extends DFSOutputStream @Override protected synchronized void closeImpl() throws IOException { - if (isClosed()) { - exceptionLastSeen.check(true); - - // Writing to at least {dataUnits} replicas can be considered as success, - // and the rest of data can be recovered. - final int minReplication = ecPolicy.getNumDataUnits(); - int goodStreamers = 0; - final MultipleIOException.Builder b = new MultipleIOException.Builder(); - for (final StripedDataStreamer si : streamers) { - try { - si.getLastException().check(true); - goodStreamers++; - } catch (IOException e) { - b.add(e); + try { + if (isClosed()) { + exceptionLastSeen.check(true); + + // Writing to at least {dataUnits} replicas can be considered as + // success, and the rest of data can be recovered. 
+ final int minReplication = ecPolicy.getNumDataUnits(); + int goodStreamers = 0; + final MultipleIOException.Builder b = new MultipleIOException.Builder(); + for (final StripedDataStreamer si : streamers) { + try { + si.getLastException().check(true); + goodStreamers++; + } catch (IOException e) { + b.add(e); + } } - } - if (goodStreamers < minReplication) { - final IOException ioe = b.build(); - if (ioe != null) { - throw ioe; + if (goodStreamers < minReplication) { + final IOException ioe = b.build(); + if (ioe != null) { + throw ioe; + } } + return; } - return; - } - try { try { // flush from all upper layers flushBuffer(); --------------------------------------------------------------------- To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org