Github user mridulm commented on a diff in the pull request: https://github.com/apache/spark/pull/1609#discussion_r15541003 --- Diff: core/src/main/scala/org/apache/spark/storage/BlockObjectWriter.scala --- @@ -107,68 +109,296 @@ private[spark] class DiskBlockObjectWriter( private var fos: FileOutputStream = null private var ts: TimeTrackingOutputStream = null private var objOut: SerializationStream = null + + // Did we create this file or was it already present : used in revert to decide + // if we should delete this file or not. Also used to detect if file was deleted + // between creation of BOW and its actual init + private val initiallyExists = file.exists() && file.isFile private val initialPosition = file.length() private var lastValidPosition = initialPosition + private var initialized = false + // closed explicitly ? + private var closed = false + // Attempt to cleanly close ? (could also be closed via revert) + // Note, a cleanly closed file could be subsequently reverted + private var cleanCloseAttempted = false + // Was the file actually opened at least once. + // Note: initialized/streams change state with close/revert. + private var wasOpenedOnce = false private var _timeWriting = 0L - override def open(): BlockObjectWriter = { - fos = new FileOutputStream(file, true) - ts = new TimeTrackingOutputStream(fos) - channel = fos.getChannel() + // Due to some directory creation race issues in spark, it has been observed that + // sometimes file creation happens 'before' the actual directory has been created + // So we attempt to retry at least once with a mkdirs in case directory was missing. + private def init() { + init(canRetry = true) + } + + private def init(canRetry: Boolean) { + + if (closed) throw new IOException("Already closed") + + assert (! initialized) + assert (! wasOpenedOnce) + var exists = false + try { + exists = file.exists() + if (! exists && initiallyExists && 0 != initialPosition && ! 
Utils.inShutdown) { + // Was deleted by cleanup thread ? + throw new IOException("file " + file + " cleaned up ? exists = " + exists + + ", initiallyExists = " + initiallyExists + ", initialPosition = " + initialPosition) + } + fos = new FileOutputStream(file, true) + } catch { + case fEx: FileNotFoundException => + // There seems to be some race in directory creation. + // Attempts to fix it don't seem to have worked : working around the problem for now. + logDebug("Unable to open " + file + ", canRetry = " + canRetry + ", exists = " + exists + + ", initialPosition = " + initialPosition + ", in shutdown = " + Utils.inShutdown(), fEx) + if (canRetry && ! Utils.inShutdown()) { + // try creating the parent directory if that is the issue. + // Since there can be a race with others, don't bother checking for + // success/failure - the call to init() will resolve if fos can be created. + file.getParentFile.mkdirs() + // Note, if directory did not exist, then file does not either - and so + // initialPosition would be zero in either case. + init(canRetry = false) + return + } else throw fEx + } + + try { + // This is to work around the case where creation of object and actual init + // (which can happen much later) happens after a delay and the cleanup thread + // cleaned up the file. + channel = fos.getChannel + val fosPos = channel.position() + if (initialPosition != fosPos) { + throw new IOException("file cleaned up ? " + file.exists() + + ", initialpos = " + initialPosition + + "current len = " + fosPos + ", in shutdown ? " + Utils.inShutdown) + } + + ts = new TimeTrackingOutputStream(fos) + val bos = new BufferedOutputStream(ts, bufferSize) + bs = compressStream(bos) + objOut = serializer.newInstance().serializeStream(bs) + initialized = true + wasOpenedOnce = true; + } finally { + if (! initialized) { + // failed, cleanup state. 
+ val tfos = fos + updateCloseState() + tfos.close() + } + } + } + + private def open(): BlockObjectWriter = { + init() lastValidPosition = initialPosition - bs = compressStream(new BufferedOutputStream(ts, bufferSize)) - objOut = serializer.newInstance().serializeStream(bs) - initialized = true this } - override def close() { - if (initialized) { - if (syncWrites) { - // Force outstanding writes to disk and track how long it takes - objOut.flush() + private def updateCloseState() { + + if (ts ne null) _timeWriting += ts.timeWriting + + bs = null + channel = null + fos = null + ts = null + objOut = null + initialized = false + } + + private def flushAll() { + if (closed) throw new IOException("Already closed") + + // NOTE: Because Kryo doesn't flush the underlying stream we explicitly flush both the + // serializer stream and the lower level stream. + if (objOut ne null) { + objOut.flush() + bs.flush() + } + } + + private def closeAll(needFlush: Boolean, needRevert: Boolean) { + + if (null != objOut) { + val truncatePos = if (needRevert) initialPosition else -1L + assert (! this.closed) + + // In case syncWrites is true or we need to truncate + var cleanlyClosed = false + try { + // Flushing if we need to truncate also. Currently, we reopen to truncate + // so this is not strictly required (since close could write further to streams). + // Keeping it around in case that gets relaxed. + if (needFlush || needRevert) flushAll() + val start = System.nanoTime() - fos.getFD.sync() + try { + if (syncWrites) { + // Force outstanding writes to disk and track how long it takes + fos.getFD.sync() + } + } catch { + case sfe: SyncFailedException => // ignore + } + // must cause cascading close. 
Note, repeated close on closed streams should not cause + // issues : except some libraries do not honour it - hence not explicitly closing bs/fos + objOut.close() + // bs.close() + // fos.close() _timeWriting += System.nanoTime() - start - } - objOut.close() - _timeWriting += ts.timeWriting + // fos MUST have been closed. + assert((channel eq null) || !channel.isOpen) + cleanlyClosed = true + + } finally { --- End diff -- Do revert handling in finally so that irrespective of how flush/close went through, the file is returned in a sane state if we are reverting.
--- If your project is set up for it, you can reply to this email and have your reply appear on GitHub as well. If your project does not have this feature enabled and wishes so, or if the feature is enabled but not working, please contact infrastructure at infrastruct...@apache.org or file a JIRA ticket with INFRA. ---