Github user vanzin commented on a diff in the pull request:

    https://github.com/apache/spark/pull/2471#discussion_r18782185

--- Diff: core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala ---
@@ -195,22 +241,68 @@ private[history] class FsHistoryProvider(conf: SparkConf) extends ApplicationHis
         }
       }
-      val newIterator = logInfos.iterator.buffered
-      val oldIterator = applications.values.iterator.buffered
-      while (newIterator.hasNext && oldIterator.hasNext) {
-        if (newIterator.head.endTime > oldIterator.head.endTime) {
-          addIfAbsent(newIterator.next)
-        } else {
-          addIfAbsent(oldIterator.next)
+      applications.synchronized {
+        val newIterator = logInfos.iterator.buffered
+        val oldIterator = applications.values.iterator.buffered
+        while (newIterator.hasNext && oldIterator.hasNext) {
+          if (newIterator.head.endTime > oldIterator.head.endTime) {
+            addIfAbsent(newIterator.next)
+          } else {
+            addIfAbsent(oldIterator.next)
+          }
         }
+        newIterator.foreach(addIfAbsent)
+        oldIterator.foreach(addIfAbsent)
+
+        applications = newApps
       }
-      newIterator.foreach(addIfAbsent)
-      oldIterator.foreach(addIfAbsent)
+      }
     } catch {
+      case t: Throwable => logError("Exception in checking for event log updates", t)
+    }
+  }
+
+  /**
+   * Deleting apps if setting cleaner.
+   */
+  private def cleanLogs() = {
+    lastLogCleanTimeMs = getMonotonicTimeMs()
+    logDebug("Cleaning logs. Time is now %d.".format(lastLogCleanTimeMs))
+    try {
+      val logStatus = fs.listStatus(new Path(resolvedLogDir))
+      val logDirs = if (logStatus != null) logStatus.filter(_.isDir).toSeq else Seq[FileStatus]()
+      val maxAge = conf.getLong("spark.history.fs.maxAge.seconds",
+        DEFAULT_SPARK_HISTORY_FS_MAXAGE_S) * 1000
+
+      val now = System.currentTimeMillis()
+      fs.synchronized {
+        // scan all logs from the log directory.
+        // Only directories older than this many seconds will be deleted.
+        logDirs.foreach { dir =>
+          // history file older than this many seconds will be deleted
+          // when the history cleaner runs.
+          if (now - getModificationTime(dir) > maxAge) {
+            fs.delete(dir.getPath, true)
--- End diff --

Because the existing `try..catch` means that if you fail to delete a directory, you'll stop trying to delete others. So if a directory in the middle of the list has the wrong permissions, you'll never clean up any directory more recent than it (well, depending on the order in which HDFS returns the file listing).
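A minimal sketch of the alternative being suggested here, assuming the same `logDirs`, `fs`, `getModificationTime`, and `maxAge` as in the diff above (the wrapper method, its signature, and the error message are illustrative, not the PR's actual code):

```scala
import org.apache.hadoop.fs.{FileStatus, FileSystem}

// Hypothetical helper: the try..catch is scoped to a single directory, so a
// failed delete (e.g. wrong permissions) is logged and skipped instead of
// aborting the rest of the cleanup pass.
def cleanLogsIndividually(
    fs: FileSystem,
    logDirs: Seq[FileStatus],
    getModificationTime: FileStatus => Long,
    maxAge: Long): Unit = {
  val now = System.currentTimeMillis()
  logDirs.foreach { dir =>
    try {
      // Delete only directories older than the configured max age.
      if (now - getModificationTime(dir) > maxAge) {
        fs.delete(dir.getPath, true)
      }
    } catch {
      case e: Exception =>
        // Keep going: one bad directory should not shadow newer ones.
        System.err.println(s"Failed to delete ${dir.getPath}: ${e.getMessage}")
    }
  }
}
```

With the `try..catch` scoped per directory, a failure on one entry no longer short-circuits the loop, so every remaining directory is still examined regardless of listing order.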