Repository: spark
Updated Branches:
  refs/heads/master 767d48076 -> 72eec70bd
[SPARK-17486] Remove unused TaskMetricsUIData.updatedBlockStatuses field

The `TaskMetricsUIData.updatedBlockStatuses` field is assigned to but never read, increasing the memory consumption of the web UI. We should remove this field.

Author: Josh Rosen <joshro...@databricks.com>

Closes #15038 from JoshRosen/remove-updated-block-statuses-from-TaskMetricsUIData.

Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/72eec70b
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/72eec70b
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/72eec70b

Branch: refs/heads/master
Commit: 72eec70bdbf6fb67c977463db5d8d95dd3040ae8
Parents: 767d480
Author: Josh Rosen <joshro...@databricks.com>
Authored: Sun Sep 11 21:51:22 2016 -0700
Committer: Shixiong Zhu <shixi...@databricks.com>
Committed: Sun Sep 11 21:51:22 2016 -0700

----------------------------------------------------------------------
 core/src/main/scala/org/apache/spark/ui/jobs/UIData.scala | 3 ---
 1 file changed, 3 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/spark/blob/72eec70b/core/src/main/scala/org/apache/spark/ui/jobs/UIData.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/UIData.scala b/core/src/main/scala/org/apache/spark/ui/jobs/UIData.scala
index 66b8812..74bca99 100644
--- a/core/src/main/scala/org/apache/spark/ui/jobs/UIData.scala
+++ b/core/src/main/scala/org/apache/spark/ui/jobs/UIData.scala
@@ -23,7 +23,6 @@ import scala.collection.mutable.{HashMap, LinkedHashMap}
 import org.apache.spark.JobExecutionStatus
 import org.apache.spark.executor.{ShuffleReadMetrics, ShuffleWriteMetrics, TaskMetrics}
 import org.apache.spark.scheduler.{AccumulableInfo, TaskInfo}
-import org.apache.spark.storage.{BlockId, BlockStatus}
 import org.apache.spark.util.AccumulatorContext
 import org.apache.spark.util.collection.OpenHashSet
@@ -145,7 +144,6 @@ private[spark] object UIData {
       memoryBytesSpilled = m.memoryBytesSpilled,
       diskBytesSpilled = m.diskBytesSpilled,
       peakExecutionMemory = m.peakExecutionMemory,
-      updatedBlockStatuses = m.updatedBlockStatuses.toList,
       inputMetrics = InputMetricsUIData(m.inputMetrics.bytesRead, m.inputMetrics.recordsRead),
       outputMetrics = OutputMetricsUIData(m.outputMetrics.bytesWritten, m.outputMetrics.recordsWritten),
@@ -193,7 +191,6 @@ private[spark] object UIData {
       memoryBytesSpilled: Long,
       diskBytesSpilled: Long,
       peakExecutionMemory: Long,
-      updatedBlockStatuses: Seq[(BlockId, BlockStatus)],
       inputMetrics: InputMetricsUIData,
       outputMetrics: OutputMetricsUIData,
       shuffleReadMetrics: ShuffleReadMetricsUIData,
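
For context, here is a minimal sketch (not the actual Spark source) of why dropping the field helps: the web UI retains one `TaskMetricsUIData` per completed task, so a per-task collection that is populated but never read still pins every `(BlockId, BlockStatus)` pair in memory for as long as the task is retained. The simplified case classes and field types below are assumptions for illustration; only the field names appearing in the diff above are taken from the real code.

```scala
// Illustrative sketch only; BlockId/BlockStatus are hypothetical stand-ins
// for the real org.apache.spark.storage types.
case class BlockId(name: String)
case class BlockStatus(memSize: Long, diskSize: Long)

// Before the change: one of these is kept per retained task, and the
// updatedBlockStatuses list is copied in but never read by the UI.
case class TaskMetricsUIDataBefore(
    memoryBytesSpilled: Long,
    diskBytesSpilled: Long,
    peakExecutionMemory: Long,
    updatedBlockStatuses: Seq[(BlockId, BlockStatus)])

// After the change: the unused field is gone, so retained task metrics no
// longer hold references to per-task block-status lists.
case class TaskMetricsUIDataAfter(
    memoryBytesSpilled: Long,
    diskBytesSpilled: Long,
    peakExecutionMemory: Long)

object MemoryFootprintSketch {
  def main(args: Array[String]): Unit = {
    // With many retained tasks, the unread list multiplies across every task.
    val statuses = Seq(BlockId("rdd_0_0") -> BlockStatus(memSize = 1024L, diskSize = 0L))
    val before = TaskMetricsUIDataBefore(0L, 0L, 0L, updatedBlockStatuses = statuses)
    val after = TaskMetricsUIDataAfter(0L, 0L, 0L)
    println(s"before retains ${before.updatedBlockStatuses.size} block-status entries; after: $after")
  }
}
```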