Github user jerryshao commented on a diff in the pull request:

    https://github.com/apache/spark/pull/19476#discussion_r144763884

    --- Diff: core/src/main/scala/org/apache/spark/internal/config/package.scala ---
    @@ -355,11 +355,21 @@ package object config {
           .doc("The blocks of a shuffle request will be fetched to disk when size of the request is " +
             "above this threshold. This is to avoid a giant request takes too much memory. We can " +
             "enable this config by setting a specific value(e.g. 200m). Note that this config can " +
    -        "be enabled only when the shuffle shuffle service is newer than Spark-2.2 or the shuffle" +
    +        "be enabled only when the shuffle service is newer than Spark-2.2 or the shuffle" +
             " service is disabled.")
           .bytesConf(ByteUnit.BYTE)
           .createWithDefault(Long.MaxValue)

    +  private[spark] val MAX_REMOTE_BLOCK_SIZE_FETCH_TO_MEM =
    +    ConfigBuilder("spark.maxRemoteBlockSizeFetchToMem")
    +      .doc("Remote block will be fetched to disk when size of the block is " +
    +        "above this threshold. This is to avoid a giant request takes too much memory. We can " +
    +        "enable this config by setting a specific value(e.g. 200m). Note this configuration will " +
    +        "affect both shuffle fetch and block manager remote block fetch. For users who " +
    +        "enabled external shuffle service, this feature can only be worked when external shuffle" +
    +        " service is newer than Spark 2.2.")
    +      .fallbackConf(REDUCER_MAX_REQ_SIZE_SHUFFLE_TO_MEM)
    --- End diff --

    From my understanding of the current code, it will not fall back to the deprecated config if we're using the API `SparkConf#get[T](entry: ConfigEntry[T])`, unless we explicitly add a `fallbackConf` definition. This is different from `SparkConf#getOption(key: String)`.
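    To make the point concrete, below is a minimal, self-contained Scala sketch of the resolution behavior described above. It is not Spark's actual `ConfigEntry`/`SparkConf` implementation; the `Entry` case class and `get` helper are simplified stand-ins, used only to illustrate why reading through a typed entry needs an explicit fallback entry, whereas the generic deprecated-key translation path of `getOption(key)` is a separate mechanism.

    object FallbackConfSketch {

      // Simplified stand-in for a config entry: a key, a default value, and an
      // optional fallback entry (what .fallbackConf(...) wires up in the diff above).
      final case class Entry(key: String, default: String, fallback: Option[Entry] = None)

      // Simplified stand-in for SparkConf#get[T](entry): look up the entry's own key,
      // then (only if a fallback entry is defined) the fallback's key, then the default.
      def get(settings: Map[String, String], entry: Entry): String =
        settings.get(entry.key)
          .orElse(entry.fallback.map(f => get(settings, f)))
          .getOrElse(entry.default)

      def main(args: Array[String]): Unit = {
        val oldEntry =
          Entry("spark.reducer.maxReqSizeShuffleToMem", default = Long.MaxValue.toString)

        // New entry WITHOUT a fallback: a value set under the old key is never consulted.
        val noFallback =
          Entry("spark.maxRemoteBlockSizeFetchToMem", default = Long.MaxValue.toString)

        // New entry WITH a fallback: the old key's value is picked up when the new key is unset.
        val withFallback = noFallback.copy(fallback = Some(oldEntry))

        // User only sets the old (deprecated) key.
        val userSettings = Map("spark.reducer.maxReqSizeShuffleToMem" -> "200m")

        println(get(userSettings, noFallback))   // default (Long.MaxValue): old key ignored
        println(get(userSettings, withFallback)) // "200m": resolved via the fallback entry
      }
    }

    Under this reading, keeping `.fallbackConf(REDUCER_MAX_REQ_SIZE_SHUFFLE_TO_MEM)` on the new entry is what preserves backward compatibility for users who still set the old key.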