Github user squito commented on a diff in the pull request: https://github.com/apache/spark/pull/19703#discussion_r150029549 --- Diff: examples/src/main/scala/org/apache/spark/examples/sql/streaming/StructuredKafkaWordCount.scala --- @@ -46,11 +51,13 @@ object StructuredKafkaWordCount { def main(args: Array[String]): Unit = { if (args.length < 3) { System.err.println("Usage: StructuredKafkaWordCount <bootstrap-servers> " + - "<subscribe-type> <topics>") + "<subscribe-type> <topics> [<checkpoint-location>]") System.exit(1) } - val Array(bootstrapServers, subscribeType, topics) = args + val Array(bootstrapServers, subscribeType, topics, _*) = args + val checkpointLocation = + if (args.length > 3) args(3) else "/tmp/temporary-" + UUID.randomUUID.toString --- End diff -- why bother supplying a default? will this be any better than spark's internal default?
--- --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org For additional commands, e-mail: reviews-help@spark.apache.org