Github user cloud-fan commented on a diff in the pull request: https://github.com/apache/spark/pull/21029#discussion_r180467422 --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2ScanExec.scala --- @@ -95,21 +77,29 @@ case class DataSourceV2ScanExec( sparkContext.getLocalProperty(ContinuousExecution.EPOCH_COORDINATOR_ID_KEY), sparkContext.env) .askSync[Unit](SetReaderPartitions(readerFactories.size)) - new ContinuousDataSourceRDD(sparkContext, sqlContext, readerFactories) - .asInstanceOf[RDD[InternalRow]] - - case r: SupportsScanColumnarBatch if r.enableBatchRead() => - new DataSourceRDD(sparkContext, batchReaderFactories).asInstanceOf[RDD[InternalRow]] - + if (readerFactories.exists(_.dataFormat() == DataFormat.COLUMNAR_BATCH)) { + throw new IllegalArgumentException( + "continuous stream reader does not support columnar read yet.") --- End diff -- cc @jose-torres — do you know what's missing for this?
--- --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org For additional commands, e-mail: reviews-h...@spark.apache.org