Repository: spark
Updated Branches:
  refs/heads/master d30b7e669 -> 9a5071996


http://git-wip-us.apache.org/repos/asf/spark/blob/9a507199/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamReaderWriterSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamReaderWriterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamReaderWriterSuite.scala
index c6d374f..1aee193 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamReaderWriterSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamReaderWriterSuite.scala
@@ -27,7 +27,7 @@ import org.scalatest.BeforeAndAfter
 import org.apache.spark.sql._
 import org.apache.spark.sql.execution.streaming._
 import org.apache.spark.sql.sources.{StreamSinkProvider, StreamSourceProvider}
-import org.apache.spark.sql.streaming.{ContinuousQuery, OutputMode, ProcessingTime, StreamTest}
+import org.apache.spark.sql.streaming.{OutputMode, ProcessingTime, StreamingQuery, StreamTest}
 import org.apache.spark.sql.types.{IntegerType, StructField, StructType}
 import org.apache.spark.util.Utils
 
@@ -265,7 +265,7 @@ class DataStreamReaderWriterSuite extends StreamTest with BeforeAndAfter {
   test("unique query names") {
 
     /** Start a query with a specific name */
-    def startQueryWithName(name: String = ""): ContinuousQuery = {
+    def startQueryWithName(name: String = ""): StreamingQuery = {
       spark.readStream
         .format("org.apache.spark.sql.streaming.test")
         .load("/test")
@@ -277,7 +277,7 @@ class DataStreamReaderWriterSuite extends StreamTest with BeforeAndAfter {
     }
 
     /** Start a query without specifying a name */
-    def startQueryWithoutName(): ContinuousQuery = {
+    def startQueryWithoutName(): StreamingQuery = {
       spark.readStream
         .format("org.apache.spark.sql.streaming.test")
         .load("/test")
@@ -434,13 +434,13 @@ class DataStreamReaderWriterSuite extends StreamTest with BeforeAndAfter {
       .format("org.apache.spark.sql.streaming.test")
       .load()
 
-    val cq = df.writeStream
+    val sq = df.writeStream
       .format("console")
       .option("checkpointLocation", newMetadataDir)
       .trigger(ProcessingTime(2.seconds))
       .start()
 
-    cq.awaitTermination(2000L)
+    sq.awaitTermination(2000L)
   }
 
   test("prevent all column partitioning") {


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to