http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala
index d2e1ea1..2a5295d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala
@@ -78,7 +78,7 @@ class PlannerSuite extends SharedSQLContext {
         val schema = StructType(fields)
         val row = Row.fromSeq(Seq.fill(fields.size)(null))
         val rowRDD = sparkContext.parallelize(row :: Nil)
-        spark.createDataFrame(rowRDD, schema).registerTempTable("testLimit")
+        spark.createDataFrame(rowRDD, schema).createOrReplaceTempView("testLimit")
 
         val planned = sql(
           """
@@ -132,7 +132,7 @@ class PlannerSuite extends SharedSQLContext {
   test("InMemoryRelation statistics propagation") {
     withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "81920") {
       withTempTable("tiny") {
-        testData.limit(3).registerTempTable("tiny")
+        testData.limit(3).createOrReplaceTempView("tiny")
         sql("CACHE TABLE tiny")
 
         val a = testData.as("a")
@@ -199,9 +199,9 @@ class PlannerSuite extends SharedSQLContext {
 
   test("PartitioningCollection") {
     withTempTable("normal", "small", "tiny") {
-      testData.registerTempTable("normal")
-      testData.limit(10).registerTempTable("small")
-      testData.limit(3).registerTempTable("tiny")
+      testData.createOrReplaceTempView("normal")
+      testData.limit(10).createOrReplaceTempView("small")
+      testData.limit(3).createOrReplaceTempView("tiny")
 
       // Disable broadcast join
       withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {

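The pattern applied throughout the diffs below is the migration from the deprecated DataFrame.registerTempTable to Dataset.createOrReplaceTempView introduced in Spark 2.0. Both register the DataFrame as a session-scoped temporary view that SQL queries can reference; the new name makes the replace-if-exists behavior explicit. The following is a minimal, self-contained sketch of that migration, not part of the commit; the object name is illustrative, and the view name "testLimit" is borrowed from the hunk above.

import org.apache.spark.sql.SparkSession

object TempViewMigrationSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("temp-view-migration-sketch")
      .getOrCreate()
    import spark.implicits._

    val df = Seq((1, "a"), (2, "b")).toDF("id", "value")

    // Old, deprecated API: df.registerTempTable("testLimit")
    // New API, as applied by this commit:
    df.createOrReplaceTempView("testLimit")

    // The view is queried from SQL exactly as before.
    spark.sql("SELECT value FROM testLimit WHERE id = 1").show()

    spark.stop()
  }
}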
http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
index b31338e..bf3a39c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
@@ -134,7 +134,8 @@ class AggregateBenchmark extends BenchmarkBase {
     val N = 20 << 22
 
     val benchmark = new Benchmark("Aggregate w keys", N)
-    sparkSession.range(N).selectExpr("id", "floor(rand() * 10000) as k").registerTempTable("test")
+    sparkSession.range(N).selectExpr("id", "floor(rand() * 10000) as k")
+      .createOrReplaceTempView("test")
 
     def f(): Unit = sparkSession.sql("select k, k, sum(id) from test group by k, k").collect()
 

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/InMemoryColumnarQuerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/InMemoryColumnarQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/InMemoryColumnarQuerySuite.scala
index 2099d4e..e2fb913 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/InMemoryColumnarQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/InMemoryColumnarQuerySuite.scala
@@ -42,7 +42,7 @@ class InMemoryColumnarQuerySuite extends QueryTest with SharedSQLContext {
   test("default size avoids broadcast") {
     // TODO: Improve this test when we have better statistics
     sparkContext.parallelize(1 to 10).map(i => TestData(i, i.toString))
-      .toDF().registerTempTable("sizeTst")
+      .toDF().createOrReplaceTempView("sizeTst")
     spark.catalog.cacheTable("sizeTst")
     assert(
       spark.table("sizeTst").queryExecution.analyzed.statistics.sizeInBytes >
@@ -92,7 +92,7 @@ class InMemoryColumnarQuerySuite extends QueryTest with SharedSQLContext {
 
   test("SPARK-2729 regression: timestamp data type") {
     val timestamps = (0 to 3).map(i => Tuple1(new Timestamp(i))).toDF("time")
-    timestamps.registerTempTable("timestamps")
+    timestamps.createOrReplaceTempView("timestamps")
 
     checkAnswer(
       sql("SELECT time FROM timestamps"),
@@ -133,7 +133,7 @@ class InMemoryColumnarQuerySuite extends QueryTest with SharedSQLContext {
 
     assert(df.schema.head.dataType === DecimalType(15, 10))
 
-    df.cache().registerTempTable("test_fixed_decimal")
+    df.cache().createOrReplaceTempView("test_fixed_decimal")
     checkAnswer(
       sql("SELECT * FROM test_fixed_decimal"),
       (1 to 10).map(i => Row(Decimal(i, 15, 10).toJavaBigDecimal)))
@@ -179,7 +179,7 @@ class InMemoryColumnarQuerySuite extends QueryTest with SharedSQLContext {
           (i to i + 10).map(j => s"map_key_$j" -> (Long.MaxValue - j)).toMap,
           Row((i - 0.25).toFloat, Seq(true, false, null)))
       }
-    spark.createDataFrame(rdd, schema).registerTempTable("InMemoryCache_different_data_types")
+    spark.createDataFrame(rdd, schema).createOrReplaceTempView("InMemoryCache_different_data_types")
     // Cache the table.
     sql("cache table InMemoryCache_different_data_types")
     // Make sure the table is indeed cached.

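A second illustrative sketch, also not part of the commit: InMemoryColumnarQuerySuite and PartitionBatchPruningSuite above pair the new createOrReplaceTempView call with table caching. The snippet assumes an active SparkSession named spark (for example the one from the previous sketch); the view name is hypothetical.

val data = spark.range(0, 10).selectExpr("id", "cast(id as string) as str")
data.createOrReplaceTempView("cached_view_sketch")

// Mark the view for in-memory columnar caching, as the suites do via
// spark.catalog.cacheTable(...) or the SQL statement CACHE TABLE ...
spark.catalog.cacheTable("cached_view_sketch")
assert(spark.catalog.isCached("cached_view_sketch"))

// Once materialized, queries against the view are served from the cached
// in-memory relation.
spark.sql("SELECT count(*) FROM cached_view_sketch").show()

spark.catalog.uncacheTable("cached_view_sketch")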
http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/PartitionBatchPruningSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/PartitionBatchPruningSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/PartitionBatchPruningSuite.scala
index 48c7989..a118cec 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/PartitionBatchPruningSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/PartitionBatchPruningSuite.scala
@@ -63,7 +63,7 @@ class PartitionBatchPruningSuite
       val string = if (((key - 1) / 10) % 2 == 0) null else key.toString
       TestData(key, string)
     }, 5).toDF()
-    pruningData.registerTempTable("pruningData")
+    pruningData.createOrReplaceTempView("pruningData")
     spark.catalog.cacheTable("pruningData")
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
index 63fe465..46213a2 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
@@ -239,7 +239,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
         StructField("nullstr", StringType, true):: Nil)
 
     assert(expectedSchema === jsonDF.schema)
-    jsonDF.registerTempTable("jsonTable")
+    jsonDF.createOrReplaceTempView("jsonTable")
 
     checkAnswer(
       sql("select nullstr, headers.Host from jsonTable"),
@@ -261,7 +261,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
     assert(expectedSchema === jsonDF.schema)
 
-    jsonDF.registerTempTable("jsonTable")
+    jsonDF.createOrReplaceTempView("jsonTable")
 
     checkAnswer(
       sql("select * from jsonTable"),
@@ -302,7 +302,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
     assert(expectedSchema === jsonDF.schema)
 
-    jsonDF.registerTempTable("jsonTable")
+    jsonDF.createOrReplaceTempView("jsonTable")
 
     // Access elements of a primitive array.
     checkAnswer(
@@ -376,7 +376,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
   test("GetField operation on complex data type") {
     val jsonDF = spark.read.json(complexFieldAndType1)
-    jsonDF.registerTempTable("jsonTable")
+    jsonDF.createOrReplaceTempView("jsonTable")
 
     checkAnswer(
       sql("select arrayOfStruct[0].field1, arrayOfStruct[0].field2 from jsonTable"),
@@ -403,7 +403,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
     assert(expectedSchema === jsonDF.schema)
 
-    jsonDF.registerTempTable("jsonTable")
+    jsonDF.createOrReplaceTempView("jsonTable")
 
     checkAnswer(
       sql("select * from jsonTable"),
@@ -464,7 +464,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
   ignore("Type conflict in primitive field values (Ignored)") {
     val jsonDF = spark.read.json(primitiveFieldValueTypeConflict)
-    jsonDF.registerTempTable("jsonTable")
+    jsonDF.createOrReplaceTempView("jsonTable")
 
     // Right now, the analyzer does not promote strings in a boolean expression.
     // Number and Boolean conflict: resolve the type as boolean in this query.
@@ -528,7 +528,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
     assert(expectedSchema === jsonDF.schema)
 
-    jsonDF.registerTempTable("jsonTable")
+    jsonDF.createOrReplaceTempView("jsonTable")
 
     checkAnswer(
       sql("select * from jsonTable"),
@@ -550,7 +550,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
     assert(expectedSchema === jsonDF.schema)
 
-    jsonDF.registerTempTable("jsonTable")
+    jsonDF.createOrReplaceTempView("jsonTable")
 
     checkAnswer(
       sql("select * from jsonTable"),
@@ -580,7 +580,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
     assert(expectedSchema === jsonDF.schema)
 
-    jsonDF.registerTempTable("jsonTable")
+    jsonDF.createOrReplaceTempView("jsonTable")
   }
 
   test("Loading a JSON dataset from a text file") {
@@ -601,7 +601,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
     assert(expectedSchema === jsonDF.schema)
 
-    jsonDF.registerTempTable("jsonTable")
+    jsonDF.createOrReplaceTempView("jsonTable")
 
     checkAnswer(
       sql("select * from jsonTable"),
@@ -633,7 +633,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
     assert(expectedSchema === jsonDF.schema)
 
-    jsonDF.registerTempTable("jsonTable")
+    jsonDF.createOrReplaceTempView("jsonTable")
 
     checkAnswer(
       sql("select * from jsonTable"),
@@ -674,7 +674,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
     assert(expectedSchema === jsonDF.schema)
 
-    jsonDF.registerTempTable("jsonTable")
+    jsonDF.createOrReplaceTempView("jsonTable")
 
     // Access elements of a primitive array.
     checkAnswer(
@@ -759,7 +759,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
     assert(expectedSchema === jsonDF.schema)
 
-    jsonDF.registerTempTable("jsonTable")
+    jsonDF.createOrReplaceTempView("jsonTable")
 
     checkAnswer(
       sql("select * from jsonTable"),
@@ -885,7 +885,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
     assert(schema === jsonDF1.schema)
 
-    jsonDF1.registerTempTable("jsonTable1")
+    jsonDF1.createOrReplaceTempView("jsonTable1")
 
     checkAnswer(
       sql("select * from jsonTable1"),
@@ -902,7 +902,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
     assert(schema === jsonDF2.schema)
 
-    jsonDF2.registerTempTable("jsonTable2")
+    jsonDF2.createOrReplaceTempView("jsonTable2")
 
     checkAnswer(
       sql("select * from jsonTable2"),
@@ -921,7 +921,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
       StructField("map", MapType(StringType, IntegerType, true), false) :: Nil)
     val jsonWithSimpleMap = spark.read.schema(schemaWithSimpleMap).json(mapType1)
 
-    jsonWithSimpleMap.registerTempTable("jsonWithSimpleMap")
+    jsonWithSimpleMap.createOrReplaceTempView("jsonWithSimpleMap")
 
     checkAnswer(
       sql("select `map` from jsonWithSimpleMap"),
@@ -949,7 +949,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
     val jsonWithComplexMap = spark.read.schema(schemaWithComplexMap).json(mapType2)
 
-    jsonWithComplexMap.registerTempTable("jsonWithComplexMap")
+    jsonWithComplexMap.createOrReplaceTempView("jsonWithComplexMap")
 
     checkAnswer(
       sql("select `map` from jsonWithComplexMap"),
@@ -974,7 +974,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
   test("SPARK-2096 Correctly parse dot notations") {
     val jsonDF = spark.read.json(complexFieldAndType2)
-    jsonDF.registerTempTable("jsonTable")
+    jsonDF.createOrReplaceTempView("jsonTable")
 
     checkAnswer(
       sql("select arrayOfStruct[0].field1, arrayOfStruct[0].field2 from jsonTable"),
@@ -992,7 +992,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
   test("SPARK-3390 Complex arrays") {
     val jsonDF = spark.read.json(complexFieldAndType2)
-    jsonDF.registerTempTable("jsonTable")
+    jsonDF.createOrReplaceTempView("jsonTable")
 
     checkAnswer(
       sql(
@@ -1015,7 +1015,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
   test("SPARK-3308 Read top level JSON arrays") {
     val jsonDF = spark.read.json(jsonArray)
-    jsonDF.registerTempTable("jsonTable")
+    jsonDF.createOrReplaceTempView("jsonTable")
 
     checkAnswer(
       sql(
@@ -1084,7 +1084,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
     withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
       withTempTable("jsonTable") {
         val jsonDF = spark.read.json(corruptRecords)
-        jsonDF.registerTempTable("jsonTable")
+        jsonDF.createOrReplaceTempView("jsonTable")
         val schema = StructType(
           StructField("_unparsed", StringType, true) ::
           StructField("a", StringType, true) ::
@@ -1156,7 +1156,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
   test("SPARK-4068: nulls in arrays") {
     val jsonDF = spark.read.json(nullsInArrays)
-    jsonDF.registerTempTable("jsonTable")
+    jsonDF.createOrReplaceTempView("jsonTable")
 
     val schema = StructType(
       StructField("field1",
@@ -1202,7 +1202,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
     }
 
     val df1 = spark.createDataFrame(rowRDD1, schema1)
-    df1.registerTempTable("applySchema1")
+    df1.createOrReplaceTempView("applySchema1")
     val df2 = df1.toDF
     val result = df2.toJSON.collect()
     // scalastyle:off
@@ -1225,7 +1225,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
     }
 
     val df3 = spark.createDataFrame(rowRDD2, schema2)
-    df3.registerTempTable("applySchema2")
+    df3.createOrReplaceTempView("applySchema2")
     val df4 = df3.toDF
     val result2 = df4.toJSON.collect()
 
@@ -1234,7 +1234,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
     val jsonDF = spark.read.json(primitiveFieldAndType)
     val primTable = spark.read.json(jsonDF.toJSON.rdd)
-    primTable.registerTempTable("primitiveTable")
+    primTable.createOrReplaceTempView("primitiveTable")
     checkAnswer(
         sql("select * from primitiveTable"),
       Row(new java.math.BigDecimal("92233720368547758070"),
@@ -1247,7 +1247,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
     val complexJsonDF = spark.read.json(complexFieldAndType1)
     val compTable = spark.read.json(complexJsonDF.toJSON.rdd)
-    compTable.registerTempTable("complexTable")
+    compTable.createOrReplaceTempView("complexTable")
     // Access elements of a primitive array.
     checkAnswer(
       sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from complexTable"),
@@ -1387,7 +1387,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
         "col1",
         "abd")
 
-        spark.read.json(root.getAbsolutePath).registerTempTable("test_myjson_with_part")
+        spark.read.json(root.getAbsolutePath).createOrReplaceTempView("test_myjson_with_part")
        checkAnswer(sql(
          "SELECT count(a) FROM test_myjson_with_part where d1 = 1 and col1='abc'"), Row(4))
         checkAnswer(sql(
@@ -1531,7 +1531,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
 
         {
           val jsonDF = spark.read.schema(schema).json(additionalCorruptRecords)
-          jsonDF.registerTempTable("jsonTable")
+          jsonDF.createOrReplaceTempView("jsonTable")
 
           // In HiveContext, backticks should be used to access columns starting with a underscore.
           checkAnswer(
@@ -1639,7 +1639,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
       val schema = (new StructType).add("ts", TimestampType)
       val jsonDF = spark.read.schema(schema).json(timestampAsLong)
 
-      jsonDF.registerTempTable("jsonTable")
+      jsonDF.createOrReplaceTempView("jsonTable")
 
       checkAnswer(
         sql("select ts from jsonTable"),

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala
index 8707e13..847ea6b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala
@@ -400,7 +400,7 @@ class ParquetPartitionDiscoverySuite extends QueryTest with ParquetTest with Sha
       // Introduce _temporary dir to the base dir the robustness of the schema discovery process.
       new File(base.getCanonicalPath, "_temporary").mkdir()
 
-      spark.read.parquet(base.getCanonicalPath).registerTempTable("t")
+      spark.read.parquet(base.getCanonicalPath).createOrReplaceTempView("t")
 
       withTempTable("t") {
         checkAnswer(
@@ -484,7 +484,7 @@ class ParquetPartitionDiscoverySuite extends QueryTest with ParquetTest with Sha
           makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps))
       }
 
-      spark.read.parquet(base.getCanonicalPath).registerTempTable("t")
+      spark.read.parquet(base.getCanonicalPath).createOrReplaceTempView("t")
 
       withTempTable("t") {
         checkAnswer(
@@ -533,7 +533,7 @@ class ParquetPartitionDiscoverySuite extends QueryTest with ParquetTest with Sha
       }
 
       val parquetRelation = spark.read.format("parquet").load(base.getCanonicalPath)
-      parquetRelation.registerTempTable("t")
+      parquetRelation.createOrReplaceTempView("t")
 
       withTempTable("t") {
         checkAnswer(
@@ -573,7 +573,7 @@ class ParquetPartitionDiscoverySuite extends QueryTest with ParquetTest with Sha
       }
 
       val parquetRelation = spark.read.format("parquet").load(base.getCanonicalPath)
-      parquetRelation.registerTempTable("t")
+      parquetRelation.createOrReplaceTempView("t")
 
       withTempTable("t") {
         checkAnswer(
@@ -609,7 +609,7 @@ class ParquetPartitionDiscoverySuite extends QueryTest with ParquetTest with Sha
         .option("mergeSchema", "true")
         .format("parquet")
         .load(base.getCanonicalPath)
-        .registerTempTable("t")
+        .createOrReplaceTempView("t")
 
       withTempTable("t") {
         checkAnswer(

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala
index f9f9f80..725e14c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala
@@ -46,7 +46,7 @@ class ParquetQuerySuite extends QueryTest with ParquetTest with SharedSQLContext
 
   test("appending") {
     val data = (0 until 10).map(i => (i, i.toString))
-    spark.createDataFrame(data).toDF("c1", "c2").registerTempTable("tmp")
+    spark.createDataFrame(data).toDF("c1", "c2").createOrReplaceTempView("tmp")
     // Query appends, don't test with both read modes.
     withParquetTable(data, "t", false) {
       sql("INSERT INTO TABLE t SELECT * FROM tmp")
@@ -58,7 +58,7 @@ class ParquetQuerySuite extends QueryTest with ParquetTest with SharedSQLContext
 
   test("overwriting") {
     val data = (0 until 10).map(i => (i, i.toString))
-    spark.createDataFrame(data).toDF("c1", "c2").registerTempTable("tmp")
+    spark.createDataFrame(data).toDF("c1", "c2").createOrReplaceTempView("tmp")
     withParquetTable(data, "t") {
       sql("INSERT OVERWRITE TABLE t SELECT * FROM tmp")
       checkAnswer(spark.table("t"), data.map(Row.fromTuple))

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetReadBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetReadBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetReadBenchmark.scala
index 69a600a..487d7a7 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetReadBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetReadBenchmark.scala
@@ -75,10 +75,10 @@ object ParquetReadBenchmark {
 
     withTempPath { dir =>
       withTempTable("t1", "tempTable") {
-        spark.range(values).registerTempTable("t1")
+        spark.range(values).createOrReplaceTempView("t1")
         spark.sql("select cast(id as INT) as id from t1")
             .write.parquet(dir.getCanonicalPath)
-        spark.read.parquet(dir.getCanonicalPath).registerTempTable("tempTable")
+        spark.read.parquet(dir.getCanonicalPath).createOrReplaceTempView("tempTable")
 
         sqlBenchmark.addCase("SQL Parquet Vectorized") { iter =>
           spark.sql("select sum(id) from tempTable").collect()
@@ -159,10 +159,10 @@ object ParquetReadBenchmark {
   def intStringScanBenchmark(values: Int): Unit = {
     withTempPath { dir =>
       withTempTable("t1", "tempTable") {
-        spark.range(values).registerTempTable("t1")
+        spark.range(values).createOrReplaceTempView("t1")
         spark.sql("select cast(id as INT) as c1, cast(id as STRING) as c2 from t1")
             .write.parquet(dir.getCanonicalPath)
-        spark.read.parquet(dir.getCanonicalPath).registerTempTable("tempTable")
+        spark.read.parquet(dir.getCanonicalPath).createOrReplaceTempView("tempTable")
 
         val benchmark = new Benchmark("Int and String Scan", values)
 
@@ -193,10 +193,10 @@ object ParquetReadBenchmark {
   def stringDictionaryScanBenchmark(values: Int): Unit = {
     withTempPath { dir =>
       withTempTable("t1", "tempTable") {
-        spark.range(values).registerTempTable("t1")
+        spark.range(values).createOrReplaceTempView("t1")
         spark.sql("select cast((id % 200) + 10000 as STRING) as c1 from t1")
           .write.parquet(dir.getCanonicalPath)
-        spark.read.parquet(dir.getCanonicalPath).registerTempTable("tempTable")
+        spark.read.parquet(dir.getCanonicalPath).createOrReplaceTempView("tempTable")
 
         val benchmark = new Benchmark("String Dictionary", values)
 
@@ -225,10 +225,10 @@ object ParquetReadBenchmark {
   def partitionTableScanBenchmark(values: Int): Unit = {
     withTempPath { dir =>
       withTempTable("t1", "tempTable") {
-        spark.range(values).registerTempTable("t1")
+        spark.range(values).createOrReplaceTempView("t1")
         spark.sql("select id % 2 as p, cast(id as INT) as id from t1")
           .write.partitionBy("p").parquet(dir.getCanonicalPath)
-        spark.read.parquet(dir.getCanonicalPath).registerTempTable("tempTable")
+        spark.read.parquet(dir.getCanonicalPath).createOrReplaceTempView("tempTable")
 
         val benchmark = new Benchmark("Partitioned Table", values)
 
@@ -260,11 +260,11 @@ object ParquetReadBenchmark {
   def stringWithNullsScanBenchmark(values: Int, fractionOfNulls: Double): Unit = {
     withTempPath { dir =>
       withTempTable("t1", "tempTable") {
-        spark.range(values).registerTempTable("t1")
+        spark.range(values).createOrReplaceTempView("t1")
         spark.sql(s"select IF(rand(1) < $fractionOfNulls, NULL, cast(id as STRING)) as c1, " +
           s"IF(rand(2) < $fractionOfNulls, NULL, cast(id as STRING)) as c2 from t1")
           .write.parquet(dir.getCanonicalPath)
-        spark.read.parquet(dir.getCanonicalPath).registerTempTable("tempTable")
+        spark.read.parquet(dir.getCanonicalPath).createOrReplaceTempView("tempTable")
 
         val benchmark = new Benchmark("String with Nulls Scan", values)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/TPCDSBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/TPCDSBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/TPCDSBenchmark.scala
index 08b7eb3..228ae6f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/TPCDSBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/TPCDSBenchmark.scala
@@ -1187,7 +1187,7 @@ object TPCDSBenchmark {
 
   def setupTables(dataLocation: String): Map[String, Long] = {
     tables.map { tableName =>
-      spark.read.parquet(s"$dataLocation/$tableName").registerTempTable(tableName)
+      spark.read.parquet(s"$dataLocation/$tableName").createOrReplaceTempView(tableName)
       tableName -> spark.table(tableName).count()
     }.toMap
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
index 1b82769..08f596f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
@@ -165,7 +165,7 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
     // Because SortMergeJoin may skip different rows if the number of partitions is different, this
     // test should use the deterministic number of partitions.
     val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
-    testDataForJoin.registerTempTable("testDataForJoin")
+    testDataForJoin.createOrReplaceTempView("testDataForJoin")
     withTempTable("testDataForJoin") {
       // Assume the execution plan is
       // ... -> SortMergeJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
@@ -183,7 +183,7 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
     // Because SortMergeJoin may skip different rows if the number of partitions is different,
     // this test should use the deterministic number of partitions.
     val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
-    testDataForJoin.registerTempTable("testDataForJoin")
+    testDataForJoin.createOrReplaceTempView("testDataForJoin")
     withTempTable("testDataForJoin") {
       // Assume the execution plan is
       // ... -> SortMergeJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
@@ -237,7 +237,7 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
 
   test("BroadcastNestedLoopJoin metrics") {
     val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
-    testDataForJoin.registerTempTable("testDataForJoin")
+    testDataForJoin.createOrReplaceTempView("testDataForJoin")
     withTempTable("testDataForJoin") {
       // Assume the execution plan is
       // ... -> BroadcastNestedLoopJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
@@ -265,7 +265,7 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
 
   test("CartesianProduct metrics") {
     val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
-    testDataForJoin.registerTempTable("testDataForJoin")
+    testDataForJoin.createOrReplaceTempView("testDataForJoin")
     withTempTable("testDataForJoin") {
       // Assume the execution plan is
       // ... -> CartesianProduct(nodeId = 1) -> TungstenProject(nodeId = 0)

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
index 44d1b9d..9c9abfe 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
@@ -441,7 +441,7 @@ class JDBCSuite extends SparkFunSuite
   test("test DATE types in cache") {
     val rows = spark.read.jdbc(urlWithUserAndPass, "TEST.TIMETYPES", new Properties).collect()
     spark.read.jdbc(urlWithUserAndPass, "TEST.TIMETYPES", new Properties)
-      .cache().registerTempTable("mycached_date")
+      .cache().createOrReplaceTempView("mycached_date")
     val cachedRows = sql("select * from mycached_date").collect()
     assert(rows(0).getAs[java.sql.Date](1) === java.sql.Date.valueOf("1996-01-01"))
     assert(cachedRows(0).getAs[java.sql.Date](1) === java.sql.Date.valueOf("1996-01-01"))

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala
index c1dc9b9..03c18ad 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala
@@ -34,7 +34,7 @@ class CreateTableAsSelectSuite extends DataSourceTest with SharedSQLContext with
     super.beforeAll()
     path = Utils.createTempDir()
     val rdd = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str${i}"}"""))
-    caseInsensitiveContext.read.json(rdd).registerTempTable("jt")
+    caseInsensitiveContext.read.json(rdd).createOrReplaceTempView("jt")
   }
 
   override def afterAll(): Unit = {

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
index 5ac39f5..854fec5 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
@@ -31,7 +31,7 @@ class InsertSuite extends DataSourceTest with SharedSQLContext {
     super.beforeAll()
     path = Utils.createTempDir()
     val rdd = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str$i"}"""))
-    caseInsensitiveContext.read.json(rdd).registerTempTable("jt")
+    caseInsensitiveContext.read.json(rdd).createOrReplaceTempView("jt")
     sql(
       s"""
         |CREATE TEMPORARY TABLE jsonTable (a int, b string)
@@ -111,7 +111,7 @@ class InsertSuite extends DataSourceTest with SharedSQLContext {
 
     // Writing the table to less part files.
     val rdd1 = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str$i"}"""), 5)
-    caseInsensitiveContext.read.json(rdd1).registerTempTable("jt1")
+    caseInsensitiveContext.read.json(rdd1).createOrReplaceTempView("jt1")
     sql(
       s"""
          |INSERT OVERWRITE TABLE jsonTable SELECT a, b FROM jt1
@@ -123,7 +123,7 @@ class InsertSuite extends DataSourceTest with SharedSQLContext {
 
     // Writing the table to more part files.
     val rdd2 = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str$i"}"""), 10)
-    caseInsensitiveContext.read.json(rdd2).registerTempTable("jt2")
+    caseInsensitiveContext.read.json(rdd2).createOrReplaceTempView("jt2")
     sql(
       s"""
          |INSERT OVERWRITE TABLE jsonTable SELECT a, b FROM jt2

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala
index bb2c54a..7738e41 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala
@@ -42,7 +42,7 @@ class SaveLoadSuite extends DataSourceTest with SharedSQLContext with BeforeAndA
 
     val rdd = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str${i}"}"""))
     df = caseInsensitiveContext.read.json(rdd)
-    df.registerTempTable("jsonTable")
+    df.createOrReplaceTempView("jsonTable")
   }
 
   override def afterAll(): Unit = {
@@ -123,7 +123,7 @@ class SaveLoadSuite extends DataSourceTest with SharedSQLContext with BeforeAndA
     // verify the append mode
     df.write.mode(SaveMode.Append).json(path.toString)
     val df2 = df.union(df)
-    df2.registerTempTable("jsonTable2")
+    df2.createOrReplaceTempView("jsonTable2")
 
     checkLoad(df2, "jsonTable2")
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala
index 013b731..b742206 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala
@@ -77,7 +77,7 @@ class StreamSuite extends StreamTest with SharedSQLContext {
 
   test("sql queries") {
     val inputData = MemoryStream[Int]
-    inputData.toDF().registerTempTable("stream")
+    inputData.toDF().createOrReplaceTempView("stream")
     val evens = sql("SELECT * FROM stream WHERE value % 2 = 0")
 
     testStream(evens)(

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala
index 03369c5..421f6bc 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala
@@ -41,14 +41,14 @@ private[sql] trait SQLTestData { self =>
   protected lazy val emptyTestData: DataFrame = {
     val df = spark.sparkContext.parallelize(
       Seq.empty[Int].map(i => TestData(i, i.toString))).toDF()
-    df.registerTempTable("emptyTestData")
+    df.createOrReplaceTempView("emptyTestData")
     df
   }
 
   protected lazy val testData: DataFrame = {
     val df = spark.sparkContext.parallelize(
       (1 to 100).map(i => TestData(i, i.toString))).toDF()
-    df.registerTempTable("testData")
+    df.createOrReplaceTempView("testData")
     df
   }
 
@@ -60,7 +60,7 @@ private[sql] trait SQLTestData { self =>
       TestData2(2, 2) ::
       TestData2(3, 1) ::
       TestData2(3, 2) :: Nil, 2).toDF()
-    df.registerTempTable("testData2")
+    df.createOrReplaceTempView("testData2")
     df
   }
 
@@ -68,14 +68,14 @@ private[sql] trait SQLTestData { self =>
     val df = spark.sparkContext.parallelize(
       TestData3(1, None) ::
       TestData3(2, Some(2)) :: Nil).toDF()
-    df.registerTempTable("testData3")
+    df.createOrReplaceTempView("testData3")
     df
   }
 
   protected lazy val negativeData: DataFrame = {
     val df = spark.sparkContext.parallelize(
       (1 to 100).map(i => TestData(-i, (-i).toString))).toDF()
-    df.registerTempTable("negativeData")
+    df.createOrReplaceTempView("negativeData")
     df
   }
 
@@ -87,7 +87,7 @@ private[sql] trait SQLTestData { self =>
       LargeAndSmallInts(2, 2) ::
       LargeAndSmallInts(2147483646, 1) ::
       LargeAndSmallInts(3, 2) :: Nil).toDF()
-    df.registerTempTable("largeAndSmallInts")
+    df.createOrReplaceTempView("largeAndSmallInts")
     df
   }
 
@@ -99,7 +99,7 @@ private[sql] trait SQLTestData { self =>
       DecimalData(2, 2) ::
       DecimalData(3, 1) ::
       DecimalData(3, 2) :: Nil).toDF()
-    df.registerTempTable("decimalData")
+    df.createOrReplaceTempView("decimalData")
     df
   }
 
@@ -110,7 +110,7 @@ private[sql] trait SQLTestData { self =>
       BinaryData("122".getBytes(StandardCharsets.UTF_8), 3) ::
       BinaryData("121".getBytes(StandardCharsets.UTF_8), 2) ::
       BinaryData("123".getBytes(StandardCharsets.UTF_8), 4) :: Nil).toDF()
-    df.registerTempTable("binaryData")
+    df.createOrReplaceTempView("binaryData")
     df
   }
 
@@ -122,7 +122,7 @@ private[sql] trait SQLTestData { self =>
       UpperCaseData(4, "D") ::
       UpperCaseData(5, "E") ::
       UpperCaseData(6, "F") :: Nil).toDF()
-    df.registerTempTable("upperCaseData")
+    df.createOrReplaceTempView("upperCaseData")
     df
   }
 
@@ -132,7 +132,7 @@ private[sql] trait SQLTestData { self =>
       LowerCaseData(2, "b") ::
       LowerCaseData(3, "c") ::
       LowerCaseData(4, "d") :: Nil).toDF()
-    df.registerTempTable("lowerCaseData")
+    df.createOrReplaceTempView("lowerCaseData")
     df
   }
 
@@ -140,7 +140,7 @@ private[sql] trait SQLTestData { self =>
     val rdd = spark.sparkContext.parallelize(
       ArrayData(Seq(1, 2, 3), Seq(Seq(1, 2, 3))) ::
       ArrayData(Seq(2, 3, 4), Seq(Seq(2, 3, 4))) :: Nil)
-    rdd.toDF().registerTempTable("arrayData")
+    rdd.toDF().createOrReplaceTempView("arrayData")
     rdd
   }
 
@@ -151,13 +151,13 @@ private[sql] trait SQLTestData { self =>
       MapData(Map(1 -> "a3", 2 -> "b3", 3 -> "c3")) ::
       MapData(Map(1 -> "a4", 2 -> "b4")) ::
       MapData(Map(1 -> "a5")) :: Nil)
-    rdd.toDF().registerTempTable("mapData")
+    rdd.toDF().createOrReplaceTempView("mapData")
     rdd
   }
 
   protected lazy val repeatedData: RDD[StringData] = {
     val rdd = spark.sparkContext.parallelize(List.fill(2)(StringData("test")))
-    rdd.toDF().registerTempTable("repeatedData")
+    rdd.toDF().createOrReplaceTempView("repeatedData")
     rdd
   }
 
@@ -165,7 +165,7 @@ private[sql] trait SQLTestData { self =>
     val rdd = spark.sparkContext.parallelize(
       List.fill(2)(StringData(null)) ++
       List.fill(2)(StringData("test")))
-    rdd.toDF().registerTempTable("nullableRepeatedData")
+    rdd.toDF().createOrReplaceTempView("nullableRepeatedData")
     rdd
   }
 
@@ -175,7 +175,7 @@ private[sql] trait SQLTestData { self =>
       NullInts(2) ::
       NullInts(3) ::
       NullInts(null) :: Nil).toDF()
-    df.registerTempTable("nullInts")
+    df.createOrReplaceTempView("nullInts")
     df
   }
 
@@ -185,7 +185,7 @@ private[sql] trait SQLTestData { self =>
       NullInts(null) ::
       NullInts(null) ::
       NullInts(null) :: Nil).toDF()
-    df.registerTempTable("allNulls")
+    df.createOrReplaceTempView("allNulls")
     df
   }
 
@@ -194,13 +194,13 @@ private[sql] trait SQLTestData { self =>
       NullStrings(1, "abc") ::
       NullStrings(2, "ABC") ::
       NullStrings(3, null) :: Nil).toDF()
-    df.registerTempTable("nullStrings")
+    df.createOrReplaceTempView("nullStrings")
     df
   }
 
   protected lazy val tableName: DataFrame = {
     val df = spark.sparkContext.parallelize(TableName("test") :: Nil).toDF()
-    df.registerTempTable("tableName")
+    df.createOrReplaceTempView("tableName")
     df
   }
 
@@ -215,7 +215,7 @@ private[sql] trait SQLTestData { self =>
   // An RDD with 4 elements and 8 partitions
   protected lazy val withEmptyParts: RDD[IntField] = {
     val rdd = spark.sparkContext.parallelize((1 to 4).map(IntField), 8)
-    rdd.toDF().registerTempTable("withEmptyParts")
+    rdd.toDF().createOrReplaceTempView("withEmptyParts")
     rdd
   }
 
@@ -223,7 +223,7 @@ private[sql] trait SQLTestData { self =>
     val df = spark.sparkContext.parallelize(
       Person(0, "mike", 30) ::
       Person(1, "jim", 20) :: Nil).toDF()
-    df.registerTempTable("person")
+    df.createOrReplaceTempView("person")
     df
   }
 
@@ -231,7 +231,7 @@ private[sql] trait SQLTestData { self =>
     val df = spark.sparkContext.parallelize(
       Salary(0, 2000.0) ::
       Salary(1, 1000.0) :: Nil).toDF()
-    df.registerTempTable("salary")
+    df.createOrReplaceTempView("salary")
     df
   }
 
@@ -240,7 +240,7 @@ private[sql] trait SQLTestData { self =>
       ComplexData(Map("1" -> 1), TestData(1, "1"), Seq(1, 1, 1), true) ::
       ComplexData(Map("2" -> 2), TestData(2, "2"), Seq(2, 2, 2), false) ::
       Nil).toDF()
-    df.registerTempTable("complexData")
+    df.createOrReplaceTempView("complexData")
     df
   }
 
@@ -251,7 +251,7 @@ private[sql] trait SQLTestData { self =>
         CourseSales("dotNET", 2012, 5000) ::
         CourseSales("dotNET", 2013, 48000) ::
         CourseSales("Java", 2013, 30000) :: Nil).toDF()
-    df.registerTempTable("courseSales")
+    df.createOrReplaceTempView("courseSales")
     df
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaDataFrameSuite.java
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaDataFrameSuite.java b/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaDataFrameSuite.java
index 64f2ded..f664d5a 100644
--- a/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaDataFrameSuite.java
+++ b/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaDataFrameSuite.java
@@ -57,7 +57,7 @@ public class JavaDataFrameSuite {
       jsonObjects.add("{\"key\":" + i + ", \"value\":\"str" + i + "\"}");
     }
     df = hc.read().json(sc.parallelize(jsonObjects));
-    df.registerTempTable("window_table");
+    df.createOrReplaceTempView("window_table");
   }
 
   @After

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaMetastoreDataSourcesSuite.java
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaMetastoreDataSourcesSuite.java b/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaMetastoreDataSourcesSuite.java
index f13c32d..e73117c 100644
--- a/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaMetastoreDataSourcesSuite.java
+++ b/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaMetastoreDataSourcesSuite.java
@@ -85,7 +85,7 @@ public class JavaMetastoreDataSourcesSuite {
     }
     JavaRDD<String> rdd = sc.parallelize(jsonObjects);
     df = sqlContext.read().json(rdd);
-    df.registerTempTable("jsonTable");
+    df.createOrReplaceTempView("jsonTable");
   }
 
   @After

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/ErrorPositionSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ErrorPositionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ErrorPositionSuite.scala
index d96eb01..d2cb62c 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ErrorPositionSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ErrorPositionSuite.scala
@@ -33,8 +33,8 @@ class ErrorPositionSuite extends QueryTest with TestHiveSingleton with BeforeAnd
     if (spark.wrapped.tableNames().contains("src")) {
       spark.catalog.dropTempView("src")
     }
-    Seq((1, "")).toDF("key", "value").registerTempTable("src")
-    Seq((1, 1, 1)).toDF("a", "a", "b").registerTempTable("dupAttributes")
+    Seq((1, "")).toDF("key", "value").createOrReplaceTempView("src")
+    Seq((1, 1, 1)).toDF("a", "a", "b").createOrReplaceTempView("dupAttributes")
   }
 
   override protected def afterEach(): Unit = {

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala
index b5af758..e2304b5 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala
@@ -51,7 +51,7 @@ class HiveParquetSuite extends QueryTest with ParquetTest with TestHiveSingleton
   test("Converting Hive to Parquet Table via saveAsParquetFile") {
     withTempPath { dir =>
       sql("SELECT * FROM src").write.parquet(dir.getCanonicalPath)
-      hiveContext.read.parquet(dir.getCanonicalPath).registerTempTable("p")
+      hiveContext.read.parquet(dir.getCanonicalPath).createOrReplaceTempView("p")
       withTempTable("p") {
         checkAnswer(
           sql("SELECT * FROM src ORDER BY key"),
@@ -65,7 +65,7 @@ class HiveParquetSuite extends QueryTest with ParquetTest with TestHiveSingleton
     withParquetTable((1 to 10).map(i => (i, s"val_$i")), "t", false) {
       withTempPath { file =>
         sql("SELECT * FROM t LIMIT 1").write.parquet(file.getCanonicalPath)
-        hiveContext.read.parquet(file.getCanonicalPath).registerTempTable("p")
+        hiveContext.read.parquet(file.getCanonicalPath).createOrReplaceTempView("p")
         withTempTable("p") {
           // let's do three overwrites for good measure
           sql("INSERT OVERWRITE TABLE p SELECT * FROM t")

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala
index d05a362..a4bbe96 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala
@@ -355,7 +355,7 @@ object TemporaryHiveUDFTest extends Logging {
       """.stripMargin)
     val source =
       hiveContext.createDataFrame((1 to 10).map(i => (i, s"str$i"))).toDF("key", "val")
-    source.registerTempTable("sourceTable")
+    source.createOrReplaceTempView("sourceTable")
     // Actually use the loaded UDF.
     logInfo("Using the UDF.")
     val result = hiveContext.sql(
@@ -393,7 +393,7 @@ object PermanentHiveUDFTest1 extends Logging {
       """.stripMargin)
     val source =
       hiveContext.createDataFrame((1 to 10).map(i => (i, s"str$i"))).toDF("key", "val")
-    source.registerTempTable("sourceTable")
+    source.createOrReplaceTempView("sourceTable")
     // Actually use the loaded UDF.
     logInfo("Using the UDF.")
     val result = hiveContext.sql(
@@ -429,7 +429,7 @@ object PermanentHiveUDFTest2 extends Logging {
     hiveContext.sessionState.catalog.createFunction(function, ignoreIfExists = false)
     val source =
       hiveContext.createDataFrame((1 to 10).map(i => (i, s"str$i"))).toDF("key", "val")
-    source.registerTempTable("sourceTable")
+    source.createOrReplaceTempView("sourceTable")
     // Actually use the loaded UDF.
     logInfo("Using the UDF.")
     val result = hiveContext.sql(
@@ -491,7 +491,7 @@ object SparkSubmitClassLoaderTest extends Logging {
       """.stripMargin)
     val source =
       hiveContext.createDataFrame((1 to 10).map(i => (i, s"str$i"))).toDF("key", "val")
-    source.registerTempTable("sourceTable")
+    source.createOrReplaceTempView("sourceTable")
     // Load a Hive SerDe from the jar.
     logInfo("Creating a Hive table with a SerDe provided in a jar.")
     hiveContext.sql(

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
index 883cdac..b256845 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
@@ -45,8 +45,8 @@ class InsertIntoHiveTableSuite extends QueryTest with TestHiveSingleton with Bef
     // Since every we are doing tests for DDL statements,
     // it is better to reset before every test.
     hiveContext.reset()
-    // Register the testData, which will be used in every test.
-    testData.registerTempTable("testData")
+    // Creates a temporary view with testData, which will be used in all tests.
+    testData.createOrReplaceTempView("testData")
   }
 
   test("insertInto() HiveTable") {
@@ -98,7 +98,7 @@ class InsertIntoHiveTableSuite extends QueryTest with TestHiveSingleton with Bef
     val rowRDD = hiveContext.sparkContext.parallelize(
       (1 to 100).map(i => Row(scala.collection.mutable.HashMap(s"key$i" -> s"value$i"))))
     val df = hiveContext.createDataFrame(rowRDD, schema)
-    df.registerTempTable("tableWithMapValue")
+    df.createOrReplaceTempView("tableWithMapValue")
     sql("CREATE TABLE hiveTableWithMapValue(m MAP <STRING, STRING>)")
     sql("INSERT OVERWRITE TABLE hiveTableWithMapValue SELECT m FROM tableWithMapValue")
 
@@ -171,7 +171,7 @@ class InsertIntoHiveTableSuite extends QueryTest with TestHiveSingleton with Bef
       StructField("a", ArrayType(StringType, containsNull = false))))
     val rowRDD = hiveContext.sparkContext.parallelize((1 to 100).map(i => Row(Seq(s"value$i"))))
     val df = hiveContext.createDataFrame(rowRDD, schema)
-    df.registerTempTable("tableWithArrayValue")
+    df.createOrReplaceTempView("tableWithArrayValue")
     sql("CREATE TABLE hiveTableWithArrayValue(a Array <STRING>)")
     sql("INSERT OVERWRITE TABLE hiveTableWithArrayValue SELECT a FROM tableWithArrayValue")
 
@@ -188,7 +188,7 @@ class InsertIntoHiveTableSuite extends QueryTest with TestHiveSingleton with Bef
     val rowRDD = hiveContext.sparkContext.parallelize(
       (1 to 100).map(i => Row(Map(s"key$i" -> s"value$i"))))
     val df = hiveContext.createDataFrame(rowRDD, schema)
-    df.registerTempTable("tableWithMapValue")
+    df.createOrReplaceTempView("tableWithMapValue")
     sql("CREATE TABLE hiveTableWithMapValue(m Map <STRING, STRING>)")
     sql("INSERT OVERWRITE TABLE hiveTableWithMapValue SELECT m FROM tableWithMapValue")
 
@@ -205,7 +205,7 @@ class InsertIntoHiveTableSuite extends QueryTest with TestHiveSingleton with Bef
     val rowRDD = hiveContext.sparkContext.parallelize(
       (1 to 100).map(i => Row(Row(s"value$i"))))
     val df = hiveContext.createDataFrame(rowRDD, schema)
-    df.registerTempTable("tableWithStructValue")
+    df.createOrReplaceTempView("tableWithStructValue")
     sql("CREATE TABLE hiveTableWithStructValue(s Struct <f: STRING>)")
     sql("INSERT OVERWRITE TABLE hiveTableWithStructValue SELECT s FROM tableWithStructValue")
 

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
index b507018..00adb9a 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
@@ -80,7 +80,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
          """.stripMargin)
 
       withTempTable("expectedJsonTable") {
-        read.json(jsonFilePath).registerTempTable("expectedJsonTable")
+        read.json(jsonFilePath).createOrReplaceTempView("expectedJsonTable")
         checkAnswer(
           sql("SELECT a, b, `c_!@(3)`, `<d>`.`d!`, `<d>`.`=` FROM jsonTable"),
          sql("SELECT a, b, `c_!@(3)`, `<d>`.`d!`, `<d>`.`=` FROM expectedJsonTable"))
@@ -110,7 +110,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
       assert(expectedSchema === table("jsonTable").schema)
 
       withTempTable("expectedJsonTable") {
-        read.json(jsonFilePath).registerTempTable("expectedJsonTable")
+        read.json(jsonFilePath).createOrReplaceTempView("expectedJsonTable")
         checkAnswer(
           sql("SELECT b, `<d>`.`=` FROM jsonTable"),
           sql("SELECT b, `<d>`.`=` FROM expectedJsonTable"))
@@ -248,7 +248,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
          """.stripMargin)
 
       withTempTable("expectedJsonTable") {
-        read.json(jsonFilePath).registerTempTable("expectedJsonTable")
+        read.json(jsonFilePath).createOrReplaceTempView("expectedJsonTable")
 
         checkAnswer(
           sql("SELECT * FROM jsonTable"),
@@ -554,7 +554,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
   test("scan a parquet table created through a CTAS statement") {
     withSQLConf(HiveUtils.CONVERT_METASTORE_PARQUET.key -> "true") {
       withTempTable("jt") {
-        (1 to 10).map(i => i -> s"str$i").toDF("a", "b").registerTempTable("jt")
+        (1 to 10).map(i => i -> s"str$i").toDF("a", "b").createOrReplaceTempView("jt")
 
         withTable("test_parquet_ctas") {
           sql(

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/ParquetHiveCompatibilitySuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ParquetHiveCompatibilitySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ParquetHiveCompatibilitySuite.scala
index 3f6418c..ac89bbb 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ParquetHiveCompatibilitySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ParquetHiveCompatibilitySuite.scala
@@ -74,7 +74,7 @@ class ParquetHiveCompatibilitySuite extends ParquetCompatibilityTest with TestHi
 
             val schema = spark.table("parquet_compat").schema
             val rowRDD = spark.sparkContext.parallelize(rows).coalesce(1)
-            spark.createDataFrame(rowRDD, schema).registerTempTable("data")
+            spark.createDataFrame(rowRDD, schema).createOrReplaceTempView("data")
             spark.sql("INSERT INTO TABLE parquet_compat SELECT * FROM data")
           }
         }

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala
index 78569c5..cc05e56 100644
--- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala
+++ 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala
@@ -32,7 +32,7 @@ class QueryPartitionSuite extends QueryTest with SQLTestUtils 
with TestHiveSingl
     withSQLConf((SQLConf.HIVE_VERIFY_PARTITION_PATH.key, "true")) {
       val testData = sparkContext.parallelize(
         (1 to 10).map(i => TestData(i, i.toString))).toDF()
-      testData.registerTempTable("testData")
+      testData.createOrReplaceTempView("testData")
 
       val tmpDir = Files.createTempDir()
       // create the table for test

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
index 8060ef7..7011cd8 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
@@ -115,7 +115,7 @@ class StatisticsSuite extends QueryTest with 
TestHiveSingleton {
     sql("DROP TABLE analyzeTable_part").collect()
 
     // Try to analyze a temp table
-    sql("""SELECT * FROM src""").registerTempTable("tempTable")
+    sql("""SELECT * FROM src""").createOrReplaceTempView("tempTable")
     intercept[UnsupportedOperationException] {
       hiveContext.sql("ANALYZE TABLE tempTable COMPUTE STATISTICS")
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala
index d1aa5aa..d121bcb 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala
@@ -53,7 +53,7 @@ class UDFSuite
     sql("USE default")
 
     testDF = (1 to 10).map(i => s"sTr$i").toDF("value")
-    testDF.registerTempTable(testTableName)
+    testDF.createOrReplaceTempView(testTableName)
     expectedDF = (1 to 10).map(i => s"STR$i").toDF("value")
     super.beforeAll()
   }
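
The fixture above just registers a small DataFrame as a temporary view for the UDF tests to query. A rough standalone illustration of the same setup, with a made-up view name rather than the suite's testTableName:

    import org.apache.spark.sql.SparkSession

    val spark = SparkSession.builder().appName("udf-view-sketch").getOrCreate()
    import spark.implicits._

    // Expose a tiny DataFrame as a temporary view, then hit it with a SQL function.
    (1 to 10).map(i => s"sTr$i").toDF("value").createOrReplaceTempView("udfInput")
    spark.sql("SELECT upper(value) AS value FROM udfInput").show()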

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala
 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala
index c97b3f3..a2bae2e 100644
--- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala
+++ 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala
@@ -180,7 +180,7 @@ abstract class AggregationQuerySuite extends QueryTest with 
SQLTestUtils with Te
     val emptyDF = spark.createDataFrame(
       sparkContext.emptyRDD[Row],
       StructType(StructField("key", StringType) :: StructField("value", 
IntegerType) :: Nil))
-    emptyDF.registerTempTable("emptyTable")
+    emptyDF.createOrReplaceTempView("emptyTable")
 
     // Register UDAFs
     spark.udf.register("mydoublesum", new MyDoubleSum)
@@ -200,7 +200,7 @@ abstract class AggregationQuerySuite extends QueryTest with 
SQLTestUtils with Te
   }
 
   test("group by function") {
-    Seq((1, 2)).toDF("a", "b").registerTempTable("data")
+    Seq((1, 2)).toDF("a", "b").createOrReplaceTempView("data")
 
     checkAnswer(
       sql("SELECT floor(a) AS a, collect_set(b) FROM data GROUP BY floor(a) 
ORDER BY a"),
@@ -783,7 +783,7 @@ abstract class AggregationQuerySuite extends QueryTest with 
SQLTestUtils with Te
       (5, 8, 17),
       (6, 2, 11)).toDF("a", "b", "c")
 
-    covar_tab.registerTempTable("covar_tab")
+    covar_tab.createOrReplaceTempView("covar_tab")
 
     checkAnswer(
       spark.sql(
@@ -938,7 +938,7 @@ abstract class AggregationQuerySuite extends QueryTest with 
SQLTestUtils with Te
       spark.createDataFrame(
         sparkContext.parallelize(data, 2),
         schema)
-        .registerTempTable("noInputSchemaUDAF")
+        .createOrReplaceTempView("noInputSchemaUDAF")
 
       checkAnswer(
         spark.sql(

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveExplainSuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveExplainSuite.scala
 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveExplainSuite.scala
index 17422ca..131b06a 100644
--- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveExplainSuite.scala
+++ 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveExplainSuite.scala
@@ -80,7 +80,7 @@ class HiveExplainSuite extends QueryTest with SQLTestUtils 
with TestHiveSingleto
   test("SPARK-6212: The EXPLAIN output of CTAS only shows the analyzed plan") {
     withTempTable("jt") {
       val rdd = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, 
"b":"str$i"}"""))
-      hiveContext.read.json(rdd).registerTempTable("jt")
+      hiveContext.read.json(rdd).createOrReplaceTempView("jt")
       val outputs = sql(
         s"""
            |EXPLAIN EXTENDED

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveOperatorQueryableSuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveOperatorQueryableSuite.scala
 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveOperatorQueryableSuite.scala
index b252c6e..4d2f190 100644
--- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveOperatorQueryableSuite.scala
+++ 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveOperatorQueryableSuite.scala
@@ -29,8 +29,8 @@ class HiveOperatorQueryableSuite extends QueryTest with 
TestHiveSingleton {
   test("SPARK-5324 query result of describe command") {
     hiveContext.loadTestTable("src")
 
-    // register a describe command to be a temp table
-    sql("desc src").registerTempTable("mydesc")
+    // Creates a temporary view with the output of a describe command
+    sql("desc src").createOrReplaceTempView("mydesc")
     checkAnswer(
       sql("desc mydesc"),
       Seq(

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HivePlanTest.scala
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HivePlanTest.scala
 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HivePlanTest.scala
index d8d3448..78c0d1f 100644
--- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HivePlanTest.scala
+++ 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HivePlanTest.scala
@@ -28,7 +28,7 @@ class HivePlanTest extends QueryTest with TestHiveSingleton {
   import hiveContext.implicits._
 
   test("udf constant folding") {
-    Seq.empty[Tuple1[Int]].toDF("a").registerTempTable("t")
+    Seq.empty[Tuple1[Int]].toDF("a").createOrReplaceTempView("t")
     val optimized = sql("SELECT cos(null) AS c FROM 
t").queryExecution.optimizedPlan
     val correctAnswer = sql("SELECT cast(null as double) AS c FROM 
t").queryExecution.optimizedPlan
 

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
index 19f8cb3..2aaaaad 100644
--- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
+++ 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
@@ -685,12 +685,12 @@ class HiveQuerySuite extends HiveComparisonTest with 
BeforeAndAfter {
   createQueryTest("case sensitivity when query Hive table",
     "SELECT srcalias.KEY, SRCALIAS.value FROM sRc SrCAlias WHERE SrCAlias.kEy 
< 15")
 
-  test("case sensitivity: registered table") {
+  test("case sensitivity: created temporary view") {
     val testData =
       TestHive.sparkContext.parallelize(
         TestData(1, "str1") ::
         TestData(2, "str2") :: Nil)
-    testData.toDF().registerTempTable("REGisteredTABle")
+    testData.toDF().createOrReplaceTempView("REGisteredTABle")
 
     assertResult(Array(Row(2, "str2"))) {
       sql("SELECT tablealias.A, TABLEALIAS.b FROM reGisteredTABle TableAlias " 
+
@@ -715,7 +715,7 @@ class HiveQuerySuite extends HiveComparisonTest with 
BeforeAndAfter {
   test("SPARK-2180: HAVING support in GROUP BY clauses (positive)") {
     val fixture = List(("foo", 2), ("bar", 1), ("foo", 4), ("bar", 3))
       .zipWithIndex.map {case ((value, attr), key) => HavingRow(key, value, 
attr)}
-    TestHive.sparkContext.parallelize(fixture).toDF().registerTempTable("having_test")
+    TestHive.sparkContext.parallelize(fixture).toDF().createOrReplaceTempView("having_test")
     val results =
       sql("SELECT value, max(attr) AS attr FROM having_test GROUP BY value 
HAVING attr > 3")
       .collect()
@@ -819,12 +819,12 @@ class HiveQuerySuite extends HiveComparisonTest with 
BeforeAndAfter {
         .collect()
     }
 
-    // Describe a registered temporary table.
+    // Describe a temporary view.
     val testData =
       TestHive.sparkContext.parallelize(
         TestData(1, "str1") ::
         TestData(1, "str2") :: Nil)
-    testData.toDF().registerTempTable("test_describe_commands2")
+    testData.toDF().createOrReplaceTempView("test_describe_commands2")
 
     assertResult(
       Array(
@@ -996,9 +996,9 @@ class HiveQuerySuite extends HiveComparisonTest with 
BeforeAndAfter {
     }
   }
 
-  test("SPARK-3414 regression: should store analyzed logical plan when 
registering a temp table") {
-    
sparkContext.makeRDD(Seq.empty[LogEntry]).toDF().registerTempTable("rawLogs")
-    
sparkContext.makeRDD(Seq.empty[LogFile]).toDF().registerTempTable("logFiles")
+  test("SPARK-3414 regression: should store analyzed logical plan when 
creating a temporary view") {
+    
sparkContext.makeRDD(Seq.empty[LogEntry]).toDF().createOrReplaceTempView("rawLogs")
+    
sparkContext.makeRDD(Seq.empty[LogFile]).toDF().createOrReplaceTempView("logFiles")
 
     sql(
       """
@@ -1009,7 +1009,7 @@ class HiveQuerySuite extends HiveComparisonTest with 
BeforeAndAfter {
         FROM logFiles
       ) files
       ON rawLogs.filename = files.name
-      """).registerTempTable("boom")
+      """).createOrReplaceTempView("boom")
 
     // This should be successfully analyzed
     sql("SELECT * FROM boom").queryExecution.analyzed

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala
 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala
index dd13b83..b2f19d7 100644
--- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala
+++ 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala
@@ -32,14 +32,14 @@ class HiveResolutionSuite extends HiveComparisonTest {
 
   test("SPARK-3698: case insensitive test for nested data") {
     read.json(sparkContext.makeRDD(
-      """{"a": [{"a": {"a": 1}}]}""" :: Nil)).registerTempTable("nested")
+      """{"a": [{"a": {"a": 1}}]}""" :: Nil)).createOrReplaceTempView("nested")
     // This should be successfully analyzed
     sql("SELECT a[0].A.A from nested").queryExecution.analyzed
   }
 
   test("SPARK-5278: check ambiguous reference to fields") {
     read.json(sparkContext.makeRDD(
-      """{"a": [{"b": 1, "B": 2}]}""" :: Nil)).registerTempTable("nested")
+      """{"a": [{"b": 1, "B": 2}]}""" :: 
Nil)).createOrReplaceTempView("nested")
 
     // there are 2 fields matching field name "b", so we should report an ambiguous reference error
     val exception = intercept[AnalysisException] {
@@ -78,7 +78,7 @@ class HiveResolutionSuite extends HiveComparisonTest {
   test("case insensitivity with scala reflection") {
     // Test resolution with Scala Reflection
     sparkContext.parallelize(Data(1, 2, Nested(1, 2), Seq(Nested(1, 2))) :: 
Nil)
-      .toDF().registerTempTable("caseSensitivityTest")
+      .toDF().createOrReplaceTempView("caseSensitivityTest")
 
     val query = sql("SELECT a, b, A, B, n.a, n.b, n.A, n.B FROM 
caseSensitivityTest")
     assert(query.schema.fields.map(_.name) === Seq("a", "b", "A", "B", "a", 
"b", "A", "B"),
@@ -89,14 +89,14 @@ class HiveResolutionSuite extends HiveComparisonTest {
   ignore("case insensitivity with scala reflection joins") {
     // Test resolution with Scala Reflection
     sparkContext.parallelize(Data(1, 2, Nested(1, 2), Seq(Nested(1, 2))) :: 
Nil)
-      .toDF().registerTempTable("caseSensitivityTest")
+      .toDF().createOrReplaceTempView("caseSensitivityTest")
 
     sql("SELECT * FROM casesensitivitytest a JOIN casesensitivitytest b ON a.a 
= b.a").collect()
   }
 
   test("nested repeated resolution") {
     sparkContext.parallelize(Data(1, 2, Nested(1, 2), Seq(Nested(1, 2))) :: 
Nil)
-      .toDF().registerTempTable("nestedRepeatedTest")
+      .toDF().createOrReplaceTempView("nestedRepeatedTest")
     assert(sql("SELECT nestedArray[0].a FROM 
nestedRepeatedTest").collect().head(0) === 1)
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala
 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala
index 8c9c37f..60f8be5 100644
--- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala
+++ 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala
@@ -84,7 +84,7 @@ class HiveTableScanSuite extends HiveComparisonTest {
     sql("""insert into table spark_4959 select "hi" from src limit 1""")
     table("spark_4959").select(
       'col1.as("CaseSensitiveColName"),
-      'col1.as("CaseSensitiveColName2")).registerTempTable("spark_4959_2")
+      'col1.as("CaseSensitiveColName2")).createOrReplaceTempView("spark_4959_2")
 
     assert(sql("select CaseSensitiveColName from spark_4959_2").head() === 
Row("hi"))
     assert(sql("select casesensitivecolname from spark_4959_2").head() === 
Row("hi"))

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
index 521964e..23b7f6c 100644
--- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
+++ 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
@@ -153,7 +153,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton 
with SQLTestUtils {
   test("UDFIntegerToString") {
     val testData = hiveContext.sparkContext.parallelize(
       IntegerCaseClass(1) :: IntegerCaseClass(2) :: Nil).toDF()
-    testData.registerTempTable("integerTable")
+    testData.createOrReplaceTempView("integerTable")
 
     val udfName = classOf[UDFIntegerToString].getName
     sql(s"CREATE TEMPORARY FUNCTION testUDFIntegerToString AS '$udfName'")
@@ -167,7 +167,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton 
with SQLTestUtils {
 
   test("UDFToListString") {
     val testData = hiveContext.sparkContext.parallelize(StringCaseClass("") :: 
Nil).toDF()
-    testData.registerTempTable("inputTable")
+    testData.createOrReplaceTempView("inputTable")
 
     sql(s"CREATE TEMPORARY FUNCTION testUDFToListString AS 
'${classOf[UDFToListString].getName}'")
     val errMsg = intercept[AnalysisException] {
@@ -182,7 +182,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton 
with SQLTestUtils {
 
   test("UDFToListInt") {
     val testData = hiveContext.sparkContext.parallelize(StringCaseClass("") :: 
Nil).toDF()
-    testData.registerTempTable("inputTable")
+    testData.createOrReplaceTempView("inputTable")
 
     sql(s"CREATE TEMPORARY FUNCTION testUDFToListInt AS 
'${classOf[UDFToListInt].getName}'")
     val errMsg = intercept[AnalysisException] {
@@ -197,7 +197,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton 
with SQLTestUtils {
 
   test("UDFToStringIntMap") {
     val testData = hiveContext.sparkContext.parallelize(StringCaseClass("") :: 
Nil).toDF()
-    testData.registerTempTable("inputTable")
+    testData.createOrReplaceTempView("inputTable")
 
     sql(s"CREATE TEMPORARY FUNCTION testUDFToStringIntMap " +
       s"AS '${classOf[UDFToStringIntMap].getName}'")
@@ -213,7 +213,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton 
with SQLTestUtils {
 
   test("UDFToIntIntMap") {
     val testData = hiveContext.sparkContext.parallelize(StringCaseClass("") :: 
Nil).toDF()
-    testData.registerTempTable("inputTable")
+    testData.createOrReplaceTempView("inputTable")
 
     sql(s"CREATE TEMPORARY FUNCTION testUDFToIntIntMap " +
       s"AS '${classOf[UDFToIntIntMap].getName}'")
@@ -232,7 +232,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton 
with SQLTestUtils {
       ListListIntCaseClass(Nil) ::
       ListListIntCaseClass(Seq((1, 2, 3))) ::
       ListListIntCaseClass(Seq((4, 5, 6), (7, 8, 9))) :: Nil).toDF()
-    testData.registerTempTable("listListIntTable")
+    testData.createOrReplaceTempView("listListIntTable")
 
     sql(s"CREATE TEMPORARY FUNCTION testUDFListListInt AS 
'${classOf[UDFListListInt].getName}'")
     checkAnswer(
@@ -247,7 +247,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton 
with SQLTestUtils {
     val testData = hiveContext.sparkContext.parallelize(
       ListStringCaseClass(Seq("a", "b", "c")) ::
       ListStringCaseClass(Seq("d", "e")) :: Nil).toDF()
-    testData.registerTempTable("listStringTable")
+    testData.createOrReplaceTempView("listStringTable")
 
     sql(s"CREATE TEMPORARY FUNCTION testUDFListString AS 
'${classOf[UDFListString].getName}'")
     checkAnswer(
@@ -261,7 +261,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton 
with SQLTestUtils {
   test("UDFStringString") {
     val testData = hiveContext.sparkContext.parallelize(
       StringCaseClass("world") :: StringCaseClass("goodbye") :: Nil).toDF()
-    testData.registerTempTable("stringTable")
+    testData.createOrReplaceTempView("stringTable")
 
     sql(s"CREATE TEMPORARY FUNCTION testStringStringUDF AS 
'${classOf[UDFStringString].getName}'")
     checkAnswer(
@@ -283,7 +283,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton 
with SQLTestUtils {
       ListListIntCaseClass(Seq((1, 2, 3))) ::
       ListListIntCaseClass(Seq((4, 5, 6), (7, 8, 9))) ::
       Nil).toDF()
-    testData.registerTempTable("TwoListTable")
+    testData.createOrReplaceTempView("TwoListTable")
 
     sql(s"CREATE TEMPORARY FUNCTION testUDFTwoListList AS 
'${classOf[UDFTwoListList].getName}'")
     checkAnswer(
@@ -295,7 +295,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton 
with SQLTestUtils {
   }
 
   test("Hive UDFs with insufficient number of input arguments should trigger 
an analysis error") {
-    Seq((1, 2)).toDF("a", "b").registerTempTable("testUDF")
+    Seq((1, 2)).toDF("a", "b").createOrReplaceTempView("testUDF")
 
     {
       // HiveSimpleUDF
@@ -352,7 +352,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton 
with SQLTestUtils {
 
   test("Hive UDF in group by") {
     withTempTable("tab1") {
-      Seq(Tuple1(1451400761)).toDF("test_date").registerTempTable("tab1")
+      Seq(Tuple1(1451400761)).toDF("test_date").createOrReplaceTempView("tab1")
       sql(s"CREATE TEMPORARY FUNCTION testUDFToDate AS 
'${classOf[GenericUDFToDate].getName}'")
       val count = sql("select testUDFToDate(cast(test_date as timestamp))" +
         " from tab1 group by testUDFToDate(cast(test_date as 
timestamp))").count()
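
The hunks in this commit apply one mechanical rewrite: Dataset.registerTempTable(name), deprecated in Spark 2.0, becomes the equivalent createOrReplaceTempView(name). A minimal before/after sketch, assuming a SparkSession named spark and an illustrative view name:

    import org.apache.spark.sql.SparkSession

    val spark = SparkSession.builder().appName("migration-sketch").getOrCreate()
    import spark.implicits._

    val df = Seq((1, "str1"), (2, "str2")).toDF("key", "value")

    // df.registerTempTable("people")     // old, deprecated call
    df.createOrReplaceTempView("people")   // same effect; replaces the view if it already exists

    spark.sql("SELECT key FROM people WHERE value = 'str2'").show()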

