[SPARK-15171][SQL] Remove references to the deprecated method dataset.registerTempTable

## What changes were proposed in this pull request?

Update the unit test code, examples, and documentation to remove calls to the 
deprecated method `dataset.registerTempTable`, replacing them with 
`dataset.createOrReplaceTempView`.
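
For illustration, a minimal before/after sketch of the migration in Scala (this snippet is not part of the patch itself; the `people.json` path refers to the example data file shipped with the Spark source tree):

```scala
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().appName("TempViewMigration").getOrCreate()
val people = spark.read.json("examples/src/main/resources/people.json")

// Deprecated since Spark 2.0: registers the DataFrame as a temporary table.
people.registerTempTable("people")

// Preferred replacement: creates (or replaces) a session-scoped temporary view.
people.createOrReplaceTempView("people")

// Either form makes the data queryable through the session's SQL interface.
spark.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19").show()
```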

## How was this patch tested?

This PR only changes the unit test code, examples, and comments, so it should be 
safe.
It is a follow-up to PR https://github.com/apache/spark/pull/12945, which has 
already been merged.

Author: Sean Zhong <seanzh...@databricks.com>

Closes #13098 from clockfly/spark-15171-remove-deprecation.

(cherry picked from commit 25b315e6cad7c27b62dcaa2c194293c1115fdfb3)
Signed-off-by: Cheng Lian <l...@databricks.com>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/5f5270ea
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/5f5270ea
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/5f5270ea

Branch: refs/heads/branch-2.0
Commit: 5f5270ead86d5294af6c871e36112e2a833e9d64
Parents: 1db3741
Author: Sean Zhong <seanzh...@databricks.com>
Authored: Wed May 18 09:01:59 2016 +0800
Committer: Cheng Lian <l...@databricks.com>
Committed: Wed May 18 09:05:34 2016 +0800

----------------------------------------------------------------------
 docs/sql-programming-guide.md                   |  48 ++++-----
 docs/streaming-programming-guide.md             |  12 +--
 .../apache/spark/examples/sql/JavaSparkSQL.java |   8 +-
 .../streaming/JavaSqlNetworkWordCount.java      |   2 +-
 examples/src/main/python/sql.py                 |   2 +-
 .../python/streaming/sql_network_wordcount.py   |   2 +-
 .../apache/spark/examples/sql/RDDRelation.scala |   6 +-
 .../spark/examples/sql/hive/HiveFromSpark.scala |   4 +-
 .../streaming/SqlNetworkWordCount.scala         |   2 +-
 .../org/apache/spark/ml/JavaPipelineSuite.java  |   2 +-
 .../JavaLogisticRegressionSuite.java            |  10 +-
 .../regression/JavaLinearRegressionSuite.java   |   4 +-
 python/pyspark/sql/context.py                   |   4 +-
 python/pyspark/sql/readwriter.py                |   2 +-
 python/pyspark/sql/session.py                   |   2 +-
 python/pyspark/sql/tests.py                     |  25 ++---
 .../scala/org/apache/spark/sql/SQLContext.scala |   2 +-
 .../apache/spark/sql/JavaApplySchemaSuite.java  |   8 +-
 .../spark/sql/sources/JavaSaveLoadSuite.java    |   2 +-
 .../org/apache/spark/sql/CachedTableSuite.scala |  60 +++++------
 .../spark/sql/ColumnExpressionSuite.scala       |   2 +-
 .../spark/sql/DataFrameTimeWindowingSuite.scala |   2 +-
 .../apache/spark/sql/DataFrameWindowSuite.scala |  22 ++--
 .../scala/org/apache/spark/sql/JoinSuite.scala  |   4 +-
 .../org/apache/spark/sql/ListTablesSuite.scala  |   4 +-
 .../org/apache/spark/sql/SQLContextSuite.scala  |   2 +-
 .../org/apache/spark/sql/SQLQuerySuite.scala    | 103 ++++++++++---------
 .../sql/ScalaReflectionRelationSuite.scala      |  10 +-
 .../org/apache/spark/sql/SubquerySuite.scala    |   8 +-
 .../scala/org/apache/spark/sql/UDFSuite.scala   |  12 +--
 .../apache/spark/sql/UserDefinedTypeSuite.scala |   2 +-
 .../spark/sql/execution/PlannerSuite.scala      |  10 +-
 .../benchmark/AggregateBenchmark.scala          |   3 +-
 .../columnar/InMemoryColumnarQuerySuite.scala   |   8 +-
 .../columnar/PartitionBatchPruningSuite.scala   |   2 +-
 .../execution/datasources/json/JsonSuite.scala  |  58 +++++------
 .../ParquetPartitionDiscoverySuite.scala        |  10 +-
 .../datasources/parquet/ParquetQuerySuite.scala |   4 +-
 .../parquet/ParquetReadBenchmark.scala          |  20 ++--
 .../datasources/parquet/TPCDSBenchmark.scala    |   2 +-
 .../sql/execution/metric/SQLMetricsSuite.scala  |   8 +-
 .../org/apache/spark/sql/jdbc/JDBCSuite.scala   |   2 +-
 .../sql/sources/CreateTableAsSelectSuite.scala  |   2 +-
 .../apache/spark/sql/sources/InsertSuite.scala  |   6 +-
 .../spark/sql/sources/SaveLoadSuite.scala       |   4 +-
 .../spark/sql/streaming/StreamSuite.scala       |   2 +-
 .../org/apache/spark/sql/test/SQLTestData.scala |  46 ++++-----
 .../spark/sql/hive/JavaDataFrameSuite.java      |   2 +-
 .../sql/hive/JavaMetastoreDataSourcesSuite.java |   2 +-
 .../spark/sql/hive/ErrorPositionSuite.scala     |   4 +-
 .../spark/sql/hive/HiveParquetSuite.scala       |   4 +-
 .../spark/sql/hive/HiveSparkSubmitSuite.scala   |   8 +-
 .../sql/hive/InsertIntoHiveTableSuite.scala     |  12 +--
 .../sql/hive/MetastoreDataSourcesSuite.scala    |   8 +-
 .../hive/ParquetHiveCompatibilitySuite.scala    |   2 +-
 .../spark/sql/hive/QueryPartitionSuite.scala    |   2 +-
 .../apache/spark/sql/hive/StatisticsSuite.scala |   2 +-
 .../org/apache/spark/sql/hive/UDFSuite.scala    |   2 +-
 .../hive/execution/AggregationQuerySuite.scala  |   8 +-
 .../sql/hive/execution/HiveExplainSuite.scala   |   2 +-
 .../execution/HiveOperatorQueryableSuite.scala  |   4 +-
 .../spark/sql/hive/execution/HivePlanTest.scala |   2 +-
 .../sql/hive/execution/HiveQuerySuite.scala     |  18 ++--
 .../hive/execution/HiveResolutionSuite.scala    |  10 +-
 .../sql/hive/execution/HiveTableScanSuite.scala |   2 +-
 .../spark/sql/hive/execution/HiveUDFSuite.scala |  22 ++--
 .../sql/hive/execution/SQLQuerySuite.scala      |  66 ++++++------
 .../hive/execution/SQLWindowFunctionSuite.scala |  16 +--
 .../hive/orc/OrcPartitionDiscoverySuite.scala   |   8 +-
 .../spark/sql/hive/orc/OrcQuerySuite.scala      |  12 +--
 .../spark/sql/hive/orc/OrcSourceSuite.scala     |   2 +-
 .../apache/spark/sql/hive/parquetSuites.scala   |   9 +-
 .../sql/sources/HadoopFsRelationTest.scala      |  10 +-
 .../hive/HiveContextCompatibilitySuite.scala    |   4 +-
 74 files changed, 407 insertions(+), 401 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/docs/sql-programming-guide.md
----------------------------------------------------------------------
diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index a16a6bb..a9e1f9d 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -529,7 +529,7 @@ case class Person(name: String, age: Int)
 
 // Create an RDD of Person objects and register it as a table.
 val people = 
sc.textFile("examples/src/main/resources/people.txt").map(_.split(",")).map(p 
=> Person(p(0), p(1).trim.toInt)).toDF()
-people.registerTempTable("people")
+people.createOrReplaceTempView("people")
 
 // SQL statements can be run by using the sql methods provided by sqlContext.
 val teenagers = sqlContext.sql("SELECT name, age FROM people WHERE age >= 13 
AND age <= 19")
@@ -605,7 +605,7 @@ JavaRDD<Person> people = 
sc.textFile("examples/src/main/resources/people.txt").m
 
 // Apply a schema to an RDD of JavaBeans and register it as a table.
 DataFrame schemaPeople = sqlContext.createDataFrame(people, Person.class);
-schemaPeople.registerTempTable("people");
+schemaPeople.createOrReplaceTempView("people");
 
 // SQL can be run over RDDs that have been registered as tables.
 DataFrame teenagers = sqlContext.sql("SELECT name FROM people WHERE age >= 13 
AND age <= 19")
@@ -643,7 +643,7 @@ people = parts.map(lambda p: Row(name=p[0], age=int(p[1])))
 
 # Infer the schema, and register the DataFrame as a table.
 schemaPeople = sqlContext.createDataFrame(people)
-schemaPeople.registerTempTable("people")
+schemaPeople.createOrReplaceTempView("people")
 
 # SQL can be run over DataFrames that have been registered as a table.
 teenagers = sqlContext.sql("SELECT name FROM people WHERE age >= 13 AND age <= 
19")
@@ -703,8 +703,8 @@ val rowRDD = people.map(_.split(",")).map(p => Row(p(0), 
p(1).trim))
 // Apply the schema to the RDD.
 val peopleDataFrame = sqlContext.createDataFrame(rowRDD, schema)
 
-// Register the DataFrames as a table.
-peopleDataFrame.registerTempTable("people")
+// Creates a temporary view using the DataFrame.
+peopleDataFrame.createOrReplaceTempView("people")
 
 // SQL statements can be run by using the sql methods provided by sqlContext.
 val results = sqlContext.sql("SELECT name FROM people")
@@ -771,10 +771,10 @@ JavaRDD<Row> rowRDD = people.map(
 // Apply the schema to the RDD.
 DataFrame peopleDataFrame = sqlContext.createDataFrame(rowRDD, schema);
 
-// Register the DataFrame as a table.
-peopleDataFrame.registerTempTable("people");
+// Creates a temporary view using the DataFrame.
+peopleDataFrame.createOrReplaceTempView("people");
 
-// SQL can be run over RDDs that have been registered as tables.
+// SQL can be run over a temporary view created using DataFrames.
 DataFrame results = sqlContext.sql("SELECT name FROM people");
 
 // The results of SQL queries are DataFrames and support all the normal RDD 
operations.
@@ -824,8 +824,8 @@ schema = StructType(fields)
 # Apply the schema to the RDD.
 schemaPeople = sqlContext.createDataFrame(people, schema)
 
-# Register the DataFrame as a table.
-schemaPeople.registerTempTable("people")
+# Creates a temporary view using the DataFrame
+schemaPeople.createOrReplaceTempView("people")
 
 # SQL can be run over DataFrames that have been registered as a table.
 results = sqlContext.sql("SELECT name FROM people")
@@ -844,7 +844,7 @@ for name in names.collect():
 # Data Sources
 
 Spark SQL supports operating on a variety of data sources through the 
`DataFrame` interface.
-A DataFrame can be operated on as normal RDDs and can also be registered as a 
temporary table.
+A DataFrame can be operated on as normal RDDs and can also be used to create a 
temporary view.
 Registering a DataFrame as a table allows you to run SQL queries over its 
data. This section
 describes the general methods for loading and saving data using the Spark Data 
Sources and then
 goes into specific options that are available for the built-in data sources.
@@ -1072,8 +1072,8 @@ people.write.parquet("people.parquet")
 // The result of loading a Parquet file is also a DataFrame.
 val parquetFile = sqlContext.read.parquet("people.parquet")
 
-//Parquet files can also be registered as tables and then used in SQL 
statements.
-parquetFile.registerTempTable("parquetFile")
+// Parquet files can also be used to create a temporary view and then used in 
SQL statements.
+parquetFile.createOrReplaceTempView("parquetFile")
 val teenagers = sqlContext.sql("SELECT name FROM parquetFile WHERE age >= 13 
AND age <= 19")
 teenagers.map(t => "Name: " + t(0)).collect().foreach(println)
 {% endhighlight %}
@@ -1094,8 +1094,8 @@ schemaPeople.write().parquet("people.parquet");
 // The result of loading a parquet file is also a DataFrame.
 DataFrame parquetFile = sqlContext.read().parquet("people.parquet");
 
-// Parquet files can also be registered as tables and then used in SQL 
statements.
-parquetFile.registerTempTable("parquetFile");
+// Parquet files can also be used to create a temporary view and then used in 
SQL statements.
+parquetFile.createOrReplaceTempView("parquetFile");
 DataFrame teenagers = sqlContext.sql("SELECT name FROM parquetFile WHERE age 
>= 13 AND age <= 19");
 List<String> teenagerNames = teenagers.javaRDD().map(new Function<Row, 
String>() {
   public String call(Row row) {
@@ -1120,8 +1120,8 @@ schemaPeople.write.parquet("people.parquet")
 # The result of loading a parquet file is also a DataFrame.
 parquetFile = sqlContext.read.parquet("people.parquet")
 
-# Parquet files can also be registered as tables and then used in SQL 
statements.
-parquetFile.registerTempTable("parquetFile");
+# Parquet files can also be used to create a temporary view and then used in 
SQL statements.
+parquetFile.createOrReplaceTempView("parquetFile");
 teenagers = sqlContext.sql("SELECT name FROM parquetFile WHERE age >= 13 AND 
age <= 19")
 teenNames = teenagers.map(lambda p: "Name: " + p.name)
 for teenName in teenNames.collect():
@@ -1144,7 +1144,7 @@ write.parquet(schemaPeople, "people.parquet")
 # The result of loading a parquet file is also a DataFrame.
 parquetFile <- read.parquet(sqlContext, "people.parquet")
 
-# Parquet files can also be registered as tables and then used in SQL 
statements.
+# Parquet files can also be used to create a temporary view and then used in 
SQL statements.
 registerTempTable(parquetFile, "parquetFile")
 teenagers <- sql(sqlContext, "SELECT name FROM parquetFile WHERE age >= 13 AND 
age <= 19")
 schema <- structType(structField("name", "string"))
@@ -1506,8 +1506,8 @@ people.printSchema()
 //  |-- age: long (nullable = true)
 //  |-- name: string (nullable = true)
 
-// Register this DataFrame as a table.
-people.registerTempTable("people")
+// Creates a temporary view using the DataFrame
+people.createOrReplaceTempView("people")
 
 // SQL statements can be run by using the sql methods provided by sqlContext.
 val teenagers = sqlContext.sql("SELECT name FROM people WHERE age >= 13 AND 
age <= 19")
@@ -1544,8 +1544,8 @@ people.printSchema();
 //  |-- age: long (nullable = true)
 //  |-- name: string (nullable = true)
 
-// Register this DataFrame as a table.
-people.registerTempTable("people");
+// Creates a temporary view using the DataFrame
+people.createOrReplaceTempView("people");
 
 // SQL statements can be run by using the sql methods provided by sqlContext.
 DataFrame teenagers = sqlContext.sql("SELECT name FROM people WHERE age >= 13 
AND age <= 19");
@@ -1582,8 +1582,8 @@ people.printSchema()
 #  |-- age: long (nullable = true)
 #  |-- name: string (nullable = true)
 
-# Register this DataFrame as a table.
-people.registerTempTable("people")
+# Creates a temporary view using the DataFrame.
+people.createOrReplaceTempView("people")
 
 # SQL statements can be run by using the sql methods provided by `sqlContext`.
 teenagers = sqlContext.sql("SELECT name FROM people WHERE age >= 13 AND age <= 
19")

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/docs/streaming-programming-guide.md
----------------------------------------------------------------------
diff --git a/docs/streaming-programming-guide.md 
b/docs/streaming-programming-guide.md
index 9ca9b18..4d0a112 100644
--- a/docs/streaming-programming-guide.md
+++ b/docs/streaming-programming-guide.md
@@ -1553,8 +1553,8 @@ words.foreachRDD { rdd =>
   // Convert RDD[String] to DataFrame
   val wordsDataFrame = rdd.toDF("word")
 
-  // Register as table
-  wordsDataFrame.registerTempTable("words")
+  // Create a temporary view
+  wordsDataFrame.createOrReplaceTempView("words")
 
   // Do word count on DataFrame using SQL and print it
   val wordCountsDataFrame = 
@@ -1606,8 +1606,8 @@ words.foreachRDD(
       });
       DataFrame wordsDataFrame = sqlContext.createDataFrame(rowRDD, 
JavaRow.class);
 
-      // Register as table
-      wordsDataFrame.registerTempTable("words");
+      // Creates a temporary view using the DataFrame
+      wordsDataFrame.createOrReplaceTempView("words");
 
       // Do word count on table using SQL and print it
       DataFrame wordCountsDataFrame =
@@ -1646,8 +1646,8 @@ def process(time, rdd):
         rowRdd = rdd.map(lambda w: Row(word=w))
         wordsDataFrame = sqlContext.createDataFrame(rowRdd)
 
-        # Register as table
-        wordsDataFrame.registerTempTable("words")
+        # Creates a temporary view using the DataFrame
+        wordsDataFrame.createOrReplaceTempView("words")
 
         # Do word count on table using SQL and print it
         wordCountsDataFrame = sqlContext.sql("select word, count(*) as total 
from words group by word")

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java
----------------------------------------------------------------------
diff --git 
a/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java 
b/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java
index cf0167f..55e591d 100644
--- a/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java
+++ b/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java
@@ -73,11 +73,11 @@ public class JavaSparkSQL {
         }
       });
 
-    // Apply a schema to an RDD of Java Beans and register it as a table.
+    // Apply a schema to an RDD of Java Beans and create a temporary view
     Dataset<Row> schemaPeople = spark.createDataFrame(people, Person.class);
     schemaPeople.createOrReplaceTempView("people");
 
-    // SQL can be run over RDDs that have been registered as tables.
+    // SQL can be run over RDDs which backs a temporary view.
     Dataset<Row> teenagers = spark.sql("SELECT name FROM people WHERE age >= 
13 AND age <= 19");
 
     // The results of SQL queries are DataFrames and support all the normal 
RDD operations.
@@ -101,7 +101,7 @@ public class JavaSparkSQL {
     // The result of loading a parquet file is also a DataFrame.
     Dataset<Row> parquetFile = spark.read().parquet("people.parquet");
 
-    //Parquet files can also be registered as tables and then used in SQL 
statements.
+    // A temporary view can be created by using Parquet files and then used in 
SQL statements.
     parquetFile.createOrReplaceTempView("parquetFile");
     Dataset<Row> teenagers2 =
       spark.sql("SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19");
@@ -130,7 +130,7 @@ public class JavaSparkSQL {
     //  |-- age: IntegerType
     //  |-- name: StringType
 
-    // Register this DataFrame as a table.
+    // Creates a temporary view using the DataFrame
     peopleFromJsonFile.createOrReplaceTempView("people");
 
     // SQL statements can be run by using the sql methods provided by `spark`

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
----------------------------------------------------------------------
diff --git 
a/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
 
b/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
index 5130522..b8e9e12 100644
--- 
a/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
+++ 
b/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
@@ -94,7 +94,7 @@ public final class JavaSqlNetworkWordCount {
         });
         Dataset<Row> wordsDataFrame = spark.createDataFrame(rowRDD, 
JavaRecord.class);
 
-        // Register as table
+        // Creates a temporary view using the DataFrame
         wordsDataFrame.createOrReplaceTempView("words");
 
         // Do word count on table using SQL and print it

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/examples/src/main/python/sql.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/sql.py b/examples/src/main/python/sql.py
index 2340240..ac72469 100644
--- a/examples/src/main/python/sql.py
+++ b/examples/src/main/python/sql.py
@@ -66,7 +66,7 @@ if __name__ == "__main__":
     #  |-- age: long (nullable = true)
     #  |-- name: string (nullable = true)
 
-    # Register this DataFrame as a temporary table.
+    # Creates a temporary view using the DataFrame.
     people.createOrReplaceTempView("people")
 
     # SQL statements can be run by using the sql methods provided by `spark`

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/examples/src/main/python/streaming/sql_network_wordcount.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/streaming/sql_network_wordcount.py 
b/examples/src/main/python/streaming/sql_network_wordcount.py
index 25e8215..398ac8d 100644
--- a/examples/src/main/python/streaming/sql_network_wordcount.py
+++ b/examples/src/main/python/streaming/sql_network_wordcount.py
@@ -70,7 +70,7 @@ if __name__ == "__main__":
             rowRdd = rdd.map(lambda w: Row(word=w))
             wordsDataFrame = spark.createDataFrame(rowRdd)
 
-            # Register as table
+            # Creates a temporary view using the DataFrame.
             wordsDataFrame.createOrReplaceTempView("words")
 
             # Do word count on table using SQL and print it

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala 
b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
index d1bda0f..1b019fb 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
@@ -35,8 +35,8 @@ object RDDRelation {
     import spark.implicits._
 
     val df = spark.createDataFrame((1 to 100).map(i => Record(i, s"val_$i")))
-    // Any RDD containing case classes can be registered as a table.  The 
schema of the table is
-    // automatically inferred using scala reflection.
+    // Any RDD containing case classes can be used to create a temporary view. 
 The schema of the
+    // view is automatically inferred using scala reflection.
     df.createOrReplaceTempView("records")
 
     // Once tables have been registered, you can run SQL queries over them.
@@ -66,7 +66,7 @@ object RDDRelation {
     // Queries can be run using the DSL on parquet files just like the 
original RDD.
     parquetFile.where($"key" === 
1).select($"value".as("a")).collect().foreach(println)
 
-    // These files can also be registered as tables.
+    // These files can also be used to create a temporary view.
     parquetFile.createOrReplaceTempView("parquetFile")
     spark.sql("SELECT * FROM parquetFile").collect().foreach(println)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
 
b/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
index a15cf5d..7293cb5 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
@@ -70,9 +70,9 @@ object HiveFromSpark {
       case Row(key: Int, value: String) => s"Key: $key, Value: $value"
     }
 
-    // You can also register RDDs as temporary tables within a HiveContext.
+    // You can also use RDDs to create temporary views within a HiveContext.
     val rdd = sc.parallelize((1 to 100).map(i => Record(i, s"val_$i")))
-    rdd.toDF().registerTempTable("records")
+    rdd.toDF().createOrReplaceTempView("records")
 
     // Queries can then join RDD data with data stored in Hive.
     println("Result of SELECT *:")

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
 
b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
index 688c5b2..787bbec 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
@@ -66,7 +66,7 @@ object SqlNetworkWordCount {
       // Convert RDD[String] to RDD[case class] to DataFrame
       val wordsDataFrame = rdd.map(w => Record(w)).toDF()
 
-      // Register as table
+      // Creates a temporary view using the DataFrame
       wordsDataFrame.createOrReplaceTempView("words")
 
       // Do word count on table using SQL and print it

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/mllib/src/test/java/org/apache/spark/ml/JavaPipelineSuite.java
----------------------------------------------------------------------
diff --git a/mllib/src/test/java/org/apache/spark/ml/JavaPipelineSuite.java 
b/mllib/src/test/java/org/apache/spark/ml/JavaPipelineSuite.java
index 46c26e8..a81a36d 100644
--- a/mllib/src/test/java/org/apache/spark/ml/JavaPipelineSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/JavaPipelineSuite.java
@@ -68,7 +68,7 @@ public class JavaPipelineSuite {
     Pipeline pipeline = new Pipeline()
       .setStages(new PipelineStage[]{scaler, lr});
     PipelineModel model = pipeline.fit(dataset);
-    model.transform(dataset).registerTempTable("prediction");
+    model.transform(dataset).createOrReplaceTempView("prediction");
     Dataset<Row> predictions = spark.sql("SELECT label, probability, 
prediction FROM prediction");
     predictions.collectAsList();
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/mllib/src/test/java/org/apache/spark/ml/classification/JavaLogisticRegressionSuite.java
----------------------------------------------------------------------
diff --git 
a/mllib/src/test/java/org/apache/spark/ml/classification/JavaLogisticRegressionSuite.java
 
b/mllib/src/test/java/org/apache/spark/ml/classification/JavaLogisticRegressionSuite.java
index 98abca2..b8da04c 100644
--- 
a/mllib/src/test/java/org/apache/spark/ml/classification/JavaLogisticRegressionSuite.java
+++ 
b/mllib/src/test/java/org/apache/spark/ml/classification/JavaLogisticRegressionSuite.java
@@ -54,7 +54,7 @@ public class JavaLogisticRegressionSuite implements 
Serializable {
     List<LabeledPoint> points = generateLogisticInputAsList(1.0, 1.0, 100, 42);
     datasetRDD = jsc.parallelize(points, 2);
     dataset = spark.createDataFrame(datasetRDD, LabeledPoint.class);
-    dataset.registerTempTable("dataset");
+    dataset.createOrReplaceTempView("dataset");
   }
 
   @After
@@ -68,7 +68,7 @@ public class JavaLogisticRegressionSuite implements 
Serializable {
     LogisticRegression lr = new LogisticRegression();
     Assert.assertEquals(lr.getLabelCol(), "label");
     LogisticRegressionModel model = lr.fit(dataset);
-    model.transform(dataset).registerTempTable("prediction");
+    model.transform(dataset).createOrReplaceTempView("prediction");
     Dataset<Row> predictions = spark.sql("SELECT label, probability, 
prediction FROM prediction");
     predictions.collectAsList();
     // Check defaults
@@ -97,14 +97,14 @@ public class JavaLogisticRegressionSuite implements 
Serializable {
 
     // Modify model params, and check that the params worked.
     model.setThreshold(1.0);
-    model.transform(dataset).registerTempTable("predAllZero");
+    model.transform(dataset).createOrReplaceTempView("predAllZero");
     Dataset<Row> predAllZero = spark.sql("SELECT prediction, myProbability 
FROM predAllZero");
     for (Row r : predAllZero.collectAsList()) {
       Assert.assertEquals(0.0, r.getDouble(0), eps);
     }
     // Call transform with params, and check that the params worked.
     model.transform(dataset, model.threshold().w(0.0), 
model.probabilityCol().w("myProb"))
-      .registerTempTable("predNotAllZero");
+      .createOrReplaceTempView("predNotAllZero");
     Dataset<Row> predNotAllZero = spark.sql("SELECT prediction, myProb FROM 
predNotAllZero");
     boolean foundNonZero = false;
     for (Row r : predNotAllZero.collectAsList()) {
@@ -130,7 +130,7 @@ public class JavaLogisticRegressionSuite implements 
Serializable {
     LogisticRegressionModel model = lr.fit(dataset);
     Assert.assertEquals(2, model.numClasses());
 
-    model.transform(dataset).registerTempTable("transformed");
+    model.transform(dataset).createOrReplaceTempView("transformed");
     Dataset<Row> trans1 = spark.sql("SELECT rawPrediction, probability FROM 
transformed");
     for (Row row : trans1.collectAsList()) {
       Vector raw = (Vector) row.get(0);

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/mllib/src/test/java/org/apache/spark/ml/regression/JavaLinearRegressionSuite.java
----------------------------------------------------------------------
diff --git 
a/mllib/src/test/java/org/apache/spark/ml/regression/JavaLinearRegressionSuite.java
 
b/mllib/src/test/java/org/apache/spark/ml/regression/JavaLinearRegressionSuite.java
index d3ef5f6..126aa62 100644
--- 
a/mllib/src/test/java/org/apache/spark/ml/regression/JavaLinearRegressionSuite.java
+++ 
b/mllib/src/test/java/org/apache/spark/ml/regression/JavaLinearRegressionSuite.java
@@ -50,7 +50,7 @@ public class JavaLinearRegressionSuite implements 
Serializable {
     List<LabeledPoint> points = generateLogisticInputAsList(1.0, 1.0, 100, 42);
     datasetRDD = jsc.parallelize(points, 2);
     dataset = spark.createDataFrame(datasetRDD, LabeledPoint.class);
-    dataset.registerTempTable("dataset");
+    dataset.createOrReplaceTempView("dataset");
   }
 
   @After
@@ -65,7 +65,7 @@ public class JavaLinearRegressionSuite implements 
Serializable {
     assertEquals("label", lr.getLabelCol());
     assertEquals("auto", lr.getSolver());
     LinearRegressionModel model = lr.fit(dataset);
-    model.transform(dataset).registerTempTable("prediction");
+    model.transform(dataset).createOrReplaceTempView("prediction");
     Dataset<Row> predictions = spark.sql("SELECT label, prediction FROM 
prediction");
     predictions.collect();
     // Check defaults

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/python/pyspark/sql/context.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/context.py b/python/pyspark/sql/context.py
index ca111ae..e8e60c6 100644
--- a/python/pyspark/sql/context.py
+++ b/python/pyspark/sql/context.py
@@ -57,7 +57,7 @@ class SQLContext(object):
         ...     b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
         ...     time=datetime(2014, 8, 1, 14, 1, 5))])
         >>> df = allTypes.toDF()
-        >>> df.registerTempTable("allTypes")
+        >>> df.createOrReplaceTempView("allTypes")
         >>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, 
row.a '
         ...            'from allTypes where b and i > 0').collect()
         [Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT 
b)=False, list[1]=2, \
@@ -106,7 +106,7 @@ class SQLContext(object):
     def newSession(self):
         """
         Returns a new SQLContext as new session, that has separate SQLConf,
-        registered temporary tables and UDFs, but shared SparkContext and
+        registered temporary views and UDFs, but shared SparkContext and
         table cache.
         """
         return self.__class__(self._sc, self.sparkSession.newSession())

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/python/pyspark/sql/readwriter.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py
index c98aef1..8e6bce9 100644
--- a/python/pyspark/sql/readwriter.py
+++ b/python/pyspark/sql/readwriter.py
@@ -266,7 +266,7 @@ class DataFrameReader(object):
         :param tableName: string, name of the table.
 
         >>> df = 
spark.read.parquet('python/test_support/sql/parquet_partitioned')
-        >>> df.registerTempTable('tmpTable')
+        >>> df.createOrReplaceTempView('tmpTable')
         >>> spark.read.table('tmpTable').dtypes
         [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
         """

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/python/pyspark/sql/session.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/session.py b/python/pyspark/sql/session.py
index 0781b44..257a239 100644
--- a/python/pyspark/sql/session.py
+++ b/python/pyspark/sql/session.py
@@ -186,7 +186,7 @@ class SparkSession(object):
     def newSession(self):
         """
         Returns a new SparkSession as new session, that has separate SQLConf,
-        registered temporary tables and UDFs, but shared SparkContext and
+        registered temporary views and UDFs, but shared SparkContext and
         table cache.
         """
         return self.__class__(self._sc, self._jsparkSession.newSession())

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/python/pyspark/sql/tests.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/tests.py b/python/pyspark/sql/tests.py
index 0977c43..e86f442 100644
--- a/python/pyspark/sql/tests.py
+++ b/python/pyspark/sql/tests.py
@@ -294,7 +294,8 @@ class SQLTests(ReusedPySparkTestCase):
 
     def test_udf2(self):
         self.spark.catalog.registerFunction("strlen", lambda string: 
len(string), IntegerType())
-        
self.spark.createDataFrame(self.sc.parallelize([Row(a="test")])).registerTempTable("test")
+        self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
+            .createOrReplaceTempView("test")
         [res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 
1").collect()
         self.assertEqual(4, res[0])
 
@@ -320,7 +321,7 @@ class SQLTests(ReusedPySparkTestCase):
     def test_udf_with_array_type(self):
         d = [Row(l=list(range(3)), d={"key": list(range(5))})]
         rdd = self.sc.parallelize(d)
-        self.spark.createDataFrame(rdd).registerTempTable("test")
+        self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
         self.spark.catalog.registerFunction("copylist", lambda l: list(l), 
ArrayType(IntegerType()))
         self.spark.catalog.registerFunction("maplen", lambda d: len(d), 
IntegerType())
         [(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from 
test").collect()
@@ -360,7 +361,7 @@ class SQLTests(ReusedPySparkTestCase):
         self.assertTrue(df.is_cached)
         self.assertEqual(2, df.count())
 
-        df.registerTempTable("temp")
+        df.createOrReplaceTempView("temp")
         df = self.spark.sql("select foo from temp")
         df.count()
         df.collect()
@@ -420,7 +421,7 @@ class SQLTests(ReusedPySparkTestCase):
         df = self.spark.createDataFrame(rdd)
         self.assertEqual([], df.rdd.map(lambda r: r.l).first())
         self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
-        df.registerTempTable("test")
+        df.createOrReplaceTempView("test")
         result = self.spark.sql("SELECT l[0].a from test where d['key'].d = 
'2'")
         self.assertEqual(1, result.head()[0])
 
@@ -428,7 +429,7 @@ class SQLTests(ReusedPySparkTestCase):
         self.assertEqual(df.schema, df2.schema)
         self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
         self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
-        df2.registerTempTable("test2")
+        df2.createOrReplaceTempView("test2")
         result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = 
'2'")
         self.assertEqual(1, result.head()[0])
 
@@ -487,7 +488,7 @@ class SQLTests(ReusedPySparkTestCase):
              datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
         self.assertEqual(r, results.first())
 
-        df.registerTempTable("table2")
+        df.createOrReplaceTempView("table2")
         r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
                            "short1 + 1 AS short1, short2 - 1 AS short2, int1 - 
1 AS int1, " +
                            "float1 + 1.5 as float1 FROM table2").first()
@@ -515,7 +516,7 @@ class SQLTests(ReusedPySparkTestCase):
         row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
         self.assertEqual(1, row.asDict()['l'][0].a)
         df = self.sc.parallelize([row]).toDF()
-        df.registerTempTable("test")
+        df.createOrReplaceTempView("test")
         row = self.spark.sql("select l, d from test").head()
         self.assertEqual(1, row.asDict()["l"][0].a)
         self.assertEqual(1.0, row.asDict()['d']['key'].c)
@@ -556,7 +557,7 @@ class SQLTests(ReusedPySparkTestCase):
         schema = df.schema
         field = [f for f in schema.fields if f.name == "point"][0]
         self.assertEqual(type(field.dataType), ExamplePointUDT)
-        df.registerTempTable("labeled_point")
+        df.createOrReplaceTempView("labeled_point")
         point = self.spark.sql("SELECT point FROM labeled_point").head().point
         self.assertEqual(point, ExamplePoint(1.0, 2.0))
 
@@ -565,7 +566,7 @@ class SQLTests(ReusedPySparkTestCase):
         schema = df.schema
         field = [f for f in schema.fields if f.name == "point"][0]
         self.assertEqual(type(field.dataType), PythonOnlyUDT)
-        df.registerTempTable("labeled_point")
+        df.createOrReplaceTempView("labeled_point")
         point = self.spark.sql("SELECT point FROM labeled_point").head().point
         self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
 
@@ -1427,7 +1428,7 @@ class SQLTests(ReusedPySparkTestCase):
         spark.sql("CREATE DATABASE some_db")
         self.assertEquals(spark.catalog.listTables(), [])
         self.assertEquals(spark.catalog.listTables("some_db"), [])
-        spark.createDataFrame([(1, 1)]).registerTempTable("temp_tab")
+        spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
         spark.sql("CREATE TABLE tab1 (name STRING, age INT)")
         spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT)")
         tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
@@ -1554,8 +1555,8 @@ class SQLTests(ReusedPySparkTestCase):
 
     def test_cache(self):
         spark = self.spark
-        spark.createDataFrame([(2, 2), (3, 3)]).registerTempTable("tab1")
-        spark.createDataFrame([(2, 2), (3, 3)]).registerTempTable("tab2")
+        spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
+        spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
         self.assertFalse(spark.catalog.isCached("tab1"))
         self.assertFalse(spark.catalog.isCached("tab2"))
         spark.catalog.cacheTable("tab1")

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index 4451188..a3e2b49 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -376,7 +376,7 @@ class SQLContext private[sql](
    *  // |-- name: string (nullable = false)
    *  // |-- age: integer (nullable = true)
    *
-   *  dataFrame.registerTempTable("people")
+   *  dataFrame.createOrReplaceTempView("people")
    *  sqlContext.sql("select name from people").collect.foreach(println)
    * }}}
    *

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java
----------------------------------------------------------------------
diff --git 
a/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java 
b/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java
index f2ae40e..573d0e3 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java
@@ -108,7 +108,7 @@ public class JavaApplySchemaSuite implements Serializable {
     StructType schema = DataTypes.createStructType(fields);
 
     Dataset<Row> df = spark.createDataFrame(rowRDD, schema);
-    df.registerTempTable("people");
+    df.createOrReplaceTempView("people");
     List<Row> actual = spark.sql("SELECT * FROM people").collectAsList();
 
     List<Row> expected = new ArrayList<>(2);
@@ -144,7 +144,7 @@ public class JavaApplySchemaSuite implements Serializable {
     StructType schema = DataTypes.createStructType(fields);
 
     Dataset<Row> df = spark.createDataFrame(rowRDD, schema);
-    df.registerTempTable("people");
+    df.createOrReplaceTempView("people");
     List<String> actual = spark.sql("SELECT * FROM people").toJavaRDD()
       .map(new Function<Row, String>() {
         @Override
@@ -202,14 +202,14 @@ public class JavaApplySchemaSuite implements Serializable 
{
     Dataset<Row> df1 = spark.read().json(jsonRDD);
     StructType actualSchema1 = df1.schema();
     Assert.assertEquals(expectedSchema, actualSchema1);
-    df1.registerTempTable("jsonTable1");
+    df1.createOrReplaceTempView("jsonTable1");
     List<Row> actual1 = spark.sql("select * from jsonTable1").collectAsList();
     Assert.assertEquals(expectedResult, actual1);
 
     Dataset<Row> df2 = spark.read().schema(expectedSchema).json(jsonRDD);
     StructType actualSchema2 = df2.schema();
     Assert.assertEquals(expectedSchema, actualSchema2);
-    df2.registerTempTable("jsonTable2");
+    df2.createOrReplaceTempView("jsonTable2");
     List<Row> actual2 = spark.sql("select * from jsonTable2").collectAsList();
     Assert.assertEquals(expectedResult, actual2);
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaSaveLoadSuite.java
----------------------------------------------------------------------
diff --git 
a/sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaSaveLoadSuite.java
 
b/sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaSaveLoadSuite.java
index d0435e4..9840bc4 100644
--- 
a/sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaSaveLoadSuite.java
+++ 
b/sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaSaveLoadSuite.java
@@ -72,7 +72,7 @@ public class JavaSaveLoadSuite {
     }
     JavaRDD<String> rdd = jsc.parallelize(jsonObjects);
     df = spark.read().json(rdd);
-    df.registerTempTable("jsonTable");
+    df.createOrReplaceTempView("jsonTable");
   }
 
   @After

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
index 6d8de80..1c96bdc 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
@@ -71,7 +71,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils 
with SharedSQLContext
   }
 
   test("cache temp table") {
-    testData.select('key).registerTempTable("tempTable")
+    testData.select('key).createOrReplaceTempView("tempTable")
     assertCached(sql("SELECT COUNT(*) FROM tempTable"), 0)
     spark.catalog.cacheTable("tempTable")
     assertCached(sql("SELECT COUNT(*) FROM tempTable"))
@@ -99,8 +99,8 @@ class CachedTableSuite extends QueryTest with SQLTestUtils 
with SharedSQLContext
   }
 
   test("uncaching temp table") {
-    testData.select('key).registerTempTable("tempTable1")
-    testData.select('key).registerTempTable("tempTable2")
+    testData.select('key).createOrReplaceTempView("tempTable1")
+    testData.select('key).createOrReplaceTempView("tempTable2")
     spark.catalog.cacheTable("tempTable1")
 
     assertCached(sql("SELECT COUNT(*) FROM tempTable1"))
@@ -116,7 +116,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils 
with SharedSQLContext
   test("too big for memory") {
     val data = "*" * 1000
     sparkContext.parallelize(1 to 200000, 1).map(_ => BigData(data)).toDF()
-      .registerTempTable("bigData")
+      .createOrReplaceTempView("bigData")
     spark.table("bigData").persist(StorageLevel.MEMORY_AND_DISK)
     assert(spark.table("bigData").count() === 200000L)
     spark.table("bigData").unpersist(blocking = true)
@@ -191,7 +191,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils 
with SharedSQLContext
   }
 
   test("SELECT star from cached table") {
-    sql("SELECT * FROM testData").registerTempTable("selectStar")
+    sql("SELECT * FROM testData").createOrReplaceTempView("selectStar")
     spark.catalog.cacheTable("selectStar")
     checkAnswer(
       sql("SELECT * FROM selectStar WHERE key = 1"),
@@ -286,15 +286,15 @@ class CachedTableSuite extends QueryTest with 
SQLTestUtils with SharedSQLContext
   }
 
   test("Drops temporary table") {
-    testData.select('key).registerTempTable("t1")
+    testData.select('key).createOrReplaceTempView("t1")
     spark.table("t1")
     spark.catalog.dropTempView("t1")
     intercept[AnalysisException](spark.table("t1"))
   }
 
   test("Drops cached temporary table") {
-    testData.select('key).registerTempTable("t1")
-    testData.select('key).registerTempTable("t2")
+    testData.select('key).createOrReplaceTempView("t1")
+    testData.select('key).createOrReplaceTempView("t2")
     spark.catalog.cacheTable("t1")
 
     assert(spark.catalog.isCached("t1"))
@@ -306,15 +306,15 @@ class CachedTableSuite extends QueryTest with 
SQLTestUtils with SharedSQLContext
   }
 
   test("Clear all cache") {
-    sql("SELECT key FROM testData LIMIT 10").registerTempTable("t1")
-    sql("SELECT key FROM testData LIMIT 5").registerTempTable("t2")
+    sql("SELECT key FROM testData LIMIT 10").createOrReplaceTempView("t1")
+    sql("SELECT key FROM testData LIMIT 5").createOrReplaceTempView("t2")
     spark.catalog.cacheTable("t1")
     spark.catalog.cacheTable("t2")
     spark.catalog.clearCache()
     assert(spark.cacheManager.isEmpty)
 
-    sql("SELECT key FROM testData LIMIT 10").registerTempTable("t1")
-    sql("SELECT key FROM testData LIMIT 5").registerTempTable("t2")
+    sql("SELECT key FROM testData LIMIT 10").createOrReplaceTempView("t1")
+    sql("SELECT key FROM testData LIMIT 5").createOrReplaceTempView("t2")
     spark.catalog.cacheTable("t1")
     spark.catalog.cacheTable("t2")
     sql("Clear CACHE")
@@ -322,8 +322,8 @@ class CachedTableSuite extends QueryTest with SQLTestUtils 
with SharedSQLContext
   }
 
   test("Clear accumulators when uncacheTable to prevent memory leaking") {
-    sql("SELECT key FROM testData LIMIT 10").registerTempTable("t1")
-    sql("SELECT key FROM testData LIMIT 5").registerTempTable("t2")
+    sql("SELECT key FROM testData LIMIT 10").createOrReplaceTempView("t1")
+    sql("SELECT key FROM testData LIMIT 5").createOrReplaceTempView("t2")
 
     spark.catalog.cacheTable("t1")
     spark.catalog.cacheTable("t2")
@@ -350,7 +350,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils 
with SharedSQLContext
 
   test("SPARK-10327 Cache Table is not working while subquery has alias in its 
project list") {
     sparkContext.parallelize((1, 1) :: (2, 2) :: Nil)
-      .toDF("key", "value").selectExpr("key", "value", 
"key+1").registerTempTable("abc")
+      .toDF("key", "value").selectExpr("key", "value", 
"key+1").createOrReplaceTempView("abc")
     spark.catalog.cacheTable("abc")
 
     val sparkPlan = sql(
@@ -371,9 +371,9 @@ class CachedTableSuite extends QueryTest with SQLTestUtils 
with SharedSQLContext
 
   test("A cached table preserves the partitioning and ordering of its cached 
SparkPlan") {
     val table3x = testData.union(testData).union(testData)
-    table3x.registerTempTable("testData3x")
+    table3x.createOrReplaceTempView("testData3x")
 
-    sql("SELECT key, value FROM testData3x ORDER BY 
key").registerTempTable("orderedTable")
+    sql("SELECT key, value FROM testData3x ORDER BY 
key").createOrReplaceTempView("orderedTable")
     spark.catalog.cacheTable("orderedTable")
     assertCached(spark.table("orderedTable"))
     // Should not have an exchange as the query is already sorted on the group 
by key.
@@ -388,8 +388,8 @@ class CachedTableSuite extends QueryTest with SQLTestUtils 
with SharedSQLContext
     // different number of partitions.
     for (numPartitions <- 1 until 10 by 4) {
       withTempTable("t1", "t2") {
-        testData.repartition(numPartitions, $"key").registerTempTable("t1")
-        testData2.repartition(numPartitions, $"a").registerTempTable("t2")
+        testData.repartition(numPartitions, 
$"key").createOrReplaceTempView("t1")
+        testData2.repartition(numPartitions, 
$"a").createOrReplaceTempView("t2")
         spark.catalog.cacheTable("t1")
         spark.catalog.cacheTable("t2")
 
@@ -410,8 +410,8 @@ class CachedTableSuite extends QueryTest with SQLTestUtils 
with SharedSQLContext
 
     // Distribute the tables into non-matching number of partitions. Need to 
shuffle one side.
     withTempTable("t1", "t2") {
-      testData.repartition(6, $"key").registerTempTable("t1")
-      testData2.repartition(3, $"a").registerTempTable("t2")
+      testData.repartition(6, $"key").createOrReplaceTempView("t1")
+      testData2.repartition(3, $"a").createOrReplaceTempView("t2")
       spark.catalog.cacheTable("t1")
       spark.catalog.cacheTable("t2")
 
@@ -427,8 +427,8 @@ class CachedTableSuite extends QueryTest with SQLTestUtils 
with SharedSQLContext
 
     // One side of join is not partitioned in the desired way. Need to shuffle 
one side.
     withTempTable("t1", "t2") {
-      testData.repartition(6, $"value").registerTempTable("t1")
-      testData2.repartition(6, $"a").registerTempTable("t2")
+      testData.repartition(6, $"value").createOrReplaceTempView("t1")
+      testData2.repartition(6, $"a").createOrReplaceTempView("t2")
       spark.catalog.cacheTable("t1")
       spark.catalog.cacheTable("t2")
 
@@ -443,8 +443,8 @@ class CachedTableSuite extends QueryTest with SQLTestUtils 
with SharedSQLContext
     }
 
     withTempTable("t1", "t2") {
-      testData.repartition(6, $"value").registerTempTable("t1")
-      testData2.repartition(12, $"a").registerTempTable("t2")
+      testData.repartition(6, $"value").createOrReplaceTempView("t1")
+      testData2.repartition(12, $"a").createOrReplaceTempView("t2")
       spark.catalog.cacheTable("t1")
       spark.catalog.cacheTable("t2")
 
@@ -462,8 +462,8 @@ class CachedTableSuite extends QueryTest with SQLTestUtils 
with SharedSQLContext
     // the side that has already partitioned is smaller than the side that is 
not partitioned,
     // we shuffle both side.
     withTempTable("t1", "t2") {
-      testData.repartition(6, $"value").registerTempTable("t1")
-      testData2.repartition(3, $"a").registerTempTable("t2")
+      testData.repartition(6, $"value").createOrReplaceTempView("t1")
+      testData2.repartition(3, $"a").createOrReplaceTempView("t2")
       spark.catalog.cacheTable("t1")
       spark.catalog.cacheTable("t2")
 
@@ -479,7 +479,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils 
with SharedSQLContext
     // repartition's column ordering is different from group by column 
ordering.
     // But they use the same set of columns.
     withTempTable("t1") {
-      testData.repartition(6, $"value", $"key").registerTempTable("t1")
+      testData.repartition(6, $"value", $"key").createOrReplaceTempView("t1")
       spark.catalog.cacheTable("t1")
 
       val query = sql("SELECT value, key from t1 group by key, value")
@@ -496,9 +496,9 @@ class CachedTableSuite extends QueryTest with SQLTestUtils 
with SharedSQLContext
     // See PartitioningSuite for more details.
     withTempTable("t1", "t2") {
       val df1 = testData
-      df1.repartition(6, $"value", $"key").registerTempTable("t1")
+      df1.repartition(6, $"value", $"key").createOrReplaceTempView("t1")
       val df2 = testData2.select($"a", $"b".cast("string"))
-      df2.repartition(6, $"a", $"b").registerTempTable("t2")
+      df2.repartition(6, $"a", $"b").createOrReplaceTempView("t2")
       spark.catalog.cacheTable("t1")
       spark.catalog.cacheTable("t2")
 

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
index a5aecca..e89fa32 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
@@ -321,7 +321,7 @@ class ColumnExpressionSuite extends QueryTest with 
SharedSQLContext {
         nanvl($"b", $"e"), nanvl($"e", $"f")),
       Row(null, 3.0, 10.0, null, Double.PositiveInfinity, 3.0, 1.0)
     )
-    testData.registerTempTable("t")
+    testData.createOrReplaceTempView("t")
     checkAnswer(
       sql(
         "select nanvl(a, 5), nanvl(b, 10), nanvl(10, b), nanvl(c, null), 
nanvl(d, 10), " +

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/DataFrameTimeWindowingSuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameTimeWindowingSuite.scala
 
b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameTimeWindowingSuite.scala
index 4ee2006..a15b4e1 100644
--- 
a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameTimeWindowingSuite.scala
+++ 
b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameTimeWindowingSuite.scala
@@ -245,7 +245,7 @@ class DataFrameTimeWindowingSuite extends QueryTest with 
SharedSQLContext with B
     Seq(
       ("2016-03-27 19:39:34", 1),
       ("2016-03-27 19:39:56", 2),
-      ("2016-03-27 19:39:27", 4)).toDF("time", 
"value").registerTempTable(tableName)
+      ("2016-03-27 19:39:27", 4)).toDF("time", 
"value").createOrReplaceTempView(tableName)
     try {
       f(tableName)
     } finally {

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowSuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowSuite.scala
index 91095af..07aad3c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowSuite.scala
@@ -49,7 +49,7 @@ class DataFrameWindowSuite extends QueryTest with 
SharedSQLContext {
 
   test("lead") {
     val df = Seq((1, "1"), (2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
-    df.registerTempTable("window_table")
+    df.createOrReplaceTempView("window_table")
 
     checkAnswer(
       df.select(
@@ -59,7 +59,7 @@ class DataFrameWindowSuite extends QueryTest with 
SharedSQLContext {
 
   test("lag") {
     val df = Seq((1, "1"), (2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
-    df.registerTempTable("window_table")
+    df.createOrReplaceTempView("window_table")
 
     checkAnswer(
       df.select(
@@ -70,7 +70,7 @@ class DataFrameWindowSuite extends QueryTest with 
SharedSQLContext {
   test("lead with default value") {
     val df = Seq((1, "1"), (1, "1"), (2, "2"), (1, "1"),
                  (2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
-    df.registerTempTable("window_table")
+    df.createOrReplaceTempView("window_table")
     checkAnswer(
       df.select(
         lead("value", 2, 
"n/a").over(Window.partitionBy("key").orderBy("value"))),
@@ -80,7 +80,7 @@ class DataFrameWindowSuite extends QueryTest with 
SharedSQLContext {
   test("lag with default value") {
     val df = Seq((1, "1"), (1, "1"), (2, "2"), (1, "1"),
                  (2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
-    df.registerTempTable("window_table")
+    df.createOrReplaceTempView("window_table")
     checkAnswer(
       df.select(
         lag("value", 2, 
"n/a").over(Window.partitionBy($"key").orderBy($"value"))),
@@ -89,7 +89,7 @@ class DataFrameWindowSuite extends QueryTest with 
SharedSQLContext {
 
   test("rank functions in unspecific window") {
     val df = Seq((1, "1"), (2, "2"), (1, "2"), (2, "2")).toDF("key", "value")
-    df.registerTempTable("window_table")
+    df.createOrReplaceTempView("window_table")
     checkAnswer(
       df.select(
         $"key",
@@ -112,7 +112,7 @@ class DataFrameWindowSuite extends QueryTest with 
SharedSQLContext {
 
   test("aggregation and rows between") {
     val df = Seq((1, "1"), (2, "1"), (2, "2"), (1, "1"), (2, "2")).toDF("key", 
"value")
-    df.registerTempTable("window_table")
+    df.createOrReplaceTempView("window_table")
     checkAnswer(
       df.select(
         
avg("key").over(Window.partitionBy($"value").orderBy($"key").rowsBetween(-1, 
2))),
@@ -121,7 +121,7 @@ class DataFrameWindowSuite extends QueryTest with 
SharedSQLContext {
 
   test("aggregation and range between") {
     val df = Seq((1, "1"), (1, "1"), (3, "1"), (2, "2"), (2, "1"), (2, 
"2")).toDF("key", "value")
-    df.registerTempTable("window_table")
+    df.createOrReplaceTempView("window_table")
     checkAnswer(
       df.select(
         avg("key").over(Window.partitionBy($"value").orderBy($"key").rangeBetween(-1, 1))),
@@ -131,7 +131,7 @@ class DataFrameWindowSuite extends QueryTest with SharedSQLContext {
 
   test("aggregation and rows between with unbounded") {
     val df = Seq((1, "1"), (2, "2"), (2, "3"), (1, "3"), (3, "2"), (4, 
"3")).toDF("key", "value")
-    df.registerTempTable("window_table")
+    df.createOrReplaceTempView("window_table")
     checkAnswer(
       df.select(
         $"key",
@@ -146,7 +146,7 @@ class DataFrameWindowSuite extends QueryTest with 
SharedSQLContext {
 
   test("aggregation and range between with unbounded") {
     val df = Seq((5, "1"), (5, "2"), (4, "2"), (6, "2"), (3, "1"), (2, 
"2")).toDF("key", "value")
-    df.registerTempTable("window_table")
+    df.createOrReplaceTempView("window_table")
     checkAnswer(
       df.select(
         $"key",
@@ -357,7 +357,7 @@ class DataFrameWindowSuite extends QueryTest with 
SharedSQLContext {
 
   test("aggregation and rows between with unbounded + predicate pushdown") {
     val df = Seq((1, "1"), (2, "2"), (2, "3"), (1, "3"), (3, "2"), (4, 
"3")).toDF("key", "value")
-    df.registerTempTable("window_table")
+    df.createOrReplaceTempView("window_table")
     val selectList = Seq($"key", $"value",
       last("key").over(
         Window.partitionBy($"value").orderBy($"key").rowsBetween(0, 
Long.MaxValue)),
@@ -372,7 +372,7 @@ class DataFrameWindowSuite extends QueryTest with 
SharedSQLContext {
 
   test("aggregation and range between with unbounded + predicate pushdown") {
     val df = Seq((5, "1"), (5, "2"), (4, "2"), (6, "2"), (3, "1"), (2, 
"2")).toDF("key", "value")
-    df.registerTempTable("window_table")
+    df.createOrReplaceTempView("window_table")
     val selectList = Seq($"key", $"value",
       last("value").over(
         Window.partitionBy($"value").orderBy($"key").rangeBetween(-2, 
-1)).equalTo("2")
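
The DataFrameWindowSuite hunks above only swap the registration call; the window expressions themselves are untouched. As a rough, self-contained illustration of what such a registered view supports, assuming the same kind of local-session setup as the earlier sketch (data and names invented for the example):

```scala
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.lead

object WindowOverTempViewSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("window-view").getOrCreate()
    import spark.implicits._

    val df = Seq((1, "1"), (2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
    df.createOrReplaceTempView("window_table")

    // DataFrame API: lead over a key-partitioned, value-ordered window.
    df.select(lead("value", 1).over(Window.partitionBy("key").orderBy("value"))).show()

    // Equivalent SQL against the temporary view registered above.
    spark.sql(
      "SELECT LEAD(value, 1) OVER (PARTITION BY key ORDER BY value) FROM window_table").show()

    spark.stop()
  }
}
```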

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala
index da567db..a6b83b3 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala
@@ -344,8 +344,8 @@ class JoinSuite extends QueryTest with SharedSQLContext {
   }
 
   test("full outer join") {
-    upperCaseData.where('N <= 4).registerTempTable("`left`")
-    upperCaseData.where('N >= 3).registerTempTable("`right`")
+    upperCaseData.where('N <= 4).createOrReplaceTempView("`left`")
+    upperCaseData.where('N >= 3).createOrReplaceTempView("`right`")
 
     val left = UnresolvedRelation(TableIdentifier("left"), None)
     val right = UnresolvedRelation(TableIdentifier("right"), None)

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/ListTablesSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ListTablesSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ListTablesSuite.scala
index 1c6e6cc..65fe271 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/ListTablesSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/ListTablesSuite.scala
@@ -29,7 +29,7 @@ class ListTablesSuite extends QueryTest with BeforeAndAfter 
with SharedSQLContex
   private lazy val df = (1 to 10).map(i => (i, s"str$i")).toDF("key", "value")
 
   before {
-    df.registerTempTable("listtablessuitetable")
+    df.createOrReplaceTempView("listtablessuitetable")
   }
 
   after {
@@ -74,7 +74,7 @@ class ListTablesSuite extends QueryTest with BeforeAndAfter 
with SharedSQLContex
       case tableDF =>
         assert(expectedSchema === tableDF.schema)
 
-        tableDF.registerTempTable("tables")
+        tableDF.createOrReplaceTempView("tables")
         checkAnswer(
           sql(
             "SELECT isTemporary, tableName from tables WHERE tableName = 
'listtablessuitetable'"),

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/SQLContextSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLContextSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLContextSuite.scala
index 1d5fc57..38d7b6e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLContextSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLContextSuite.scala
@@ -60,7 +60,7 @@ class SQLContextSuite extends SparkFunSuite with 
SharedSparkContext {
 
     // temporary table should not be shared
     val df = session1.range(10)
-    df.registerTempTable("test1")
+    df.createOrReplaceTempView("test1")
     assert(session1.tableNames().contains("test1"))
     assert(!session2.tableNames().contains("test1"))
 

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index b67e2bd..010dea5 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -39,7 +39,8 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
   setupTestData()
 
   test("having clause") {
-    Seq(("one", 1), ("two", 2), ("three", 3), ("one", 5)).toDF("k", 
"v").registerTempTable("hav")
+    Seq(("one", 1), ("two", 2), ("three", 3), ("one", 5)).toDF("k", "v")
+      .createOrReplaceTempView("hav")
     checkAnswer(
       sql("SELECT k, sum(v) FROM hav GROUP BY k HAVING sum(v) > 2"),
       Row("one", 6) :: Row("three", 3) :: Nil)
@@ -47,7 +48,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
 
   test("SPARK-8010: promote numeric to string") {
     val df = Seq((1, 1)).toDF("key", "value")
-    df.registerTempTable("src")
+    df.createOrReplaceTempView("src")
     val queryCaseWhen = sql("select case when true then 1.0 else '1' end from src ")
     val queryCoalesce = sql("select coalesce(null, 1, '1') from src ")
 
@@ -100,7 +101,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext 
{
       (83, 0, 38),
       (26, 0, 79),
       (43, 81, 24)
-    ).toDF("a", "b", "c").registerTempTable("cachedData")
+    ).toDF("a", "b", "c").createOrReplaceTempView("cachedData")
 
     spark.catalog.cacheTable("cachedData")
     checkAnswer(
@@ -109,7 +110,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext 
{
   }
 
   test("self join with aliases") {
-    Seq(1, 2, 3).map(i => (i, i.toString)).toDF("int", "str").registerTempTable("df")
+    Seq(1, 2, 3).map(i => (i, i.toString)).toDF("int", "str").createOrReplaceTempView("df")
 
     checkAnswer(
       sql(
@@ -137,7 +138,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext 
{
         .toDF("int", "str")
         .groupBy("str")
         .agg($"str", count("str").as("strCount"))
-        .registerTempTable("df")
+        .createOrReplaceTempView("df")
 
     checkAnswer(
       sql(
@@ -195,7 +196,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext 
{
   test("grouping on nested fields") {
     spark.read.json(sparkContext.parallelize(
       """{"nested": {"attribute": 1}, "value": 2}""" :: Nil))
-     .registerTempTable("rows")
+     .createOrReplaceTempView("rows")
 
     checkAnswer(
       sql(
@@ -214,7 +215,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext 
{
     spark.read.json(
       sparkContext.parallelize(
         Seq("{\"a\": \"1\"}}", "{\"a\": \"2\"}}", "{\"a\": \"3\"}}")))
-      .registerTempTable("d")
+      .createOrReplaceTempView("d")
 
     checkAnswer(
       sql("select * from d where d.a in (1,2)"),
@@ -225,7 +226,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext 
{
     spark.read.json(
       sparkContext.parallelize(
         Seq("{\"a\": \"1\"}}", "{\"a\": \"2\"}}", "{\"a\": \"3\"}}", "")))
-      .registerTempTable("d")
+      .createOrReplaceTempView("d")
 
     checkAnswer(
       sql("select count(1) from d"),
@@ -261,7 +262,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext 
{
     spark.table("testData")
       .union(spark.table("testData"))
       .union(spark.table("testData"))
-      .registerTempTable("testData3x")
+      .createOrReplaceTempView("testData3x")
 
     try {
       // Just to group rows.
@@ -391,7 +392,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext 
{
   }
 
   test("SPARK-3173 Timestamp support in the parser") {
-    (0 to 3).map(i => Tuple1(new Timestamp(i))).toDF("time").registerTempTable("timestamps")
+    (0 to 3).map(i => Tuple1(new Timestamp(i))).toDF("time").createOrReplaceTempView("timestamps")
 
     checkAnswer(sql(
       "SELECT time FROM timestamps WHERE time='1969-12-31 16:00:00.0'"),
@@ -746,7 +747,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext 
{
 
   test("count of empty table") {
     withTempTable("t") {
-      Seq.empty[(Int, Int)].toDF("a", "b").registerTempTable("t")
+      Seq.empty[(Int, Int)].toDF("a", "b").createOrReplaceTempView("t")
       checkAnswer(
         sql("select count(a) from t"),
         Row(0))
@@ -891,10 +892,10 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
   test("SPARK-3349 partitioning after limit") {
     sql("SELECT DISTINCT n FROM lowerCaseData ORDER BY n DESC")
       .limit(2)
-      .registerTempTable("subset1")
+      .createOrReplaceTempView("subset1")
     sql("SELECT DISTINCT n FROM lowerCaseData ORDER BY n ASC")
       .limit(2)
-      .registerTempTable("subset2")
+      .createOrReplaceTempView("subset2")
     checkAnswer(
       sql("SELECT * FROM lowerCaseData INNER JOIN subset1 ON subset1.n = 
lowerCaseData.n"),
       Row(3, "c", 3) ::
@@ -1111,7 +1112,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
     }
 
     val df1 = spark.createDataFrame(rowRDD1, schema1)
-    df1.registerTempTable("applySchema1")
+    df1.createOrReplaceTempView("applySchema1")
     checkAnswer(
       sql("SELECT * FROM applySchema1"),
       Row(1, "A1", true, null) ::
@@ -1141,7 +1142,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
     }
 
     val df2 = spark.createDataFrame(rowRDD2, schema2)
-    df2.registerTempTable("applySchema2")
+    df2.createOrReplaceTempView("applySchema2")
     checkAnswer(
       sql("SELECT * FROM applySchema2"),
       Row(Row(1, true), Map("A1" -> null)) ::
@@ -1166,7 +1167,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
     }
 
     val df3 = spark.createDataFrame(rowRDD3, schema2)
-    df3.registerTempTable("applySchema3")
+    df3.createOrReplaceTempView("applySchema3")
 
     checkAnswer(
       sql("SELECT f1.f11, f2['D4'] FROM applySchema3"),
@@ -1214,7 +1215,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
     def validateMetadata(rdd: DataFrame): Unit = {
       assert(rdd.schema("name").metadata.getString(docKey) == docValue)
     }
-    personWithMeta.registerTempTable("personWithMeta")
+    personWithMeta.createOrReplaceTempView("personWithMeta")
     validateMetadata(personWithMeta.select($"name"))
     validateMetadata(personWithMeta.select($"name"))
     validateMetadata(personWithMeta.select($"id", $"name"))
@@ -1409,7 +1410,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
   test("SPARK-3483 Special chars in column names") {
     val data = sparkContext.parallelize(
       Seq("""{"key?number1": "value1", "key.number2": "value2"}"""))
-    spark.read.json(data).registerTempTable("records")
+    spark.read.json(data).createOrReplaceTempView("records")
     sql("SELECT `key?number1`, `key.number2` FROM records")
   }
 
@@ -1451,12 +1452,12 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
 
   test("SPARK-4322 Grouping field with struct field as sub expression") {
     spark.read.json(sparkContext.makeRDD("""{"a": {"b": [{"c": 1}]}}""" :: Nil))
-      .registerTempTable("data")
+      .createOrReplaceTempView("data")
     checkAnswer(sql("SELECT a.b[0].c FROM data GROUP BY a.b[0].c"), Row(1))
     spark.catalog.dropTempView("data")
 
     spark.read.json(
-      sparkContext.makeRDD("""{"a": {"b": 1}}""" :: Nil)).registerTempTable("data")
+      sparkContext.makeRDD("""{"a": {"b": 1}}""" :: Nil)).createOrReplaceTempView("data")
     checkAnswer(sql("SELECT a.b + 1 FROM data GROUP BY a.b + 1"), Row(2))
     spark.catalog.dropTempView("data")
   }
@@ -1478,10 +1479,10 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
   test("Supporting relational operator '<=>' in Spark SQL") {
     val nullCheckData1 = TestData(1, "1") :: TestData(2, null) :: Nil
     val rdd1 = sparkContext.parallelize((0 to 1).map(i => nullCheckData1(i)))
-    rdd1.toDF().registerTempTable("nulldata1")
+    rdd1.toDF().createOrReplaceTempView("nulldata1")
     val nullCheckData2 = TestData(1, "1") :: TestData(2, null) :: Nil
     val rdd2 = sparkContext.parallelize((0 to 1).map(i => nullCheckData2(i)))
-    rdd2.toDF().registerTempTable("nulldata2")
+    rdd2.toDF().createOrReplaceTempView("nulldata2")
     checkAnswer(sql("SELECT nulldata1.key FROM nulldata1 join " +
       "nulldata2 on nulldata1.value <=> nulldata2.value"),
         (1 to 2).map(i => Row(i)))
@@ -1490,7 +1491,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
   test("Multi-column COUNT(DISTINCT ...)") {
     val data = TestData(1, "val_1") :: TestData(2, "val_2") :: Nil
     val rdd = sparkContext.parallelize((0 to 1).map(i => data(i)))
-    rdd.toDF().registerTempTable("distinctData")
+    rdd.toDF().createOrReplaceTempView("distinctData")
     checkAnswer(sql("SELECT COUNT(DISTINCT key,value) FROM distinctData"), 
Row(2))
   }
 
@@ -1498,7 +1499,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
     withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
       val data = TestData(1, "val_1") :: TestData(2, "val_2") :: Nil
       val rdd = sparkContext.parallelize((0 to 1).map(i => data(i)))
-      rdd.toDF().registerTempTable("testTable1")
+      rdd.toDF().createOrReplaceTempView("testTable1")
       checkAnswer(sql("SELECT VALUE FROM TESTTABLE1 where KEY = 1"), 
Row("val_1"))
     }
   }
@@ -1506,7 +1507,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
   test("SPARK-6145: ORDER BY test for nested fields") {
     spark.read.json(sparkContext.makeRDD(
         """{"a": {"b": 1, "a": {"a": 1}}, "c": [{"d": 1}]}""" :: Nil))
-      .registerTempTable("nestedOrder")
+      .createOrReplaceTempView("nestedOrder")
 
     checkAnswer(sql("SELECT 1 FROM nestedOrder ORDER BY a.b"), Row(1))
     checkAnswer(sql("SELECT a.b FROM nestedOrder ORDER BY a.b"), Row(1))
@@ -1517,8 +1518,10 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
   }
 
   test("SPARK-6145: special cases") {
-    spark.read.json(sparkContext.makeRDD(
-      """{"a": {"b": [1]}, "b": [{"a": 1}], "_c0": {"a": 1}}""" :: 
Nil)).registerTempTable("t")
+    spark.read
+      .json(sparkContext.makeRDD("""{"a": {"b": [1]}, "b": [{"a": 1}], "_c0": 
{"a": 1}}""" :: Nil))
+      .createOrReplaceTempView("t")
+
     checkAnswer(sql("SELECT a.b[0] FROM t ORDER BY _c0.a"), Row(1))
     checkAnswer(sql("SELECT b[0].a FROM t ORDER BY _c0.a"), Row(1))
   }
@@ -1526,14 +1529,14 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
   test("SPARK-6898: complete support for special chars in column names") {
     spark.read.json(sparkContext.makeRDD(
       """{"a": {"c.b": 1}, "b.$q": [{"a@!.q": 1}], "q.w": {"w.i&": [1]}}""" :: 
Nil))
-      .registerTempTable("t")
+      .createOrReplaceTempView("t")
 
     checkAnswer(sql("SELECT a.`c.b`, `b.$q`[0].`a@!.q`, `q.w`.`w.i&`[0] FROM 
t"), Row(1, 1, 1))
   }
 
   test("SPARK-6583 order by aggregated function") {
     Seq("1" -> 3, "1" -> 4, "2" -> 7, "2" -> 8, "3" -> 5, "3" -> 6, "4" -> 1, 
"4" -> 2)
-      .toDF("a", "b").registerTempTable("orderByData")
+      .toDF("a", "b").createOrReplaceTempView("orderByData")
 
     checkAnswer(
       sql(
@@ -1619,7 +1622,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
         (0, null, null, false),
         (1, null, null, false),
         (null, null, null, true)
-      ).toDF("i", "b", "r1", "r2").registerTempTable("t")
+      ).toDF("i", "b", "r1", "r2").createOrReplaceTempView("t")
 
       checkAnswer(sql("select i = b from t"), sql("select r1 from t"))
       checkAnswer(sql("select i <=> b from t"), sql("select r2 from t"))
@@ -1629,14 +1632,14 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
   test("SPARK-7067: order by queries for complex ExtractValue chain") {
     withTempTable("t") {
       spark.read.json(sparkContext.makeRDD(
-        """{"a": {"b": [{"c": 1}]}, "b": [{"d": 1}]}""" :: 
Nil)).registerTempTable("t")
+        """{"a": {"b": [{"c": 1}]}, "b": [{"d": 1}]}""" :: 
Nil)).createOrReplaceTempView("t")
       checkAnswer(sql("SELECT a.b FROM t ORDER BY b[0].d"), Row(Seq(Row(1))))
     }
   }
 
   test("SPARK-8782: ORDER BY NULL") {
     withTempTable("t") {
-      Seq((1, 2), (1, 2)).toDF("a", "b").registerTempTable("t")
+      Seq((1, 2), (1, 2)).toDF("a", "b").createOrReplaceTempView("t")
       checkAnswer(sql("SELECT * FROM t ORDER BY NULL"), Seq(Row(1, 2), Row(1, 
2)))
     }
   }
@@ -1645,7 +1648,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
     withTempTable("t") {
       val df = Seq(1 -> "a").toDF("count", "sort")
       checkAnswer(df.filter("count > 0"), Row(1, "a"))
-      df.registerTempTable("t")
+      df.createOrReplaceTempView("t")
       checkAnswer(sql("select count, sort from t"), Row(1, "a"))
     }
   }
@@ -1759,7 +1762,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
     withTempTable("1one") {
       sparkContext.parallelize(1 to 10).map(i => (i, i.toString))
         .toDF("num", "str")
-        .registerTempTable("1one")
+        .createOrReplaceTempView("1one")
       checkAnswer(sql("select count(num) from 1one"), Row(10))
     }
   }
@@ -1801,7 +1804,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
 
   test("SPARK-10130 type coercion for IF should have children resolved first") 
{
     withTempTable("src") {
-      Seq((1, 1), (-1, 1)).toDF("key", "value").registerTempTable("src")
+      Seq((1, 1), (-1, 1)).toDF("key", "value").createOrReplaceTempView("src")
       checkAnswer(
         sql("SELECT IF(a > 0, a, 0) FROM (SELECT key a FROM src) temp"), 
Seq(Row(1), Row(0)))
     }
@@ -1809,7 +1812,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
 
   test("SPARK-10389: order by non-attribute grouping expression on Aggregate") 
{
     withTempTable("src") {
-      Seq((1, 1), (-1, 1)).toDF("key", "value").registerTempTable("src")
+      Seq((1, 1), (-1, 1)).toDF("key", "value").createOrReplaceTempView("src")
       checkAnswer(sql("SELECT MAX(value) FROM src GROUP BY key + 1 ORDER BY 
key + 1"),
         Seq(Row(1), Row(1)))
       checkAnswer(sql("SELECT MAX(value) FROM src GROUP BY key + 1 ORDER BY 
(key + 1) * 2"),
@@ -1872,7 +1875,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
 
   test("SPARK-11032: resolve having correctly") {
     withTempTable("src") {
-      Seq(1 -> "a").toDF("i", "j").registerTempTable("src")
+      Seq(1 -> "a").toDF("i", "j").createOrReplaceTempView("src")
       checkAnswer(
         sql("SELECT MIN(t.i) FROM (SELECT * FROM src WHERE i > 0) t 
HAVING(COUNT(1) > 0)"),
         Row(1))
@@ -1910,8 +1913,8 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
       Row(1, 1, 1, 1) :: Row(1, 2, 2, 1) :: Row(2, 1, 1, 2) :: Row(2, 2, 2, 2) 
::
         Row(3, 1, 1, 3) :: Row(3, 2, 2, 3) :: Nil)
 
-    // Try with a registered table.
-    sql("select struct(a, b) as record from 
testData2").registerTempTable("structTable")
+    // Try with a temporary view
+    sql("select struct(a, b) as record from 
testData2").createOrReplaceTempView("structTable")
     checkAnswer(
       sql("SELECT record.* FROM structTable"),
       Row(1, 1) :: Row(1, 2) :: Row(2, 1) :: Row(2, 2) :: Row(3, 1) :: Row(3, 
2) :: Nil)
@@ -1975,9 +1978,9 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
       nestedStructData.select($"record.r1.*"),
       Row(1, 1) :: Row(1, 2) :: Row(2, 1) :: Row(2, 2) :: Row(3, 1) :: Row(3, 
2) :: Nil)
 
-    // Try with a registered table
+    // Try with a temporary view
     withTempTable("nestedStructTable") {
-      nestedStructData.registerTempTable("nestedStructTable")
+      nestedStructData.createOrReplaceTempView("nestedStructTable")
       checkAnswer(
         sql("SELECT record.* FROM nestedStructTable"),
         nestedStructData.select($"record.*"))
@@ -2000,7 +2003,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
         |   (SELECT struct(a, b) as `col$.a_`, struct(b, a) as `a.b.c.` FROM 
testData2) tmp
       """.stripMargin)
     withTempTable("specialCharacterTable") {
-      specialCharacterPath.registerTempTable("specialCharacterTable")
+      specialCharacterPath.createOrReplaceTempView("specialCharacterTable")
       checkAnswer(
         specialCharacterPath.select($"`r&&b.c`.*"),
         nestedStructData.select($"record.*"))
@@ -2024,7 +2027,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
     // Create a data set that contains a naming conflict
     val nameConflict = sql("SELECT struct(a, b) as nameConflict, a as a FROM 
testData2")
     withTempTable("nameConflict") {
-      nameConflict.registerTempTable("nameConflict")
+      nameConflict.createOrReplaceTempView("nameConflict")
       // Unqualified should resolve to table.
       checkAnswer(sql("SELECT nameConflict.* FROM nameConflict"),
         Row(Row(1, 1), 1) :: Row(Row(1, 2), 1) :: Row(Row(2, 1), 2) :: 
Row(Row(2, 2), 2) ::
@@ -2328,7 +2331,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
   test("SPARK-13056: Null in map value causes NPE") {
     val df = Seq(1 -> Map("abc" -> "somestring", "cba" -> null)).toDF("key", 
"value")
     withTempTable("maptest") {
-      df.registerTempTable("maptest")
+      df.createOrReplaceTempView("maptest")
       // local optimization will by pass codegen code, so we should keep the filter `key=1`
       checkAnswer(sql("SELECT value['abc'] FROM maptest where key = 1"), Row("somestring"))
       checkAnswer(sql("SELECT value['cba'] FROM maptest where key = 1"), Row(null))
@@ -2338,7 +2341,7 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
   test("hash function") {
     val df = Seq(1 -> "a", 2 -> "b").toDF("i", "j")
     withTempTable("tbl") {
-      df.registerTempTable("tbl")
+      df.createOrReplaceTempView("tbl")
       checkAnswer(
         df.select(hash($"i", $"j")),
         sql("SELECT hash(i, j) from tbl")
@@ -2390,8 +2393,8 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
     val df1 = Seq(("one", 1), ("two", 2), ("three", 3)).toDF("k", "v1")
     val df2 = Seq(("one", 1), ("two", 22), ("one", 5)).toDF("k", "v2")
     withTempTable("nt1", "nt2") {
-      df1.registerTempTable("nt1")
-      df2.registerTempTable("nt2")
+      df1.createOrReplaceTempView("nt1")
+      df2.createOrReplaceTempView("nt2")
       checkAnswer(
         sql("SELECT * FROM nt1 natural join nt2 where k = \"one\""),
         Row("one", 1, 1) :: Row("one", 1, 5) :: Nil)
@@ -2418,9 +2421,9 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
     val df3 = Seq((null, "r1c2", "t3r1c3"),
       ("r2c1", "r2c2", "t3r2c3"), ("r3c1y", "r3c2", "t3r3c3")).toDF("c1", 
"c2", "c3")
     withTempTable("t1", "t2", "t3") {
-      df1.registerTempTable("t1")
-      df2.registerTempTable("t2")
-      df3.registerTempTable("t3")
+      df1.createOrReplaceTempView("t1")
+      df2.createOrReplaceTempView("t2")
+      df3.createOrReplaceTempView("t3")
       // inner join with one using column
       checkAnswer(
         sql("SELECT * FROM t1 join t2 using (c1)"),

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala
index 295f02f..491bdb3 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala
@@ -78,7 +78,7 @@ class ScalaReflectionRelationSuite extends SparkFunSuite with SharedSQLContext {
   test("query case class RDD") {
     val data = ReflectData("a", 1, 1L, 1.toFloat, 1.toDouble, 1.toShort, 
1.toByte, true,
       new java.math.BigDecimal(1), Date.valueOf("1970-01-01"), new 
Timestamp(12345), Seq(1, 2, 3))
-    Seq(data).toDF().registerTempTable("reflectData")
+    Seq(data).toDF().createOrReplaceTempView("reflectData")
 
     assert(sql("SELECT * FROM reflectData").collect().head ===
       Row("a", 1, 1L, 1.toFloat, 1.toDouble, 1.toShort, 1.toByte, true,
@@ -88,7 +88,7 @@ class ScalaReflectionRelationSuite extends SparkFunSuite with 
SharedSQLContext {
 
   test("query case class RDD with nulls") {
     val data = NullReflectData(null, null, null, null, null, null, null)
-    Seq(data).toDF().registerTempTable("reflectNullData")
+    Seq(data).toDF().createOrReplaceTempView("reflectNullData")
 
     assert(sql("SELECT * FROM reflectNullData").collect().head ===
       Row.fromSeq(Seq.fill(7)(null)))
@@ -96,7 +96,7 @@ class ScalaReflectionRelationSuite extends SparkFunSuite with 
SharedSQLContext {
 
   test("query case class RDD with Nones") {
     val data = OptionalReflectData(None, None, None, None, None, None, None)
-    Seq(data).toDF().registerTempTable("reflectOptionalData")
+    Seq(data).toDF().createOrReplaceTempView("reflectOptionalData")
 
     assert(sql("SELECT * FROM reflectOptionalData").collect().head ===
       Row.fromSeq(Seq.fill(7)(null)))
@@ -104,7 +104,7 @@ class ScalaReflectionRelationSuite extends SparkFunSuite 
with SharedSQLContext {
 
   // Equality is broken for Arrays, so we test that separately.
   test("query binary data") {
-    Seq(ReflectBinary(Array[Byte](1))).toDF().registerTempTable("reflectBinary")
+    Seq(ReflectBinary(Array[Byte](1))).toDF().createOrReplaceTempView("reflectBinary")
 
     val result = sql("SELECT data FROM reflectBinary")
       .collect().head(0).asInstanceOf[Array[Byte]]
@@ -124,7 +124,7 @@ class ScalaReflectionRelationSuite extends SparkFunSuite 
with SharedSQLContext {
         Map(10 -> Some(100L), 20 -> Some(200L), 30 -> None),
         Nested(None, "abc")))
 
-    Seq(data).toDF().registerTempTable("reflectComplexData")
+    Seq(data).toDF().createOrReplaceTempView("reflectComplexData")
     assert(sql("SELECT * FROM reflectComplexData").collect().head ===
       Row(
         Seq(1, 2, 3),

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
index 17ac0c8..4819692 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
@@ -49,9 +49,9 @@ class SubquerySuite extends QueryTest with SharedSQLContext {
 
   protected override def beforeAll(): Unit = {
     super.beforeAll()
-    l.registerTempTable("l")
-    r.registerTempTable("r")
-    t.registerTempTable("t")
+    l.createOrReplaceTempView("l")
+    r.createOrReplaceTempView("r")
+    t.createOrReplaceTempView("t")
   }
 
   test("simple uncorrelated scalar subquery") {
@@ -99,7 +99,7 @@ class SubquerySuite extends QueryTest with SharedSQLContext {
 
   test("uncorrelated scalar subquery on a DataFrame generated query") {
     val df = Seq((1, "one"), (2, "two"), (3, "three")).toDF("key", "value")
-    df.registerTempTable("subqueryData")
+    df.createOrReplaceTempView("subqueryData")
 
     checkAnswer(
       sql("select (select key from subqueryData where key > 2 order by key 
limit 1) + 1"),

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala
index 9221543..547d3c1 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala
@@ -53,7 +53,7 @@ class UDFSuite extends QueryTest with SharedSQLContext {
 
   test("SPARK-8003 spark_partition_id") {
     val df = Seq((1, "Tearing down the walls that divide us")).toDF("id", 
"saying")
-    df.registerTempTable("tmp_table")
+    df.createOrReplaceTempView("tmp_table")
     checkAnswer(sql("select spark_partition_id() from tmp_table").toDF(), 
Row(0))
     spark.catalog.dropTempView("tmp_table")
   }
@@ -62,7 +62,7 @@ class UDFSuite extends QueryTest with SharedSQLContext {
     withTempPath { dir =>
       val data = sparkContext.parallelize(0 to 10, 2).toDF("id")
       data.write.parquet(dir.getCanonicalPath)
-      spark.read.parquet(dir.getCanonicalPath).registerTempTable("test_table")
+      spark.read.parquet(dir.getCanonicalPath).createOrReplaceTempView("test_table")
       val answer = sql("select input_file_name() from 
test_table").head().getString(0)
       assert(answer.contains(dir.getCanonicalPath))
       assert(sql("select input_file_name() from 
test_table").distinct().collect().length >= 2)
@@ -107,7 +107,7 @@ class UDFSuite extends QueryTest with SharedSQLContext {
 
     val df = sparkContext.parallelize(
       (1 to 100).map(i => TestData(i, i.toString))).toDF()
-    df.registerTempTable("integerData")
+    df.createOrReplaceTempView("integerData")
 
     val result =
       sql("SELECT * FROM integerData WHERE oneArgFilter(key)")
@@ -119,7 +119,7 @@ class UDFSuite extends QueryTest with SharedSQLContext {
 
     val df = Seq(("red", 1), ("red", 2), ("blue", 10),
       ("green", 100), ("green", 200)).toDF("g", "v")
-    df.registerTempTable("groupData")
+    df.createOrReplaceTempView("groupData")
 
     val result =
       sql(
@@ -138,7 +138,7 @@ class UDFSuite extends QueryTest with SharedSQLContext {
 
     val df = Seq(("red", 1), ("red", 2), ("blue", 10),
       ("green", 100), ("green", 200)).toDF("g", "v")
-    df.registerTempTable("groupData")
+    df.createOrReplaceTempView("groupData")
 
     val result =
       sql(
@@ -158,7 +158,7 @@ class UDFSuite extends QueryTest with SharedSQLContext {
 
     val df = Seq(("red", 1), ("red", 2), ("blue", 10),
       ("green", 100), ("green", 200)).toDF("g", "v")
-    df.registerTempTable("groupData")
+    df.createOrReplaceTempView("groupData")
 
     val result =
       sql(

http://git-wip-us.apache.org/repos/asf/spark/blob/5f5270ea/sql/core/src/test/scala/org/apache/spark/sql/UserDefinedTypeSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/UserDefinedTypeSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/UserDefinedTypeSuite.scala
index 3057e01..7d7b486 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/UserDefinedTypeSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/UserDefinedTypeSuite.scala
@@ -95,7 +95,7 @@ class UserDefinedTypeSuite extends QueryTest with SharedSQLContext with ParquetT
 
   test("UDTs and UDFs") {
     spark.udf.register("testType", (d: UDT.MyDenseVector) => 
d.isInstanceOf[UDT.MyDenseVector])
-    pointsRDD.registerTempTable("points")
+    pointsRDD.createOrReplaceTempView("points")
     checkAnswer(
       sql("SELECT testType(features) from points"),
       Seq(Row(true), Row(true)))

