Repository: spark
Updated Branches:
  refs/heads/branch-2.0 f805b989b -> 0d7e1d11d


[SPARK-16037][SQL] Follow-up: add DataFrameWriter.insertInto() test cases for by-position resolution

## What changes were proposed in this pull request?

This PR migrates some test cases introduced in #12313 as a follow-up to #13754 
and #13766. The migrated test cases cover `DataFrameWriter.insertInto()`, 
while the former two PRs only cover SQL `INSERT` statements.
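
As a quick illustration of the by-position contract these cases exercise, here 
is a minimal sketch (the table name `target` and all column names are 
illustrative, not taken from this patch):

```scala
import spark.implicits._  // for toDF

// insertInto() ignores the DataFrame's column names and matches the target
// table's columns purely by position: the 1st column feeds `a`, the 2nd feeds `b`.
sql("CREATE TABLE target (a INT, b INT)")
Seq((1, 2)).toDF("y", "x").write.insertInto("target")
// SELECT a, b FROM target ==> Row(1, 2); the names `y` and `x` play no role.
```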

Note that the `testPartitionedTable` utility method tests both Hive SerDe 
tables and data source tables.
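
The helper is defined elsewhere in this suite; roughly, it runs the test body 
once per table type against a table whose data columns are `(a, d)` and whose 
partition columns are `(b, c)`, which is what the expected `Row(1, 3, 4, 2)` 
in the diff below implies. A hedged sketch of that shape (single table type 
only, for brevity; not the actual helper):

```scala
// Hypothetical reconstruction: insertInto() maps an input DataFrame
// (a, b, c, d) by position onto (a, d, b, c) -- data columns first, then
// partition columns -- so inserting (1, 2, 3, 4) yields
// SELECT a, b, c, d ==> Row(1, 3, 4, 2).
def testPartitionedTable(testName: String)(f: String => Unit): Unit = {
  test(testName) {
    withTable("insert_test") {
      sql("CREATE TABLE insert_test (a INT, d INT) PARTITIONED BY (b INT, c INT)")
      f("insert_test")
    }
  }
}
```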

## How was this patch tested?

N/A

Author: Cheng Lian <l...@databricks.com>

Closes #13810 from liancheng/spark-16037-follow-up-tests.

(cherry picked from commit f4a3d45e38f18278bbdb7cc32486ded50f76d54b)
Signed-off-by: Yin Huai <yh...@databricks.com>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/0d7e1d11
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/0d7e1d11
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/0d7e1d11

Branch: refs/heads/branch-2.0
Commit: 0d7e1d11d2ea2b7005208951518fdf882fc36ec2
Parents: f805b98
Author: Cheng Lian <l...@databricks.com>
Authored: Tue Jun 21 11:58:33 2016 -0700
Committer: Yin Huai <yh...@databricks.com>
Committed: Tue Jun 21 11:58:54 2016 -0700

----------------------------------------------------------------------
 .../sql/hive/InsertIntoHiveTableSuite.scala     | 48 ++++++++++++++++++++
 1 file changed, 48 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/0d7e1d11/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
index 4643251..d9ce1c3 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
@@ -469,4 +469,52 @@ class InsertIntoHiveTableSuite extends QueryTest with TestHiveSingleton with BeforeAndAfter
         )
       }
   }
+
+  testPartitionedTable("insertInto() should match columns by position and 
ignore column names") {
+    tableName =>
+      withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
+        // Columns `df.c` and `df.d` are resolved by position, and thus mapped to partition columns
+        // `b` and `c` of the target table.
+        val df = Seq((1, 2, 3, 4)).toDF("a", "b", "c", "d")
+        df.write.insertInto(tableName)
+
+        checkAnswer(
+          sql(s"SELECT a, b, c, d FROM $tableName"),
+          Row(1, 3, 4, 2)
+        )
+      }
+  }
+
+  testPartitionedTable("insertInto() should match unnamed columns by 
position") {
+    tableName =>
+      withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
+        // Columns `c + 1` and `d + 1` are resolved by position, and thus mapped to partition
+        // columns `b` and `c` of the target table.
+        val df = Seq((1, 2, 3, 4)).toDF("a", "b", "c", "d")
+        df.select('a + 1, 'b + 1, 'c + 1, 'd + 1).write.insertInto(tableName)
+
+        checkAnswer(
+          sql(s"SELECT a, b, c, d FROM $tableName"),
+          Row(2, 4, 5, 3)
+        )
+      }
+  }
+
+  testPartitionedTable("insertInto() should reject missing columns") {
+    tableName =>
+      sql("CREATE TABLE t (a INT, b INT)")
+
+      intercept[AnalysisException] {
+        spark.table("t").write.insertInto(tableName)
+      }
+  }
+
+  testPartitionedTable("insertInto() should reject extra columns") {
+    tableName =>
+      sql("CREATE TABLE t (a INT, b INT, c INT, d INT, e INT)")
+
+      intercept[AnalysisException] {
+        spark.table("t").write.insertInto(tableName)
+      }
+  }
 }

