marchpure commented on a change in pull request #3981:
URL: https://github.com/apache/carbondata/pull/3981#discussion_r510863739



##########
File path: 
integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/merge/MergeTestCase.scala
##########
@@ -723,6 +723,15 @@ class MergeTestCase extends QueryTest with 
BeforeAndAfterAll {
     assert(getDeleteDeltaFileCount("target", "0") == 0)
     checkAnswer(sql("select count(*) from target"), Seq(Row(3)))
     checkAnswer(sql("select * from target order by key"), Seq(Row("c", "200"), 
Row("d", "3"), Row("e", "100")))
+
+    // insert overwrite a partition. make sure the merge executed before still 
works.
+    sql(
+      """insert overwrite table target
+        | partition (value=3)
+        | select * from target where value = 100""".stripMargin)
+    checkAnswer(sql("select * from target"), Seq(Row("c", "200"), Row("e", 
"3"), Row("e", "100")))

Review comment:
       I have modified the code according to your suggestion

##########
File path: 
integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
##########
@@ -69,6 +69,60 @@ class UpdateCarbonTableTestCase extends QueryTest with 
BeforeAndAfterAll {
     sql("""drop table iud.zerorows""")
   }
 
+  test("update and insert overwrite partition") {
+    sql("""drop table if exists iud.updateinpartition""")
+    sql(
+      """CREATE TABLE iud.updateinpartition (id STRING, sales INT)
+        | PARTITIONED BY (dtm STRING)
+        | STORED AS carbondata""".stripMargin)
+    sql(
+      s"""load data local
+         | inpath '$resourcesPath/IUD/updateinpartition.csv' into table 
updateinpartition""".stripMargin)
+    sql(
+      """update iud.updateinpartition u set (u.sales) = (u.sales + 1) where 
id='001'""".stripMargin)
+    sql(
+      """update iud.updateinpartition u set (u.sales) = (u.sales + 2) where 
id='011'""".stripMargin)
+
+    // delete data from a partition, make sure the update executed before 
still works.
+    sql("""delete from updateinpartition where dtm=20200908 and 
id='012'""".stripMargin)
+    checkAnswer(
+      sql("""select sales from iud.updateinpartition where 
id='001'""".stripMargin), Seq(Row(1))
+    )
+    checkAnswer(
+      sql("""select sales from iud.updateinpartition where 
id='011'""".stripMargin), Seq(Row(2))
+    )
+    checkAnswer(
+      sql("""select sales from iud.updateinpartition where 
id='012'""".stripMargin), Seq()
+    )
+
+    // insert overwrite a partition. make sure the update executed before 
still works.
+    sql(
+      """insert overwrite table iud.updateinpartition
+        | partition (dtm=20200908)
+        | select * from iud.updateinpartition where dtm = 
20200907""".stripMargin)
+    checkAnswer(
+      sql(
+        """select sales from iud.updateinpartition
+          | where dtm=20200908 and id='001'""".stripMargin), Seq(Row(1))
+    )
+    checkAnswer(
+      sql(
+        """select sales from iud.updateinpartition
+          | where dtm=20200908 and id='001'""".stripMargin), Seq(Row(1))
+    )

Review comment:
       I have modified the code according to your suggestion

##########
File path: integration/spark/src/test/resources/IUD/updateinpartition.csv
##########
@@ -0,0 +1,21 @@
+id,sales,dtm
+001,0,20200907
+002,0,20200907
+003,0,20200907
+004,0,20200907
+005,0,20200907
+006,0,20200907
+007,0,20200907
+008,0,20200907
+009,0,20200907
+010,0,20200907
+011,0,20200908
+012,0,20200908
+013,0,20200908
+014,0,20200908
+015,0,20200908
+016,0,20200908
+017,0,20200908
+018,0,20200908
+019,0,20200908
+020,0,20200908

Review comment:
       I have modified the code according to your suggestion




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Reply via email to