ajantha-bhat commented on a change in pull request #4020:
URL: https://github.com/apache/carbondata/pull/4020#discussion_r532364698



##########
File path: 
integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionIgnoreInMinorTest.scala
##########
@@ -186,6 +187,206 @@ class MajorCompactionIgnoreInMinorTest extends QueryTest with BeforeAndAfterAll
 
   }
 
+  def generateData(numOrders: Int = 100000): DataFrame = {
+    import sqlContext.implicits._
+    sqlContext.sparkContext.parallelize(1 to numOrders, 4)
+      .map { x => ("country" + x, x, "07/23/2015", "name" + x, "phonetype" + x % 10,
+        "serialname" + x, x + 10000)
+      }.toDF("country", "ID", "date", "name", "phonetype", "serialname", "salary")
+  }
+
+  test("test skip segment whose data size exceed threshold in minor compaction " +
+    "in system level control") {
+    CarbonProperties.getInstance().addProperty("carbon.compaction.level.threshold", "2,2")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "mm/dd/yyyy")
+    // set threshold to 1MB in this test case
+    CarbonProperties.getInstance().addProperty("carbon.minor.compaction.size", "1")
+
+    sql("drop table if exists minor_threshold")
+    sql("drop table if exists tmp")
+
+    sql(
+      "CREATE TABLE IF NOT EXISTS minor_threshold (country String, ID Int, date Timestamp," +
+        " name String, phonetype String, serialname String, salary Int) STORED AS carbondata"
+    )
+    sql(
+      "CREATE TABLE IF NOT EXISTS tmp (country String, ID Int, date Timestamp," +
+        " name String, phonetype String, serialname String, salary Int) STORED AS carbondata"
+    )
+
+    val initframe = generateData(100000)
+    initframe.write
+      .format("carbondata")
+      .option("tablename", "tmp")
+      .mode(SaveMode.Overwrite)
+      .save()
+    // load 3 segments
+    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE minor_threshold OPTIONS" +
+      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
+    )
+    sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE minor_threshold OPTIONS" +
+      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
+    )
+    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE minor_threshold OPTIONS" +
+      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
+    )
+
+    // insert a new segment (id is 3) whose data size exceeds 1 MB
+    sql("insert into minor_threshold select * from tmp")
+
+    // load another 3 segments
+    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE minor_threshold OPTIONS" +
+      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
+    )
+    sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE minor_threshold OPTIONS" +
+      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
+    )
+    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE minor_threshold OPTIONS" +
+      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
+    )
+
+    sql("show segments for table minor_threshold").show(100, false)
+    // do minor compaction
+    sql("alter table minor_threshold compact 'minor'")
+    // check that segment 3, whose size exceeds the limit, is not compacted
+    val carbonTable = CarbonMetadata.getInstance().getCarbonTable(
+      CarbonCommonConstants.DATABASE_DEFAULT_NAME, "minor_threshold")
+    val carbonTablePath = carbonTable.getMetadataPath
+    val segments = SegmentStatusManager.readLoadMetadata(carbonTablePath)
+    assertResult(SegmentStatus.SUCCESS)(segments(3).getSegmentStatus)
+    assertResult(100030)(sql("select count(*) from minor_threshold").collect().head.get(0))
+    // reset to 0
+    CarbonProperties.getInstance().addProperty("carbon.minor.compaction.size", "0")
+  }

Review comment:
       Also support changing the table property dynamically via the ALTER TABLE SET/UNSET TBLPROPERTIES command. With that, you can test the table-level threshold in this same test case by loading some more data; there is no need to create new tables to test it.
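
       A minimal sketch of what that could look like inside this same test case. The table-level property key "minor_compaction_size" is an assumption here, not taken from this PR's code; substitute whatever key the PR actually introduces:

           // Hypothetical sketch: switch to a table-level threshold dynamically.
           // "minor_compaction_size" is an assumed property key.
           sql("alter table minor_threshold set tblproperties('minor_compaction_size'='4')")
           // load a couple more segments so another minor compaction is triggered
           sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE minor_threshold " +
             "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
           sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE minor_threshold " +
             "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
           sql("alter table minor_threshold compact 'minor'")
           // assert that the table-level threshold (not the system-level one) now decides
           // which segments are skipped, then clean up the property
           sql("alter table minor_threshold unset tblproperties('minor_compaction_size')")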


----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

