[GitHub] [carbondata] QiangCai commented on a change in pull request #3950: [CARBONDATA-3889] Enable scalastyle check for all scala test code

2020-10-14 Thread GitBox


QiangCai commented on a change in pull request #3950:
URL: https://github.com/apache/carbondata/pull/3950#discussion_r504467243



##
File path: 
integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/emptyrow/TestSkipEmptyLines.scala
##
@@ -33,50 +33,65 @@ class TestSkipEmptyLines extends QueryTest with 
BeforeAndAfterAll {
   test("test load options with true") {
 sql("drop table if exists skipEmptyRowCarbonTable")
 sql("CREATE TABLE skipEmptyRowCarbonTable (name string, age int) STORED AS 
carbondata")
-sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' into table 
skipEmptyRowCarbonTable " +
-s"OPTIONS('skip_empty_line'='true')")
-checkAnswer(sql("select * from skipEmptyRowCarbonTable"), 
Seq(Row("a",25),Row("b",22),Row("c",23)))
+sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' " +
+"into table skipEmptyRowCarbonTable OPTIONS('skip_empty_line'='true')")
+checkAnswer(sql("select * from skipEmptyRowCarbonTable"),
+  Seq(Row("a", 25), Row("b", 22), Row("c", 23)))
   }
 
   test("test load options with false") {
 sql("drop table if exists skipEmptyRowCarbonTable")
 sql("CREATE TABLE skipEmptyRowCarbonTable (name string, age int) STORED AS 
carbondata")
-sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' into table 
skipEmptyRowCarbonTable " +
-s"OPTIONS('skip_empty_line'='false')")
+sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' " +
+"into table skipEmptyRowCarbonTable 
OPTIONS('skip_empty_line'='false')")
 checkAnswer(sql("select * from skipEmptyRowCarbonTable"),
-  
Seq(Row("a",25),Row("b",22),Row("c",23),Row(null,null),Row(null,null),Row(null,null)))
+  Seq(Row("a", 25),
+Row("b", 22),
+Row("c", 23),
+Row(null, null),
+Row(null, null),
+Row(null, null)))
   }
 
   test("test carbonproperties with true") {
 
CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_SKIP_EMPTY_LINE,
 "true")
 sql("drop table if exists skipEmptyRowCarbonTable")
 sql("CREATE TABLE skipEmptyRowCarbonTable (name string, age int) STORED AS 
carbondata")
-sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' into table 
skipEmptyRowCarbonTable")
+sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' " +
+"into table skipEmptyRowCarbonTable")
 checkAnswer(sql("select * from skipEmptyRowCarbonTable"),
-  Seq(Row("a",25),Row("b",22),Row("c",23)))
+  Seq(Row("a", 25), Row("b", 22), Row("c", 23)))
 
CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_SKIP_EMPTY_LINE,
   CarbonCommonConstants.CARBON_SKIP_EMPTY_LINE_DEFAULT)
   }
 
   test("test carbonproperties with false") {
-
CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_SKIP_EMPTY_LINE,
 "false")
+CarbonProperties.getInstance()
+  .addProperty(CarbonCommonConstants.CARBON_SKIP_EMPTY_LINE, "false")
 sql("drop table if exists skipEmptyRowCarbonTable")
 sql("CREATE TABLE skipEmptyRowCarbonTable (name string, age int) STORED AS 
carbondata")
-sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' into table 
skipEmptyRowCarbonTable")
+sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' " +
+"into table skipEmptyRowCarbonTable")
 checkAnswer(sql("select * from skipEmptyRowCarbonTable"),
-  
Seq(Row("a",25),Row("b",22),Row("c",23),Row(null,null),Row(null,null),Row(null,null)))
+  Seq(Row("a", 25),

Review comment:
   changed for all places

##
File path: 
integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/aggquery/AllDataTypesTestCaseAggregate.scala
##
@@ -46,21 +47,23 @@ class AllDataTypesTestCaseAggregate extends QueryTest with 
BeforeAndAfterAll {
   "Timestamp, workgroupcategory int, workgroupcategoryname String, deptno 
int, deptname " +
   "String, projectcode int, projectjoindate Timestamp, projectenddate 
Timestamp,attendance " +
   "int,utilization int,salary int)row format delimited fields terminated 
by ','")
-sql(
-  s"LOAD DATA LOCAL INPATH '$resourcesPath/datawithoutheader.csv' INTO 
TABLE alldatatypesAGG_hive")
+sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/datawithoutheader.csv' " +
+"INTO TABLE alldatatypesAGG_hive")
   }
 
   test(
 "select empno,empname,utilization,count(salary),sum(empno) from 
alldatatypestableAGG where " +
 "empname in ('arvind','ayushi') group by empno,empname,utilization")
   {
 checkAnswer(
-  sql(
-"select empno,empname,utilization,count(salary),sum(empno) from 
alldatatypestableAGG where" +
-" empname in ('arvind','ayushi') group by empno,empname,utilization"),
-  sql(
-"select empno,empname,utilization,count(salary),sum(empno) from 
alldatatypesAGG_hive where" +
-" empname in ('arvind','ayushi') group by empno,empname,utili

[GitHub] [carbondata] QiangCai commented on a change in pull request #3950: [CARBONDATA-3889] Enable scalastyle check for all scala test code

2020-10-14 Thread GitBox


QiangCai commented on a change in pull request #3950:
URL: https://github.com/apache/carbondata/pull/3950#discussion_r504467565



##
File path: 
integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableQueryFilter.scala
##
@@ -96,25 +98,97 @@ class TestStreamingTableQueryFilter extends QueryTest with 
BeforeAndAfterAll {
 // filter
 checkAnswer(
   sql("select * from streaming_table_filter.stream_filter where id = 1"),
-  Seq(Row(1, "name_1", "city_1", 1.0, BigDecimal.valueOf(0.01), 80.01, 
Date.valueOf("1990-01-01"), Timestamp.valueOf("2010-01-01 10:01:01.0"), 
Timestamp.valueOf("2010-01-01 10:01:01.0"), ("1" + longStrValue), 
Row(wrap(Array("school_1", "school_11")), 1
+  Seq(Row(1,

Review comment:
   done, changed for all places





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [carbondata] QiangCai commented on a change in pull request #3950: [CARBONDATA-3889] Enable scalastyle check for all scala test code

2020-10-14 Thread GitBox


QiangCai commented on a change in pull request #3950:
URL: https://github.com/apache/carbondata/pull/3950#discussion_r504466643



##
File path: 
integration/spark/src/test/scala/org/apache/carbondata/index/bloom/BloomCoarseGrainIndexFunctionSuite.scala
##
@@ -803,27 +840,38 @@ class BloomCoarseGrainIndexFunctionSuite  extends 
QueryTest with BeforeAndAfterA
 sql(s"INSERT INTO $bloomSampleTable SELECT 'c1v2', 2, 'c3v2'")
 
 // two segments both has index files
-val carbonTable = CarbonEnv.getCarbonTable(Option("default"), 
bloomSampleTable)(SparkTestQueryExecutor.spark)
+val carbonTable = CarbonEnv.getCarbonTable(Option("default"), 
bloomSampleTable)(
+  SparkTestQueryExecutor.spark)
 import scala.collection.JavaConverters._
 (0 to 1).foreach { segId =>
-  val indexPath = 
CarbonTablePath.getIndexesStorePath(carbonTable.getTablePath, segId.toString, 
indexName)
-  assert(FileUtils.listFiles(FileUtils.getFile(indexPath), 
Array("bloomindexmerge"), true).asScala.nonEmpty)
+  val indexPath = 
CarbonTablePath.getIndexesStorePath(carbonTable.getTablePath,

Review comment:
   changed for all places

##
File path: 
integration/spark/src/test/scala/org/apache/carbondata/index/bloom/BloomCoarseGrainIndexSuite.scala
##
@@ -82,16 +82,24 @@ class BloomCoarseGrainIndexSuite extends QueryTest with 
BeforeAndAfterAll with B
   sql(s"select * from $bloomSampleTable where id = 1 and city='city_1'", 
indexName, shouldHit),
   sql(s"select * from $normalTable where id = 1 and city='city_1'"))
 checkAnswer(
-  sql(s"select * from $bloomSampleTable where id = 999 and 
city='city_999'", indexName, shouldHit),
+  sql(s"select * from $bloomSampleTable where id = 999 and 
city='city_999'",
+indexName,
+shouldHit),

Review comment:
   changed for all places

##
File path: 
integration/spark/src/test/scala/org/apache/carbondata/index/lucene/LuceneFineGrainIndexSuite.scala
##
@@ -186,12 +191,21 @@ class LuceneFineGrainIndexSuite extends QueryTest with 
BeforeAndAfterAll {
 
 sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE index_test1 
OPTIONS('header'='false')")
 
-checkAnswer(sql("SELECT * FROM index_test1 WHERE TEXT_MATCH('name:n10')"), 
sql(s"select * from index_test1 where name='n10'"))
-
-var carbonTable = CarbonEnv.getCarbonTable(Some("lucene"), 
"index_test1")(sqlContext.sparkSession)
-var indexes = 
carbonTable.getIndexMetadata.getIndexesMap.get(IndexType.LUCENE.getIndexProviderName)
-  .asScala.filter(p => 
p._2.get(CarbonCommonConstants.INDEX_STATUS).equalsIgnoreCase(IndexStatus.ENABLED.name()))
-assert(indexes.exists(p => p._1.equals("dm12") && 
p._2.get(CarbonCommonConstants.INDEX_STATUS) == IndexStatus.ENABLED.name()))
+checkAnswer(sql("SELECT * FROM index_test1 WHERE TEXT_MATCH('name:n10')"),
+  sql(s"select * from index_test1 where name='n10'"))
+
+var carbonTable = CarbonEnv.getCarbonTable(Some("lucene"), 
"index_test1")(sqlContext
+  .sparkSession)
+val indexes = carbonTable.getIndexMetadata

Review comment:
   done

##
File path: 
integration/spark/src/test/scala/org/apache/carbondata/index/lucene/LuceneFineGrainIndexSuite.scala
##
@@ -863,8 +891,13 @@ class LuceneFineGrainIndexSuite extends QueryTest with 
BeforeAndAfterAll {
  | ON index_test_table (name)
  | AS 'bloomfilter'
   """.stripMargin)
-sql("show indexes on table index_test_table").show(false)
-checkExistence(sql("show indexes on table index_test_table"), true, "dm", 
"dm1", "lucene", "bloomfilter")
+sql("show indexes on table index_test_table").collect()
+checkExistence(sql("show indexes on table index_test_table"),

Review comment:
   done

##
File path: 
integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/aggquery/IntegerDataTypeTestCase.scala
##
@@ -143,9 +147,52 @@ class IntegerDataTypeTestCase extends QueryTest with 
BeforeAndAfterAll {
 
 val ff = BigInt(2147484000L)
 checkAnswer(
-  sql("select 
begin_time,begin_time1,begin_time2,begin_time3,begin_time4,begin_time5,begin_time6,begin_time7,begin_time8,begin_time9,begin_time10,begin_time11,begin_time12,begin_time13,begin_time14,begin_time15,begin_time16,begin_time17,begin_time18,begin_time19,begin_time20
 from all_encoding_table"),
-  
Seq(Row(1497376581,1,8388600,125,1497376581,8386600,1,100,125,1497376581,1497423738,2139095000,1497376581,1497423738,32000,123.4,11.1,3200.1,214744460.2,1497376581,1497376581),
-
Row(1497408581,32000,45000,25,1,55000,32000,75,35,1497423838,1497423838,ff,1497423838,1497423838,31900,838860.7,12.3,127.1,214748360.2,1497408581,1497408581))
+  sql("select 
begin_time,begin_time1,begin_time2,begin_time3,begin_time4,begin_time5," +

Review comment:
   done





This is an automated message from the Apache Git Service.

[GitHub] [carbondata] QiangCai commented on a change in pull request #3950: [CARBONDATA-3889] Enable scalastyle check for all scala test code

2020-10-14 Thread GitBox


QiangCai commented on a change in pull request #3950:
URL: https://github.com/apache/carbondata/pull/3950#discussion_r504467105



##
File path: 
integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithCompression.scala
##
@@ -528,8 +571,9 @@ class TestLoadDataWithCompression extends QueryTest with 
BeforeAndAfterEach with
 var exception = intercept[RuntimeException] {
   loadData()
 }
+// scalastyle:off lineLength
 assertResult("For not carbondata native supported compressor, the result 
of method getName() should be the full class name. Expected 
'org.apache.carbondata.core.datastore.compression.ZstdCompressor', found 
'zstd'")(exception.getMessage)
-
+// scalastyle:on lineLength

Review comment:
   done

##
File path: 
integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/emptyrow/TestSkipEmptyLines.scala
##
@@ -33,50 +33,65 @@ class TestSkipEmptyLines extends QueryTest with 
BeforeAndAfterAll {
   test("test load options with true") {
 sql("drop table if exists skipEmptyRowCarbonTable")
 sql("CREATE TABLE skipEmptyRowCarbonTable (name string, age int) STORED AS 
carbondata")
-sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' into table 
skipEmptyRowCarbonTable " +
-s"OPTIONS('skip_empty_line'='true')")
-checkAnswer(sql("select * from skipEmptyRowCarbonTable"), 
Seq(Row("a",25),Row("b",22),Row("c",23)))
+sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' " +
+"into table skipEmptyRowCarbonTable OPTIONS('skip_empty_line'='true')")
+checkAnswer(sql("select * from skipEmptyRowCarbonTable"),
+  Seq(Row("a", 25), Row("b", 22), Row("c", 23)))
   }
 
   test("test load options with false") {
 sql("drop table if exists skipEmptyRowCarbonTable")
 sql("CREATE TABLE skipEmptyRowCarbonTable (name string, age int) STORED AS 
carbondata")
-sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' into table 
skipEmptyRowCarbonTable " +
-s"OPTIONS('skip_empty_line'='false')")
+sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' " +
+"into table skipEmptyRowCarbonTable 
OPTIONS('skip_empty_line'='false')")
 checkAnswer(sql("select * from skipEmptyRowCarbonTable"),
-  
Seq(Row("a",25),Row("b",22),Row("c",23),Row(null,null),Row(null,null),Row(null,null)))
+  Seq(Row("a", 25),

Review comment:
   changed for all places





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [carbondata] QiangCai commented on a change in pull request #3950: [CARBONDATA-3889] Enable scalastyle check for all scala test code

2020-10-14 Thread GitBox


QiangCai commented on a change in pull request #3950:
URL: https://github.com/apache/carbondata/pull/3950#discussion_r504466455



##
File path: 
integration/spark/src/test/java/org/apache/carbondata/sdk/util/BinaryUtil.java
##
@@ -17,13 +17,16 @@
 
 package org.apache.carbondata.sdk.util;
 
+import java.io.BufferedInputStream;

Review comment:
   done

##
File path: 
integration/spark/src/test/scala/org/apache/carbondata/index/bloom/BloomCoarseGrainIndexFunctionSuite.scala
##
@@ -77,7 +76,11 @@ class BloomCoarseGrainIndexFunctionSuite  extends QueryTest 
with BeforeAndAfterA
  | properties('BLOOM_SIZE'='64')
   """.stripMargin)
 
-IndexStatusUtil.checkIndexStatus(bloomSampleTable, indexName, 
IndexStatus.ENABLED.name(), sqlContext.sparkSession, IndexType.BLOOMFILTER)
+IndexStatusUtil.checkIndexStatus(bloomSampleTable,

Review comment:
   changed for all places





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [carbondata] QiangCai commented on a change in pull request #3950: [CARBONDATA-3889] Enable scalastyle check for all scala test code

2020-10-14 Thread GitBox


QiangCai commented on a change in pull request #3950:
URL: https://github.com/apache/carbondata/pull/3950#discussion_r504464628



##
File path: 
index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestCreateIndexTable.scala
##
@@ -262,40 +262,46 @@ class TestCreateIndexTable extends QueryTest with 
BeforeAndAfterAll {
 "projectjoindate Timestamp, projectenddate Timestamp, attendance int, 
" +
 "utilization int,salary int) STORED AS CARBONDATA")
 sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO " +
-"TABLE carbontable OPTIONS('DELIMITER'=',', 
'BAD_RECORDS_LOGGER_ENABLE'='FALSE', 'BAD_RECORDS_ACTION'='FORCE')")
+"TABLE carbontable OPTIONS('DELIMITER'=',', 
'BAD_RECORDS_LOGGER_ENABLE'='FALSE', " +
+"'BAD_RECORDS_ACTION'='FORCE')")
 val withoutIndex =
-  sql("select empno from carbontable where empname = 'ayushi' or empname = 
'krithin' or empname = 'madhan'")
+  sql("select empno from carbontable " +

Review comment:
   changed for all places

##
File path: 
index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestCreateIndexTable.scala
##
@@ -262,40 +262,46 @@ class TestCreateIndexTable extends QueryTest with 
BeforeAndAfterAll {
 "projectjoindate Timestamp, projectenddate Timestamp, attendance int, 
" +
 "utilization int,salary int) STORED AS CARBONDATA")
 sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO " +
-"TABLE carbontable OPTIONS('DELIMITER'=',', 
'BAD_RECORDS_LOGGER_ENABLE'='FALSE', 'BAD_RECORDS_ACTION'='FORCE')")
+"TABLE carbontable OPTIONS('DELIMITER'=',', 
'BAD_RECORDS_LOGGER_ENABLE'='FALSE', " +
+"'BAD_RECORDS_ACTION'='FORCE')")
 val withoutIndex =
-  sql("select empno from carbontable where empname = 'ayushi' or empname = 
'krithin' or empname = 'madhan'")
+  sql("select empno from carbontable " +
+  "where empname = 'ayushi' or empname = 'krithin' or empname = 
'madhan'")
 .collect().toSeq
-sql("create index empnameindex on table carbontable 
(workgroupcategoryname,empname) AS 'carbondata'")
+sql(
+  "create index empnameindex on table carbontable (" +
+  "workgroupcategoryname,empname) AS 'carbondata'")
 
-checkAnswer(sql("select empno from carbontable where empname = 'ayushi' or 
empname = 'krithin' or empname = 'madhan'"),
+checkAnswer(sql(

Review comment:
   changed for all places

##
File path: 
index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestIndexModelWithUnsafeColumnPage.scala
##
@@ -35,8 +35,8 @@ class TestIndexModelWithUnsafeColumnPage extends QueryTest 
with BeforeAndAfterAl
   }
 
   test("Test secondry index data count") {
-checkAnswer(sql("select count(*) from testSecondryIndex_IndexTable")
-,Seq(Row(1)))
+checkAnswer(sql("select count(*) from testSecondryIndex_IndexTable"),

Review comment:
   changed for all places

##
File path: 
index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestNIQueryWithIndex.scala
##
@@ -163,8 +163,11 @@ class TestNIQueryWithIndex extends QueryTest with 
BeforeAndAfterAll{
   // Query has EqualTo - So SI = Yes
   assert(checkSIColumnsSize(ch24, 1)) // EqualTo
 
-}finally{
-  sql(s"set 
carbon.si.lookup.partialstring=${CarbonCommonConstants.ENABLE_SI_LOOKUP_PARTIALSTRING_DEFAULT}")
+} finally {
+  sql(s"set carbon.si.lookup.partialstring=${
+CarbonCommonConstants

Review comment:
   changed for all places

##
File path: 
index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestNIQueryWithIndex.scala
##
@@ -188,14 +191,17 @@ class TestNIQueryWithIndex extends QueryTest with 
BeforeAndAfterAll{
 
   val ch15 = sql("select count(*) from seccust where 
c_phone='25-989-741-2988' and c_mktsegment like 'BUI%LDING'")
   // equals on c_phone of I1, I2 & (length & startsWith & endswith) on 
c_mktsegment of I2 so SI - Yes
-  assert(checkSIColumnsSize(ch15, 3)) //size = EqualTo on c_phone, length, 
StartsWith
+  assert(checkSIColumnsSize(ch15, 3)) // size = EqualTo on c_phone, 
length, StartsWith
 
   val ch16 = sql("select * from seccust where c_phone='25-989-741-2988'")
   // Query has EqualTo so SI - Yes
   assert(checkSIColumnsSize(ch16, 1)) // size = EqualTo
 
-} finally{
-  sql(s"set 
carbon.si.lookup.partialstring=${CarbonCommonConstants.ENABLE_SI_LOOKUP_PARTIALSTRING_DEFAULT}")
+} finally {
+  sql(s"set carbon.si.lookup.partialstring=${
+CarbonCommonConstants
+  .ENABLE_SI_LOOKUP_PARTIALSTRING_DEFAULT

Review comment:
   changed for all places

##
File path: 
index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestSIWithSecondryIndex.scala

[GitHub] [carbondata] QiangCai commented on a change in pull request #3950: [CARBONDATA-3889] Enable scalastyle check for all scala test code

2020-10-14 Thread GitBox


QiangCai commented on a change in pull request #3950:
URL: https://github.com/apache/carbondata/pull/3950#discussion_r504465234



##
File path: 
integration/flink/src/test/scala/org/apache/carbon/flink/TestCarbonPartitionWriter.scala
##
@@ -58,7 +57,11 @@ class TestCarbonPartitionWriter extends QueryTest with 
BeforeAndAfterAll{
 
   val dataCount = 1000
   val source = getTestSource(dataCount)
-  executeStreamingEnvironment(tablePath, writerProperties, 
carbonProperties, environment, source)
+  executeStreamingEnvironment(tablePath,

Review comment:
   change for all places





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [carbondata] QiangCai commented on a change in pull request #3950: [CARBONDATA-3889] Enable scalastyle check for all scala test code

2020-10-14 Thread GitBox


QiangCai commented on a change in pull request #3950:
URL: https://github.com/apache/carbondata/pull/3950#discussion_r504464757



##
File path: 
index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestIndexModelWithUnsafeColumnPage.scala
##
@@ -35,8 +35,8 @@ class TestIndexModelWithUnsafeColumnPage extends QueryTest 
with BeforeAndAfterAl
   }
 
   test("Test secondry index data count") {
-checkAnswer(sql("select count(*) from testSecondryIndex_IndexTable")
-,Seq(Row(1)))
+checkAnswer(sql("select count(*) from testSecondryIndex_IndexTable"),

Review comment:
   change for all places

##
File path: 
index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestNIQueryWithIndex.scala
##
@@ -163,8 +163,11 @@ class TestNIQueryWithIndex extends QueryTest with 
BeforeAndAfterAll{
   // Query has EqualTo - So SI = Yes
   assert(checkSIColumnsSize(ch24, 1)) // EqualTo
 
-}finally{
-  sql(s"set 
carbon.si.lookup.partialstring=${CarbonCommonConstants.ENABLE_SI_LOOKUP_PARTIALSTRING_DEFAULT}")
+} finally {
+  sql(s"set carbon.si.lookup.partialstring=${
+CarbonCommonConstants

Review comment:
   change for all places

##
File path: 
index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestNIQueryWithIndex.scala
##
@@ -188,14 +191,17 @@ class TestNIQueryWithIndex extends QueryTest with 
BeforeAndAfterAll{
 
   val ch15 = sql("select count(*) from seccust where 
c_phone='25-989-741-2988' and c_mktsegment like 'BUI%LDING'")
   // equals on c_phone of I1, I2 & (length & startsWith & endswith) on 
c_mktsegment of I2 so SI - Yes
-  assert(checkSIColumnsSize(ch15, 3)) //size = EqualTo on c_phone, length, 
StartsWith
+  assert(checkSIColumnsSize(ch15, 3)) // size = EqualTo on c_phone, 
length, StartsWith
 
   val ch16 = sql("select * from seccust where c_phone='25-989-741-2988'")
   // Query has EqualTo so SI - Yes
   assert(checkSIColumnsSize(ch16, 1)) // size = EqualTo
 
-} finally{
-  sql(s"set 
carbon.si.lookup.partialstring=${CarbonCommonConstants.ENABLE_SI_LOOKUP_PARTIALSTRING_DEFAULT}")
+} finally {
+  sql(s"set carbon.si.lookup.partialstring=${
+CarbonCommonConstants
+  .ENABLE_SI_LOOKUP_PARTIALSTRING_DEFAULT

Review comment:
   change for all places

##
File path: 
index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestSIWithSecondryIndex.scala
##
@@ -128,25 +129,32 @@ class TestSIWithSecondryIndex extends QueryTest with 
BeforeAndAfterAll {
 
   test("test create secondary index global sort on partition table") {
 sql("drop table if exists partition_carbon_table")
-sql("create table partition_carbon_table (name string, id string, country 
string) PARTITIONED BY(dateofjoin " +
+sql("create table partition_carbon_table (" +
+"name string, id string, country string) PARTITIONED BY(dateofjoin " +
   "string) stored as carbondata")
 // create SI before the inserting the data
-sql("create index partition_carbon_table_index on table 
partition_carbon_table(id, country) as 'carbondata' properties" +
+sql("create index partition_carbon_table_index on table 
partition_carbon_table(" +
+"id, country) as 'carbondata' properties" +
 "('sort_scope'='global_sort', 'Global_sort_partitions'='3')")
 sql("insert into partition_carbon_table select 'xx', '2', 'china', '2020' 
" +
 "union all select 'xx', '1', 'india', '2021'")
 checkAnswerWithoutSort(sql("select id, country from 
partition_carbon_table_index"),
   Seq(Row("1", "india"), Row("2", "china")))
 // check for valid sort_scope
-checkExistence(sql("describe formatted partition_carbon_table_index"), 
true, "Sort Scope global_sort")
+checkExistence(sql("describe formatted partition_carbon_table_index"),
+  true,
+  "Sort Scope global_sort")

Review comment:
   done

##
File path: 
index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestSIWithSecondryIndex.scala
##
@@ -296,14 +308,18 @@ class TestSIWithSecondryIndex extends QueryTest with 
BeforeAndAfterAll {
 
 sql(s"""ALTER TABLE default.index1 SET
|SERDEPROPERTIES ('isSITableEnabled' = 'false')""".stripMargin)
-sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO " +
-"TABLE uniqdata OPTIONS('DELIMITER'=',', 
'BAD_RECORDS_LOGGER_ENABLE'='FALSE', 'BAD_RECORDS_ACTION'='FORCE')")
+sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO TABLE uniqdata 
" +
+
"OPTIONS('DELIMITER'=',','BAD_RECORDS_LOGGER_ENABLE'='FALSE','BAD_RECORDS_ACTION'='FORCE')")
 val count1 = sql("select * from uniqdata where workgroupcategoryname = 
'developer'").count()
-val df1 = sql("select * from uniqdata where workgroupcategoryn

[GitHub] [carbondata] QiangCai commented on a change in pull request #3950: [CARBONDATA-3889] Enable scalastyle check for all scala test code

2020-10-14 Thread GitBox


QiangCai commented on a change in pull request #3950:
URL: https://github.com/apache/carbondata/pull/3950#discussion_r504464628



##
File path: 
index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestCreateIndexTable.scala
##
@@ -262,40 +262,46 @@ class TestCreateIndexTable extends QueryTest with 
BeforeAndAfterAll {
 "projectjoindate Timestamp, projectenddate Timestamp, attendance int, 
" +
 "utilization int,salary int) STORED AS CARBONDATA")
 sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO " +
-"TABLE carbontable OPTIONS('DELIMITER'=',', 
'BAD_RECORDS_LOGGER_ENABLE'='FALSE', 'BAD_RECORDS_ACTION'='FORCE')")
+"TABLE carbontable OPTIONS('DELIMITER'=',', 
'BAD_RECORDS_LOGGER_ENABLE'='FALSE', " +
+"'BAD_RECORDS_ACTION'='FORCE')")
 val withoutIndex =
-  sql("select empno from carbontable where empname = 'ayushi' or empname = 
'krithin' or empname = 'madhan'")
+  sql("select empno from carbontable " +

Review comment:
   change for all places

##
File path: 
index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestCreateIndexTable.scala
##
@@ -262,40 +262,46 @@ class TestCreateIndexTable extends QueryTest with 
BeforeAndAfterAll {
 "projectjoindate Timestamp, projectenddate Timestamp, attendance int, 
" +
 "utilization int,salary int) STORED AS CARBONDATA")
 sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO " +
-"TABLE carbontable OPTIONS('DELIMITER'=',', 
'BAD_RECORDS_LOGGER_ENABLE'='FALSE', 'BAD_RECORDS_ACTION'='FORCE')")
+"TABLE carbontable OPTIONS('DELIMITER'=',', 
'BAD_RECORDS_LOGGER_ENABLE'='FALSE', " +
+"'BAD_RECORDS_ACTION'='FORCE')")
 val withoutIndex =
-  sql("select empno from carbontable where empname = 'ayushi' or empname = 
'krithin' or empname = 'madhan'")
+  sql("select empno from carbontable " +
+  "where empname = 'ayushi' or empname = 'krithin' or empname = 
'madhan'")
 .collect().toSeq
-sql("create index empnameindex on table carbontable 
(workgroupcategoryname,empname) AS 'carbondata'")
+sql(
+  "create index empnameindex on table carbontable (" +
+  "workgroupcategoryname,empname) AS 'carbondata'")
 
-checkAnswer(sql("select empno from carbontable where empname = 'ayushi' or 
empname = 'krithin' or empname = 'madhan'"),
+checkAnswer(sql(

Review comment:
   change for all places





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [carbondata] QiangCai commented on a change in pull request #3950: [CARBONDATA-3889] Enable scalastyle check for all scala test code

2020-10-14 Thread GitBox


QiangCai commented on a change in pull request #3950:
URL: https://github.com/apache/carbondata/pull/3950#discussion_r504464365



##
File path: 
index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestBroadCastSIFilterPushJoinWithUDF.scala
##
@@ -14,20 +14,21 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+

Review comment:
   reverted





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [carbondata] QiangCai commented on a change in pull request #3950: [CARBONDATA-3889] Enable scalastyle check for all scala test code

2020-10-14 Thread GitBox


QiangCai commented on a change in pull request #3950:
URL: https://github.com/apache/carbondata/pull/3950#discussion_r504464144



##
File path: 
index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/mergedata/CarbonDataFileMergeTestCaseOnSI.scala
##
@@ -14,20 +14,22 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+

Review comment:
   reverted

##
File path: 
index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestAlterTableColumnRenameWithIndex.scala
##
@@ -73,9 +73,12 @@ class TestAlterTableColumnRenameWithIndex extends QueryTest 
with BeforeAndAfterA
 sql("alter table si_rename change c test string")
 sql("alter table si_rename change d testSI string")
 sql("show indexes on si_rename").collect
-val query2 = sql("select test,testsi from si_rename where testsi = 'pqr' 
or test = 'def'").count()
+val query2 = sql("select test,testsi from si_rename where testsi = 'pqr' 
or test = 'def'")
+  .count()
 assert(query1 == query2)
-val df = sql("select test,testsi from si_rename where testsi = 'pqr' or 
test = 'def'").queryExecution.sparkPlan
+val df = sql("select test,testsi from si_rename where testsi = 'pqr' or 
test = 'def'")
+  .queryExecution

Review comment:
   done





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org