Indhumathi27 commented on a change in pull request #3950:
URL: https://github.com/apache/carbondata/pull/3950#discussion_r503988021



##########
File path: index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/mergedata/CarbonDataFileMergeTestCaseOnSI.scala
##########
@@ -14,20 +14,22 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+

Review comment:
       This change is not required.

##########
File path: index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestBroadCastSIFilterPushJoinWithUDF.scala
##########
@@ -14,20 +14,21 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+

Review comment:
       can revert this change

##########
File path: index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestCreateIndexTable.scala
##########
@@ -262,40 +262,46 @@ class TestCreateIndexTable extends QueryTest with BeforeAndAfterAll {
         "projectjoindate Timestamp, projectenddate Timestamp, attendance int, " +
         "utilization int,salary int) STORED AS CARBONDATA")
     sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO " +
-        "TABLE carbontable OPTIONS('DELIMITER'=',', 'BAD_RECORDS_LOGGER_ENABLE'='FALSE', 'BAD_RECORDS_ACTION'='FORCE')")
+        "TABLE carbontable OPTIONS('DELIMITER'=',', 'BAD_RECORDS_LOGGER_ENABLE'='FALSE', " +
+        "'BAD_RECORDS_ACTION'='FORCE')")
     val withoutIndex =
-      sql("select empno from carbontable where empname = 'ayushi' or empname = 'krithin' or empname = 'madhan'")
+      sql("select empno from carbontable " +

Review comment:
       ```suggestion
         sql("select empno from carbontable where empname = 'ayushi' or " +
             "empname = 'krithin' or empname = 'madhan'").collect().toSeq
   ```

##########
File path: index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestIndexModelWithUnsafeColumnPage.scala
##########
@@ -35,8 +35,8 @@ class TestIndexModelWithUnsafeColumnPage extends QueryTest with BeforeAndAfterAl
   }
 
   test("Test secondry index data count") {
-    checkAnswer(sql("select count(*) from testSecondryIndex_IndexTable")
-    ,Seq(Row(1)))
+    checkAnswer(sql("select count(*) from testSecondryIndex_IndexTable"),

Review comment:
       ```suggestion
       checkAnswer(sql("select count(*) from testSecondryIndex_IndexTable"), Seq(Row(1)))
   ```

##########
File path: index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestCreateIndexTable.scala
##########
@@ -262,40 +262,46 @@ class TestCreateIndexTable extends QueryTest with BeforeAndAfterAll {
         "projectjoindate Timestamp, projectenddate Timestamp, attendance int, " +
         "utilization int,salary int) STORED AS CARBONDATA")
     sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO " +
-        "TABLE carbontable OPTIONS('DELIMITER'=',', 'BAD_RECORDS_LOGGER_ENABLE'='FALSE', 'BAD_RECORDS_ACTION'='FORCE')")
+        "TABLE carbontable OPTIONS('DELIMITER'=',', 'BAD_RECORDS_LOGGER_ENABLE'='FALSE', " +
+        "'BAD_RECORDS_ACTION'='FORCE')")
     val withoutIndex =
-      sql("select empno from carbontable where empname = 'ayushi' or empname = 'krithin' or empname = 'madhan'")
+      sql("select empno from carbontable " +
+          "where empname = 'ayushi' or empname = 'krithin' or empname = 'madhan'")
        .collect().toSeq
-    sql("create index empnameindex on table carbontable (workgroupcategoryname,empname) AS 'carbondata'")
+    sql(
+      "create index empnameindex on table carbontable (" +
+      "workgroupcategoryname,empname) AS 'carbondata'")
 
-    checkAnswer(sql("select empno from carbontable where empname = 'ayushi' or empname = 'krithin' or empname = 'madhan'"),
+    checkAnswer(sql(

Review comment:
       ```suggestion
       checkAnswer(sql("select empno from carbontable where empname = 'ayushi' or " +
                       "empname = 'krithin' or empname = 'madhan'"), withoutIndex)
   ```

##########
File path: index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestSIWithSecondryIndex.scala
##########
@@ -296,14 +308,18 @@ class TestSIWithSecondryIndex extends QueryTest with BeforeAndAfterAll {
 
     sql(s"""ALTER TABLE default.index1 SET
            |SERDEPROPERTIES ('isSITableEnabled' = 'false')""".stripMargin)
-    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO " +
-        "TABLE uniqdata OPTIONS('DELIMITER'=',', 'BAD_RECORDS_LOGGER_ENABLE'='FALSE', 'BAD_RECORDS_ACTION'='FORCE')")
+    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO TABLE uniqdata " +
+        "OPTIONS('DELIMITER'=',','BAD_RECORDS_LOGGER_ENABLE'='FALSE','BAD_RECORDS_ACTION'='FORCE')")
     val count1 = sql("select * from uniqdata where workgroupcategoryname = 'developer'").count()
-    val df1 = sql("select * from uniqdata where workgroupcategoryname = 'developer'").queryExecution.sparkPlan
+    val df1 = sql("select * from uniqdata where workgroupcategoryname = 'developer'")
+      .queryExecution
+      .sparkPlan
     sql(s"""ALTER TABLE default.index1 SET
            |SERDEPROPERTIES ('isSITableEnabled' = 'false')""".stripMargin)
     val count2 = sql("select * from uniqdata where workgroupcategoryname = 'developer'").count()
-    val df2 = sql("select * from uniqdata where workgroupcategoryname = 'developer'").queryExecution.sparkPlan
+    val df2 = sql("select * from uniqdata where workgroupcategoryname = 'developer'")
+      .queryExecution
+      .sparkPlan

Review comment:
       Can move it to the previous line (lines 316 and 322).
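       e.g. something like:
   ```scala
   val df1 = sql("select * from uniqdata where workgroupcategoryname = 'developer'")
     .queryExecution.sparkPlan
   ```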

##########
File path: integration/presto/src/test/scala/org/apache/carbondata/presto/integrationtest/PrestoTestNonTransactionalTableFiles.scala
##########
@@ -587,13 +590,26 @@ class PrestoTestNonTransactionalTableFiles extends FunSuiteLike with BeforeAndAf
     val structType14 = new Field("stringField", DataTypes.STRING)
 
     try {
-      val options: util.Map[String, String] = Map("bAd_RECords_action" -> "FORCE", "quotechar" -> "\"").asJava
+      val options: util.Map[String, String] =
+        Map("bAd_RECords_action" -> "FORCE", "quotechar" -> "\"").asJava
       val builder = CarbonWriter.builder()
       val writer =
         builder.outputPath(writerPathComplex).withLoadOptions(options)
           .uniqueIdentifier(System.nanoTime()).withBlockSize(2).enableLocalDictionary(false)
-          .withCsvInput(new Schema(Array[Field](structType1,structType2,structType3,structType4,structType5,structType6,
-            structType7,structType8,structType9,structType10,structType11,structType12,structType13,structType14))).writtenBy("presto").build()
+          .withCsvInput(new Schema(Array[Field](structType1,

Review comment:
       ```suggestion
             .withCsvInput(new Schema(Array[Field](
               structType1, structType2, structType3, structType4, structType5, structType6,
               structType7, structType8, structType9, structType10, structType11, structType12,
               structType13, structType14))).writtenBy("presto").build()
   ```

##########
File path: integration/spark/src/test/scala/org/apache/carbondata/index/bloom/BloomCoarseGrainIndexFunctionSuite.scala
##########
@@ -77,7 +76,11 @@ class BloomCoarseGrainIndexFunctionSuite  extends QueryTest with BeforeAndAfterA
          | properties('BLOOM_SIZE'='640000')
       """.stripMargin)
 
-    IndexStatusUtil.checkIndexStatus(bloomSampleTable, indexName, IndexStatus.ENABLED.name(), sqlContext.sparkSession, IndexType.BLOOMFILTER)
+    IndexStatusUtil.checkIndexStatus(bloomSampleTable,

Review comment:
       ```suggestion
       IndexStatusUtil.checkIndexStatus(bloomSampleTable, indexName,
         IndexStatus.ENABLED.name(), sqlContext.sparkSession, IndexType.BLOOMFILTER)
   ```
   Can modify at all places.

##########
File path: integration/spark/src/test/scala/org/apache/carbondata/index/lucene/LuceneFineGrainIndexSuite.scala
##########
@@ -863,8 +891,13 @@ class LuceneFineGrainIndexSuite extends QueryTest with BeforeAndAfterAll {
          | ON index_test_table (name)
          | AS 'bloomfilter'
       """.stripMargin)
-    sql("show indexes on table index_test_table").show(false)
-    checkExistence(sql("show indexes on table index_test_table"), true, "dm", "dm1", "lucene", "bloomfilter")
+    sql("show indexes on table index_test_table").collect()
+    checkExistence(sql("show indexes on table index_test_table"),

Review comment:
       ```suggestion
      checkExistence(sql("show indexes on table index_test_table"),
         true, "dm", "dm1", "lucene", "bloomfilter")
   ```

##########
File path: integration/spark/src/test/scala/org/apache/carbondata/index/lucene/LuceneFineGrainIndexSuite.scala
##########
@@ -186,12 +191,21 @@ class LuceneFineGrainIndexSuite extends QueryTest with BeforeAndAfterAll {
 
     sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE index_test1 OPTIONS('header'='false')")
 
-    checkAnswer(sql("SELECT * FROM index_test1 WHERE TEXT_MATCH('name:n10')"), sql(s"select * from index_test1 where name='n10'"))
-
-    var carbonTable = CarbonEnv.getCarbonTable(Some("lucene"), "index_test1")(sqlContext.sparkSession)
-    var indexes = carbonTable.getIndexMetadata.getIndexesMap.get(IndexType.LUCENE.getIndexProviderName)
-      .asScala.filter(p => p._2.get(CarbonCommonConstants.INDEX_STATUS).equalsIgnoreCase(IndexStatus.ENABLED.name()))
-    assert(indexes.exists(p => p._1.equals("dm12") && p._2.get(CarbonCommonConstants.INDEX_STATUS) == IndexStatus.ENABLED.name()))
+    checkAnswer(sql("SELECT * FROM index_test1 WHERE TEXT_MATCH('name:n10')"),
+      sql(s"select * from index_test1 where name='n10'"))
+
+    var carbonTable = CarbonEnv.getCarbonTable(Some("lucene"), "index_test1")(sqlContext
+      .sparkSession)
+    val indexes = carbonTable.getIndexMetadata

Review comment:
       ```suggestion
       val indexes = carbonTable.getIndexMetadata.getIndexesMap
         .get(IndexType.LUCENE.getIndexProviderName)
         .asScala.filter(p => p._2.get(CarbonCommonConstants.INDEX_STATUS)
           .equalsIgnoreCase(IndexStatus.ENABLED.name()))
   ```

##########
File path: integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/aggquery/AllDataTypesTestCaseAggregate.scala
##########
@@ -46,21 +47,23 @@ class AllDataTypesTestCaseAggregate extends QueryTest with BeforeAndAfterAll {
       "Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname " +
       "String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance " +
       "int,utilization int,salary int)row format delimited fields terminated by ','")
-    sql(
-      s"LOAD DATA LOCAL INPATH '$resourcesPath/datawithoutheader.csv' INTO TABLE alldatatypesAGG_hive")
+    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/datawithoutheader.csv' " +
+        "INTO TABLE alldatatypesAGG_hive")
   }
 
   test(
     "select empno,empname,utilization,count(salary),sum(empno) from alldatatypestableAGG where " +
     "empname in ('arvind','ayushi') group by empno,empname,utilization")
   {
     checkAnswer(
-      sql(
-        "select empno,empname,utilization,count(salary),sum(empno) from alldatatypestableAGG where" +
-        " empname in ('arvind','ayushi') group by empno,empname,utilization"),
-      sql(
-        "select empno,empname,utilization,count(salary),sum(empno) from alldatatypesAGG_hive where" +
-        " empname in ('arvind','ayushi') group by empno,empname,utilization"))
+      sql("select empno,empname,utilization,count(salary),sum(empno) " +

Review comment:
       Can enable scalastyle linelength.
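       e.g. a sketch using the off/on toggles (as this PR already does in TestLoadDataWithCompression):
   ```scala
   // scalastyle:off lineLength
   checkAnswer(
     sql("select empno,empname,utilization,count(salary),sum(empno) from alldatatypestableAGG where empname in ('arvind','ayushi') group by empno,empname,utilization"),
     sql("select empno,empname,utilization,count(salary),sum(empno) from alldatatypesAGG_hive where empname in ('arvind','ayushi') group by empno,empname,utilization"))
   // scalastyle:on lineLength
   ```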

##########
File path: integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithCompression.scala
##########
@@ -528,8 +571,9 @@ class TestLoadDataWithCompression extends QueryTest with BeforeAndAfterEach with
     var exception = intercept[RuntimeException] {
       loadData()
     }
+    // scalastyle:off lineLength
     assertResult("For not carbondata native supported compressor, the result of method getName() should be the full class name. Expected 'org.apache.carbondata.core.datastore.compression.ZstdCompressor', found 'zstd'")(exception.getMessage)
-
+    // scalastyle:on lineLength

Review comment:
       Can enable linelength for the complete file to avoid changes at lines 419 and 624.

##########
File path: integration/spark/src/test/scala/org/apache/carbondata/index/bloom/BloomCoarseGrainIndexSuite.scala
##########
@@ -82,16 +82,24 @@ class BloomCoarseGrainIndexSuite extends QueryTest with BeforeAndAfterAll with B
       sql(s"select * from $bloomSampleTable where id = 1 and city='city_1'", indexName, shouldHit),
       sql(s"select * from $normalTable where id = 1 and city='city_1'"))
     checkAnswer(
-      sql(s"select * from $bloomSampleTable where id = 999 and city='city_999'", indexName, shouldHit),
+      sql(s"select * from $bloomSampleTable where id = 999 and city='city_999'",
+        indexName,
+        shouldHit),

Review comment:
       Can move it to the previous line for all such changes.
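       e.g. following the id = 1 case above, something like:
   ```scala
   checkAnswer(
     sql(s"select * from $bloomSampleTable where id = 999 and city='city_999'",
       indexName, shouldHit),
     sql(s"select * from $normalTable where id = 999 and city='city_999'"))
   ```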

##########
File path: index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestAlterTableColumnRenameWithIndex.scala
##########
@@ -73,9 +73,12 @@ class TestAlterTableColumnRenameWithIndex extends QueryTest with BeforeAndAfterA
     sql("alter table si_rename change c test string")
     sql("alter table si_rename change d testSI string")
     sql("show indexes on si_rename").collect
-    val query2 = sql("select test,testsi from si_rename where testsi = 'pqr' or test = 'def'").count()
+    val query2 = sql("select test,testsi from si_rename where testsi = 'pqr' or test = 'def'")
+      .count()
     assert(query1 == query2)
-    val df = sql("select test,testsi from si_rename where testsi = 'pqr' or test = 'def'").queryExecution.sparkPlan
+    val df = sql("select test,testsi from si_rename where testsi = 'pqr' or test = 'def'")
+      .queryExecution

Review comment:
       ```suggestion
         .queryExecution.sparkPlan
   ```

##########
File path: index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestNIQueryWithIndex.scala
##########
@@ -188,14 +191,17 @@ class TestNIQueryWithIndex extends QueryTest with BeforeAndAfterAll{
 
       val ch15 = sql("select count(*) from seccust where c_phone='25-989-741-2988' and c_mktsegment like 'BUI%LDING'")
       // equals on c_phone of I1, I2 & (length & startsWith & endswith) on c_mktsegment of I2 so SI - Yes
-      assert(checkSIColumnsSize(ch15, 3)) //size = EqualTo on c_phone, length, StartsWith
+      assert(checkSIColumnsSize(ch15, 3)) // size = EqualTo on c_phone, length, StartsWith
 
       val ch16 = sql("select * from seccust where c_phone='25-989-741-2988'")
       // Query has EqualTo so SI - Yes
       assert(checkSIColumnsSize(ch16, 1)) // size = EqualTo
 
-    } finally{
-      sql(s"set carbon.si.lookup.partialstring=${CarbonCommonConstants.ENABLE_SI_LOOKUP_PARTIALSTRING_DEFAULT}")
+    } finally {
+      sql(s"set carbon.si.lookup.partialstring=${
+        CarbonCommonConstants
+          .ENABLE_SI_LOOKUP_PARTIALSTRING_DEFAULT

Review comment:
       same as above comment

##########
File path: index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestSIWithSecondryIndex.scala
##########
@@ -128,25 +129,32 @@ class TestSIWithSecondryIndex extends QueryTest with BeforeAndAfterAll {
 
   test("test create secondary index global sort on partition table") {
     sql("drop table if exists partition_carbon_table")
-    sql("create table partition_carbon_table (name string, id string, country string) PARTITIONED BY(dateofjoin " +
+    sql("create table partition_carbon_table (" +
+        "name string, id string, country string) PARTITIONED BY(dateofjoin " +
       "string) stored as carbondata")
     // create SI before the inserting the data
-    sql("create index partition_carbon_table_index on table partition_carbon_table(id, country) as 'carbondata' properties" +
+    sql("create index partition_carbon_table_index on table partition_carbon_table(" +
+        "id, country) as 'carbondata' properties" +
        "('sort_scope'='global_sort', 'Global_sort_partitions'='3')")
     sql("insert into partition_carbon_table select 'xx', '2', 'china', '2020' " +
        "union all select 'xx', '1', 'india', '2021'")
     checkAnswerWithoutSort(sql("select id, country from partition_carbon_table_index"),
      Seq(Row("1", "india"), Row("2", "china")))
     // check for valid sort_scope
-    checkExistence(sql("describe formatted partition_carbon_table_index"), true, "Sort Scope global_sort")
+    checkExistence(sql("describe formatted partition_carbon_table_index"),
+      true,
+      "Sort Scope global_sort")

Review comment:
       Can move it to the previous line.
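       e.g. something like:
   ```scala
   checkExistence(sql("describe formatted partition_carbon_table_index"), true,
     "Sort Scope global_sort")
   ```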

##########
File path: integration/spark/src/test/scala/org/apache/carbondata/index/bloom/BloomCoarseGrainIndexFunctionSuite.scala
##########
@@ -803,27 +840,38 @@ class BloomCoarseGrainIndexFunctionSuite  extends QueryTest with BeforeAndAfterA
     sql(s"INSERT INTO $bloomSampleTable SELECT 'c1v2', 2, 'c3v2'")
 
     // two segments both has index files
-    val carbonTable = CarbonEnv.getCarbonTable(Option("default"), bloomSampleTable)(SparkTestQueryExecutor.spark)
+    val carbonTable = CarbonEnv.getCarbonTable(Option("default"), bloomSampleTable)(
+      SparkTestQueryExecutor.spark)
     import scala.collection.JavaConverters._
     (0 to 1).foreach { segId =>
-      val indexPath = CarbonTablePath.getIndexesStorePath(carbonTable.getTablePath, segId.toString, indexName)
-      assert(FileUtils.listFiles(FileUtils.getFile(indexPath), Array("bloomindexmerge"), true).asScala.nonEmpty)
+      val indexPath = CarbonTablePath.getIndexesStorePath(carbonTable.getTablePath,

Review comment:
       ```suggestion
         val indexPath = CarbonTablePath.getIndexesStorePath(carbonTable.getTablePath,
           segId.toString, indexName)
         assert(FileUtils.listFiles(FileUtils.getFile(indexPath), Array("bloomindexmerge"), true)
           .asScala.nonEmpty)
   ```

##########
File path: index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestNIQueryWithIndex.scala
##########
@@ -163,8 +163,11 @@ class TestNIQueryWithIndex extends QueryTest with BeforeAndAfterAll{
       // Query has EqualTo - So SI = Yes
       assert(checkSIColumnsSize(ch24, 1)) // EqualTo
 
-    }finally{
-      sql(s"set carbon.si.lookup.partialstring=${CarbonCommonConstants.ENABLE_SI_LOOKUP_PARTIALSTRING_DEFAULT}")
+    } finally {
+      sql(s"set carbon.si.lookup.partialstring=${
+        CarbonCommonConstants

Review comment:
       ```suggestion
            CarbonCommonConstants.ENABLE_SI_LOOKUP_PARTIALSTRING_DEFAULT
   ```

##########
File path: integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/aggquery/IntegerDataTypeTestCase.scala
##########
@@ -143,9 +147,52 @@ class IntegerDataTypeTestCase extends QueryTest with BeforeAndAfterAll {
 
     val ff = BigInt(2147484000L)
     checkAnswer(
-      sql("select begin_time,begin_time1,begin_time2,begin_time3,begin_time4,begin_time5,begin_time6,begin_time7,begin_time8,begin_time9,begin_time10,begin_time11,begin_time12,begin_time13,begin_time14,begin_time15,begin_time16,begin_time17,begin_time18,begin_time19,begin_time20 from all_encoding_table"),
-      Seq(Row(1497376581,10000,8388600,125,1497376581,8386600,10000,100,125,1497376581,1497423738,2139095000,1497376581,1497423738,32000,123.4,11.1,3200.1,214744460.2,1497376581,1497376581),
-        Row(1497408581,32000,45000,25,10000,55000,32000,75,35,1497423838,1497423838,ff,1497423838,1497423838,31900,838860.7,12.3,127.1,214748360.2,1497408581,1497408581))
+      sql("select begin_time,begin_time1,begin_time2,begin_time3,begin_time4,begin_time5," +

Review comment:
       Can enable scalastyle linelength.

##########
File path: integration/spark/src/test/java/org/apache/carbondata/sdk/util/BinaryUtil.java
##########
@@ -17,13 +17,16 @@
 
 package org.apache.carbondata.sdk.util;
 
+import java.io.BufferedInputStream;

Review comment:
       Can replace with:
   import java.io.*;
   import java.nio.charset.StandardCharsets;

##########
File path: integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/primitiveTypes/TestAdaptiveEncodingForPrimitiveTypes.scala
##########
@@ -252,13 +267,19 @@ class TestAdaptiveEncodingForPrimitiveTypes extends QueryTest with BeforeAndAfte
     checkAnswer(sql("select * from uniqdata"), sql("select * from uniqdata_Compare"))
 
     // negative data compaction test
-    sql("create table negativeTable (intColumn int,stringColumn string,shortColumn short) STORED AS carbondata TBLPROPERTIES('SORT_COLUMNS'='intColumn,shortColumn')")
-    sql(s"load data inpath '${resourcesPath + "/dataWithNegativeValues.csv"}' into table negativeTable options('FILEHEADER'='intColumn,stringColumn,shortColumn')")
+    sql("create table negativeTable (intColumn int,stringColumn string,shortColumn short) " +
+        "STORED AS carbondata TBLPROPERTIES('SORT_COLUMNS'='intColumn,shortColumn')")
+    sql(s"load data inpath '${resourcesPath + "/dataWithNegativeValues.csv"}' " +
+        s"into table negativeTable options('FILEHEADER'='intColumn,stringColumn,shortColumn')")
     checkAnswer(sql("select * from negativeTable"), sql("select * from negativeTable_Compare"))
 
-    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_UNSAFE_SORT, CarbonCommonConstants.ENABLE_UNSAFE_SORT_DEFAULT)
-      .addProperty(CarbonCommonConstants.ENABLE_UNSAFE_COLUMN_PAGE, CarbonCommonConstants.ENABLE_UNSAFE_COLUMN_PAGE_DEFAULT)
-      .addProperty(CarbonCommonConstants.ENABLE_UNSAFE_IN_QUERY_EXECUTION, CarbonCommonConstants.ENABLE_UNSAFE_IN_QUERY_EXECUTION_DEFAULTVALUE)
+    CarbonProperties.getInstance()

Review comment:
       Can move this to a method and reuse at lines 315 and 469.
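       e.g. a sketch of the extracted helper (the method name is only illustrative):
   ```scala
   private def resetUnsafeProperties(): Unit = {
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.ENABLE_UNSAFE_SORT,
         CarbonCommonConstants.ENABLE_UNSAFE_SORT_DEFAULT)
       .addProperty(CarbonCommonConstants.ENABLE_UNSAFE_COLUMN_PAGE,
         CarbonCommonConstants.ENABLE_UNSAFE_COLUMN_PAGE_DEFAULT)
       .addProperty(CarbonCommonConstants.ENABLE_UNSAFE_IN_QUERY_EXECUTION,
         CarbonCommonConstants.ENABLE_UNSAFE_IN_QUERY_EXECUTION_DEFAULTVALUE)
   }
   ```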

##########
File path: integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableQueryFilter.scala
##########
@@ -96,25 +98,97 @@ class TestStreamingTableQueryFilter extends QueryTest with BeforeAndAfterAll {
     // filter
     checkAnswer(
       sql("select * from streaming_table_filter.stream_filter where id = 1"),
-      Seq(Row(1, "name_1", "city_1", 10000.0, BigDecimal.valueOf(0.01), 80.01, Date.valueOf("1990-01-01"), Timestamp.valueOf("2010-01-01 10:01:01.0"), Timestamp.valueOf("2010-01-01 10:01:01.0"), ("1" + longStrValue), Row(wrap(Array("school_1", "school_11")), 1))))
+      Seq(Row(1,

Review comment:
       Can enable scalastyle off/on to avoid changes in this file.

##########
File path: integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/emptyrow/TestSkipEmptyLines.scala
##########
@@ -33,50 +33,65 @@ class TestSkipEmptyLines extends QueryTest with BeforeAndAfterAll {
   test("test load options with true") {
     sql("drop table if exists skipEmptyRowCarbonTable")
     sql("CREATE TABLE skipEmptyRowCarbonTable (name string, age int) STORED AS carbondata")
-    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' into table skipEmptyRowCarbonTable " +
-        s"OPTIONS('skip_empty_line'='true')")
-    checkAnswer(sql("select * from skipEmptyRowCarbonTable"), Seq(Row("a",25),Row("b",22),Row("c",23)))
+    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' " +
+        "into table skipEmptyRowCarbonTable OPTIONS('skip_empty_line'='true')")
+    checkAnswer(sql("select * from skipEmptyRowCarbonTable"),
+      Seq(Row("a", 25), Row("b", 22), Row("c", 23)))
   }
 
   test("test load options with false") {
     sql("drop table if exists skipEmptyRowCarbonTable")
     sql("CREATE TABLE skipEmptyRowCarbonTable (name string, age int) STORED AS carbondata")
-    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' into table skipEmptyRowCarbonTable " +
-        s"OPTIONS('skip_empty_line'='false')")
+    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' " +
+        "into table skipEmptyRowCarbonTable OPTIONS('skip_empty_line'='false')")
     checkAnswer(sql("select * from skipEmptyRowCarbonTable"),
-      Seq(Row("a",25),Row("b",22),Row("c",23),Row(null,null),Row(null,null),Row(null,null)))
+      Seq(Row("a", 25),

Review comment:
       ```suggestion
         Seq(Row("a", 25), Row("b", 22), Row("c", 23), Row(null, null), 
           Row(null, null), Row(null, null)))
   ```

##########
File path: integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/emptyrow/TestSkipEmptyLines.scala
##########
@@ -33,50 +33,65 @@ class TestSkipEmptyLines extends QueryTest with BeforeAndAfterAll {
   test("test load options with true") {
     sql("drop table if exists skipEmptyRowCarbonTable")
     sql("CREATE TABLE skipEmptyRowCarbonTable (name string, age int) STORED AS carbondata")
-    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' into table skipEmptyRowCarbonTable " +
-        s"OPTIONS('skip_empty_line'='true')")
-    checkAnswer(sql("select * from skipEmptyRowCarbonTable"), Seq(Row("a",25),Row("b",22),Row("c",23)))
+    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' " +
+        "into table skipEmptyRowCarbonTable OPTIONS('skip_empty_line'='true')")
+    checkAnswer(sql("select * from skipEmptyRowCarbonTable"),
+      Seq(Row("a", 25), Row("b", 22), Row("c", 23)))
   }
 
   test("test load options with false") {
     sql("drop table if exists skipEmptyRowCarbonTable")
     sql("CREATE TABLE skipEmptyRowCarbonTable (name string, age int) STORED AS carbondata")
-    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' into table skipEmptyRowCarbonTable " +
-        s"OPTIONS('skip_empty_line'='false')")
+    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' " +
+        "into table skipEmptyRowCarbonTable OPTIONS('skip_empty_line'='false')")
     checkAnswer(sql("select * from skipEmptyRowCarbonTable"),
-      Seq(Row("a",25),Row("b",22),Row("c",23),Row(null,null),Row(null,null),Row(null,null)))
+      Seq(Row("a", 25),
+        Row("b", 22),
+        Row("c", 23),
+        Row(null, null),
+        Row(null, null),
+        Row(null, null)))
   }
 
   test("test carbonproperties with true") {
     CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_SKIP_EMPTY_LINE, "true")
     sql("drop table if exists skipEmptyRowCarbonTable")
     sql("CREATE TABLE skipEmptyRowCarbonTable (name string, age int) STORED AS carbondata")
-    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' into table skipEmptyRowCarbonTable")
+    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' " +
+        "into table skipEmptyRowCarbonTable")
     checkAnswer(sql("select * from skipEmptyRowCarbonTable"),
-      Seq(Row("a",25),Row("b",22),Row("c",23)))
+      Seq(Row("a", 25), Row("b", 22), Row("c", 23)))
     CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_SKIP_EMPTY_LINE,
       CarbonCommonConstants.CARBON_SKIP_EMPTY_LINE_DEFAULT)
   }
 
   test("test carbonproperties with false") {
-    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_SKIP_EMPTY_LINE, "false")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_SKIP_EMPTY_LINE, "false")
     sql("drop table if exists skipEmptyRowCarbonTable")
     sql("CREATE TABLE skipEmptyRowCarbonTable (name string, age int) STORED AS carbondata")
-    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' into table skipEmptyRowCarbonTable")
+    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptylines.csv' " +
+        "into table skipEmptyRowCarbonTable")
     checkAnswer(sql("select * from skipEmptyRowCarbonTable"),
-      Seq(Row("a",25),Row("b",22),Row("c",23),Row(null,null),Row(null,null),Row(null,null)))
+      Seq(Row("a", 25),

Review comment:
       Same as the above comment; handle for line 106 as well.

##########
File path: integration/flink/src/test/scala/org/apache/carbon/flink/TestCarbonPartitionWriter.scala
##########
@@ -58,7 +57,11 @@ class TestCarbonPartitionWriter extends QueryTest with BeforeAndAfterAll{
 
       val dataCount = 1000
       val source = getTestSource(dataCount)
-      executeStreamingEnvironment(tablePath, writerProperties, carbonProperties, environment, source)
+      executeStreamingEnvironment(tablePath,

Review comment:
       ```suggestion
         executeStreamingEnvironment(tablePath, writerProperties, carbonProperties,
           environment, source)
   ```
   Can modify at all places.

##########
File path: integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithLongString.scala
##########
@@ -108,7 +109,16 @@ class TestStreamingTableWithLongString extends QueryTest with BeforeAndAfterAll
     )
 
     val row = sql("select * from streaming_longstr.stream_table_longstr_file order by id").head()
-    val exceptedRow = Row(10, "name_10", "city_10", 100000.0, BigDecimal.valueOf(0.01), 80.01, Date.valueOf("1990-01-01"), Timestamp.valueOf("2010-01-01 10:01:01.0"), Timestamp.valueOf("2010-01-01 10:01:01.0"), "10" + longStrValue)
+    val exceptedRow = Row(10,

Review comment:
       Can enable scalastyle off/on to avoid changes in this file.

##########
File path: integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/allqueries/TestPruneUsingSegmentMinMax.scala
##########
@@ -46,26 +47,32 @@ class TestPruneUsingSegmentMinMax extends QueryTest with BeforeAndAfterAll {
   private def createTablesAndLoadData = {
     drop
     sql("create table carbon(a int, b string, c double,d int,e timestamp) stored as carbondata")
-    sql("insert into carbon values(1,'ab',23.4,5,'2017-09-01 00:00:00'),(2,'aa',23.6,8,'2017-09-02 00:00:00')")
-    sql("insert into carbon values(3,'ab',23.4,5,'2017-09-01 00:00:00'),(4,'aa',23.6,8,'2017-09-02 00:00:00')")
-    sql("insert into carbon values(5,'ab',23.4,5,'2017-09-01 00:00:00'),(6,'aa',23.6,8,'2017-09-02 00:00:00')")
+    sql("insert into carbon values" +

Review comment:
       Can enable scalastyle linelength off/on.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

