carbondata git commit: [CARBONDATA-2698][CARBONDATA-2700][CARBONDATA-2732][BloomDataMap] block some operations of bloomfilter datamap

2018-07-17 Thread xuchuanyin
Repository: carbondata
Updated Branches:
  refs/heads/master 8e7895715 -> 1c4358e89


[CARBONDATA-2698][CARBONDATA-2700][CARBONDATA-2732][BloomDataMap] block some operations of bloomfilter datamap

1. Block creating a bloomfilter datamap index on columns whose datatype is a complex type;
2. Block changing the datatype of a bloomfilter index column;
3. Block dropping the index columns of a bloomfilter index datamap.

This closes #2505
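For illustration, a minimal Scala sketch of how the three blocked operations surface at the SQL level (the table, column, and datamap names are assumptions, not taken from this commit):

import org.apache.spark.sql.SparkSession

object BlockedBloomOpsSketch {
  // run a statement that is expected to be rejected and report the outcome
  def expectBlocked(spark: SparkSession, statement: String): Unit =
    try {
      spark.sql(statement)
      println(s"UNEXPECTED: succeeded: $statement")
    } catch {
      case e: Exception => println(s"blocked as expected: ${e.getMessage}")
    }

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("blocked-bloom-ops").getOrCreate()
    spark.sql(
      "CREATE TABLE t (id INT, name STRING, props STRUCT<a:INT, b:STRING>) " +
        "STORED BY 'carbondata'")
    // 1. creating a bloomfilter datamap on a complex-type column is blocked
    expectBlocked(spark,
      "CREATE DATAMAP dm ON TABLE t USING 'bloomfilter' DMPROPERTIES('INDEX_COLUMNS'='props')")
    // create a bloomfilter datamap on a primitive column, then ...
    spark.sql(
      "CREATE DATAMAP dm ON TABLE t USING 'bloomfilter' DMPROPERTIES('INDEX_COLUMNS'='id')")
    // 2. changing the datatype of a bloom index column is blocked
    expectBlocked(spark, "ALTER TABLE t CHANGE id id BIGINT")
    // 3. dropping a bloom index column is blocked
    expectBlocked(spark, "ALTER TABLE t DROP COLUMNS(id)")
  }
}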


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/1c4358e8
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/1c4358e8
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/1c4358e8

Branch: refs/heads/master
Commit: 1c4358e89f5cba1132e9512107d3a0cb22087b7b
Parents: 8e78957
Author: Sssan520 
Authored: Mon Jul 16 10:59:43 2018 +0800
Committer: xuchuanyin 
Committed: Tue Jul 17 16:34:14 2018 +0800

--
 .../core/datamap/dev/DataMapFactory.java| 13 +++
 .../core/metadata/schema/table/CarbonTable.java | 13 ++-
 .../bloom/BloomCoarseGrainDataMapFactory.java   | 37 +++-
 .../datamap/CarbonCreateDataMapCommand.scala| 10 ++
 .../CarbonAlterTableDataTypeChangeCommand.scala |  3 +-
 .../CarbonAlterTableDropColumnCommand.scala |  3 +-
 .../bloom/BloomCoarseGrainDataMapSuite.scala| 99 
 7 files changed, 171 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/1c4358e8/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
index 0889f8b..ab0f8ea 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
@@ -144,4 +144,17 @@ public abstract class DataMapFactory {
 }
   }
 
+  /**
+   * whether to block operation on corresponding table or column.
+   * For example, bloomfilter datamap will block changing datatype for bloomindex column.
+   * By default it will not block any operation.
+   *
+   * @param operation table operation
+   * @param targets objects which the operation impact on
+   * @return true the operation will be blocked;false the operation will not be blocked
+   */
+  public boolean isOperationBlocked(TableOperation operation, Object... targets) {
+    return false;
+  }
+
 }
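To make the new hook concrete, a self-contained Scala sketch of the override pattern, using simplified stand-in types rather than the real CarbonData classes:

// Simplified stand-ins; illustrates the default-false hook pattern only.
object TableOperation extends Enumeration {
  val ALTER_CHANGE_DATATYPE, ALTER_DROP = Value
}

abstract class DataMapFactoryLike(val indexColumns: Set[String]) {
  // mirrors the new default in DataMapFactory: nothing is blocked unless overridden
  def isOperationBlocked(op: TableOperation.Value, targets: Any*): Boolean = false
}

class BloomFactoryLike(indexColumns: Set[String]) extends DataMapFactoryLike(indexColumns) {
  override def isOperationBlocked(op: TableOperation.Value, targets: Any*): Boolean =
    op match {
      // block datatype change / column drop when a bloom index column is targeted
      case TableOperation.ALTER_CHANGE_DATATYPE | TableOperation.ALTER_DROP =>
        targets.exists(t => indexColumns.contains(t.toString.toLowerCase))
      case _ => false
    }
}

object HookDemo extends App {
  val factory = new BloomFactoryLike(Set("city"))
  println(factory.isOperationBlocked(TableOperation.ALTER_CHANGE_DATATYPE, "city")) // true
  println(factory.isOperationBlocked(TableOperation.ALTER_CHANGE_DATATYPE, "name")) // false
}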

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1c4358e8/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
index 71256d4..995f943 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
@@ -1054,11 +1054,12 @@ public class CarbonTable implements Serializable {
   /**
    * methods returns true if operation is allowed for the corresponding datamap or not
    * if this operation makes datamap stale it is not allowed
-   * @param carbonTable
-   * @param operation
-   * @return
+   * @param carbonTable carbontable to be operated
+   * @param operation which operation on the table,such as drop column,change datatype.
+   * @param targets objects which the operation impact on,such as column
+   * @return true allow;false not allow
    */
-  public boolean canAllow(CarbonTable carbonTable, TableOperation operation) {
+  public boolean canAllow(CarbonTable carbonTable, TableOperation operation, Object... targets) {
     try {
       List<TableDataMap> datamaps = DataMapStoreManager.getInstance().getAllDataMap(carbonTable);
       if (!datamaps.isEmpty()) {
@@ -1069,6 +1070,10 @@ public class CarbonTable implements Serializable {
           if (factoryClass.willBecomeStale(operation)) {
             return false;
           }
+          // check whether the operation is blocked for datamap
+          if (factoryClass.isOperationBlocked(operation, targets)) {
+            return false;
+          }
         }
       }
     } catch (Exception e) {
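On the command side, the alter commands (see the two CarbonAlterTable*Command files in the diffstat) can now pass the affected column to canAllow. A hedged Scala sketch of such a guard; the package paths, enum constant, and error message are assumptions, not quoted from this patch:

import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
import org.apache.carbondata.core.features.TableOperation
import org.apache.carbondata.core.metadata.schema.table.CarbonTable

def guardDataTypeChange(carbonTable: CarbonTable, alterColumnName: String): Unit = {
  // pass the affected column as the target so a datamap factory can veto the change
  if (!carbonTable.canAllow(carbonTable, TableOperation.ALTER_CHANGE_DATATYPE, alterColumnName)) {
    throw new MalformedCarbonCommandException(
      "alter table change datatype is not supported for index datamap")
  }
  // ... proceed with the datatype change
}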

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1c4358e8/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
--
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java b/datamap/bloom/src/main/j

Jenkins build is back to normal : carbondata-master-spark-2.2 #671

2018-07-17 Thread Apache Jenkins Server




carbondata git commit: [HotFix] Getting carbon table identifier to datamap events

2018-07-17 Thread kunalkapoor
Repository: carbondata
Updated Branches:
  refs/heads/master 1c4358e89 -> aec47e06f


[HotFix] Getting carbon table identifier to datamap events

Passing the table identifier to keep track of the table in the pre-load and post-load datamap events.

This closes #2448


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/aec47e06
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/aec47e06
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/aec47e06

Branch: refs/heads/master
Commit: aec47e06ff57dbfe6180f7ba2574700ac07ae8f1
Parents: 1c4358e
Author: Jatin 
Authored: Wed Jul 4 19:53:48 2018 +0530
Committer: kunal642 
Committed: Tue Jul 17 14:49:16 2018 +0530

--
 .../org/apache/carbondata/events/DataMapEvents.scala   | 13 +
 .../command/datamap/CarbonCreateDataMapCommand.scala   | 12 
 .../command/datamap/CarbonDataMapRebuildCommand.scala  |  8 ++--
 .../command/datamap/CarbonDropDataMapCommand.scala |  4 ++--
 4 files changed, 25 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/aec47e06/integration/spark-common/src/main/scala/org/apache/carbondata/events/DataMapEvents.scala
--
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/events/DataMapEvents.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/events/DataMapEvents.scala
index 8fb374f..72c980c 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/events/DataMapEvents.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/events/DataMapEvents.scala
@@ -18,6 +18,7 @@
 package org.apache.carbondata.events
 
 import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.TableIdentifier
 
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
 
@@ -26,14 +27,16 @@ import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
  * example: bloom datamap, Lucene datamap
  */
 case class CreateDataMapPostExecutionEvent(sparkSession: SparkSession,
-    storePath: String) extends Event with CreateDataMapEventsInfo
+    storePath: String, tableIdentifier: TableIdentifier)
+  extends Event with CreateDataMapEventsInfo
 
 /**
  * For handling operation's before start of update index datmap status over table with index datamap
  * example: bloom datamap, Lucene datamap
  */
 case class UpdateDataMapPreExecutionEvent(sparkSession: SparkSession,
-    storePath: String) extends Event with CreateDataMapEventsInfo
+    storePath: String, tableIdentifier: TableIdentifier)
+  extends Event with CreateDataMapEventsInfo
 
 /**
  * For handling operation's after finish of  update index datmap status over table with index
@@ -41,7 +44,8 @@ case class UpdateDataMapPreExecutionEvent(sparkSession: SparkSession,
  * example: bloom datamap, Lucene datamap
  */
 case class UpdateDataMapPostExecutionEvent(sparkSession: SparkSession,
-    storePath: String) extends Event with CreateDataMapEventsInfo
+    storePath: String, tableIdentifier: TableIdentifier)
+  extends Event with CreateDataMapEventsInfo
 
 /**
  * For handling operation's before start of index build over table with index datamap
@@ -64,5 +68,6 @@ case class BuildDataMapPostExecutionEvent(sparkSession: SparkSession,
  * example: bloom datamap, Lucene datamap
  */
 case class CreateDataMapPreExecutionEvent(sparkSession: SparkSession,
-    storePath: String) extends Event with CreateDataMapEventsInfo
+    storePath: String, tableIdentifier: TableIdentifier)
+  extends Event with CreateDataMapEventsInfo
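With the extra field in place, a caller constructs and fires one of the amended events roughly as below; a sketch following the OperationListenerBus pattern used elsewhere in carbondata, with an illustrative TableIdentifier value:

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.carbondata.events.{CreateDataMapPostExecutionEvent, OperationContext, OperationListenerBus}

def firePostEvent(spark: SparkSession, systemFolderLocation: String): Unit = {
  val operationContext = new OperationContext()
  // the event now also carries the table identity, so listeners can track the table
  val event = CreateDataMapPostExecutionEvent(
    spark, systemFolderLocation, TableIdentifier("fact_table", Some("default")))
  OperationListenerBus.getInstance().fireEvent(event, operationContext)
}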
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/aec47e06/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
--
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
index 33dba28..7600160 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
@@ -125,13 +125,15 @@ case class CarbonCreateDataMapCommand(
     val operationContext: OperationContext = new OperationContext()
     val systemFolderLocation: String = CarbonProperties.getInstance().getSystemFolderLocation
     val createDataMapPreExecutionEvent: CreateDataMapPreExecutionEvent =
-      new CreateDataMapPreExecutionEvent(sparkSession, syste

Jenkins build is back to normal : carbondata-master-spark-2.1 #2631

2018-07-17 Thread Apache Jenkins Server




Jenkins build became unstable: carbondata-master-spark-2.2 » Apache CarbonData :: Spark Common Test #673

2018-07-17 Thread Apache Jenkins Server




Jenkins build became unstable: carbondata-master-spark-2.2 #673

2018-07-17 Thread Apache Jenkins Server




carbondata git commit: [CARBONDATA-2727][BloomDataMap] Support create bloom datamap on newly added column

2018-07-17 Thread xuchuanyin
Repository: carbondata
Updated Branches:
  refs/heads/master aec47e06f -> 81038f55e


[CARBONDATA-2727][BloomDataMap] Support create bloom datamap on newly added column

Add a result collector with rowId information for datamap rebuild if the table schema has changed;
Use the keygenerator to retrieve the surrogate value of dictIndexColumn from the query result.

This closes #2490
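The rowId-aware collector appends position information to every row. A minimal Scala sketch of splitting that suffix off, mirroring the consumption logic in IndexDataMapRebuildRDD shown later in this digest:

// split a collector row into (data columns, pageId, rowId); the last two
// elements are the position suffix appended by the rowId-aware collector
def splitPosition(rowWithPosition: Array[AnyRef]): (Array[AnyRef], Int, Int) = {
  val size = rowWithPosition.length
  val pageId = rowWithPosition(size - 2).asInstanceOf[Int] // page within the blocklet
  val rowId = rowWithPosition(size - 1).asInstanceOf[Int]  // row within the page
  (rowWithPosition.take(size - 2), pageId, rowId)
}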


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/81038f55
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/81038f55
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/81038f55

Branch: refs/heads/master
Commit: 81038f55ef9a582f82305378988f603ded76e524
Parents: aec47e0
Author: Manhua 
Authored: Wed Jul 11 19:39:31 2018 +0800
Committer: xuchuanyin 
Committed: Tue Jul 17 23:31:43 2018 +0800

--
 .../scan/collector/ResultCollectorFactory.java  |  31 ++---
 ...RowIdRestructureBasedRawResultCollector.java | 138 +++
 .../bloom/AbstractBloomDataMapWriter.java   |  72 +-
 .../bloom/BloomCoarseGrainDataMapFactory.java   |   2 +-
 .../datamap/bloom/BloomDataMapBuilder.java  |   8 ++
 .../datamap/bloom/BloomDataMapWriter.java   |  72 ++
 .../datamap/IndexDataMapRebuildRDD.scala| 131 +++---
 .../bloom/BloomCoarseGrainDataMapSuite.scala|  96 +
 8 files changed, 413 insertions(+), 137 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/81038f55/core/src/main/java/org/apache/carbondata/core/scan/collector/ResultCollectorFactory.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/ResultCollectorFactory.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/ResultCollectorFactory.java
index ea4afd1..e0a0b90 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/ResultCollectorFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/ResultCollectorFactory.java
@@ -18,15 +18,7 @@ package org.apache.carbondata.core.scan.collector;
 
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
-import org.apache.carbondata.core.scan.collector.impl.AbstractScannedResultCollector;
-import org.apache.carbondata.core.scan.collector.impl.DictionaryBasedResultCollector;
-import org.apache.carbondata.core.scan.collector.impl.DictionaryBasedVectorResultCollector;
-import org.apache.carbondata.core.scan.collector.impl.RawBasedResultCollector;
-import org.apache.carbondata.core.scan.collector.impl.RestructureBasedDictionaryResultCollector;
-import org.apache.carbondata.core.scan.collector.impl.RestructureBasedRawResultCollector;
-import org.apache.carbondata.core.scan.collector.impl.RestructureBasedVectorResultCollector;
-import org.apache.carbondata.core.scan.collector.impl.RowIdBasedResultCollector;
-import org.apache.carbondata.core.scan.collector.impl.RowIdRawBasedResultCollector;
+import org.apache.carbondata.core.scan.collector.impl.*;
 import org.apache.carbondata.core.scan.executor.infos.BlockExecutionInfo;
 
 /**
@@ -51,14 +43,21 @@ public class ResultCollectorFactory {
     AbstractScannedResultCollector scannerResultAggregator = null;
     if (blockExecutionInfo.isRawRecordDetailQuery()) {
       if (blockExecutionInfo.isRestructuredBlock()) {
-        LOGGER.info("Restructure based raw collector is used to scan and collect the data");
-        scannerResultAggregator = new RestructureBasedRawResultCollector(blockExecutionInfo);
-      } else if (blockExecutionInfo.isRequiredRowId()) {
-        LOGGER.info("RowId based raw collector is used to scan and collect the data");
-        scannerResultAggregator = new RowIdRawBasedResultCollector(blockExecutionInfo);
+        if (blockExecutionInfo.isRequiredRowId()) {
+          LOGGER.info("RowId Restructure based raw ollector is used to scan and collect the data");
+          scannerResultAggregator = new RowIdRestructureBasedRawResultCollector(blockExecutionInfo);
+        } else {
+          LOGGER.info("Restructure based raw collector is used to scan and collect the data");
+          scannerResultAggregator = new RestructureBasedRawResultCollector(blockExecutionInfo);
+        }
       } else {
-        LOGGER.info("Row based raw collector is used to scan and collect the data");
-        scannerResultAggregator = new RawBasedResultCollector(blockExecutionInfo);
+        if (blockExecutionInfo.isRequiredRowId()) {
+          LOGGER.info("RowId based raw collector is used to scan and collect the data");
+          scannerResultAggregator = new RowIdRawBasedResultCollector(blockExecutionInfo);
+        } else {
+          LOGGER.info("Row based raw col

carbondata git commit: [CARBONDATA-2724][DataMap]Unsupported create datamap on table with V1 or V2 format data

2018-07-17 Thread xuchuanyin
Repository: carbondata
Updated Branches:
  refs/heads/master 81038f55e -> a16289786


[CARBONDATA-2724][DataMap] Unsupported create datamap on table with V1 or V2 format data

Block creating a datamap on a carbon table with V1 or V2 format data.
Currently the version info is read from the carbon data file.

This closes #2488


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/a1628978
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/a1628978
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/a1628978

Branch: refs/heads/master
Commit: a162897862c92947ea8fd63713b7dbe6098f3b13
Parents: 81038f5
Author: ndwangsen 
Authored: Wed Jul 11 17:41:25 2018 +0800
Committer: xuchuanyin 
Committed: Tue Jul 17 23:35:50 2018 +0800

--
 .../apache/carbondata/core/util/CarbonUtil.java | 51 
 .../datamap/CarbonCreateDataMapCommand.scala|  8 ++-
 2 files changed, 58 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/a1628978/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
index 9796696..642fe8e 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
@@ -88,6 +88,7 @@ import org.apache.carbondata.core.util.path.CarbonTablePath;
 import org.apache.carbondata.format.BlockletHeader;
 import org.apache.carbondata.format.DataChunk2;
 import org.apache.carbondata.format.DataChunk3;
+import org.apache.carbondata.format.FileHeader;
 
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
@@ -3184,4 +3185,54 @@ public final class CarbonUtil {
 }
 return columnLocalDictGenMap;
   }
+
+  /**
+   * This method get the carbon file format version
+   *
+   * @param carbonTable carbon Table
+   */
+  public static ColumnarFormatVersion getFormatVersion(CarbonTable carbonTable)
+      throws IOException {
+    String storePath = null;
+    // if the carbontable is support flat folder
+    boolean supportFlatFolder = carbonTable.isSupportFlatFolder();
+    if (supportFlatFolder) {
+      storePath = carbonTable.getTablePath();
+    } else {
+      // get the valid segments
+      SegmentStatusManager segmentStatusManager =
+          new SegmentStatusManager(carbonTable.getAbsoluteTableIdentifier());
+      SegmentStatusManager.ValidAndInvalidSegmentsInfo validAndInvalidSegmentsInfo =
+          segmentStatusManager.getValidAndInvalidSegments();
+      List<Segment> validSegments = validAndInvalidSegmentsInfo.getValidSegments();
+      CarbonProperties carbonProperties = CarbonProperties.getInstance();
+      if (validSegments.isEmpty()) {
+        return carbonProperties.getFormatVersion();
+      }
+      storePath = carbonTable.getSegmentPath(validSegments.get(0).getSegmentNo());
+    }
+
+    CarbonFile[] carbonFiles = FileFactory
+        .getCarbonFile(storePath)
+        .listFiles(new CarbonFileFilter() {
+          @Override
+          public boolean accept(CarbonFile file) {
+            if (file == null) {
+              return false;
+            }
+            return file.getName().endsWith("carbondata");
+          }
+        });
+    if (carbonFiles == null || carbonFiles.length < 1) {
+      return CarbonProperties.getInstance().getFormatVersion();
+    }
+
+    CarbonFile carbonFile = carbonFiles[0];
+    // get the carbon file header
+    CarbonHeaderReader headerReader = new CarbonHeaderReader(carbonFile.getCanonicalPath());
+    FileHeader fileHeader = headerReader.readHeader();
+    int version = fileHeader.getVersion();
+    return ColumnarFormatVersion.valueOf((short) version);
+  }
 }
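A sketch of how a create-datamap command might use the new helper to reject old-format tables; the exception type and message are assumptions, while CarbonUtil.getFormatVersion and ColumnarFormatVersion come from this patch:

import org.apache.carbondata.common.exceptions.sql.MalformedDataMapCommandException
import org.apache.carbondata.core.metadata.ColumnarFormatVersion
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.util.CarbonUtil

def checkFormatVersion(table: CarbonTable): Unit = {
  // version is read from the header of one carbondata file of the first valid segment
  val version = CarbonUtil.getFormatVersion(table)
  if (version == ColumnarFormatVersion.V1 || version == ColumnarFormatVersion.V2) {
    throw new MalformedDataMapCommandException(
      "creating datamap is unsupported on tables with V1 or V2 format data")
  }
}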

http://git-wip-us.apache.org/repos/asf/carbondata/blob/a1628978/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
--
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
index 7600160..336793e 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
@@ -26,9 +26,10 @@ import org.apache.carbondata.common.exceptions.sql.{MalformedCarbonCommandExcept
 import org.apache.carbondata.common.logging.LogServiceF

carbondata git commit: [CARBONDATA-2746][BloomDataMap] Fix bug for getting datamap file when table has multiple datamaps

2018-07-17 Thread xuchuanyin
Repository: carbondata
Updated Branches:
  refs/heads/master a16289786 -> 4612e0031


[CARBONDATA-2746][BloomDataMap] Fix bug for getting datamap file when table has multiple datamaps

Currently, if a table has multiple bloom datamaps and carbon is set to use distributed datamaps, a query will throw an exception when accessing the index file, because carbon gets all the datamaps but sets them with the same datamap schema. The error appears when building the full path of the bloom index by concatenating the index directory and the index column. This PR fixes the problem by filtering the index directories of the target datamap when using distributed datamaps.

Tests show that lucene is not affected by this. On the other hand, lucene gets wrong results if we apply this filter.

This closes #2512
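Restated as a standalone Scala predicate (TableDataMapLike is a simplified stand-in for the real TableDataMap, not a CarbonData class):

// stand-in carrying just the fields the filter needs
case class TableDataMapLike(dataMapName: String, indexDirs: Seq[String])

// bloom keeps only its own directories; applying the same filter to lucene
// gives wrong results, as noted above
def dirsForCurrentDataMap(all: Seq[TableDataMapLike], current: String): Seq[String] =
  all.filter(_.dataMapName == current).flatMap(_.indexDirs)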


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/4612e003
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/4612e003
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/4612e003

Branch: refs/heads/master
Commit: 4612e003186ccc6bae89443043bd0db3463f8fc1
Parents: a162897
Author: Manhua 
Authored: Mon Jul 16 19:29:07 2018 +0800
Committer: xuchuanyin 
Committed: Wed Jul 18 09:10:22 2018 +0800

--
 .../bloom/BloomCoarseGrainDataMapFactory.java   | 27 +++--
 .../lucene/LuceneFineGrainDataMapSuite.scala|  7 
 .../bloom/BloomCoarseGrainDataMapSuite.scala| 40 
 3 files changed, 62 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/4612e003/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
--
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
index 35ebd20..4b5bc7c 100644
--- a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
+++ b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
@@ -278,18 +278,21 @@ public class BloomCoarseGrainDataMapFactory extends DataMapFactory<CoarseGrainDataMap>
     if (dataMaps.size() > 0) {
       for (TableDataMap dataMap : dataMaps) {
-        List<CarbonFile> indexFiles;
-        String dmPath = CarbonTablePath
-            .getDataMapStorePath(tablePath, segmentId, dataMap.getDataMapSchema().getDataMapName());
-        FileFactory.FileType fileType = FileFactory.getFileType(dmPath);
-        final CarbonFile dirPath = FileFactory.getCarbonFile(dmPath, fileType);
-        indexFiles = Arrays.asList(dirPath.listFiles(new CarbonFileFilter() {
-          @Override
-          public boolean accept(CarbonFile file) {
-            return file.isDirectory();
-          }
-        }));
-        indexDirs.addAll(indexFiles);
+        // different from lucene, bloom only get corresponding directory of current datamap
+        if (dataMap.getDataMapSchema().getDataMapName().equals(this.dataMapName)) {
+          List<CarbonFile> indexFiles;
+          String dmPath = CarbonTablePath.getDataMapStorePath(tablePath, segmentId,
+              dataMap.getDataMapSchema().getDataMapName());
+          FileFactory.FileType fileType = FileFactory.getFileType(dmPath);
+          final CarbonFile dirPath = FileFactory.getCarbonFile(dmPath, fileType);
+          indexFiles = Arrays.asList(dirPath.listFiles(new CarbonFileFilter() {
+            @Override
+            public boolean accept(CarbonFile file) {
+              return file.isDirectory();
+            }
+          }));
+          indexDirs.addAll(indexFiles);
+        }
       }
     }
     return indexDirs.toArray(new CarbonFile[0]);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/4612e003/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
--
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
index 657a3eb..aebbde4 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
@@ -34,6 +34,10 @@ import org.apache.carbondata.core.datamap.status.DataMapStatusManager
 
 class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
 
+  val originDistributedDatamapStatus = CarbonProperties.getInstance().getProperty(
+    Ca

[2/2] carbondata git commit: Revert "[CARBONDATA-2532][Integration] Carbon to support spark 2.3 version, ColumnVector Interface"

2018-07-17 Thread jackylk
Revert "[CARBONDATA-2532][Integration] Carbon to support spark 2.3 version, 
ColumnVector Interface"

This reverts commit 2b8ae2628d50efcd095696b5bf614eab2fcdb8d2.


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/96fe233a
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/96fe233a
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/96fe233a

Branch: refs/heads/carbonstore
Commit: 96fe233a20c5e8df2584a79671c3257f119f9414
Parents: 0aab4e7
Author: Jacky Li 
Authored: Wed Jul 18 09:16:27 2018 +0800
Committer: Jacky Li 
Committed: Wed Jul 18 09:16:27 2018 +0800

--
 .../carbondata/spark/rdd/CarbonScanRDD.scala|  12 +-
 .../carbondata/spark/rdd/StreamHandoffRDD.scala |  15 +-
 .../vectorreader/ColumnarVectorWrapper.java |  60 +-
 .../VectorizedCarbonRecordReader.java   |  34 +-
 .../stream/CarbonStreamRecordReader.java| 747 --
 .../org/apache/spark/sql/CarbonVectorProxy.java | 222 --
 .../org/apache/spark/sql/CarbonVectorProxy.java | 221 --
 .../org/apache/spark/sql/CarbonVectorProxy.java | 247 --
 .../apache/spark/sql/ColumnVectorFactory.java   |  45 --
 .../streaming/CarbonStreamInputFormat.java  |  46 +-
 .../streaming/CarbonStreamRecordReader.java | 758 +++
 .../carbondata/streaming/CarbonStreamUtils.java |  40 -
 .../streaming/StreamBlockletReader.java |  39 +-
 13 files changed, 838 insertions(+), 1648 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/96fe233a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
--
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
index afd3af2..149f711 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
@@ -60,7 +60,7 @@ import org.apache.carbondata.processing.util.CarbonLoaderUtil
 import org.apache.carbondata.spark.InitInputMetrics
 import org.apache.carbondata.spark.format.{CsvReadSupport, VectorCsvReadSupport}
 import org.apache.carbondata.spark.util.{SparkDataTypeConverterImpl, Util}
-import org.apache.carbondata.streaming.CarbonStreamInputFormat
+import org.apache.carbondata.streaming.{CarbonStreamInputFormat, CarbonStreamRecordReader}
 
 /**
  * This RDD is used to perform query on CarbonData file. Before sending tasks to scan
@@ -431,13 +431,13 @@ class CarbonScanRDD[T: ClassTag](
       // create record reader for row format
       DataTypeUtil.setDataTypeConverter(dataTypeConverterClz.newInstance())
       val inputFormat = new CarbonStreamInputFormat
-      inputFormat.setVectorReader(vectorReader)
-      inputFormat.setInputMetricsStats(inputMetricsStats)
+      val streamReader = inputFormat.createRecordReader(inputSplit, attemptContext)
+        .asInstanceOf[CarbonStreamRecordReader]
+      streamReader.setVectorReader(vectorReader)
+      streamReader.setInputMetricsStats(inputMetricsStats)
       model.setStatisticsRecorder(
         CarbonTimeStatisticsFactory.createExecutorRecorder(model.getQueryId))
-      inputFormat.setModel(model)
-      val streamReader = inputFormat.createRecordReader(inputSplit, attemptContext)
-        .asInstanceOf[RecordReader[Void, Object]]
+      streamReader.setQueryModel(model)
       streamReader
     case FileFormat.EXTERNAL =>
       require(storageFormat.equals("csv"),

http://git-wip-us.apache.org/repos/asf/carbondata/blob/96fe233a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/StreamHandoffRDD.scala
--
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/StreamHandoffRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/StreamHandoffRDD.scala
index 0e8f660..1f3decc 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/StreamHandoffRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/StreamHandoffRDD.scala
@@ -22,7 +22,7 @@ import java.util
 import java.util.{Date, UUID}
 
 import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.mapreduce.{Job, RecordReader, TaskAttemptID, TaskType}
+import org.apache.hadoop.mapreduce.{Job, TaskAttemptID, TaskType}
 import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
 import org.apache.spark.{Partition, SerializableWrita

[1/2] carbondata git commit: Revert "[CARBONDATA-2532][Integration] Carbon to support spark 2.3 version, ColumnVector Interface"

2018-07-17 Thread jackylk
Repository: carbondata
Updated Branches:
  refs/heads/carbonstore 0aab4e7c6 -> 96fe233a2


http://git-wip-us.apache.org/repos/asf/carbondata/blob/96fe233a/streaming/src/main/java/org/apache/carbondata/streaming/CarbonStreamRecordReader.java
--
diff --git a/streaming/src/main/java/org/apache/carbondata/streaming/CarbonStreamRecordReader.java b/streaming/src/main/java/org/apache/carbondata/streaming/CarbonStreamRecordReader.java
new file mode 100644
index 000..c4b501d
--- /dev/null
+++ b/streaming/src/main/java/org/apache/carbondata/streaming/CarbonStreamRecordReader.java
@@ -0,0 +1,758 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.streaming;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.nio.ByteBuffer;
+import java.util.BitSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.carbondata.core.cache.Cache;
+import org.apache.carbondata.core.cache.CacheProvider;
+import org.apache.carbondata.core.cache.CacheType;
+import org.apache.carbondata.core.cache.dictionary.Dictionary;
+import org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
+import org.apache.carbondata.core.metadata.datatype.DataType;
+import org.apache.carbondata.core.metadata.datatype.DataTypes;
+import org.apache.carbondata.core.metadata.encoder.Encoding;
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn;
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
+import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
+import org.apache.carbondata.core.reader.CarbonHeaderReader;
+import org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.core.scan.filter.FilterUtil;
+import org.apache.carbondata.core.scan.filter.GenericQueryType;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.intf.RowImpl;
+import org.apache.carbondata.core.scan.filter.intf.RowIntf;
+import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
+import org.apache.carbondata.core.scan.model.QueryModel;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.core.util.DataTypeUtil;
+import org.apache.carbondata.format.BlockletHeader;
+import org.apache.carbondata.format.FileHeader;
+import org.apache.carbondata.hadoop.CarbonInputSplit;
+import org.apache.carbondata.hadoop.CarbonMultiBlockSplit;
+import org.apache.carbondata.hadoop.InputMetricsStats;
+import org.apache.carbondata.hadoop.api.CarbonTableInputFormat;
+import org.apache.carbondata.processing.util.CarbonDataProcessorUtil;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.FileSplit;
+import org.apache.spark.memory.MemoryMode;
+import org.apache.spark.sql.catalyst.InternalRow;
+import org.apache.spark.sql.catalyst.expressions.GenericInternalRow;
+import org.apache.spark.sql.execution.vectorized.ColumnVector;
+import org.apache.spark.sql.execution.vectorized.ColumnarBatch;
+import org.apache.spark.sql.types.CalendarIntervalType;
+import org.apache.spark.sql.types.Decimal;
+import org.apache.spark.sql.types.DecimalType;
+import org.apache.spark.sql.types.StructField;
+import org.apache.spark.sql.types.S

carbondata git commit: [CARBONDATA-2736][CARBONSTORE] Kafka integration with Carbon StreamSQL

2018-07-17 Thread jackylk
Repository: carbondata
Updated Branches:
  refs/heads/carbonstore 96fe233a2 -> 4b96ed8ca


[CARBONDATA-2736][CARBONSTORE] Kafka integration with Carbon StreamSQL

Modifications in this PR:
1. Pass the source table properties to streamReader.load()
2. Do not pass a schema when calling sparkSession.readStream
3. Remove the querySchema validation against the sink, as a dataframe made from a kafka source does not carry the data schema (the data is written in the 'value' column of the kafka schema)
4. Extract the dataframe with the actual data schema from the kafka source at writeStream

This closes #2495
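A Scala sketch of what the stream-reading path now effectively does for a kafka source; property keys are illustrative, and the shape follows standard Spark structured streaming rather than quoting this patch:

import org.apache.spark.sql.{DataFrame, SparkSession}

def readFromKafkaSource(spark: SparkSession, srcTableProps: Map[String, String]): DataFrame = {
  val df = spark.readStream
    .format("kafka")
    .options(srcTableProps) // e.g. kafka.bootstrap.servers, subscribe
    .load()                 // no user schema: the kafka source fixes its own schema
  // the actual data travels in the binary 'value' column; extract it before writeStream
  df.selectExpr("CAST(value AS STRING) AS value")
}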


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/4b96ed8c
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/4b96ed8c
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/4b96ed8c

Branch: refs/heads/carbonstore
Commit: 4b96ed8ca2b99a55c51a8a81f0c606e13b06add7
Parents: 96fe233
Author: Ajith 
Authored: Thu Jul 12 09:17:22 2018 +0530
Committer: Jacky Li 
Committed: Wed Jul 18 09:36:58 2018 +0800

--
 .../core/metadata/schema/table/CarbonTable.java |  9 
 .../command/carbonTableSchemaCommon.scala   |  3 --
 .../carbondata/stream/StreamJobManager.scala| 32 +
 .../stream/CarbonCreateStreamCommand.scala  | 48 
 4 files changed, 59 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/4b96ed8c/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
index c302b2b..f16d1bb 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
@@ -1123,4 +1123,13 @@ public class CarbonTable implements Serializable {
       table.setLocalDictionaryEnabled(Boolean.parseBoolean("false"));
     }
   }
+
+  /**
+   * Return the format value defined in table properties
+   * @return String as per table properties, null if not defined
+   */
+  public String getFormat() {
+    return getTableInfo().getFactTable().getTableProperties()
+        .get("format");
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/4b96ed8c/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
--
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
index a641329..6cb0dcf 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
@@ -738,9 +738,6 @@ class TableNewProcessor(cm: TableModel) {
     tableInfo.setFactTable(tableSchema)
     val format = cm.tableProperties.get(CarbonCommonConstants.FORMAT)
     if (format.isDefined) {
-      if (!format.get.equalsIgnoreCase("csv")) {
-        CarbonException.analysisException(s"Currently we only support csv as external file format")
-      }
       tableInfo.setFormat(format.get)
       val formatProperties = cm.tableProperties.filter(pair =>
         pair._1.startsWith(s"${format.get.toLowerCase}.")).asJava

http://git-wip-us.apache.org/repos/asf/carbondata/blob/4b96ed8c/integration/spark2/src/main/scala/org/apache/carbondata/stream/StreamJobManager.scala
--
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/stream/StreamJobManager.scala b/integration/spark2/src/main/scala/org/apache/carbondata/stream/StreamJobManager.scala
index 59e924d..470d89a 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/stream/StreamJobManager.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/stream/StreamJobManager.scala
@@ -52,19 +52,23 @@ object StreamJobManager {
     }
   }
 
-  private def validateSinkTable(querySchema: StructType, sink: CarbonTable): Unit = {
+  private def validateSinkTable(validateQuerySchema: Boolean,
+      querySchema: StructType, sink: CarbonTable): Unit = {
     if (!sink.isStreamingSink) {
       throw new MalformedCarbonCommandException(s"Table ${sink.getTableName} is not " +
         "streaming sink table " +

[12/50] [abbrv] carbondata git commit: [CARBONDATA-2637][DataMap] Fix bugs in rebuild datamap

2018-07-17 Thread jackylk
[CARBONDATA-2637][DataMap] Fix bugs in rebuild datamap

In cluster mode, readCommitScope is null while rebuilding the datamap for
segments, which causes an NPE. Here we use the origin segment object,
whose readCommitScope is not null and works fine.

This closes #2493
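The core of the fix in miniature (SegmentLike is a simplified stand-in for the real Segment): resolve the split's segment number against the origin segment objects instead of using the segment deserialized from the split.

// stand-in with just the fields relevant to the fix
case class SegmentLike(segmentNo: String, readCommitScope: AnyRef)

// the origin segments keep a non-null readCommitScope, unlike the segment
// object carried inside the input split in cluster mode
def resolveSegment(originSegments: Seq[SegmentLike], splitSegmentNo: String): Option[SegmentLike] =
  originSegments.find(_.segmentNo == splitSegmentNo)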


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/9d7a9a2a
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/9d7a9a2a
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/9d7a9a2a

Branch: refs/heads/carbonstore
Commit: 9d7a9a2a96b11d9d12b30d13925737c8c3400ab6
Parents: 202d099
Author: xuchuanyin 
Authored: Wed Jul 11 22:24:54 2018 +0800
Committer: Jacky Li 
Committed: Thu Jul 12 17:24:06 2018 +0800

--
 .../datamap/IndexDataMapRebuildRDD.scala| 143 ++-
 1 file changed, 73 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/9d7a9a2a/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala
--
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala b/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala
index 688656d..85466f1 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala
@@ -275,80 +275,83 @@ class IndexDataMapRebuildRDD[K, V](
     val inputMetrics = new CarbonInputMetrics
     TaskMetricsMap.getInstance().registerThreadCallback()
     val inputSplit = split.asInstanceOf[CarbonSparkPartition].split.value
-    val segment = inputSplit.getAllSplits.get(0).getSegment
-    inputMetrics.initBytesReadCallback(context, inputSplit)
-
-    val attemptId = new TaskAttemptID(jobTrackerId, id, TaskType.MAP, split.index, 0)
-    val attemptContext = new TaskAttemptContextImpl(new Configuration(), attemptId)
-    val format = createInputFormat(segment, attemptContext)
-
-    val model = format.createQueryModel(inputSplit, attemptContext)
-    // one query id per table
-    model.setQueryId(queryId)
-    model.setVectorReader(false)
-    model.setRequiredRowId(true)
-
-    var reader: CarbonRecordReader[Array[Object]] = null
-    var refresher: DataMapBuilder = null
-    try {
-      val segmentPropertiesFetcher = DataMapStoreManager.getInstance().getDataMap(carbonTable,
-        BlockletDataMapFactory.DATA_MAP_SCHEMA).getDataMapFactory
-        .asInstanceOf[SegmentPropertiesFetcher]
-      val segmentProperties = segmentPropertiesFetcher.getSegmentProperties(segment)
-
-      // we use task name as shard name to create the folder for this datamap
-      val shardName = CarbonTablePath.getShardName(inputSplit.getAllSplits.get(0).getBlockPath)
-      refresher = dataMapFactory.createBuilder(segment, shardName, segmentProperties)
-      refresher.initialize()
-
-      model.setForcedDetailRawQuery(refresher.isIndexForCarbonRawBytes)
-      val readSupport = if (refresher.isIndexForCarbonRawBytes) {
-        new RawBytesReadSupport(segmentProperties, indexColumns)
-      } else {
-        new OriginalReadSupport(indexColumns.map(_.getDataType))
-      }
-      reader = new CarbonRecordReader[Array[Object]](model, readSupport, inputMetrics)
-      reader.initialize(inputSplit, attemptContext)
-      // skip clear datamap and we will do this adter rebuild
-      reader.setSkipClearDataMapAtClose(true)
-
-      var blockletId = 0
-      var firstRow = true
-      while (reader.nextKeyValue()) {
-        val rowWithPosition = reader.getCurrentValue
-        val size = rowWithPosition.length
-        val pageId = rowWithPosition(size - 2).asInstanceOf[Int]
-        val rowId = rowWithPosition(size - 1).asInstanceOf[Int]
-
-        if (!firstRow && pageId == 0 && rowId == 0) {
-          // new blocklet started, increase blockletId
-          blockletId = blockletId + 1
+    val segmentId = inputSplit.getAllSplits.get(0).getSegment.getSegmentNo
+    val segment = segments.find(p => p.getSegmentNo.equals(segmentId))
+    if (segment.isDefined) {
+      inputMetrics.initBytesReadCallback(context, inputSplit)
+
+      val attemptId = new TaskAttemptID(jobTrackerId, id, TaskType.MAP, split.index, 0)
+      val attemptContext = new TaskAttemptContextImpl(new Configuration(), attemptId)
+      val format = createInputFormat(segment.get, attemptContext)
+
+      val model = format.createQueryModel(inputSplit, attemptContext)
+      // one query id per table
+      model.setQueryId(queryId)
+      model.setVectorReader(false)
+      model.setRequiredRowId(true)
+
+      var reader: CarbonRecordReader[Array[Object]] = null
+

[33/50] [abbrv] carbondata git commit: [CARBONDATA-2727][BloomDataMap] Support create bloom datamap on newly added column

2018-07-17 Thread jackylk
[CARBONDATA-2727][BloomDataMap] Support create bloom datamap on newly added column

Add a result collector with rowId information for datamap rebuild if the table schema has changed;
Use the keygenerator to retrieve the surrogate value of dictIndexColumn from the query result.

This closes #2490


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/81038f55
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/81038f55
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/81038f55

Branch: refs/heads/carbonstore
Commit: 81038f55ef9a582f82305378988f603ded76e524
Parents: aec47e0
Author: Manhua 
Authored: Wed Jul 11 19:39:31 2018 +0800
Committer: xuchuanyin 
Committed: Tue Jul 17 23:31:43 2018 +0800

--
 .../scan/collector/ResultCollectorFactory.java  |  31 ++---
 ...RowIdRestructureBasedRawResultCollector.java | 138 +++
 .../bloom/AbstractBloomDataMapWriter.java   |  72 +-
 .../bloom/BloomCoarseGrainDataMapFactory.java   |   2 +-
 .../datamap/bloom/BloomDataMapBuilder.java  |   8 ++
 .../datamap/bloom/BloomDataMapWriter.java   |  72 ++
 .../datamap/IndexDataMapRebuildRDD.scala| 131 +++---
 .../bloom/BloomCoarseGrainDataMapSuite.scala|  96 +
 8 files changed, 413 insertions(+), 137 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/81038f55/core/src/main/java/org/apache/carbondata/core/scan/collector/ResultCollectorFactory.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/ResultCollectorFactory.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/ResultCollectorFactory.java
index ea4afd1..e0a0b90 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/ResultCollectorFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/ResultCollectorFactory.java
@@ -18,15 +18,7 @@ package org.apache.carbondata.core.scan.collector;
 
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
-import org.apache.carbondata.core.scan.collector.impl.AbstractScannedResultCollector;
-import org.apache.carbondata.core.scan.collector.impl.DictionaryBasedResultCollector;
-import org.apache.carbondata.core.scan.collector.impl.DictionaryBasedVectorResultCollector;
-import org.apache.carbondata.core.scan.collector.impl.RawBasedResultCollector;
-import org.apache.carbondata.core.scan.collector.impl.RestructureBasedDictionaryResultCollector;
-import org.apache.carbondata.core.scan.collector.impl.RestructureBasedRawResultCollector;
-import org.apache.carbondata.core.scan.collector.impl.RestructureBasedVectorResultCollector;
-import org.apache.carbondata.core.scan.collector.impl.RowIdBasedResultCollector;
-import org.apache.carbondata.core.scan.collector.impl.RowIdRawBasedResultCollector;
+import org.apache.carbondata.core.scan.collector.impl.*;
 import org.apache.carbondata.core.scan.executor.infos.BlockExecutionInfo;
 
 /**
@@ -51,14 +43,21 @@ public class ResultCollectorFactory {
     AbstractScannedResultCollector scannerResultAggregator = null;
     if (blockExecutionInfo.isRawRecordDetailQuery()) {
       if (blockExecutionInfo.isRestructuredBlock()) {
-        LOGGER.info("Restructure based raw collector is used to scan and collect the data");
-        scannerResultAggregator = new RestructureBasedRawResultCollector(blockExecutionInfo);
-      } else if (blockExecutionInfo.isRequiredRowId()) {
-        LOGGER.info("RowId based raw collector is used to scan and collect the data");
-        scannerResultAggregator = new RowIdRawBasedResultCollector(blockExecutionInfo);
+        if (blockExecutionInfo.isRequiredRowId()) {
+          LOGGER.info("RowId Restructure based raw ollector is used to scan and collect the data");
+          scannerResultAggregator = new RowIdRestructureBasedRawResultCollector(blockExecutionInfo);
+        } else {
+          LOGGER.info("Restructure based raw collector is used to scan and collect the data");
+          scannerResultAggregator = new RestructureBasedRawResultCollector(blockExecutionInfo);
+        }
       } else {
-        LOGGER.info("Row based raw collector is used to scan and collect the data");
-        scannerResultAggregator = new RawBasedResultCollector(blockExecutionInfo);
+        if (blockExecutionInfo.isRequiredRowId()) {
+          LOGGER.info("RowId based raw collector is used to scan and collect the data");
+          scannerResultAggregator = new RowIdRawBasedResultCollector(blockExecutionInfo);
+        } else {
+          LOGGER.info("Row based raw collector is used to scan and collect the data");
+          scannerResultAggregato
[06/50] [abbrv] carbondata git commit: [CARBONDATA-2720] Remove dead code

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9114036/core/src/test/java/org/apache/carbondata/core/scan/expression/conditional/GreaterThanExpressionUnitTest.java
--
diff --git a/core/src/test/java/org/apache/carbondata/core/scan/expression/conditional/GreaterThanExpressionUnitTest.java b/core/src/test/java/org/apache/carbondata/core/scan/expression/conditional/GreaterThanExpressionUnitTest.java
index 38a7222..5391861 100644
--- a/core/src/test/java/org/apache/carbondata/core/scan/expression/conditional/GreaterThanExpressionUnitTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/scan/expression/conditional/GreaterThanExpressionUnitTest.java
@@ -106,6 +106,40 @@ public class GreaterThanExpressionUnitTest {
 
   }
 
+  @Test public void testEvaluateForGreaterThanExpressionWithShortDataType1()
+      throws FilterUnsupportedException, FilterIllegalMemberException {
+    ColumnExpression right = new ColumnExpression("id", DataTypes.SHORT);
+    right.setColIndex(0);
+    ColumnExpression left = new ColumnExpression("id", DataTypes.SHORT);
+    left.setColIndex(1);
+    greaterThanExpression = new GreaterThanExpression(left, right);
+    RowImpl value = new RowImpl();
+    Short[] row = { 170 };
+    Short[] row1 = { 70 };
+    Object objectRow[] = { row, row1 };
+    value.setValues(objectRow);
+
+    new MockUp<ExpressionResult>() {
+      Boolean returnMockFlag = true;
+
+      @Mock public Short getShort() {
+        if (returnMockFlag) {
+          returnMockFlag = false;
+          return 70;
+        } else {
+          return 170;
+        }
+      }
+    };
+
+    ExpressionResult result = greaterThanExpression.evaluate(value);
+    assertFalse(result.getBoolean());
+
+  }
+
   @Test public void testEvaluateForGreaterThanExpressionWithDoubleDataType()
       throws FilterUnsupportedException, FilterIllegalMemberException {
     ColumnExpression right = new ColumnExpression("right_contact", DataTypes.DOUBLE);
@@ -217,6 +251,51 @@ public class GreaterThanExpressionUnitTest {
     }
   }
 
+  @Test public void testEvaluateForGreaterThanExpressionWithTimestampDataType1()
+      throws FilterUnsupportedException, FilterIllegalMemberException {
+    try {
+      ColumnExpression left = new ColumnExpression("timestamp", DataTypes.TIMESTAMP);
+      left.setColIndex(0);
+      ColumnExpression right = new ColumnExpression("timestamp", DataTypes.TIMESTAMP);
+      right.setColIndex(1);
+
+      greaterThanExpression = new GreaterThanExpression(left, right);
+
+      RowImpl value = new RowImpl();
+
+      DateFormat dateFormat = new SimpleDateFormat("dd/MM/yyyy");
+
+      Date date = dateFormat.parse("23/09/2007");
+      long time = date.getTime();
+      Timestamp[] row = { new Timestamp(time) };
+
+      Date date1 = dateFormat.parse("24/09/2007");
+      long time1 = date1.getTime();
+      Timestamp[] row1 = { new Timestamp(time1) };
+
+      Object objectRow[] = { row1, row };
+      value.setValues(objectRow);
+
+      new MockUp<ExpressionResult>() {
+        Boolean returnMockFlag = true;
+
+        @Mock public Long getTime() {
+          if (returnMockFlag) {
+            returnMockFlag = false;
+            return 1190505600L;
+          } else {
+            return 1190592000L;
+          }
+        }
+      };
+
+      ExpressionResult result = greaterThanExpression.evaluate(value);
+      assertFalse(result.getBoolean());
+    } catch (ParseException e) {
+      System.out.println("Error while parsing " + e.getMessage());
+    }
+  }
+
   @Test public void testEvaluateForGreaterThanExpressionWithLongDataType()
       throws FilterUnsupportedException, FilterIllegalMemberException {
     ColumnExpression right = new ColumnExpression("contact", DataTypes.LONG);
@@ -247,6 +326,36 @@ public class GreaterThanExpressionUnitTest {
     assertTrue(result.getBoolean());
   }
 
+  @Test public void testEvaluateForGreaterThanExpressionWithLongDataType1()
+      throws FilterUnsupportedException, FilterIllegalMemberException {
+    ColumnExpression right = new ColumnExpression("contact", DataTypes.LONG);
+    right.setColIndex(0);
+    ColumnExpression left = new ColumnExpression("contact", DataTypes.LONG);
+    left.setColIndex(1);
+    greaterThanExpression = new GreaterThanExpression(left, right);
+    RowImpl value = new RowImpl();
+    Long[] row = { 1234567654321L };
+    Long[] row1 = { 123456765432234L };
+    Object objectRow[] = { row1, row };
+    value.setValues(objectRow);
+
+    new MockUp<ExpressionResult>() {
+      Boolean returnMockFlag = true;
+
+      @Mock public Long getLong() {
+        if (returnMockFlag) {
+          returnMockFlag = false;
+          return 1234567654321L;
+        } else {
+          return 123456765432234L;
+        }
+      }
+    };
+
+    ExpressionResult result = greaterThanExpression.evaluate(value);
+    assertFalse(result.getBoolean());
+  }
+
   @Test public void testEvaluateForGreaterThanExpressionWithDeci

[43/50] [abbrv] carbondata git commit: [CARBONDATA-2613] Support csv based carbon table

2018-07-17 Thread jackylk
[CARBONDATA-2613] Support csv based carbon table

1. create csv based carbon table using
CREATE TABLE fact_table (col1 bigint, col2 string, ..., col100 string)
STORED BY 'CarbonData'
TBLPROPERTIES(
  'format'='csv',
  'csv.delimiter'=',',
  'csv.header'='col1,col2,col100')

2. Load data to this table using
ALTER TABLE fact_table ADD SEGMENT LOCATION 'path/to/data1'

This closes #2374


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/2009009a
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/2009009a
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/2009009a

Branch: refs/heads/carbonstore
Commit: 2009009aa01348d716f9c3afb9e61de37fa29aef
Parents: 4437920
Author: xuchuanyin 
Authored: Wed Jun 13 09:03:28 2018 +0800
Committer: Jacky Li 
Committed: Wed Jul 18 10:10:52 2018 +0800

--
 .../carbondata/common/annotations/Since.java|  38 ++
 .../core/constants/CarbonCommonConstants.java   |   4 +
 .../core/metadata/schema/table/TableInfo.java   |  52 ++
 .../core/statusmanager/FileFormat.java  |  10 +-
 .../statusmanager/FileFormatProperties.java |  32 ++
 .../core/statusmanager/LoadMetadataDetails.java |  16 +-
 .../hadoop/CarbonMultiBlockSplit.java   |   6 +
 .../carbondata/hadoop/CsvRecordReader.java  | 506 +++
 .../hadoop/api/CarbonFileInputFormat.java   |  11 +-
 .../hadoop/api/CarbonInputFormat.java   | 120 -
 .../hadoop/api/CarbonTableInputFormat.java  |  12 +-
 .../datawithoutheader_delimiter_separator.csv   |  10 +
 .../CsvBasedCarbonTableSuite.scala  | 244 +
 .../carbondata/spark/format/CsvReadSupport.java | 107 
 .../spark/format/VectorCsvReadSupport.java  | 130 +
 .../carbondata/spark/rdd/CarbonScanRDD.scala|  77 ++-
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala |   1 +
 .../command/carbonTableSchemaCommon.scala   |  10 +
 .../management/CarbonAddSegmentCommand.scala| 135 +
 .../sql/parser/CarbonSpark2SqlParser.scala  |  13 +-
 20 files changed, 1499 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/2009009a/common/src/main/java/org/apache/carbondata/common/annotations/Since.java
--
diff --git a/common/src/main/java/org/apache/carbondata/common/annotations/Since.java b/common/src/main/java/org/apache/carbondata/common/annotations/Since.java
new file mode 100644
index 000..b7e4391
--- /dev/null
+++ b/common/src/main/java/org/apache/carbondata/common/annotations/Since.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.common.annotations;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * The annotation indicates that the version number since a member or a type has been present.
+ */
+@Documented
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.FIELD, ElementType.TYPE, ElementType.METHOD})
+public @interface Since {
+  /**
+   * the value indicating a version number since this member
+   * or type has been present.
+   */
+  String value();
+}
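Illustrative use of the new annotation from Scala; the annotated class and version string are assumptions, not taken from this patch:

import org.apache.carbondata.common.annotations.Since

// marks the version in which this (hypothetical) class first appeared
@Since("1.4.1")
class CsvFormatOptions(val delimiter: String, val header: String)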

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2009009a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index ad3b0d3..75d6014 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -966,6 +966,8 @@ public final class CarbonCommonConstants {
*/
   public static final String DICTIONARY_PATH = "

[03/50] [abbrv] carbondata git commit: [CARBONDATA-2720] Remove dead code

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9114036/processing/src/main/java/org/apache/carbondata/processing/loading/steps/DataWriterProcessorStepImpl.java
--
diff --git 
a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/DataWriterProcessorStepImpl.java
 
b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/DataWriterProcessorStepImpl.java
index 768dedb..caf121f 100644
--- 
a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/DataWriterProcessorStepImpl.java
+++ 
b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/DataWriterProcessorStepImpl.java
@@ -42,7 +42,6 @@ import org.apache.carbondata.core.util.path.CarbonTablePath;
 import org.apache.carbondata.processing.datamap.DataMapWriterListener;
 import org.apache.carbondata.processing.loading.AbstractDataLoadProcessorStep;
 import org.apache.carbondata.processing.loading.CarbonDataLoadConfiguration;
-import org.apache.carbondata.processing.loading.DataField;
 import 
org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException;
 import org.apache.carbondata.processing.loading.row.CarbonRowBatch;
 import org.apache.carbondata.processing.store.CarbonFactDataHandlerModel;
@@ -78,10 +77,6 @@ public class DataWriterProcessorStepImpl extends 
AbstractDataLoadProcessorStep {
 
CarbonUtil.getLocalDictionaryModel(configuration.getTableSpec().getCarbonTable());
   }
 
-  @Override public DataField[] getOutput() {
-return child.getOutput();
-  }
-
   @Override public void initialize() throws IOException {
 super.initialize();
 child.initialize();
@@ -187,7 +182,7 @@ public class DataWriterProcessorStepImpl extends 
AbstractDataLoadProcessorStep {
   if (rowsNotExist) {
 rowsNotExist = false;
 dataHandler = CarbonFactHandlerFactory
-.createCarbonFactHandler(model, 
CarbonFactHandlerFactory.FactHandlerType.COLUMNAR);
+.createCarbonFactHandler(model);
 dataHandler.initialise();
   }
   processBatch(insideRangeIterator.next(), dataHandler);
@@ -260,10 +255,6 @@ public class DataWriterProcessorStepImpl extends 
AbstractDataLoadProcessorStep {
 rowCounter.getAndAdd(1);
   }
 
-  @Override protected CarbonRow processRow(CarbonRow row) {
-return null;
-  }
-
   @Override public void close() {
 if (!closed) {
   super.close();

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9114036/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepImpl.java
--
diff --git 
a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepImpl.java
 
b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepImpl.java
index 9521db4..c9f5fcc 100644
--- 
a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepImpl.java
+++ 
b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepImpl.java
@@ -85,10 +85,6 @@ public class InputProcessorStepImpl extends 
AbstractDataLoadProcessorStep {
 return outIterators;
   }
 
-  @Override protected CarbonRow processRow(CarbonRow row) {
-return null;
-  }
-
   @Override public void close() {
 if (!closed) {
   super.close();

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9114036/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepWithNoConverterImpl.java
--
diff --git 
a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepWithNoConverterImpl.java
 
b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepWithNoConverterImpl.java
index 7c4f161..b6858e1 100644
--- 
a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepWithNoConverterImpl.java
+++ 
b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepWithNoConverterImpl.java
@@ -169,10 +169,6 @@ public class InputProcessorStepWithNoConverterImpl extends 
AbstractDataLoadProce
 return iterators;
   }
 
-  @Override protected CarbonRow processRow(CarbonRow row) {
-return null;
-  }
-
   @Override public void close() {
 if (!closed) {
   super.close();

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9114036/processing/src/main/java/org/apache/carbondata/processing/loading/steps/JsonInputProcessorStepImpl.java
--
diff --git 
a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/JsonInputProcessorStepImpl.java
 
b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/JsonInputProcessorStepImpl.j

[48/50] [abbrv] carbondata git commit: [CARBONDATA-2705][CarbonStore] CarbonStore Java API and Implementation

2018-07-17 Thread jackylk
[CARBONDATA-2705][CarbonStore] CarbonStore Java API and Implementation

Support two implementations:
1. LocalCarbonStore for usage in local mode
2. DistributedCarbonStore leveraging multiple servers (Master and Workers) via RPC
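
For orientation, a minimal client-side sketch of how such a store API could be
exercised. Every method and constructor signature below is an assumption
inferred from the class names in this diff, not the actual API:

    import org.apache.carbondata.store.api.CarbonStore;
    import org.apache.carbondata.store.api.CarbonStoreFactory;
    import org.apache.carbondata.store.api.conf.StoreConf;
    import org.apache.carbondata.store.api.descriptor.SelectDescriptor;
    import org.apache.carbondata.store.api.descriptor.TableIdentifier;

    public class CarbonStoreSketch {
      public static void main(String[] args) throws Exception {
        StoreConf conf = new StoreConf();                             // assumed no-arg constructor
        CarbonStore store = CarbonStoreFactory.getLocalStore(conf);   // assumed factory method
        TableIdentifier table = new TableIdentifier("t1", "default"); // assumed (table, database)
        SelectDescriptor select = new SelectDescriptor(table, null, null, 100); // assumed arguments
        // A select call on the store would return rows; the concrete entry
        // points are defined in DataStore.java and SqlStore.java above.
      }
    }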

This closes #2473


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/85cdc404
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/85cdc404
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/85cdc404

Branch: refs/heads/carbonstore
Commit: 85cdc404598dbcdd0d4cfb055419c39104985483
Parents: 2009009
Author: Jacky Li 
Authored: Mon Jul 9 12:23:49 2018 +0800
Committer: Jacky Li 
Committed: Wed Jul 18 10:10:54 2018 +0800

--
 .../core/datastore/impl/FileFactory.java|   5 +
 .../schema/table/TableSchemaBuilder.java|   3 +-
 .../detailquery/SearchModeTestCase.scala|   3 +-
 .../carbondata/store/SparkCarbonStore.scala | 203 
 .../apache/carbondata/store/WorkerManager.scala |  75 +++
 .../org/apache/spark/sql/CarbonSession.scala|  59 ++-
 .../carbondata/store/SparkCarbonStoreTest.scala |  86 ---
 .../processing/loading/DataLoadExecutor.java|   6 +-
 .../processing/util/CarbonLoaderUtil.java   |   2 +-
 store/core/pom.xml  |   5 +
 .../carbondata/store/CarbonRowReadSupport.java  |  53 --
 .../apache/carbondata/store/CarbonStore.java|  68 ---
 .../carbondata/store/LocalCarbonStore.java  | 130 -
 .../carbondata/store/MetaCachedCarbonStore.java |  59 ---
 .../carbondata/store/api/CarbonStore.java   |  32 ++
 .../store/api/CarbonStoreFactory.java   |  93 
 .../apache/carbondata/store/api/DataStore.java  |  51 ++
 .../apache/carbondata/store/api/MetaStore.java  |  50 ++
 .../apache/carbondata/store/api/SqlStore.java   |  34 ++
 .../carbondata/store/api/conf/StoreConf.java| 191 +++
 .../store/api/descriptor/LoadDescriptor.java| 114 
 .../store/api/descriptor/SelectDescriptor.java  | 111 
 .../store/api/descriptor/TableDescriptor.java   | 174 +++
 .../store/api/descriptor/TableIdentifier.java   |  37 ++
 .../exception/ExecutionTimeoutException.java|  22 +
 .../store/api/exception/SchedulerException.java |  26 +
 .../store/api/exception/StoreException.java |  33 ++
 .../apache/carbondata/store/conf/StoreConf.java | 185 ---
 .../exception/ExecutionTimeoutException.java|  22 -
 .../store/exception/StoreException.java |  29 --
 .../store/exception/WorkerTooBusyException.java |  26 -
 .../carbondata/store/impl/CarbonStoreBase.java  | 177 +++
 .../store/impl/DistributedCarbonStore.java  | 232 +
 .../store/impl/IndexedRecordReader.java | 183 +++
 .../carbondata/store/impl/LocalCarbonStore.java | 164 ++
 .../carbondata/store/impl/MetaProcessor.java| 170 ++
 .../store/impl/SegmentTxnManager.java   | 121 +
 .../apache/carbondata/store/impl/Status.java|  28 +
 .../carbondata/store/impl/master/Master.java| 161 ++
 .../store/impl/master/RegistryServiceImpl.java  |  53 ++
 .../store/impl/master/Schedulable.java  |  76 +++
 .../carbondata/store/impl/master/Scheduler.java | 137 +
 .../store/impl/rpc/RegistryService.java |  32 ++
 .../store/impl/rpc/ServiceFactory.java  |  43 ++
 .../carbondata/store/impl/rpc/StoreService.java |  40 ++
 .../store/impl/rpc/model/BaseResponse.java  |  69 +++
 .../store/impl/rpc/model/LoadDataRequest.java   |  60 +++
 .../store/impl/rpc/model/QueryResponse.java |  73 +++
 .../impl/rpc/model/RegisterWorkerRequest.java   |  73 +++
 .../impl/rpc/model/RegisterWorkerResponse.java  |  54 ++
 .../carbondata/store/impl/rpc/model/Scan.java   | 108 
 .../store/impl/rpc/model/ShutdownRequest.java   |  53 ++
 .../store/impl/rpc/model/ShutdownResponse.java  |  61 +++
 .../store/impl/worker/RequestHandler.java   | 166 ++
 .../store/impl/worker/StoreServiceImpl.java |  77 +++
 .../carbondata/store/impl/worker/Worker.java| 166 ++
 .../apache/carbondata/store/master/Master.java  | 522 ---
 .../carbondata/store/rpc/RegistryService.java   |  32 --
 .../carbondata/store/rpc/ServiceFactory.java|  43 --
 .../carbondata/store/rpc/StoreService.java  |  40 --
 .../store/rpc/impl/IndexedRecordReader.java | 183 ---
 .../store/rpc/impl/RegistryServiceImpl.java |  54 --
 .../store/rpc/impl/RequestHandler.java  | 218 
 .../carbondata/store/rpc/impl/Status.java   |  28 -
 .../store/rpc/impl/StoreServiceImpl.java|  78 ---
 .../store/rpc/model/BaseResponse.java   |  69 ---
 .../store/rpc/model/LoadDataRequest.java|  60 ---
 .../store/rpc/model/QueryRequest.java   | 108 
 .../store/rpc/model/QueryResponse.java  |  73 ---
 .../store/rpc/model/RegisterWorkerRequest.java  |  73 ---
 .../store

[27/50] [abbrv] carbondata git commit: [CARBONDATA-2482] Pass uuid while writing segment file if possible

2018-07-17 Thread jackylk
[CARBONDATA-2482] Pass uuid while writing segment file if possible

Pass the uuid (segmentFileName) to the writeSegmentFile method if possible.

Problem:
When a supporting table depends on the segmentFileName of the main table, the
query fails because the file is expected to have the same name as that of the
main table. Mostly in the merge index case, the segmentFile will be rewritten
for that segment.

Solution:
Whenever the supporting table's segmentFileName must be the same as the main
table's, pass that name as the UUID in the merge index flow instead of taking
a new timestamp.
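
A condensed sketch of the changed call flow (abridged from the diff below; the
elided lines resolve the segment file store and index files as before):

    // The uuid parameter is threaded through to
    // writeMergeIndexFileBasedOnSegmentFile so the rewritten merge index keeps
    // the caller-supplied segment file name instead of a fresh timestamp.
    private String mergeCarbonIndexFilesOfSegment(String segmentId, String tablePath,
        List<String> indexFileNamesTobeAdded, boolean readFileFooterFromCarbonDataFile,
        String uuid) throws IOException {
      // ... resolve segment file store (sfs) and index files as before ...
      return writeMergeIndexFileBasedOnSegmentFile(
          segmentId, indexFileNamesTobeAdded, sfs, indexFiles, uuid);
    }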

This closes #2307


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/6c5abddf
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/6c5abddf
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/6c5abddf

Branch: refs/heads/carbonstore
Commit: 6c5abddfb125b2dd51c9a989f003fe2bdc066d6d
Parents: eb604fd
Author: dhatchayani 
Authored: Tue May 15 12:24:01 2018 +0530
Committer: ravipesala 
Committed: Mon Jul 16 10:34:31 2018 +0530

--
 .../core/datamap/DataMapStoreManager.java   |  3 +-
 .../core/writer/CarbonIndexFileMergeWriter.java | 40 ++--
 .../sdv/generated/MergeIndexTestCase.scala  |  8 ++--
 .../CarbonIndexFileMergeTestCase.scala  | 18 -
 .../TestStreamingTableWithRowParser.scala   |  2 +-
 .../store/CarbonFactDataHandlerModel.java   |  3 +-
 .../processing/util/CarbonLoaderUtil.java   | 10 +++--
 7 files changed, 35 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/6c5abddf/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
 
b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
index 8ce302b..475ec01 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
@@ -584,7 +584,8 @@ public final class DataMapStoreManager {
   SegmentRefreshInfo segmentRefreshInfo =
   seg.getSegmentRefreshInfo(updateVo);
   String segmentId = seg.getSegmentNo();
-  if (segmentRefreshTime.get(segmentId) == null) {
+  if (segmentRefreshTime.get(segmentId) == null
+  && segmentRefreshInfo.getSegmentUpdatedTimestamp() != null) {
 segmentRefreshTime.put(segmentId, segmentRefreshInfo);
 return true;
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6c5abddf/core/src/main/java/org/apache/carbondata/core/writer/CarbonIndexFileMergeWriter.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/writer/CarbonIndexFileMergeWriter.java
 
b/core/src/main/java/org/apache/carbondata/core/writer/CarbonIndexFileMergeWriter.java
index 80a46cb..b080f52 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/writer/CarbonIndexFileMergeWriter.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/writer/CarbonIndexFileMergeWriter.java
@@ -67,7 +67,7 @@ public class CarbonIndexFileMergeWriter {
*/
   private String mergeCarbonIndexFilesOfSegment(String segmentId,
   String tablePath, List indexFileNamesTobeAdded,
-  boolean readFileFooterFromCarbonDataFile) throws IOException {
+  boolean readFileFooterFromCarbonDataFile, String uuid) throws 
IOException {
 Segment segment = Segment.getSegment(segmentId, tablePath);
 String segmentPath = CarbonTablePath.getSegmentPath(tablePath, segmentId);
 CarbonFile[] indexFiles;
@@ -85,7 +85,7 @@ public class CarbonIndexFileMergeWriter {
 readFileFooterFromCarbonDataFile, segmentPath, indexFiles, 
segmentId);
   } else {
 return writeMergeIndexFileBasedOnSegmentFile(
-segmentId, indexFileNamesTobeAdded, sfs, indexFiles);
+segmentId, indexFileNamesTobeAdded, sfs, indexFiles, uuid);
   }
 }
 return null;
@@ -111,10 +111,9 @@ public class CarbonIndexFileMergeWriter {
 return null;
   }
 
-  private String writeMergeIndexFileBasedOnSegmentFile(
-  String segmentId,
-  List indexFileNamesTobeAdded,
-  SegmentFileStore segmentFileStore, CarbonFile[] indexFiles) throws 
IOException {
+  private String writeMergeIndexFileBasedOnSegmentFile(String segmentId,
+  List indexFileNamesTobeAdded, SegmentFileStore segmentFileStore,
+  CarbonFile[] indexFiles, String uuid) throws IOException {
 SegmentIndexFileStore fileStore = new SegmentIndexFileStore();
 fileStore
 .readAllIIndexOfSegment(segmentFileStore.get

[46/50] [abbrv] carbondata git commit: [CARBONDATA-2705][CarbonStore] CarbonStore Java API and Implementation

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/85cdc404/store/core/src/main/java/org/apache/carbondata/store/impl/rpc/model/QueryResponse.java
--
diff --git 
a/store/core/src/main/java/org/apache/carbondata/store/impl/rpc/model/QueryResponse.java
 
b/store/core/src/main/java/org/apache/carbondata/store/impl/rpc/model/QueryResponse.java
new file mode 100644
index 000..304fd0f
--- /dev/null
+++ 
b/store/core/src/main/java/org/apache/carbondata/store/impl/rpc/model/QueryResponse.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.store.impl.rpc.model;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.Serializable;
+
+import org.apache.carbondata.common.annotations.InterfaceAudience;
+import org.apache.carbondata.core.util.ObjectSerializationUtil;
+
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
+
+@InterfaceAudience.Internal
+public class QueryResponse extends BaseResponse implements Serializable, 
Writable {
+  private int queryId;
+  private Object[][] rows;
+
+  public QueryResponse() {
+super();
+  }
+
+  public QueryResponse(int queryId, int status, String message, Object[][] 
rows) {
+super(status, message);
+this.queryId = queryId;
+this.rows = rows;
+  }
+
+  public int getQueryId() {
+return queryId;
+  }
+
+
+  public Object[][] getRows() {
+return rows;
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+super.write(out);
+out.writeInt(queryId);
+WritableUtils.writeCompressedByteArray(out, 
ObjectSerializationUtil.serialize(rows));
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+super.readFields(in);
+queryId = in.readInt();
+try {
+  rows = (Object[][])ObjectSerializationUtil.deserialize(
+  WritableUtils.readCompressedByteArray(in));
+} catch (ClassNotFoundException e) {
+  throw new IOException(e);
+}
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/85cdc404/store/core/src/main/java/org/apache/carbondata/store/impl/rpc/model/RegisterWorkerRequest.java
--
diff --git 
a/store/core/src/main/java/org/apache/carbondata/store/impl/rpc/model/RegisterWorkerRequest.java
 
b/store/core/src/main/java/org/apache/carbondata/store/impl/rpc/model/RegisterWorkerRequest.java
new file mode 100644
index 000..5f223d6
--- /dev/null
+++ 
b/store/core/src/main/java/org/apache/carbondata/store/impl/rpc/model/RegisterWorkerRequest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.store.impl.rpc.model;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.Serializable;
+
+import org.apache.carbondata.common.annotations.InterfaceAudience;
+
+import org.apache.hadoop.io.Writable;
+
+@InterfaceAudience.Internal
+public class RegisterWorkerRequest implements Serializable, Writable {
+  private String hostAddress;
+  private int port;
+  private int cores;
+
+  public RegisterWorkerRequest() {
+  }
+
+  public RegisterWorkerRequest(String hostAddress, int port, int cores) {
+this.hostAddress = hostAddress;
+this.port = port;
+this.cores = cores;
+  }
+
+  public String getHost

[11/50] [abbrv] carbondata git commit: [CARBONDATA-2655][BloomDataMap] BloomFilter datamap support in operator

2018-07-17 Thread jackylk
[CARBONDATA-2655][BloomDataMap] BloomFilter datamap support in operator

Now queries with an IN expression on a bloom index column can leverage the
BloomFilter datamap.
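
Conceptually, an IN filter is decomposed into one BloomFilter probe per literal
in the value list, i.e. the union of the equivalent equal-to lookups. A
condensed sketch, assuming buildQueryModelForIn iterates the list members
(method names are taken from the diff below; the list accessor is assumed):

    private List<BloomQueryModel> buildQueryModelForIn(ColumnExpression column,
        ListExpression valueList)
        throws DictionaryGenerationException, UnsupportedEncodingException {
      List<BloomQueryModel> models = new ArrayList<>();
      for (Expression literal : valueList.getChildren()) {  // assumed accessor for the IN members
        models.add(buildQueryModelForEqual(column, (LiteralExpression) literal));
      }
      return models;
    }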

This closes #2445


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/202d099d
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/202d099d
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/202d099d

Branch: refs/heads/carbonstore
Commit: 202d099d631571a96acdf781749561ad3f0da36a
Parents: f911403
Author: xuchuanyin 
Authored: Sat Jul 7 22:19:53 2018 +0800
Committer: Jacky Li 
Committed: Thu Jul 12 16:38:51 2018 +0800

--
 .../datamap/bloom/BloomCoarseGrainDataMap.java  | 48 ++--
 .../bloom/BloomCoarseGrainDataMapFactory.java   |  1 +
 .../bloom/BloomCoarseGrainDataMapSuite.scala| 36 +++
 3 files changed, 82 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/202d099d/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMap.java
--
diff --git 
a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMap.java
 
b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMap.java
index 01bd804..96f3495 100644
--- 
a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMap.java
+++ 
b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMap.java
@@ -55,6 +55,8 @@ import 
org.apache.carbondata.core.scan.expression.ColumnExpression;
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.expression.LiteralExpression;
 import 
org.apache.carbondata.core.scan.expression.conditional.EqualToExpression;
+import org.apache.carbondata.core.scan.expression.conditional.InExpression;
+import org.apache.carbondata.core.scan.expression.conditional.ListExpression;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.core.util.CarbonUtil;
@@ -178,6 +180,7 @@ public class BloomCoarseGrainDataMap extends 
CoarseGrainDataMap {
   private List createQueryModel(Expression expression)
   throws DictionaryGenerationException, UnsupportedEncodingException {
 List queryModels = new ArrayList();
+// bloom datamap only supports the equalTo and In operators for now
 if (expression instanceof EqualToExpression) {
   Expression left = ((EqualToExpression) expression).getLeft();
   Expression right = ((EqualToExpression) expression).getRight();
@@ -186,7 +189,7 @@ public class BloomCoarseGrainDataMap extends 
CoarseGrainDataMap {
 column = ((ColumnExpression) left).getColumnName();
 if (this.name2Col.containsKey(column)) {
   BloomQueryModel bloomQueryModel =
-  buildQueryModelFromExpression((ColumnExpression) left, 
(LiteralExpression) right);
+  buildQueryModelForEqual((ColumnExpression) left, 
(LiteralExpression) right);
   queryModels.add(bloomQueryModel);
 }
 return queryModels;
@@ -194,10 +197,35 @@ public class BloomCoarseGrainDataMap extends 
CoarseGrainDataMap {
 column = ((ColumnExpression) right).getColumnName();
 if (this.name2Col.containsKey(column)) {
   BloomQueryModel bloomQueryModel =
-  buildQueryModelFromExpression((ColumnExpression) right, 
(LiteralExpression) left);
+  buildQueryModelForEqual((ColumnExpression) right, 
(LiteralExpression) left);
   queryModels.add(bloomQueryModel);
 }
 return queryModels;
+  } else {
+LOGGER.warn("BloomFilter can only support the 'equal' filter like 'Col 
= PlainValue'");
+  }
+} else if (expression instanceof InExpression) {
+  Expression left = ((InExpression) expression).getLeft();
+  Expression right = ((InExpression) expression).getRight();
+  String column;
+  if (left instanceof ColumnExpression && right instanceof ListExpression) 
{
+column = ((ColumnExpression) left).getColumnName();
+if (this.name2Col.containsKey(column)) {
+  List models =
+  buildQueryModelForIn((ColumnExpression) left, (ListExpression) 
right);
+  queryModels.addAll(models);
+}
+return queryModels;
+  } else if (left instanceof ListExpression && right instanceof 
ColumnExpression) {
+column = ((ColumnExpression) right).getColumnName();
+if (this.name2Col.containsKey(column)) {
+  List models =
+  buildQueryModelForIn((ColumnExpression) right, (ListExpression) 
left);
+  queryModels.ad

[05/50] [abbrv] carbondata git commit: [CARBONDATA-2720] Remove dead code

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9114036/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneCoarseGrainDataMap.java
--
diff --git 
a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneCoarseGrainDataMap.java
 
b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneCoarseGrainDataMap.java
deleted file mode 100644
index 77b5347..000
--- 
a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneCoarseGrainDataMap.java
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.datamap.lucene;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.carbondata.common.annotations.InterfaceAudience;
-import org.apache.carbondata.common.logging.LogService;
-import org.apache.carbondata.common.logging.LogServiceFactory;
-import org.apache.carbondata.core.datamap.dev.DataMapModel;
-import org.apache.carbondata.core.datamap.dev.cgdatamap.CoarseGrainDataMap;
-import org.apache.carbondata.core.datastore.block.SegmentProperties;
-import org.apache.carbondata.core.datastore.impl.FileFactory;
-import org.apache.carbondata.core.indexstore.Blocklet;
-import org.apache.carbondata.core.indexstore.PartitionSpec;
-import org.apache.carbondata.core.memory.MemoryException;
-import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
-import org.apache.lucene.queryparser.classic.ParseException;
-import org.apache.lucene.queryparser.classic.QueryParser;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.store.Directory;
-import org.apache.solr.store.hdfs.HdfsDirectory;
-
-@InterfaceAudience.Internal
-public class LuceneCoarseGrainDataMap extends CoarseGrainDataMap {
-
-  /**
-   * log information
-   */
-  private static final LogService LOGGER =
-  
LogServiceFactory.getLogService(LuceneCoarseGrainDataMap.class.getName());
-
-  public static final int BLOCKID_ID = 0;
-
-  public static final int BLOCKLETID_ID = 1;
-
-  public static final int PAGEID_ID = 2;
-
-  public static final int ROWID_ID = 3;
-  /**
-   * searcher object for this datamap
-   */
-  private IndexSearcher indexSearcher = null;
-
-  /**
-   * default max values to return
-   */
-  private static int MAX_RESULT_NUMBER = 100;
-
-  /**
-   * analyzer for lucene index
-   */
-  private Analyzer analyzer;
-
-  LuceneCoarseGrainDataMap(Analyzer analyzer) {
-this.analyzer = analyzer;
-  }
-
-  /**
-   * It is called to load the data map to memory or to initialize it.
-   */
-  @Override
-  public void init(DataMapModel dataMapModel) throws MemoryException, 
IOException {
-// get this path from file path
-Path indexPath = FileFactory.getPath(dataMapModel.getFilePath());
-
-LOGGER.info("Lucene index read path " + indexPath.toString());
-
-// get file system , use hdfs file system , realized in solr project
-FileSystem fs = FileFactory.getFileSystem(indexPath);
-
-// check this path valid
-if (!fs.exists(indexPath)) {
-  String errorMessage = String.format("index directory %s not exists.", 
indexPath);
-  LOGGER.error(errorMessage);
-  throw new IOException(errorMessage);
-}
-
-if (!fs.isDirectory(indexPath)) {
-  String errorMessage = String.format("error index path %s, must be 
directory", indexPath);
-  LOGGER.error(errorMessage);
-  throw new IOException(errorMessage);
-}
-
-// 

[02/50] [abbrv] carbondata git commit: [CARBONDATA-2712] Added fix for Local Dictionary Exclude for multi level complex columns

2018-07-17 Thread jackylk
[CARBONDATA-2712] Added fix for Local Dictionary Exclude for multi level 
complex columns

What was the problem?
When Local Dictionary Exclude was defined for multi-level complex columns, the
columns were still considered for Local Dictionary Include.

What has been changed?
The dimension ordinal is now taken from the value returned by the recursive
traversal method, so the child columns of a complex column are skipped
correctly.
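
A simplified skeleton of the corrected traversal (abbreviated from
unsetLocalDictForComplexColumns in the CarbonUtil diff below):

    // The ordinal returned by the recursive call must be consumed; otherwise
    // the children of a complex column are revisited and wrongly kept for
    // local dictionary.
    private static int skipComplexChildren(List<ColumnSchema> allColumns,
        int dimensionOrdinal, int numberOfChild) {
      for (int i = 0; i < numberOfChild; i++) {
        ColumnSchema column = allColumns.get(dimensionOrdinal);
        if (column.getNumberOfChild() > 0) {
          dimensionOrdinal++;
          // take the returned value so the grandchildren are skipped too
          dimensionOrdinal = skipComplexChildren(allColumns, dimensionOrdinal,
              column.getNumberOfChild());
        } else {
          dimensionOrdinal++;
        }
      }
      return dimensionOrdinal;
    }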

This closes #2469


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/d267c40b
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/d267c40b
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/d267c40b

Branch: refs/heads/carbonstore
Commit: d267c40b8d0f5ae4c4d054b3811867ab0917a63d
Parents: 3109d04
Author: praveenmeenakshi56 
Authored: Mon Jul 9 20:36:36 2018 +0530
Committer: kumarvishal09 
Committed: Wed Jul 11 22:06:55 2018 +0530

--
 .../apache/carbondata/core/util/CarbonUtil.java |  10 +-
 .../lucene/LuceneFineGrainDataMapSuite.scala|  51 
 .../testsuite/datamap/TestDataMapCommand.scala  |  51 
 .../LocalDictionarySupportAlterTableTest.scala  | 248 ++-
 .../LocalDictionarySupportCreateTableTest.scala | 138 +++
 5 files changed, 442 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/d267c40b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java 
b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
index dd34bc6..8afba76 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
@@ -3178,11 +3178,15 @@ public final class CarbonUtil {
   ColumnSchema column = allColumns.get(dimensionOrdinal);
   if (column.getNumberOfChild() > 0) {
 dimensionOrdinal++;
-unsetLocalDictForComplexColumns(allColumns, dimensionOrdinal, 
column.getNumberOfChild());
+// Dimension ordinal will take value from recursive functions so as to 
skip the
+// child columns of the complex column.
+dimensionOrdinal = unsetLocalDictForComplexColumns(allColumns, 
dimensionOrdinal,
+column.getNumberOfChild());
+  } else {
+dimensionOrdinal++;
   }
-  dimensionOrdinal++;
 }
-return dimensionOrdinal++;
+return dimensionOrdinal;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d267c40b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
--
diff --git 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
index dc0cf52..fd55145 100644
--- 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
+++ 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
@@ -834,6 +834,57 @@ class LuceneFineGrainDataMapSuite extends QueryTest with 
BeforeAndAfterAll {
 sql("drop table datamap_copy")
   }
 
+  test("test create datamap: unable to create same index datamap for one 
column") {
+sql("DROP TABLE IF EXISTS datamap_test_table")
+sql(
+  """
+| CREATE TABLE datamap_test_table(id INT, name STRING, city STRING, 
age INT)
+| STORED BY 'carbondata'
+| TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT')
+  """.stripMargin)
+val exception_duplicate_column: Exception = 
intercept[MalformedDataMapCommandException] {
+  sql(
+s"""
+   | CREATE DATAMAP dm ON TABLE datamap_test_table
+   | USING 'lucene'
+   | DMProperties('INDEX_COLUMNS'='name')
+  """.stripMargin)
+  sql(
+s"""
+   | CREATE DATAMAP dm1 ON TABLE datamap_test_table
+   | USING 'lucene'
+   | DMProperties('INDEX_COLUMNS'='name')
+  """.stripMargin)
+}
+assertResult("column 'name' already has lucene index datamap 
created")(exception_duplicate_column.getMessage)
+sql("drop table if exists datamap_test_table")
+  }
+
+  test("test create datamap: able to create different index datamap for one 
column") {
+sql("DROP TABLE IF EXISTS datamap_test_table")
+sql(
+  """
+| CREATE TABLE datamap_test_table(id INT, name STRING, city STRING, 
age INT)
+| STORED BY 'carbondata'
+| TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL

[18/50] [abbrv] carbondata git commit: [CARBONDATA-2723][DataMap] Fix bugs in recreate datamap on table

2018-07-17 Thread jackylk
[CARBONDATA-2723][DataMap] Fix bugs in recreate datamap on table

When we drop a datamap/table, the executor-side datamap cache becomes stale.
So if we recreate the datamap with different index columns, the cache must be
cleaned before data loading; otherwise the DataMapWriterListener will not take
effect for the new datamap.
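
The fix follows an invalidate-on-drop, repopulate-on-register pattern. A
condensed sketch with field and method names simplified from
DataMapStoreManager in the diff below:

    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Simplified cache-manager sketch: dropping removes the table's entry and
    // registration writes the rebuilt indices back, so no stale entry survives
    // a drop-and-recreate of the datamap.
    public class DataMapCacheSketch<TableDataMap> {
      private final Map<String, List<TableDataMap>> allDataMaps = new ConcurrentHashMap<>();

      public void clearDataMaps(String tableUniqueName) {
        // ... release each datamap's in-memory resources ...
        allDataMaps.remove(tableUniqueName);
      }

      public void registerDataMaps(String tableUniqueName, List<TableDataMap> tableIndices) {
        allDataMaps.put(tableUniqueName, tableIndices);
      }
    }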

This closes #2486


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/98c75819
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/98c75819
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/98c75819

Branch: refs/heads/carbonstore
Commit: 98c758190ed27eaf4f874a9445900960c4523251
Parents: bd02656
Author: xuchuanyin 
Authored: Wed Jul 11 12:23:18 2018 +0800
Committer: ravipesala 
Committed: Fri Jul 13 15:44:20 2018 +0530

--
 .../carbondata/core/datamap/DataMapMeta.java| 13 ++
 .../core/datamap/DataMapStoreManager.java   |  2 ++
 .../table/DiskBasedDMSchemaStorageProvider.java | 25 ++--
 .../bloom/AbstractBloomDataMapWriter.java   |  4 
 .../datamap/bloom/BloomDataMapBuilder.java  |  1 -
 .../datamap/DataMapWriterListener.java  |  3 +++
 .../store/writer/AbstractFactDataWriter.java|  1 +
 7 files changed, 27 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/98c75819/core/src/main/java/org/apache/carbondata/core/datamap/DataMapMeta.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapMeta.java 
b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapMeta.java
index adf85d8..93a8012 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapMeta.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapMeta.java
@@ -26,6 +26,7 @@ import 
org.apache.carbondata.core.scan.filter.intf.ExpressionType;
 
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.collections.Transformer;
+import org.apache.commons.lang3.StringUtils;
 
 /**
  * Metadata of the datamap, set by DataMap developer
@@ -71,9 +72,13 @@ public class DataMapMeta {
 return optimizedOperation;
   }
 
-  @Override
-  public String toString() {
-return "DataMapMeta{" + "dataMapName='" + dataMapName + '\'' + ", 
indexedColumns="
-+ indexedColumns + ", optimizedOperation=" + optimizedOperation + '}';
+  @Override public String toString() {
+return new StringBuilder("DataMapMeta{")
+.append("dataMapName='").append(dataMapName).append('\'')
+.append(", indexedColumns=[")
+.append(StringUtils.join(getIndexedColumnNames(), ", ")).append("]\'")
+.append(", optimizedOperation=").append(optimizedOperation)
+.append('}')
+.toString();
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/98c75819/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
 
b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
index 574b4c6..9a7d1c1 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
@@ -431,6 +431,7 @@ public final class DataMapStoreManager {
 }
   }
 }
+allDataMaps.remove(tableUniqName);
   }
 
   /**
@@ -460,6 +461,7 @@ public final class DataMapStoreManager {
 }
 i++;
   }
+  allDataMaps.put(tableUniqueName, tableIndices);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/98c75819/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DiskBasedDMSchemaStorageProvider.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DiskBasedDMSchemaStorageProvider.java
 
b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DiskBasedDMSchemaStorageProvider.java
index cf4f6b9..f90960b 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DiskBasedDMSchemaStorageProvider.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DiskBasedDMSchemaStorageProvider.java
@@ -27,6 +27,7 @@ import java.io.OutputStreamWriter;
 import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
 
@@ -84,10 +85,10 @@ public class DiskBasedDMSchemaStorageProvider implements 
DataMapSchemaStoragePro
   if (null != brW

[49/50] [abbrv] carbondata git commit: [CARBONDATA-2688][CarbonStore] Support SQL in REST API

2018-07-17 Thread jackylk
[CARBONDATA-2688][CarbonStore] Support SQL in REST API

Support a SQL interface in the Horizon service.
Support a REST client for SQL.
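
For illustration, the shape of a client call against such an endpoint. The URL
path and the JSON field name are assumptions made for this sketch; the real
request/response views are SqlRequest.java and SqlResponse.java in this commit:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class SqlClientSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical endpoint and payload shape, for illustration only.
        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:8080/table/sql"))
            .header("Content-Type", "application/json")
            .POST(HttpRequest.BodyPublishers.ofString(
                "{\"sqlStatement\": \"SELECT * FROM t1 LIMIT 10\"}"))
            .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
      }
    }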

This closes #2481


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/d4a28a25
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/d4a28a25
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/d4a28a25

Branch: refs/heads/carbonstore
Commit: d4a28a25116b90666e8e12e8aed95af3f7273dfe
Parents: 85cdc40
Author: Jacky Li 
Authored: Tue Jul 10 21:20:45 2018 +0800
Committer: Jacky Li 
Committed: Wed Jul 18 10:12:39 2018 +0800

--
 .../org/apache/spark/sql/CarbonSession.scala| 105 +---
 .../apache/spark/sql/CarbonSessionBuilder.scala | 162 +++
 pom.xml |   1 +
 .../carbondata/store/api/conf/StoreConf.java|   6 +
 store/horizon/pom.xml   |   4 +-
 .../rest/client/impl/SimpleHorizonClient.java   |  11 +-
 .../horizon/rest/controller/Horizon.java|  32 +++-
 .../rest/controller/HorizonController.java  |   9 +-
 .../horizon/rest/model/view/Response.java   |   8 +-
 .../horizon/rest/model/view/SelectResponse.java |   8 +-
 .../apache/carbondata/horizon/HorizonTest.java  |   4 +-
 store/sql/pom.xml   |  84 ++
 .../horizon/rest/client/SqlHorizonClient.java   |  57 +++
 .../horizon/rest/controller/SqlHorizon.java |  82 ++
 .../rest/controller/SqlHorizonController.java   |  70 
 .../rest/model/validate/RequestValidator.java   |  35 
 .../horizon/rest/model/view/SqlRequest.java |  33 
 .../horizon/rest/model/view/SqlResponse.java|  39 +
 .../horizon/rest/sql/SparkSqlWrapper.scala  |  34 
 .../java/org/apache/carbondata/AppTest.java |  20 +++
 20 files changed, 685 insertions(+), 119 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/d4a28a25/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala
--
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala 
b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala
index e9a0634..6c13955 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala
@@ -265,14 +265,14 @@ object CarbonSession {
 
   private val statementId = new AtomicLong(0)
 
-  private var enableInMemCatlog: Boolean = false
+  private var enableInMemCatalog: Boolean = false
 
   private[sql] val threadStatementId = new ThreadLocal[Long]()
 
   implicit class CarbonBuilder(builder: Builder) {
 
 def enableInMemoryCatalog(): Builder = {
-  enableInMemCatlog = true
+  enableInMemCatalog = true
   builder
 }
 def getOrCreateCarbonSession(): SparkSession = {
@@ -287,106 +287,7 @@ object CarbonSession {
 
 def getOrCreateCarbonSession(storePath: String,
 metaStorePath: String): SparkSession = synchronized {
-  if (!enableInMemCatlog) {
-builder.enableHiveSupport()
-  }
-  val options =
-getValue("options", 
builder).asInstanceOf[scala.collection.mutable.HashMap[String, String]]
-  val userSuppliedContext: Option[SparkContext] =
-getValue("userSuppliedContext", 
builder).asInstanceOf[Option[SparkContext]]
-
-  if (metaStorePath != null) {
-val hadoopConf = new Configuration()
-val configFile = 
Utils.getContextOrSparkClassLoader.getResource("hive-site.xml")
-if (configFile != null) {
-  hadoopConf.addResource(configFile)
-}
-if (options.get(CarbonCommonConstants.HIVE_CONNECTION_URL).isEmpty &&
-hadoopConf.get(CarbonCommonConstants.HIVE_CONNECTION_URL) == null) 
{
-  val metaStorePathAbsolute = new File(metaStorePath).getCanonicalPath
-  val hiveMetaStoreDB = metaStorePathAbsolute + "/metastore_db"
-  options ++= Map[String, 
String]((CarbonCommonConstants.HIVE_CONNECTION_URL,
-s"jdbc:derby:;databaseName=$hiveMetaStoreDB;create=true"))
-}
-  }
-
-  // Get the session from current thread's active session.
-  var session: SparkSession = SparkSession.getActiveSession match {
-case Some(sparkSession: CarbonSession) =>
-  if ((sparkSession ne null) && !sparkSession.sparkContext.isStopped) {
-options.foreach { case (k, v) => 
sparkSession.sessionState.conf.setConfString(k, v) }
-sparkSession
-  } else {
-null
-  }
-case _ => null
-  }
-  if (session ne null) {
-return session
-  }
-
-  // Global synchronization so we wi

[08/50] [abbrv] carbondata git commit: [CARBONDATA-2720] Remove dead code

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9114036/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
 
b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
index 2e87051..2e98f68 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
@@ -20,14 +20,11 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.BitSet;
-import java.util.Collections;
-import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
 
@@ -37,7 +34,6 @@ import org.apache.carbondata.core.cache.CacheType;
 import org.apache.carbondata.core.cache.dictionary.Dictionary;
 import 
org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
-import org.apache.carbondata.core.datastore.block.SegmentProperties;
 import org.apache.carbondata.core.datastore.compression.Compressor;
 import org.apache.carbondata.core.datastore.compression.CompressorFactory;
 import org.apache.carbondata.core.keygenerator.KeyGenException;
@@ -55,12 +51,10 @@ import 
org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
 import org.apache.carbondata.core.scan.complextypes.ArrayQueryType;
 import org.apache.carbondata.core.scan.complextypes.PrimitiveQueryType;
 import org.apache.carbondata.core.scan.complextypes.StructQueryType;
-import org.apache.carbondata.core.scan.executor.infos.KeyStructureInfo;
 import org.apache.carbondata.core.scan.expression.ColumnExpression;
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.filter.GenericQueryType;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
-import 
org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import org.apache.carbondata.core.scan.model.ProjectionDimension;
 import org.apache.carbondata.core.scan.model.ProjectionMeasure;
 import org.apache.carbondata.core.util.CarbonUtil;
@@ -212,7 +206,7 @@ public class QueryUtil {
*/
   public static int[] getDimensionChunkIndexes(List 
queryDimensions,
   Map dimensionOrdinalToChunkMapping,
-  List customAggregationDimension, Set 
filterDimensions,
+  Set filterDimensions,
   Set allProjectionListDimensionIndexes) {
 // using set as in row group columns will point to same block
 Set dimensionChunkIndex = new HashSet();
@@ -238,13 +232,6 @@ public class QueryUtil {
 }
   }
 }
-for (int i = 0; i < customAggregationDimension.size(); i++) {
-  chunkIndex =
-  
dimensionOrdinalToChunkMapping.get(customAggregationDimension.get(i).getOrdinal());
-  // not adding the children dimension as dimension aggregation
-  // is not push down in case of complex dimension
-  dimensionChunkIndex.add(chunkIndex);
-}
 int[] dimensionIndex = ArrayUtils
 .toPrimitive(dimensionChunkIndex.toArray(new 
Integer[dimensionChunkIndex.size()]));
 Arrays.sort(dimensionIndex);
@@ -470,58 +457,6 @@ public class QueryUtil {
   }
 
   /**
-   * Below method will be used to get the mapping of block index and its
-   * restructuring info
-   *
-   * @param queryDimensions   query dimension from query model
-   * @param segmentProperties segment properties
-   * @return map of block index to its restructuring info
-   * @throws KeyGenException if problem while key generation
-   */
-  public static Map getColumnGroupKeyStructureInfo(
-  List queryDimensions, SegmentProperties 
segmentProperties)
-  throws KeyGenException {
-Map rowGroupToItsRSInfo = new HashMap();
-// get column group id and its ordinal mapping of column group
-Map> columnGroupAndItsOrdinalMappingForQuery =
-getColumnGroupAndItsOrdinalMapping(queryDimensions);
-Map columnGroupAndItsKeygenartor =
-segmentProperties.getColumnGroupAndItsKeygenartor();
-
-Iterator>> iterator =
-columnGroupAndItsOrdinalMappingForQuery.entrySet().iterator();
-KeyStructureInfo restructureInfos = null;
-while (iterator.hasNext()) {
-  Entry> next = iterator.next();
-  KeyGenerator keyGenerator = 
columnGroupAndItsKeygenartor.get(next.getKey());
-  restructureInfos = new KeyStructureInfo();
-  // sort the ordinal
-  List ordinal = next.getValue();
-  List mdKeyOrdinal = new ArrayList();
-  //Un sorted
-  List mdKeyOrdinalForQuery = new ArrayList();
-  for (Integer ord : ordinal) {
-
mdKe

[21/50] [abbrv] carbondata git commit: [HOTFIX] Removed BatchedDataSourceScanExec class and extended directly from FileSourceScanExec

2018-07-17 Thread jackylk
[HOTFIX] Removed BatchedDataSourceScanExec class and extended directly from 
FileSourceScanExec

Problem:
Since some of the code of BatchedDataSourceScanExec is copied from Spark, it is
difficult to maintain across Spark version upgrades. We currently face issues
during the Spark 2.3 upgrade, so it is better to remove it.

Solution:
Remove BatchedDataSourceScanExec and extend the Spark class FileSourceScanExec
directly.

This closes #2400


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/3df2fd03
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/3df2fd03
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/3df2fd03

Branch: refs/heads/carbonstore
Commit: 3df2fd030b634add5a2df0c85e1262d6b1e4286b
Parents: 653efee
Author: ravipesala 
Authored: Fri Jun 22 18:13:59 2018 +0530
Committer: Jacky Li 
Committed: Sat Jul 14 11:46:30 2018 +0800

--
 .../dataload/TestGlobalSortDataLoad.scala   |   5 +-
 .../StandardPartitionTableLoadingTestCase.scala |   9 +-
 .../StandardPartitionTableQueryTestCase.scala   |   4 +-
 .../execution/BatchedDataSourceScanExec.scala   | 147 ---
 .../strategy/CarbonDataSourceScan.scala |  53 +++
 .../strategy/CarbonLateDecodeStrategy.scala |  31 +++-
 .../vectorreader/VectorReaderTestCase.scala |   6 +-
 7 files changed, 91 insertions(+), 164 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/3df2fd03/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestGlobalSortDataLoad.scala
--
diff --git 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestGlobalSortDataLoad.scala
 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestGlobalSortDataLoad.scala
index d7b1172..c40526d 100644
--- 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestGlobalSortDataLoad.scala
+++ 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestGlobalSortDataLoad.scala
@@ -18,7 +18,6 @@
 package org.apache.carbondata.spark.testsuite.dataload
 
 import scala.collection.JavaConverters._
-
 import java.io.{File, FileWriter}
 
 import org.apache.commons.io.FileUtils
@@ -27,7 +26,7 @@ import 
org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.util.CarbonProperties
 import org.apache.spark.sql.Row
 import org.apache.spark.sql.catalyst.InternalRow
-import org.apache.spark.sql.execution.BatchedDataSourceScanExec
+import org.apache.spark.sql.execution.strategy.CarbonDataSourceScan
 import org.apache.spark.sql.test.TestQueryExecutor.projectPath
 import org.apache.spark.sql.test.util.QueryTest
 import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
@@ -298,7 +297,7 @@ class TestGlobalSortDataLoad extends QueryTest with 
BeforeAndAfterEach with Befo
   }
   val df = sql("select * from carbon_globalsort")
   val scanRdd = df.queryExecution.sparkPlan.collect {
-case b: BatchedDataSourceScanExec if 
b.rdd.isInstanceOf[CarbonScanRDD[InternalRow]] =>
+case b: CarbonDataSourceScan if 
b.rdd.isInstanceOf[CarbonScanRDD[InternalRow]] =>
   b.rdd.asInstanceOf[CarbonScanRDD[InternalRow]]
   }.head
   assertResult(defaultParallelism)(scanRdd.getPartitions.length)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/3df2fd03/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableLoadingTestCase.scala
--
diff --git 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableLoadingTestCase.scala
 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableLoadingTestCase.scala
index b929364..1db1f4a 100644
--- 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableLoadingTestCase.scala
+++ 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableLoadingTestCase.scala
@@ -16,22 +16,19 @@
  */
 package org.apache.carbondata.spark.testsuite.standardpartition
 
-import scala.collection.JavaConverters._
 import java.io.{File, FileWriter, IOException}
 import java.util
 import java.util.concurrent.{Callable, ExecutorService, Executors}
 
 import org.apache.commons.io.FileUtils
 import org.apache.spark.sql.catalyst.{InternalRow, TableIdentifier}
-import org.apache.spark.sql.execution.Batche

[09/50] [abbrv] carbondata git commit: [CARBONDATA-2720] Remove dead code

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9114036/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentTaskIndexWrapper.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentTaskIndexWrapper.java
 
b/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentTaskIndexWrapper.java
deleted file mode 100644
index 4fe6d1b..000
--- 
a/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentTaskIndexWrapper.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.core.datastore.block;
-
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.carbondata.core.cache.Cacheable;
-import org.apache.carbondata.core.datastore.SegmentTaskIndexStore;
-
-/**
- * SegmentTaskIndexWrapper class holds the  taskIdToTableSegmentMap
- */
-public class SegmentTaskIndexWrapper implements Cacheable {
-
-  /**
-   * task_id to table segment index map
-   */
-  private Map 
taskIdToTableSegmentMap;
-  /**
-   * atomic integer to maintain the access count for a column access
-   */
-  protected AtomicInteger accessCount = new AtomicInteger();
-
-  /**
-   * Table block meta size.
-   */
-  protected AtomicLong memorySize = new AtomicLong();
-
-  private Long refreshedTimeStamp;
-  public SegmentTaskIndexWrapper(
-  Map 
taskIdToTableSegmentMap) {
-this.taskIdToTableSegmentMap = taskIdToTableSegmentMap;
-  }
-
-  public Map 
getTaskIdToTableSegmentMap() {
-return taskIdToTableSegmentMap;
-  }
-
-  public void setTaskIdToTableSegmentMap(
-  Map 
taskIdToTableSegmentMap) {
-this.taskIdToTableSegmentMap = taskIdToTableSegmentMap;
-  }
-
-  /**
-   * return segment size
-   *
-   * @param memorySize
-   */
-  public void setMemorySize(long memorySize) {
-this.memorySize.set(memorySize);
-  }
-
-  /**
-   * returns the timestamp
-   *
-   * @return
-   */
-  @Override public long getFileTimeStamp() {
-return 0;
-  }
-
-  /**
-   * returns the access count
-   *
-   * @return
-   */
-  @Override public int getAccessCount() {
-return accessCount.get();
-  }
-
-  /**
-   * returns the memory size
-   *
-   * @return
-   */
-  @Override public long getMemorySize() {
-return memorySize.get();
-  }
-
-  /**
-   * The method is used to set the access count
-   */
-  public void incrementAccessCount() {
-accessCount.incrementAndGet();
-  }
-
-  /**
-   * This method will release the objects and set default value for primitive 
types
-   */
-  public void clear() {
-decrementAccessCount();
-  }
-
-  /**
-   * This method will decrement the access count for a column by 1
-   * whenever a column usage is complete
-   */
-  private void decrementAccessCount() {
-if (accessCount.get() > 0) {
-  accessCount.decrementAndGet();
-}
-  }
-
-  public Long getRefreshedTimeStamp() {
-return refreshedTimeStamp;
-  }
-
-  public void setRefreshedTimeStamp(Long refreshedTimeStamp) {
-this.refreshedTimeStamp = refreshedTimeStamp;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9114036/core/src/main/java/org/apache/carbondata/core/datastore/chunk/DimensionColumnPage.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/DimensionColumnPage.java
 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/DimensionColumnPage.java
index 15840bc..50fa09a 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/DimensionColumnPage.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/DimensionColumnPage.java
@@ -16,7 +16,6 @@
  */
 package org.apache.carbondata.core.datastore.chunk;
 
-import org.apache.carbondata.core.scan.executor.infos.KeyStructureInfo;
 import org.apache.carbondata.core.scan.result.vector.ColumnVectorInfo;
 
 /**
@@ -31,7 +30,7 @@ public interface DimensionColumnPage {
* @param data   data to filed
* @return how many bytes was copied
*/
-  int fillRawData(int r

[24/50] [abbrv] carbondata git commit: [CARBONDATA-2606][Complex DataType Enhancements]Fix Null result if First two Projection column have same parent and third column has different Parent Struct

2018-07-17 Thread jackylk
[CARBONDATA-2606][Complex DataType Enhancements]Fix Null result if First two
 Projection column have same parent and third column has different Parent Struct

Problem:
When multiple projection columns are present, only the first child element
builds the parent Object array; for all the other children null is placed.
E.g. for a : <b, c, d>, 'a' is the parent column and b, c, d are child columns.
During traversal, when the first element in the list, i.e. column 'b', is
encountered, 'a' is filled completely. When columns 'c' and 'd' are
encountered, only null is placed in the output. Hence, since null is placed in
the output, the select result is null if the first two projection columns have
the same parent and the third column has a different parent struct column.
Solution: Place the nulls at the end of the output.

This closes #2489


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/75a602d0
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/75a602d0
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/75a602d0

Branch: refs/heads/carbonstore
Commit: 75a602d013fce170d59e66b4f907ff405f855d33
Parents: 0c33857
Author: Indhumathi27 
Authored: Wed Jul 11 15:54:55 2018 +0530
Committer: kunal642 
Committed: Sun Jul 15 12:54:28 2018 +0530

--
 .../impl/DictionaryBasedResultCollector.java| 11 +++
 .../core/scan/result/BlockletScannedResult.java |  2 +-
 .../complexType/TestComplexDataType.scala   | 31 
 .../sql/CarbonDatasourceHadoopRelation.scala|  2 +-
 4 files changed, 44 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/75a602d0/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
index 495d7de..3184d80 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
@@ -140,6 +140,17 @@ public class DictionaryBasedResultCollector extends AbstractScannedResultCollect
 continue;
   }
   fillMeasureData(scannedResult, row);
+      if (scannedResult.complexParentIndexToQueryMap.toString().contains("StructQueryType")) {
+        // If a : <b,c> and d : <e> are two structs and a.b,a.c,d.e is given in the
+        // projection list, then the object array will contain a,null,d as result, because for a.b,
+        // a will be filled and for a.c null will be placed.
+        // Instead place null at the end of the object array and send a,d,null as result.
+        int count = 0;
+        for (int j = 0; j < row.length; j++) {
+          if (row[j] != null) row[count++] = row[j];
+        }
+        while (count < row.length) row[count++] = null;
+      }
   listBasedResult.add(row);
   rowCounter++;
 }
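
To make the reordering concrete, here is a minimal standalone sketch of the same
compaction step with a hypothetical three-column projection result; this is not
CarbonData code, just the array manipulation the patch adds:

public class NullCompactionSketch {
  public static void main(String[] args) {
    // hypothetical result for projections a.b, a.c, d.e: parent 'a' filled,
    // null for the second child of 'a', parent 'd' filled
    Object[] row = {"a", null, "d"};
    int count = 0;
    for (int j = 0; j < row.length; j++) {
      if (row[j] != null) {
        row[count++] = row[j];   // shift filled parents to the front
      }
    }
    while (count < row.length) {
      row[count++] = null;       // pad the tail with nulls
    }
    // row is now ["a", "d", null], matching the expected select output
  }
}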

http://git-wip-us.apache.org/repos/asf/carbondata/blob/75a602d0/core/src/main/java/org/apache/carbondata/core/scan/result/BlockletScannedResult.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/BlockletScannedResult.java b/core/src/main/java/org/apache/carbondata/core/scan/result/BlockletScannedResult.java
index aac76e8..a25ebff 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/BlockletScannedResult.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/BlockletScannedResult.java
@@ -106,7 +106,7 @@ public abstract class BlockletScannedResult {
   /**
*
*/
-  private Map complexParentIndexToQueryMap;
+  public Map complexParentIndexToQueryMap;
 
   private int totalDimensionsSize;
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/75a602d0/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
--
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
index 1068ba2..45a9c7a 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala

[07/50] [abbrv] carbondata git commit: [CARBONDATA-2720] Remove dead code

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9114036/core/src/test/java/org/apache/carbondata/core/datastore/SegmentTaskIndexStoreTest.java
--
diff --git a/core/src/test/java/org/apache/carbondata/core/datastore/SegmentTaskIndexStoreTest.java b/core/src/test/java/org/apache/carbondata/core/datastore/SegmentTaskIndexStoreTest.java
deleted file mode 100644
index 19e91da..000
--- a/core/src/test/java/org/apache/carbondata/core/datastore/SegmentTaskIndexStoreTest.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.carbondata.core.datastore;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.carbondata.core.cache.CacheProvider;
-import org.apache.carbondata.core.cache.CacheType;
-import org.apache.carbondata.core.datastore.block.AbstractIndex;
-import org.apache.carbondata.core.datastore.block.SegmentTaskIndexWrapper;
-import org.apache.carbondata.core.datastore.block.TableBlockInfo;
-import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
-import org.apache.carbondata.core.metadata.CarbonTableIdentifier;
-import org.apache.carbondata.core.metadata.ColumnarFormatVersion;
-import org.apache.carbondata.core.metadata.blocklet.BlockletInfo;
-import org.apache.carbondata.core.metadata.blocklet.DataFileFooter;
-import org.apache.carbondata.core.metadata.blocklet.SegmentInfo;
-import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
-
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import static org.junit.Assert.assertNull;
-
-public class SegmentTaskIndexStoreTest {
-
-  private static short version = 1;
-  private static String locations[] = { "/tmp" };
-  private static SegmentTaskIndexStore taskIndexStore;
-  private static TableBlockInfo tableBlockInfo;
-  private static AbsoluteTableIdentifier absoluteTableIdentifier;
-
-  @BeforeClass public static void setUp() {
-CacheProvider cacheProvider = CacheProvider.getInstance();
-    taskIndexStore = (SegmentTaskIndexStore) cacheProvider.createCache(CacheType.DRIVER_BTREE);
-tableBlockInfo = new TableBlockInfo("file", 0L, "SG100", locations, 10L,
-ColumnarFormatVersion.valueOf(version), null);
-absoluteTableIdentifier = AbsoluteTableIdentifier.from("/tmp",
-new CarbonTableIdentifier("testdatabase", "testtable", "TB100"));
-  }
-
-  private List getDataFileFooters() {
-SegmentInfo segmentInfo = new SegmentInfo();
-DataFileFooter footer = new DataFileFooter();
-ColumnSchema columnSchema = new ColumnSchema();
-BlockletInfo blockletInfo = new BlockletInfo();
-List footerList = new ArrayList();
-List columnSchemaList = new ArrayList();
-
-columnSchema.setColumnName("employeeName");
-columnSchemaList.add(new ColumnSchema());
-
-footer.setSegmentInfo(segmentInfo);
-footer.setColumnInTable(columnSchemaList);
-footer.setBlockletList(Arrays.asList(blockletInfo));
-footerList.add(footer);
-return footerList;
-  }
-
-  @Test public void checkExistenceOfSegmentBTree() {
-TableSegmentUniqueIdentifier tableSegmentUniqueIdentifier =
-new TableSegmentUniqueIdentifier(absoluteTableIdentifier, "SG100");
-SegmentTaskIndexWrapper segmentTaskIndexWrapper =
-taskIndexStore.getIfPresent(tableSegmentUniqueIdentifier);
-    Map result = segmentTaskIndexWrapper != null ?
-segmentTaskIndexWrapper.getTaskIdToTableSegmentMap() :
-null;
-assertNull(result);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9114036/core/src/test/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesTest.java
--
diff --git a/core/src/test/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesTest.java b/core/src/test/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesTest.java
index 20036ec..68bf896 100644
--- a/core/src/test/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesTest.java

[47/50] [abbrv] carbondata git commit: [CARBONDATA-2705][CarbonStore] CarbonStore Java API and Implementation

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/85cdc404/store/core/src/main/java/org/apache/carbondata/store/conf/StoreConf.java
--
diff --git a/store/core/src/main/java/org/apache/carbondata/store/conf/StoreConf.java b/store/core/src/main/java/org/apache/carbondata/store/conf/StoreConf.java
deleted file mode 100644
index da2a697..000
--- a/store/core/src/main/java/org/apache/carbondata/store/conf/StoreConf.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.store.conf;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.carbondata.core.datastore.impl.FileFactory;
-import org.apache.carbondata.store.util.StoreUtil;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-
-public class StoreConf implements Serializable, Writable {
-
-  public static final String SELECT_PROJECTION = "carbon.select.projection";
-  public static final String SELECT_FILTER = "carbon.select.filter";
-  public static final String SELECT_LIMIT = "carbon.select.limit";
-
-  public static final String SELECT_ID = "carbon.select.id";
-
-  public static final String WORKER_HOST = "carbon.worker.host";
-  public static final String WORKER_PORT = "carbon.worker.port";
-  public static final String WORKER_CORE_NUM = "carbon.worker.core.num";
-  public static final String MASTER_HOST = "carbon.master.host";
-  public static final String MASTER_PORT = "carbon.master.port";
-
-  public static final String STORE_TEMP_LOCATION = "carbon.store.temp.location";
-  public static final String STORE_LOCATION = "carbon.store.location";
-
-  private Map conf = new HashMap<>();
-
-  public StoreConf() {
-  }
-
-  public StoreConf(String filePath) {
-load(filePath);
-  }
-
-  public StoreConf conf(String key, String value) {
-conf.put(key, value);
-return this;
-  }
-
-  public StoreConf conf(String key, int value) {
-conf.put(key, "" + value);
-return this;
-  }
-
-  public void load(String filePath) {
-StoreUtil.loadProperties(filePath, this);
-  }
-
-  public void conf(StoreConf conf) {
-this.conf.putAll(conf.conf);
-  }
-
-  public Object conf(String key) {
-return conf.get(key);
-  }
-
-  public String[] projection() {
-return stringArrayValue(SELECT_PROJECTION);
-  }
-
-  public String filter() {
-return stringValue(SELECT_FILTER);
-  }
-
-  public int limit() {
-return intValue(SELECT_LIMIT);
-  }
-
-  public String masterHost() {
-return stringValue(MASTER_HOST);
-  }
-
-  public int masterPort() {
-return intValue(MASTER_PORT);
-  }
-
-  public String workerHost() {
-return stringValue(WORKER_HOST);
-  }
-
-  public int workerPort() {
-return intValue(WORKER_PORT);
-  }
-
-  public int workerCoreNum() {
-return intValue(WORKER_CORE_NUM);
-  }
-
-  public String storeLocation() {
-return stringValue(STORE_LOCATION);
-  }
-
-  public String[] storeTempLocation() {
-return stringArrayValue(STORE_TEMP_LOCATION);
-  }
-
-  public String selectId() {
-return stringValue(SELECT_ID);
-  }
-
-  public Configuration newHadoopConf() {
-Configuration hadoopConf = FileFactory.getConfiguration();
-for (Map.Entry entry : conf.entrySet()) {
-  String key = entry.getKey();
-  String value = entry.getValue();
-  if (key != null && value != null && key.startsWith("carbon.hadoop.")) {
-hadoopConf.set(key.substring("carbon.hadoop.".length()), value);
-  }
-}
-return hadoopConf;
-  }
-
-  private String stringValue(String key) {
-Object obj = conf.get(key);
-if (obj == null) {
-  return null;
-}
-return obj.toString();
-  }
-
-  private int intValue(String key) {
-String value = conf.get(key);
-if (value == null) {
-  return -1;
-}
-return Integer.parseInt(value);
-  }
-
-  private String[] stringArrayValue(String key) {
-String value = conf.get(key);
-if (value == null) {
-

[37/50] [abbrv] carbondata git commit: [CARBONDATA-2609] Change RPC implementation to Hadoop RPC framework

2018-07-17 Thread jackylk
[CARBONDATA-2609] Change RPC implementation to Hadoop RPC framework

This closes #2372


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/d9b40bf9
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/d9b40bf9
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/d9b40bf9

Branch: refs/heads/carbonstore
Commit: d9b40bf9edd7017ebaffe0a25161140940737d63
Parents: a162897
Author: Jacky Li 
Authored: Wed Jun 13 23:57:00 2018 +0800
Committer: Jacky Li 
Committed: Wed Jul 18 10:04:47 2018 +0800

--
 .../core/constants/CarbonCommonConstants.java   |   4 +-
 .../carbondata/core/scan/model/QueryModel.java  |  14 +-
 .../carbondata/core/util/CarbonProperties.java  |  10 +
 .../core/util/ObjectSerializationUtil.java  |  14 +
 .../carbondata/hadoop/CarbonRecordReader.java   |   8 +-
 .../detailquery/SearchModeTestCase.scala|  17 +-
 integration/spark2/pom.xml  |   2 +-
 .../carbondata/store/SparkCarbonStore.scala |  27 +-
 .../org/apache/spark/sql/CarbonSession.scala|   1 +
 pom.xml |   2 +-
 store/core/pom.xml  | 113 +++
 .../carbondata/store/CarbonRowReadSupport.java  |  53 
 .../apache/carbondata/store/CarbonStore.java|  68 +
 .../carbondata/store/LocalCarbonStore.java  | 130 +
 .../carbondata/store/MetaCachedCarbonStore.java |  59 
 .../carbondata/store/rpc/QueryService.java  |  33 +++
 .../carbondata/store/rpc/RegistryService.java   |  30 ++
 .../carbondata/store/rpc/ServiceFactory.java|  43 +++
 .../store/rpc/impl/IndexedRecordReader.java | 161 ++
 .../store/rpc/impl/QueryServiceImpl.java|  56 
 .../store/rpc/impl/RegistryServiceImpl.java |  54 
 .../store/rpc/impl/RequestHandler.java  | 147 ++
 .../carbondata/store/rpc/impl/Status.java   |  28 ++
 .../store/rpc/model/QueryRequest.java   | 108 +++
 .../store/rpc/model/QueryResponse.java  |  84 ++
 .../store/rpc/model/RegisterWorkerRequest.java  |  69 +
 .../store/rpc/model/RegisterWorkerResponse.java |  54 
 .../store/rpc/model/ShutdownRequest.java|  53 
 .../store/rpc/model/ShutdownResponse.java   |  61 
 .../org/apache/carbondata/store/Master.scala| 283 ++
 .../org/apache/carbondata/store/Scheduler.scala | 147 ++
 .../org/apache/carbondata/store/Worker.scala| 113 +++
 .../carbondata/store/LocalCarbonStoreTest.java  |  72 +
 .../org/apache/carbondata/store/TestUtil.java   | 168 +++
 .../carbondata/store/SchedulerSuite.scala   | 155 ++
 .../carbondata/store/CarbonRowReadSupport.java  |  53 
 .../apache/carbondata/store/CarbonStore.java|  68 -
 .../carbondata/store/LocalCarbonStore.java  | 130 -
 .../carbondata/store/MetaCachedCarbonStore.java |  59 
 .../carbondata/store/LocalCarbonStoreTest.java  |  72 -
 store/search/pom.xml| 112 ---
 .../store/worker/SearchRequestHandler.java  | 247 
 .../apache/carbondata/store/worker/Status.java  |  28 --
 .../scala/org/apache/spark/rpc/Master.scala | 291 ---
 .../scala/org/apache/spark/rpc/Scheduler.scala  | 139 -
 .../scala/org/apache/spark/rpc/Worker.scala | 118 
 .../org/apache/spark/search/Registry.scala  |  51 
 .../org/apache/spark/search/Searcher.scala  |  79 -
 .../carbondata/store/SearchServiceTest.java |  37 ---
 .../org/apache/spark/rpc/SchedulerSuite.scala   | 154 --
 50 files changed, 2402 insertions(+), 1677 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/d9b40bf9/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index e7e074d..ad3b0d3 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -1741,7 +1741,7 @@ public final class CarbonCommonConstants {
   public static final String CARBON_SEARCH_MODE_ENABLE_DEFAULT = "false";
 
   /**
-   * It's timeout threshold of carbon search query
+   * It's timeout threshold of carbon search query, in seconds
*/
   @CarbonProperty
   @InterfaceStability.Unstable
@@ -1750,7 +1750,7 @@ public final class CarbonCommonConstants {
   /**
* Default value is 10 seconds
*/
-  public static final String CARBON_SEARCH_QUERY_TIMEOUT_DEFAULT = "10s";
+ 

[30/50] [abbrv] carbondata git commit: [CARBONDATA-2649] Fixed arrayIndexOutOfBoundException while loading Blocklet DataMap after alter add column operation

2018-07-17 Thread jackylk
[CARBONDATA-2649] Fixed arrayIndexOutOfBoundException while loading Blocklet 
DataMap after alter add column operation

Things done as part of this PR:

Fixed ArrayIndexOutOfBoundsException while loading Blocklet DataMap after an
alter add column operation.

Problem:
An ArrayIndexOutOfBoundsException was thrown after an alter add column operation.

Analysis:
After an alter add column operation, if COLUMN_META_CACHE is set on the newly
added columns, executing a select query on data loaded before the alter
operation threw the exception. This was because the min/max cache columns were
fetched irrespective of the segmentProperties. Data loaded before the alter add
column operation does not have the newly added columns in its columnSchemaList,
so an exception can be thrown if non-existent columns are not removed from the
min/max column cache.

Solution:
Fetch the min/max cache columns based on segmentProperties

This closes #2510
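
The shape of the fix can be sketched as follows: resolve the cached column
names against the columns that actually exist in a given segment's schema, so a
column added by a later ALTER never reaches the min/max cache of an older
segment. The method name and string-based column model below are illustrative,
not the exact CarbonData API:

import java.util.ArrayList;
import java.util.List;

public class MinMaxCacheSketch {
  // keep only COLUMN_META_CACHE columns that the segment's schema contains
  static List<String> getMinMaxCacheColumns(
      List<String> cacheColumnNames, List<String> segmentColumnNames) {
    List<String> result = new ArrayList<>();
    for (String cached : cacheColumnNames) {
      if (segmentColumnNames.contains(cached)) {
        result.add(cached);   // column exists in this segment, safe to cache
      }
    }
    return result;
  }
}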


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/8e789571
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/8e789571
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/8e789571

Branch: refs/heads/carbonstore
Commit: 8e7895715753f13964887688fdf6e59d3dca5ed8
Parents: 7341907
Author: m00258959 
Authored: Mon Jul 16 12:26:41 2018 +0530
Committer: ravipesala 
Committed: Mon Jul 16 20:02:01 2018 +0530

--
 .../block/SegmentPropertiesAndSchemaHolder.java | 14 ++
 .../indexstore/BlockletDataMapIndexStore.java   |  3 ++-
 .../indexstore/blockletindex/BlockDataMap.java  | 10 +++
 .../blockletindex/BlockletDataMap.java  |  2 +-
 .../blockletindex/BlockletDataMapModel.java |  9 ---
 .../core/metadata/schema/table/CarbonTable.java | 28 ++--
 ...ithColumnMetCacheAndCacheLevelProperty.scala | 11 
 7 files changed, 48 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/8e789571/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesAndSchemaHolder.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesAndSchemaHolder.java b/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesAndSchemaHolder.java
index e094076..bb7ff0d 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesAndSchemaHolder.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesAndSchemaHolder.java
@@ -108,7 +108,7 @@ public class SegmentPropertiesAndSchemaHolder {
 
this.segmentPropWrapperToSegmentSetMap.get(segmentPropertiesWrapper);
 if (null == segmentIdSetAndIndexWrapper) {
   // create new segmentProperties
-  segmentPropertiesWrapper.initSegmentProperties();
+  segmentPropertiesWrapper.initSegmentProperties(carbonTable);
   int segmentPropertiesIndex = 
segmentPropertiesIndexCounter.incrementAndGet();
   indexToSegmentPropertiesWrapperMapping
   .put(segmentPropertiesIndex, segmentPropertiesWrapper);
@@ -216,8 +216,11 @@ public class SegmentPropertiesAndSchemaHolder {
*
* @param segmentId
* @param segmentPropertiesIndex
+   * @param clearSegmentWrapperFromMap flag to specify whether to clear segmentPropertiesWrapper
+   *   from Map if all the segments using it have become stale
*/
-  public void invalidate(String segmentId, int segmentPropertiesIndex) {
+  public void invalidate(String segmentId, int segmentPropertiesIndex,
+  boolean clearSegmentWrapperFromMap) {
 SegmentPropertiesWrapper segmentPropertiesWrapper =
 indexToSegmentPropertiesWrapperMapping.get(segmentPropertiesIndex);
 if (null != segmentPropertiesWrapper) {
@@ -230,7 +233,8 @@ public class SegmentPropertiesAndSchemaHolder {
      // if after removal of given SegmentId, the segmentIdSet becomes empty that means this
      // segmentPropertiesWrapper is not getting used at all. In that case this object can be
      // removed from all the holders
-      if (segmentIdAndSegmentPropertiesIndexWrapper.segmentIdSet.isEmpty()) {
+      if (clearSegmentWrapperFromMap && segmentIdAndSegmentPropertiesIndexWrapper.segmentIdSet
+          .isEmpty()) {
 indexToSegmentPropertiesWrapperMapping.remove(segmentPropertiesIndex);
 segmentPropWrapperToSegmentSetMap.remove(segmentPropertiesWrapper);
   }
@@ -254,11 +258,11 @@ public class SegmentPropertiesAndSchemaHolder {
   this.tableIdentifier = carbonTable.getAbsoluteTableIdentifier();
   this.columnsInTable = columnsInTable;
   this.columnCardinality = columnCardinality;
-  this.minMaxCacheColumns 

[04/50] [abbrv] carbondata git commit: [CARBONDATA-2720] Remove dead code

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9114036/integration/spark-common/src/main/scala/org/apache/carbondata/events/AlterTableEvents.scala
--
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/events/AlterTableEvents.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/events/AlterTableEvents.scala
index 55fa2bd..37abd73 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/events/AlterTableEvents.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/events/AlterTableEvents.scala
@@ -193,26 +193,13 @@ case class AlterTableCompactionPostStatusUpdateEvent(sparkSession: SparkSession,
 
 /**
 * Compaction Event for handling clean up in case of any compaction failure and abort the
- * operation, lister has to implement this event to handle failure scenarios
- *
- * @param carbonTable
- * @param carbonMergerMapping
- * @param mergedLoadName
- */
-case class AlterTableCompactionAbortEvent(sparkSession: SparkSession,
-carbonTable: CarbonTable,
-carbonMergerMapping: CarbonMergerMapping,
-mergedLoadName: String) extends Event with AlterTableCompactionEventInfo
-
-
-/**
- * Compaction Event for handling exception in compaction
+ * * operation, lister has to implement this event to handle failure scenarios
  *
  * @param sparkSession
  * @param carbonTable
  * @param alterTableModel
  */
-case class AlterTableCompactionExceptionEvent(sparkSession: SparkSession,
+case class AlterTableCompactionAbortEvent(sparkSession: SparkSession,
 carbonTable: CarbonTable,
 alterTableModel: AlterTableModel) extends Event with 
AlterTableCompactionEventInfo
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9114036/integration/spark-common/src/main/scala/org/apache/carbondata/events/CleanFilesEvents.scala
--
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/events/CleanFilesEvents.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/events/CleanFilesEvents.scala
index 1a9c5f6..b7e9c20 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/events/CleanFilesEvents.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/events/CleanFilesEvents.scala
@@ -36,11 +36,3 @@ case class CleanFilesPreEvent(carbonTable: CarbonTable, sparkSession: SparkSessi
  */
 case class CleanFilesPostEvent(carbonTable: CarbonTable, sparkSession: SparkSession)
   extends Event with CleanFilesEventInfo
-
-/**
- *
- * @param carbonTable
- * @param sparkSession
- */
-case class CleanFilesAbortEvent(carbonTable: CarbonTable, sparkSession: SparkSession)
-  extends Event with CleanFilesEventInfo

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9114036/integration/spark-common/src/main/scala/org/apache/carbondata/spark/CarbonOption.scala
--
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/CarbonOption.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/CarbonOption.scala
index 5f23f77..9724fa8 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/CarbonOption.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/CarbonOption.scala
@@ -31,11 +31,6 @@ class CarbonOption(options: Map[String, String]) {
 
   def partitionCount: String = options.getOrElse("partitionCount", "1")
 
-  def partitionClass: String = {
-    options.getOrElse("partitionClass",
-      "org.apache.carbondata.processing.partition.impl.SampleDataPartitionerImpl")
-  }
-
   def tempCSV: Boolean = options.getOrElse("tempCSV", "false").toBoolean
 
   def compress: Boolean = options.getOrElse("compress", "false").toBoolean

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9114036/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessorStepOnSpark.scala
--
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessorStepOnSpark.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessorStepOnSpark.scala
index f443214..73ed769 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessorStepOnSpark.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessorStepOnSpark.scala
@@ -254,8 +254,7 @@ object DataLoadProcessorStepOnSpark {
   while (rows.hasNext) {
 if (rowsNotExist) {
   rowsNotExist = false
-      dataHandler = CarbonFactHandlerFactory.createCarbonFactHandler(dataHandlerModel,
-CarbonFactHandlerFactory.FactHandlerType.COLUMNAR)
+  dataHan

[01/50] [abbrv] carbondata git commit: [CARBONDATA-2712] Added fix for Local Dictionary Exclude for multi level complex columns [Forced Update!]

2018-07-17 Thread jackylk
Repository: carbondata
Updated Branches:
  refs/heads/carbonstore 4b96ed8ca -> 239a6cadb (forced update)


http://git-wip-us.apache.org/repos/asf/carbondata/blob/d267c40b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala
--
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala
index 32fab2c..d865b2a 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala
@@ -41,9 +41,11 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
 val descLoc = sql("describe formatted local1").collect
 descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
   case Some(row) => assert(row.get(1).toString.contains("true"))
+  case None => assert(false)
 }
 descLoc.find(_.get(0).toString.contains("Local Dictionary Threshold")) match {
   case Some(row) => assert(row.get(1).toString.contains("1"))
+  case None => assert(false)
 }
   }
 
@@ -59,9 +61,11 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
 val descFormatted1 = sql("describe formatted local1").collect
 descFormatted1.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
   case Some(row) => assert(row.get(1).toString.contains("true"))
+  case None => assert(false)
 }
 descFormatted1.find(_.get(0).toString.contains("Local Dictionary Include")) match {
   case Some(row) => assert(row.get(1).toString.contains("name"))
+  case None => assert(false)
 }
   }
 
@@ -156,9 +160,11 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
 val descLoc = sql("describe formatted local1").collect
 descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
   case Some(row) => assert(row.get(1).toString.contains("true"))
+  case None => assert(false)
 }
 descLoc.find(_.get(0).toString.contains("Local Dictionary Threshold")) match {
   case Some(row) => assert(row.get(1).toString.contains("2"))
+  case None => assert(false)
 }
   }
 
@@ -174,6 +180,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
 val descLoc = sql("describe formatted local1").collect
 descLoc.find(_.get(0).toString.contains("Local Dictionary Threshold")) match {
   case Some(row) => assert(row.get(1).toString.contains("1"))
+  case None => assert(false)
 }
   }
 
@@ -189,6 +196,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
 val descLoc = sql("describe formatted local1").collect
 descLoc.find(_.get(0).toString.contains("Local Dictionary Threshold")) match {
   case Some(row) => assert(row.get(1).toString.contains("1"))
+  case None => assert(false)
 }
   }
 
@@ -204,6 +212,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
 val descLoc = sql("describe formatted local1").collect
 descLoc.find(_.get(0).toString.contains("Local Dictionary Threshold")) match {
   case Some(row) => assert(row.get(1).toString.contains("1"))
+  case None => assert(false)
 }
   }
 
@@ -219,6 +228,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
 val descLoc = sql("describe formatted local1").collect
 descLoc.find(_.get(0).toString.contains("Local Dictionary Threshold")) match {
   case Some(row) => assert(row.get(1).toString.contains("1"))
+  case None => assert(false)
 }
   }
 
@@ -235,12 +245,15 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
 val descLoc = sql("describe formatted local1").collect
 descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
   case Some(row) => assert(row.get(1).toString.contains("true"))
+  case None => assert(false)
 }
 descLoc.find(_.get(0).toString.contains("Local Dictionary Threshold")) match {
   case Some(row) => assert(row.get(1).toString.contains("2"))
+  case None => assert(false)
 }
 descLoc.find(_.get(0).toString.contains("Local Dictionary Include")) match {
   case Some(row) => assert(row.get(1).toString.contains("name"))
+  case None => assert(false)
 }
   }
 
@@ -257,12 +270,15 @@ class LocalDictionarySupportCreateTableTe

[45/50] [abbrv] carbondata git commit: [CARBONDATA-2705][CarbonStore] CarbonStore Java API and Implementation

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/85cdc404/store/core/src/main/java/org/apache/carbondata/store/rpc/model/BaseResponse.java
--
diff --git a/store/core/src/main/java/org/apache/carbondata/store/rpc/model/BaseResponse.java b/store/core/src/main/java/org/apache/carbondata/store/rpc/model/BaseResponse.java
deleted file mode 100644
index d826b32..000
--- a/store/core/src/main/java/org/apache/carbondata/store/rpc/model/BaseResponse.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.store.rpc.model;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.io.Serializable;
-
-import org.apache.carbondata.common.annotations.InterfaceAudience;
-
-import org.apache.hadoop.io.Writable;
-
-@InterfaceAudience.Internal
-public class BaseResponse implements Serializable, Writable {
-  private int status;
-  private String message;
-
-  public BaseResponse() {
-  }
-
-  public BaseResponse(int status, String message) {
-this.status = status;
-this.message = message;
-  }
-
-  public int getStatus() {
-return status;
-  }
-
-  public void setStatus(int status) {
-this.status = status;
-  }
-
-  public String getMessage() {
-return message;
-  }
-
-  public void setMessage(String message) {
-this.message = message;
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-out.writeInt(status);
-out.writeUTF(message);
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-status = in.readInt();
-message = in.readUTF();
-  }
-}
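
BaseResponse follows Hadoop's Writable contract, so a write followed by
readFields must reproduce the object. A minimal round-trip check using Hadoop's
in-memory buffers (test scaffolding assumed, not part of this commit):

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class BaseResponseRoundTrip {
  public static void main(String[] args) throws Exception {
    BaseResponse original = new BaseResponse(0, "OK");

    DataOutputBuffer out = new DataOutputBuffer();
    original.write(out);                       // serialize status, then message

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    BaseResponse copy = new BaseResponse();
    copy.readFields(in);                       // deserialize in the same field order

    assert copy.getStatus() == 0 && "OK".equals(copy.getMessage());
  }
}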

http://git-wip-us.apache.org/repos/asf/carbondata/blob/85cdc404/store/core/src/main/java/org/apache/carbondata/store/rpc/model/LoadDataRequest.java
--
diff --git a/store/core/src/main/java/org/apache/carbondata/store/rpc/model/LoadDataRequest.java b/store/core/src/main/java/org/apache/carbondata/store/rpc/model/LoadDataRequest.java
deleted file mode 100644
index e79fad2..000
--- a/store/core/src/main/java/org/apache/carbondata/store/rpc/model/LoadDataRequest.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.store.rpc.model;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.io.Serializable;
-
-import org.apache.carbondata.processing.loading.model.CarbonLoadModel;
-import org.apache.carbondata.store.util.StoreUtil;
-
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-
-public class LoadDataRequest implements Serializable, Writable {
-
-  private CarbonLoadModel model;
-
-  public LoadDataRequest() {
-  }
-
-  public LoadDataRequest(CarbonLoadModel model) {
-this.model = model;
-  }
-
-  public CarbonLoadModel getModel() {
-return model;
-  }
-
-  public void setModel(CarbonLoadModel model) {
-this.model = model;
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-WritableUtils.writeCompressedByteArray(out, StoreUtil.serialize(model));
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-byte[] bytes = WritableUtils.readCompressedByteArray(in);
-model = (CarbonLoadModel) StoreUtil.deserialize(bytes);
-  }
-}


[23/50] [abbrv] carbondata git commit: [CARBONDATA-2528][MV] Fixed order by in mv and aggregation functions inside projection expressions are fixed

2018-07-17 Thread jackylk
[CARBONDATA-2528][MV] Fixed order by in MV and aggregation functions inside
projection expressions

Problem:
Order by queries, and queries with expressions like sum(a)+sum(b), do not work
with MV. Please check the JIRA for more details.

Solution:
Queries whose projections contain aggregation functions inside other
expressions, like sum(a)+sum(b), cannot be incrementally loaded, so a new
internal DM property was introduced to avoid the group by on the final query.

This closes #2453


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/0c33857f
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/0c33857f
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/0c33857f

Branch: refs/heads/carbonstore
Commit: 0c33857fd23611da393a3097511e66ddd81149f2
Parents: bc12de0
Author: ravipesala 
Authored: Thu Jun 14 11:40:07 2018 +0530
Committer: Jacky Li 
Committed: Sun Jul 15 14:49:39 2018 +0800

--
 .../apache/carbondata/mv/datamap/MVHelper.scala | 156 +++
 .../mv/rewrite/DefaultMatchMaker.scala  |  21 ++-
 .../carbondata/mv/rewrite/Navigator.scala   |  26 +---
 .../mv/rewrite/SummaryDatasetCatalog.scala  |  16 +-
 .../mv/rewrite/MVCreateTestCase.scala   |  65 
 5 files changed, 225 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/0c33857f/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVHelper.scala
--
diff --git a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVHelper.scala b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVHelper.scala
index f104d9b..fe761c0 100644
--- a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVHelper.scala
+++ b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVHelper.scala
@@ -20,11 +20,12 @@ import java.util
 
 import scala.collection.JavaConverters._
 import scala.collection.mutable
+import scala.collection.mutable.ArrayBuffer
 
 import org.apache.spark.sql.{CarbonEnv, SparkSession}
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.catalog.CatalogTable
-import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeReference, AttributeSet, Expression, NamedExpression, ScalaUDF}
+import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeReference, Cast, Expression, NamedExpression, ScalaUDF, SortOrder}
 import org.apache.spark.sql.catalyst.expressions.aggregate._
 import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan, Project}
 import org.apache.spark.sql.execution.command.{Field, TableModel, TableNewProcessor}
@@ -33,10 +34,9 @@ import org.apache.spark.sql.execution.datasources.LogicalRelation
 import org.apache.spark.sql.parser.CarbonSpark2SqlParser
 
 import org.apache.carbondata.core.datamap.DataMapStoreManager
-import org.apache.carbondata.core.metadata.schema.table.{DataMapSchema, DataMapSchemaStorageProvider, RelationIdentifier}
-import org.apache.carbondata.mv.plans.modular
+import org.apache.carbondata.core.metadata.schema.table.{DataMapSchema, RelationIdentifier}
 import org.apache.carbondata.mv.plans.modular.{GroupBy, Matchable, ModularPlan, Select}
-import org.apache.carbondata.mv.rewrite.{DefaultMatchMaker, QueryRewrite}
+import org.apache.carbondata.mv.rewrite.{MVPlanWrapper, QueryRewrite}
 import org.apache.carbondata.spark.util.CommonUtil
 
 /**
@@ -51,6 +51,7 @@ object MVHelper {
 val dmProperties = dataMapSchema.getProperties.asScala
 val updatedQuery = new CarbonSpark2SqlParser().addPreAggFunction(queryString)
 val logicalPlan = sparkSession.sql(updatedQuery).drop("preAgg").queryExecution.analyzed
+val fullRebuild = isFullReload(logicalPlan)
 val fields = logicalPlan.output.map { attr =>
   val name = updateColumnName(attr)
   val rawSchema = '`' + name + '`' + ' ' + attr.dataType.typeName
@@ -113,6 +114,7 @@ object MVHelper {
   new RelationIdentifier(table.database, table.identifier.table, "")
 }
+dataMapSchema.setParentTables(new util.ArrayList[RelationIdentifier](parentIdents.asJava))
+dataMapSchema.getProperties.put("full_refresh", fullRebuild.toString)
 DataMapStoreManager.getInstance().saveDataMapSchema(dataMapSchema)
   }
 
@@ -147,6 +149,34 @@ object MVHelper {
 }.filter(_.isDefined).map(_.get)
   }
 
+
+  /**
+   * Check if we can do incremental load on the mv table. Some cases like aggregation functions
+   * which are present inside other expressions like sum(a)+sum(b) cannot be incrementally loaded.
+   */
+  private def isFullReload(logicalPlan: LogicalPlan): Boolean = {
+var isFullReload = false
+logicalPlan.transformAllExpressions {
+  case a: Alias =>

[13/50] [abbrv] carbondata git commit: [HOTFIX][CARBONDATA-2716][DataMap] fix bug for loading datamap

2018-07-17 Thread jackylk
[HOTFIX][CARBONDATA-2716][DataMap] fix bug for loading datamap

In some scenarios, the carbonTable input parameter of
getCarbonFactDataHandlerModel may differ from the carbon table held in the
load model.

This closes #2497


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/84102a22
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/84102a22
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/84102a22

Branch: refs/heads/carbonstore
Commit: 84102a22accd3d24d52faccc747a62887c13d502
Parents: 9d7a9a2
Author: Manhua 
Authored: Thu Jul 12 16:47:15 2018 +0800
Committer: xuchuanyin 
Committed: Fri Jul 13 09:25:25 2018 +0800

--
 .../carbondata/processing/store/CarbonFactDataHandlerModel.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/84102a22/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerModel.java
--
diff --git a/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerModel.java b/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerModel.java
index 63e47f0..ca75b8c 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerModel.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerModel.java
@@ -332,7 +332,7 @@ public class CarbonFactDataHandlerModel {
 new TableSpec(loadModel.getCarbonDataLoadSchema().getCarbonTable());
 DataMapWriterListener listener = new DataMapWriterListener();
 listener.registerAllWriter(
-loadModel.getCarbonDataLoadSchema().getCarbonTable(),
+carbonTable,
 loadModel.getSegmentId(),
 CarbonTablePath.getShardName(
 
CarbonTablePath.DataFileUtil.getTaskIdFromTaskNo(loadModel.getTaskNo()),



[16/50] [abbrv] carbondata git commit: [CARBONDATA-2708][BloomDataMap] clear index file in case of data load failure

2018-07-17 Thread jackylk
[CARBONDATA-2708][BloomDataMap] clear index file in case of data load failure

When data loading fails, clean up the index DataMap files that were generated

This closes #2463


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/1fd37039
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/1fd37039
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/1fd37039

Branch: refs/heads/carbonstore
Commit: 1fd3703990c60a6459861575c4f9e0f751be8c5e
Parents: 18381e3
Author: xuchuanyin 
Authored: Wed Jul 11 17:01:21 2018 +0800
Committer: Jacky Li 
Committed: Fri Jul 13 15:04:53 2018 +0800

--
 .../carbondata/core/datamap/TableDataMap.java   |  2 +-
 .../core/datamap/dev/DataMapFactory.java|  2 +-
 .../blockletindex/BlockletDataMapFactory.java   |  2 +-
 .../bloom/BloomCoarseGrainDataMapFactory.java   |  6 +++---
 .../examples/MinMaxIndexDataMapFactory.java |  2 +-
 .../lucene/LuceneDataMapFactoryBase.java| 11 ++-
 .../testsuite/datamap/CGDataMapTestCase.scala   |  2 +-
 .../testsuite/datamap/DataMapWriterSuite.scala  |  2 +-
 .../testsuite/datamap/FGDataMapTestCase.scala   |  2 +-
 .../testsuite/datamap/TestDataMapStatus.scala   |  2 +-
 .../spark/rdd/CarbonDataRDDFactory.scala| 20 +++-
 11 files changed, 36 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/1fd37039/core/src/main/java/org/apache/carbondata/core/datamap/TableDataMap.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/TableDataMap.java b/core/src/main/java/org/apache/carbondata/core/datamap/TableDataMap.java
index 89a4c86..f6da73e 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/TableDataMap.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/TableDataMap.java
@@ -210,7 +210,7 @@ public final class TableDataMap extends OperationEventListener {
   /**
* delete only the datamaps of the segments
*/
-  public void deleteDatamapData(List segments) {
+  public void deleteDatamapData(List segments) throws IOException {
 for (Segment segment: segments) {
   dataMapFactory.deleteDatamapData(segment);
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1fd37039/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
index b115462..0889f8b 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
@@ -113,7 +113,7 @@ public abstract class DataMapFactory {
   /**
* delete datamap data in the specified segment
*/
-  public abstract void deleteDatamapData(Segment segment);
+  public abstract void deleteDatamapData(Segment segment) throws IOException;
 
   /**
* delete datamap data if any

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1fd37039/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
index 175a2a4..643cc45 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
@@ -331,7 +331,7 @@ public class BlockletDataMapFactory extends CoarseGrainDataMapFactory
   }
 
   @Override
-  public void deleteDatamapData(Segment segment) {
+  public void deleteDatamapData(Segment segment) throws IOException {
 
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1fd37039/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
--
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
index 68cf45c..6183077 100644
--- a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
+++ b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
@@ -348,7 +348,7 @@ public cla

[31/50] [abbrv] carbondata git commit: [CARBONDATA-2698][CARBONDATA-2700][CARBONDATA-2732][BloomDataMap] block some operations of bloomfilter datamap

2018-07-17 Thread jackylk
[CARBONDATA-2698][CARBONDATA-2700][CARBONDATA-2732][BloomDataMap] block some 
operations of bloomfilter datamap

1. Block creating a bloomfilter datamap index on a column whose datatype is a
complex type;
2. Block changing the datatype of a bloomfilter index column;
3. Block dropping index columns of a bloomfilter index datamap

This closes #2505


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/1c4358e8
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/1c4358e8
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/1c4358e8

Branch: refs/heads/carbonstore
Commit: 1c4358e89f5cba1132e9512107d3a0cb22087b7b
Parents: 8e78957
Author: Sssan520 
Authored: Mon Jul 16 10:59:43 2018 +0800
Committer: xuchuanyin 
Committed: Tue Jul 17 16:34:14 2018 +0800

--
 .../core/datamap/dev/DataMapFactory.java| 13 +++
 .../core/metadata/schema/table/CarbonTable.java | 13 ++-
 .../bloom/BloomCoarseGrainDataMapFactory.java   | 37 +++-
 .../datamap/CarbonCreateDataMapCommand.scala| 10 ++
 .../CarbonAlterTableDataTypeChangeCommand.scala |  3 +-
 .../CarbonAlterTableDropColumnCommand.scala |  3 +-
 .../bloom/BloomCoarseGrainDataMapSuite.scala| 99 
 7 files changed, 171 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/1c4358e8/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
index 0889f8b..ab0f8ea 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
@@ -144,4 +144,17 @@ public abstract class DataMapFactory {
 }
   }
 
+  /**
+   * whether to block operation on corresponding table or column.
+   * For example, bloomfilter datamap will block changing datatype for bloomindex column.
+   * By default it will not block any operation.
+   *
+   * @param operation table operation
+   * @param targets objects which the operation impact on
+   * @return true the operation will be blocked; false the operation will not be blocked
+   */
+  public boolean isOperationBlocked(TableOperation operation, Object... targets) {
+return false;
+  }
+
 }
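
A hedged sketch of how a concrete factory might use this hook. The operation
constants and the index-column set are assumptions for illustration; the bloom
implementation later in this commit is the authoritative version:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class BlockingFactorySketch {
  // columns this (hypothetical) datamap indexes
  private final Set<String> indexedColumns = new HashSet<>(Arrays.asList("c1", "c2"));

  public boolean isOperationBlocked(TableOperation operation, Object... targets) {
    switch (operation) {
      // assumed TableOperation constants for the two blocked ALTERs
      case ALTER_CHANGE_DATATYPE:
      case ALTER_DROP:
        // block only if an impacted column is one of the index columns
        for (Object target : targets) {
          if (indexedColumns.contains(String.valueOf(target).toLowerCase())) {
            return true;
          }
        }
        return false;
      default:
        return false;   // keep the default non-blocking behavior otherwise
    }
  }
}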

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1c4358e8/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
 
b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
index 71256d4..995f943 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
@@ -1054,11 +1054,12 @@ public class CarbonTable implements Serializable {
  /**
   * methods returns true if operation is allowed for the corresponding datamap or not
   * if this operation makes datamap stale it is not allowed
-   * @param carbonTable
-   * @param operation
-   * @return
+   * @param carbonTable carbontable to be operated
+   * @param operation which operation on the table, such as drop column, change datatype.
+   * @param targets objects which the operation impact on, such as column
+   * @return true allow; false not allow
   */
-  public boolean canAllow(CarbonTable carbonTable, TableOperation operation) {
+  public boolean canAllow(CarbonTable carbonTable, TableOperation operation, Object... targets) {
 try {
      List datamaps = DataMapStoreManager.getInstance().getAllDataMap(carbonTable);
   if (!datamaps.isEmpty()) {
@@ -1069,6 +1070,10 @@ public class CarbonTable implements Serializable {
   if (factoryClass.willBecomeStale(operation)) {
 return false;
   }
+  // check whether the operation is blocked for datamap
+  if (factoryClass.isOperationBlocked(operation, targets)) {
+return false;
+  }
 }
   }
 } catch (Exception e) {
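
On the command side, the new targets argument lets an ALTER command name the
affected column when it consults canAllow. A hedged Java sketch of that guard
(the exception type and operation constant are assumptions):

public class AlterGuardSketch {
  static void checkChangeDataType(CarbonTable table, String columnName) {
    // veto the ALTER if any datamap would become stale or explicitly blocks it
    if (!table.canAllow(table, TableOperation.ALTER_CHANGE_DATATYPE, columnName)) {
      throw new UnsupportedOperationException(
          "alter table change datatype is not supported for index datamap column: "
              + columnName);
    }
  }
}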

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1c4358e8/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
--
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
index

[40/50] [abbrv] carbondata git commit: [CARBONDATA-2690][CarbonStore] implement RESTful API: create table, load data and select

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/4437920a/store/core/src/main/java/org/apache/carbondata/store/rpc/impl/StoreServiceImpl.java
--
diff --git a/store/core/src/main/java/org/apache/carbondata/store/rpc/impl/StoreServiceImpl.java b/store/core/src/main/java/org/apache/carbondata/store/rpc/impl/StoreServiceImpl.java
new file mode 100644
index 000..ac3b199
--- /dev/null
+++ b/store/core/src/main/java/org/apache/carbondata/store/rpc/impl/StoreServiceImpl.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.store.rpc.impl;
+
+import java.io.IOException;
+
+import org.apache.carbondata.common.annotations.InterfaceAudience;
+import org.apache.carbondata.store.rpc.StoreService;
+import org.apache.carbondata.store.rpc.model.BaseResponse;
+import org.apache.carbondata.store.rpc.model.LoadDataRequest;
+import org.apache.carbondata.store.rpc.model.QueryRequest;
+import org.apache.carbondata.store.rpc.model.QueryResponse;
+import org.apache.carbondata.store.rpc.model.ShutdownRequest;
+import org.apache.carbondata.store.rpc.model.ShutdownResponse;
+import org.apache.carbondata.store.worker.Worker;
+
+import org.apache.hadoop.ipc.ProtocolSignature;
+
+@InterfaceAudience.Internal
+public class StoreServiceImpl implements StoreService {
+
+  private Worker worker;
+  RequestHandler handler;
+
+  public StoreServiceImpl(Worker worker) {
+this.worker = worker;
+    this.handler = new RequestHandler(worker.getConf(), worker.getHadoopConf());
+  }
+
+  @Override
+  public BaseResponse loadData(LoadDataRequest request) {
+return handler.handleLoadData(request);
+  }
+
+  @Override
+  public QueryResponse query(QueryRequest request) {
+return handler.handleSearch(request);
+  }
+
+  @Override
+  public ShutdownResponse shutdown(ShutdownRequest request) {
+return handler.handleShutdown(request);
+  }
+
+  @Override
+  public long getProtocolVersion(String protocol, long clientVersion) throws IOException {
+return versionID;
+  }
+
+  @Override
+  public ProtocolSignature getProtocolSignature(String protocol, long clientVersion,
+  int clientMethodsHash) throws IOException {
+return null;
+  }
+
+  public Worker getWorker() {
+return worker;
+  }
+
+  public void setWorker(Worker worker) {
+this.worker = worker;
+  }
+}
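A minimal usage sketch of the service above (not part of this patch): it wires StoreServiceImpl to a worker and dispatches a shutdown request. The Worker construction and the no-argument ShutdownRequest constructor are assumptions for illustration only.

import org.apache.carbondata.store.rpc.StoreService;
import org.apache.carbondata.store.rpc.impl.StoreServiceImpl;
import org.apache.carbondata.store.rpc.model.ShutdownRequest;
import org.apache.carbondata.store.rpc.model.ShutdownResponse;
import org.apache.carbondata.store.worker.Worker;

public class StoreServiceExample {
  public static void main(String[] args) {
    Worker worker = new Worker();  // hypothetical: real construction likely needs a StoreConf
    StoreService service = new StoreServiceImpl(worker);
    // all requests are delegated to the shared RequestHandler
    ShutdownResponse response = service.shutdown(new ShutdownRequest());  // hypothetical ctor
  }
}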

http://git-wip-us.apache.org/repos/asf/carbondata/blob/4437920a/store/core/src/main/java/org/apache/carbondata/store/rpc/model/BaseResponse.java
--
diff --git 
a/store/core/src/main/java/org/apache/carbondata/store/rpc/model/BaseResponse.java
 
b/store/core/src/main/java/org/apache/carbondata/store/rpc/model/BaseResponse.java
new file mode 100644
index 000..d826b32
--- /dev/null
+++ 
b/store/core/src/main/java/org/apache/carbondata/store/rpc/model/BaseResponse.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.store.rpc.model;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.Serializable;
+
+import org.apache.carbondata.common.annotations.InterfaceAudience;
+
+import org.apache.hadoop.io.Writable;
+
+@InterfaceAudience.Internal
+public class BaseResponse implements Serializable, Writable {
+  private int status;
+  private String message;
+
+  public Ba

[32/50] [abbrv] carbondata git commit: [HotFix] Getting carbon table identifier to datamap events

2018-07-17 Thread jackylk
[HotFix] Getting carbon table identifier to datamap events

Passing the table identifier to keep track of the table in the preload and
postload datamap events.

This closes #2448


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/aec47e06
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/aec47e06
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/aec47e06

Branch: refs/heads/carbonstore
Commit: aec47e06ff57dbfe6180f7ba2574700ac07ae8f1
Parents: 1c4358e
Author: Jatin 
Authored: Wed Jul 4 19:53:48 2018 +0530
Committer: kunal642 
Committed: Tue Jul 17 14:49:16 2018 +0530

--
 .../org/apache/carbondata/events/DataMapEvents.scala   | 13 +
 .../command/datamap/CarbonCreateDataMapCommand.scala   | 12 
 .../command/datamap/CarbonDataMapRebuildCommand.scala  |  8 ++--
 .../command/datamap/CarbonDropDataMapCommand.scala |  4 ++--
 4 files changed, 25 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/aec47e06/integration/spark-common/src/main/scala/org/apache/carbondata/events/DataMapEvents.scala
--
diff --git 
a/integration/spark-common/src/main/scala/org/apache/carbondata/events/DataMapEvents.scala
 
b/integration/spark-common/src/main/scala/org/apache/carbondata/events/DataMapEvents.scala
index 8fb374f..72c980c 100644
--- 
a/integration/spark-common/src/main/scala/org/apache/carbondata/events/DataMapEvents.scala
+++ 
b/integration/spark-common/src/main/scala/org/apache/carbondata/events/DataMapEvents.scala
@@ -18,6 +18,7 @@
 package org.apache.carbondata.events
 
 import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.TableIdentifier
 
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
 
@@ -26,14 +27,16 @@ import 
org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
  * example: bloom datamap, Lucene datamap
  */
 case class CreateDataMapPostExecutionEvent(sparkSession: SparkSession,
-storePath: String) extends Event with CreateDataMapEventsInfo
+storePath: String, tableIdentifier: TableIdentifier)
+  extends Event with CreateDataMapEventsInfo
 
 /**
  * For handling operation's before start of update index datmap status over 
table with index datamap
  * example: bloom datamap, Lucene datamap
  */
 case class UpdateDataMapPreExecutionEvent(sparkSession: SparkSession,
-storePath: String) extends Event with CreateDataMapEventsInfo
+storePath: String, tableIdentifier: TableIdentifier)
+  extends Event with CreateDataMapEventsInfo
 
 /**
  * For handling operation's after finish of  update index datmap status over 
table with index
@@ -41,7 +44,8 @@ case class UpdateDataMapPreExecutionEvent(sparkSession: 
SparkSession,
  * example: bloom datamap, Lucene datamap
  */
 case class UpdateDataMapPostExecutionEvent(sparkSession: SparkSession,
-storePath: String) extends Event with CreateDataMapEventsInfo
+storePath: String, tableIdentifier: TableIdentifier)
+  extends Event with CreateDataMapEventsInfo
 
 /**
  * For handling operation's before start of index build over table with index 
datamap
@@ -64,5 +68,6 @@ case class BuildDataMapPostExecutionEvent(sparkSession: 
SparkSession,
  * example: bloom datamap, Lucene datamap
  */
 case class CreateDataMapPreExecutionEvent(sparkSession: SparkSession,
-storePath: String) extends Event with CreateDataMapEventsInfo
+storePath: String, tableIdentifier: TableIdentifier)
+  extends Event with CreateDataMapEventsInfo
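The added tableIdentifier parameter lets listeners know which table an event belongs to. Below is a minimal sketch (assuming the OperationListenerBus API used elsewhere in CarbonData) of constructing and firing one of these events from Java; the table and database names are illustrative:

import org.apache.carbondata.events.CreateDataMapPostExecutionEvent;
import org.apache.carbondata.events.OperationContext;
import org.apache.carbondata.events.OperationListenerBus;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.catalyst.TableIdentifier;
import scala.Option;

public class DataMapEventExample {
  static void firePostCreateEvent(SparkSession session, String systemFolderLocation) {
    // the event now carries the identifier of the table the datamap belongs to
    TableIdentifier table = new TableIdentifier("my_table", Option.apply("my_db"));
    CreateDataMapPostExecutionEvent event =
        new CreateDataMapPostExecutionEvent(session, systemFolderLocation, table);
    OperationListenerBus.getInstance().fireEvent(event, new OperationContext());
  }
}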
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/aec47e06/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
--
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
index 33dba28..7600160 100644
--- 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
@@ -125,13 +125,15 @@ case class CarbonCreateDataMapCommand(
 val operationContext: OperationContext = new OperationContext()
 val systemFolderLocation: String = 
CarbonProperties.getInstance().getSystemFolderLocation
 val createDataMapPreExecutionEvent: CreateDataMapPreExecutionEvent =
-  new CreateDataMapPreExecutionEvent(sparkSession, 
systemFolderLocation)
+  new CreateDataMapPreExecutionEvent(sparkSession,
+   

[39/50] [abbrv] carbondata git commit: [CARBONDATA-2690][CarbonStore] implement RESTful API: create table, load data and select

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/4437920a/store/horizon/src/main/java/org/apache/carbondata/horizon/antlr/gen/ExpressionLexer.java
--
diff --git 
a/store/horizon/src/main/java/org/apache/carbondata/horizon/antlr/gen/ExpressionLexer.java
 
b/store/horizon/src/main/java/org/apache/carbondata/horizon/antlr/gen/ExpressionLexer.java
new file mode 100644
index 000..e32ff07
--- /dev/null
+++ 
b/store/horizon/src/main/java/org/apache/carbondata/horizon/antlr/gen/ExpressionLexer.java
@@ -0,0 +1,228 @@
+// Generated from 
/home/david/Documents/code/carbondata/store/horizon/src/main/anltr/Expression.g4
 by ANTLR 4.7
+package org.apache.carbondata.horizon.antlr.gen;
+import org.antlr.v4.runtime.Lexer;
+import org.antlr.v4.runtime.CharStream;
+import org.antlr.v4.runtime.Token;
+import org.antlr.v4.runtime.TokenStream;
+import org.antlr.v4.runtime.*;
+import org.antlr.v4.runtime.atn.*;
+import org.antlr.v4.runtime.dfa.DFA;
+import org.antlr.v4.runtime.misc.*;
+
+@SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"})
+public class ExpressionLexer extends Lexer {
+   static { RuntimeMetaData.checkVersion("4.7", RuntimeMetaData.VERSION); }
+
+   protected static final DFA[] _decisionToDFA;
+   protected static final PredictionContextCache _sharedContextCache =
+   new PredictionContextCache();
+   public static final int
+   T__0=1, T__1=2, T__2=3, T__3=4, AND=5, BETWEEN=6, FALSE=7, 
IN=8, IS=9, 
+   NOT=10, NULL=11, OR=12, TRUE=13, EQ=14, NEQ=15, LT=16, LTE=17, 
GT=18, 
+   GTE=19, MINUS=20, STRING=21, BIGINT_LITERAL=22, 
SMALLINT_LITERAL=23, TINYINT_LITERAL=24, 
+   INTEGER_VALUE=25, DECIMAL_VALUE=26, DOUBLE_LITERAL=27, 
BIGDECIMAL_LITERAL=28, 
+   IDENTIFIER=29, BACKQUOTED_IDENTIFIER=30, WS=31, UNRECOGNIZED=32;
+   public static String[] channelNames = {
+   "DEFAULT_TOKEN_CHANNEL", "HIDDEN"
+   };
+
+   public static String[] modeNames = {
+   "DEFAULT_MODE"
+   };
+
+   public static final String[] ruleNames = {
+   "T__0", "T__1", "T__2", "T__3", "AND", "BETWEEN", "FALSE", 
"IN", "IS", 
+   "NOT", "NULL", "OR", "TRUE", "EQ", "NEQ", "LT", "LTE", "GT", 
"GTE", "MINUS", 
+   "STRING", "BIGINT_LITERAL", "SMALLINT_LITERAL", 
"TINYINT_LITERAL", "INTEGER_VALUE", 
+   "DECIMAL_VALUE", "DOUBLE_LITERAL", "BIGDECIMAL_LITERAL", 
"IDENTIFIER", 
+   "BACKQUOTED_IDENTIFIER", "DECIMAL_DIGITS", "EXPONENT", "DIGIT", 
"LETTER", 
+   "WS", "UNRECOGNIZED"
+   };
+
+   private static final String[] _LITERAL_NAMES = {
+   null, "'('", "')'", "','", "'.'", "'AND'", "'BETWEEN'", 
"'FALSE'", "'IN'", 
+   "'IS'", "'NOT'", "'NULL'", "'OR'", "'TRUE'", "'='", null, 
"'<'", "'<='", 
+   "'>'", "'>='", "'-'"
+   };
+   private static final String[] _SYMBOLIC_NAMES = {
+   null, null, null, null, null, "AND", "BETWEEN", "FALSE", "IN", 
"IS", "NOT", 
+   "NULL", "OR", "TRUE", "EQ", "NEQ", "LT", "LTE", "GT", "GTE", 
"MINUS", 
+   "STRING", "BIGINT_LITERAL", "SMALLINT_LITERAL", 
"TINYINT_LITERAL", "INTEGER_VALUE", 
+   "DECIMAL_VALUE", "DOUBLE_LITERAL", "BIGDECIMAL_LITERAL", 
"IDENTIFIER", 
+   "BACKQUOTED_IDENTIFIER", "WS", "UNRECOGNIZED"
+   };
+   public static final Vocabulary VOCABULARY = new 
VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES);
+
+   /**
+* @deprecated Use {@link #VOCABULARY} instead.
+*/
+   @Deprecated
+   public static final String[] tokenNames;
+   static {
+   tokenNames = new String[_SYMBOLIC_NAMES.length];
+   for (int i = 0; i < tokenNames.length; i++) {
+   tokenNames[i] = VOCABULARY.getLiteralName(i);
+   if (tokenNames[i] == null) {
+   tokenNames[i] = VOCABULARY.getSymbolicName(i);
+   }
+
+   if (tokenNames[i] == null) {
+   tokenNames[i] = "";
+   }
+   }
+   }
+
+   @Override
+   @Deprecated
+   public String[] getTokenNames() {
+   return tokenNames;
+   }
+
+   @Override
+
+   public Vocabulary getVocabulary() {
+   return VOCABULARY;
+   }
+
+
+   public ExpressionLexer(CharStream input) {
+   super(input);
+   _interp = new 
LexerATNSimulator(this,_ATN,_decisionToDFA,_sharedContextCache);
+   }
+
+   @Override
+   public String getGrammarFileName() { return "Expression.g4"; }
+
+   @Override
+   public String[] getRuleNames() { return ruleNames; }
+
+   @Override
+   public String getSerializedATN() { return _serializedATN; }
+
+   @Override
+   public

[10/50] [abbrv] carbondata git commit: [CARBONDATA-2720] Remove dead code

2018-07-17 Thread jackylk
[CARBONDATA-2720] Remove dead code

For accurate coverage results and easy maintenance

This closes #2354


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/f9114036
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/f9114036
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/f9114036

Branch: refs/heads/carbonstore
Commit: f911403673fdae89c8537293ed55ef515b1362ef
Parents: d267c40
Author: sraghunandan 
Authored: Thu Jul 12 12:23:24 2018 +0530
Committer: Venkata Ramana G 
Committed: Thu Jul 12 12:23:24 2018 +0530

--
 .../sql/MalformedCarbonCommandException.java|   8 -
 .../common/logging/LogServiceFactory.java   |   5 -
 core/pom.xml|   1 +
 .../carbondata/core/cache/CacheProvider.java|   4 -
 .../apache/carbondata/core/cache/CacheType.java |  16 +-
 .../AbstractColumnDictionaryInfo.java   |  18 -
 .../cache/dictionary/ColumnDictionaryInfo.java  |  13 -
 .../dictionary/ColumnReverseDictionaryInfo.java |  18 +
 .../dictionary/DictionaryBuilderException.java  |  18 -
 .../dictionary/ManageDictionaryAndBTree.java|  48 --
 .../BlockletLevelDeleteDeltaDataCache.java  |  56 ---
 .../core/constants/CarbonCommonConstants.java   |   2 -
 .../core/datamap/DataMapProvider.java   |  10 +-
 .../datamap/DistributableDataMapFormat.java |  26 -
 .../dev/expr/DataMapDistributableWrapper.java   |   4 -
 .../core/datastore/SegmentTaskIndexStore.java   | 481 ---
 .../datastore/TableSegmentUniqueIdentifier.java |  53 --
 .../carbondata/core/datastore/TableSpec.java|  42 +-
 .../core/datastore/block/AbstractIndex.java |   5 -
 .../core/datastore/block/SegmentProperties.java | 224 +
 .../core/datastore/block/SegmentTaskIndex.java  |  53 --
 .../block/SegmentTaskIndexWrapper.java  | 129 -
 .../datastore/chunk/DimensionColumnPage.java|  16 +-
 .../impl/FixedLengthDimensionColumnPage.java|  16 +-
 .../impl/VariableLengthDimensionColumnPage.java |  16 +-
 .../chunk/store/ColumnPageWrapper.java  |  12 +-
 .../datastore/columnar/ColumnGroupModel.java|  88 
 .../datastore/columnar/ColumnWithRowId.java |  14 -
 .../core/datastore/page/EncodedTablePage.java   |  12 -
 .../page/encoding/DefaultEncodingFactory.java   |   7 +-
 .../adaptive/AdaptiveFloatingCodec.java |   6 -
 .../dimension/legacy/IndexStorageEncoder.java   |   4 -
 .../core/datastore/page/key/TablePageKey.java   |  16 -
 .../core/datastore/row/CarbonRow.java   |   8 -
 .../devapi/DictionaryGenerationException.java   |  52 --
 .../core/devapi/GeneratingBiDictionary.java |  48 --
 .../dictionary/client/DictionaryClient.java |   6 +-
 .../generator/TableDictionaryGenerator.java |   4 -
 .../generator/key/DictionaryMessage.java|   5 -
 .../dictionary/server/DictionaryServer.java |  18 +-
 .../service/DictionaryServiceProvider.java  |   2 +-
 .../InvalidConfigurationException.java  |  29 --
 .../core/indexstore/schema/CarbonRowSchema.java |   4 +-
 .../core/keygenerator/KeyGenerator.java |   7 -
 .../DirectDictionaryKeyGeneratorFactory.java|   1 -
 .../mdkey/AbstractKeyGenerator.java |  30 --
 .../keygenerator/mdkey/NumberCompressor.java|  34 --
 .../carbondata/core/locks/CarbonLockUtil.java   |   4 +-
 .../carbondata/core/locks/ZookeeperInit.java|   4 -
 .../core/memory/HeapMemoryAllocator.java|   1 -
 .../carbondata/core/memory/MemoryBlock.java |   2 +-
 .../carbondata/core/memory/MemoryLocation.java  |   9 -
 .../compressor/ChunkCompressorMeta.java |  34 --
 .../ThriftWrapperSchemaConverterImpl.java   |   6 +-
 .../datatype/DecimalConverterFactory.java   |  14 -
 .../core/metadata/schema/table/CarbonTable.java |  37 +-
 .../schema/table/TableSchemaBuilder.java|   1 -
 .../schema/table/column/CarbonColumn.java   |  13 -
 .../schema/table/column/CarbonDimension.java|  21 +-
 .../table/column/CarbonImplicitDimension.java   |   9 +-
 .../schema/table/column/ColumnSchema.java   |  33 --
 .../core/mutate/CarbonUpdateUtil.java   |  11 +-
 .../mutate/data/DeleteDeltaCacheLoaderIntf.java |  27 --
 .../core/readcommitter/ReadCommittedScope.java  |  15 +-
 .../scan/collector/ResultCollectorFactory.java  |   3 +
 .../exception/QueryExecutionException.java  |  46 --
 .../executor/impl/AbstractQueryExecutor.java|  19 +-
 .../scan/executor/infos/BlockExecutionInfo.java |  20 -
 .../scan/executor/infos/KeyStructureInfo.java   |  98 
 .../core/scan/executor/util/QueryUtil.java  | 176 +--
 .../exception/FilterIllegalMemberException.java |  29 --
 .../exception/FilterUnsupportedException.java   |  20 -
 .../expression/logical/FalseExpression.java |   5 +-
 .../expression/logical/RangeExpression.java |   5 +-
 .../scan/

[19/50] [abbrv] carbondata git commit: [CARBONDATA-2717] fixed table id empty problem while taking drop lock

2018-07-17 Thread jackylk
[CARBONDATA-2717] fixed table id empty problem while taking drop lock

This closes #2472


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/637a9746
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/637a9746
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/637a9746

Branch: refs/heads/carbonstore
Commit: 637a97469c1917a8554606eba138a7bb3fdeaa9c
Parents: 98c7581
Author: kunal642 
Authored: Tue Jul 10 14:38:05 2018 +0530
Committer: Venkata Ramana G 
Committed: Fri Jul 13 17:11:27 2018 +0530

--
 .../carbondata/core/locks/CarbonLockFactory.java   |  4 
 .../apache/carbondata/core/locks/CarbonLockUtil.java   | 13 +++--
 .../carbondata/hadoop/api/CarbonFileInputFormat.java   |  4 +---
 .../main/scala/org/apache/spark/sql/CarbonEnv.scala|  2 +-
 .../command/schema/CarbonGetTableDetailCommand.scala   | 12 
 .../command/table/CarbonCreateTableCommand.scala   |  3 ++-
 .../command/table/CarbonDropTableCommand.scala |  8 
 .../apache/spark/sql/hive/CarbonFileMetastore.scala|  3 ++-
 .../org/apache/spark/sql/hive/CarbonSessionState.scala |  3 ++-
 9 files changed, 27 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/637a9746/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockFactory.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockFactory.java 
b/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockFactory.java
index 769e752..91677a6 100644
--- a/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockFactory.java
@@ -60,10 +60,6 @@ public class CarbonLockFactory {
 if (lockPath.isEmpty()) {
   absoluteLockPath = absoluteTableIdentifier.getTablePath();
 } else {
-  if (absoluteTableIdentifier
-  .getCarbonTableIdentifier().getTableId().isEmpty()) {
-throw new RuntimeException("Table id is empty");
-  }
   absoluteLockPath =
   
getLockpath(absoluteTableIdentifier.getCarbonTableIdentifier().getTableId());
 }
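With the empty-table-id check removed, configuring a custom lock path no longer fails for tables whose id is empty. A minimal sketch (assuming CarbonCommonConstants.LOCK_PATH holds the lock-path property name, as used in CarbonLockUtil below) of pointing locks at a custom directory; the path is illustrative:

import org.apache.carbondata.core.constants.CarbonCommonConstants;
import org.apache.carbondata.core.util.CarbonProperties;

public class LockPathExample {
  public static void main(String[] args) {
    // locks are then created under <lock path>/<table id> instead of under the table path
    CarbonProperties.getInstance()
        .addProperty(CarbonCommonConstants.LOCK_PATH, "/tmp/carbon_locks");
  }
}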

http://git-wip-us.apache.org/repos/asf/carbondata/blob/637a9746/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockUtil.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockUtil.java 
b/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockUtil.java
index 4d67faf..ca6cddb 100644
--- a/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/locks/CarbonLockUtil.java
@@ -19,6 +19,7 @@ package org.apache.carbondata.core.locks;
 
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.filesystem.CarbonFile;
 import org.apache.carbondata.core.datastore.filesystem.CarbonFileFilter;
 import org.apache.carbondata.core.datastore.impl.FileFactory;
@@ -27,6 +28,8 @@ import 
org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
 
+import org.apache.commons.lang.StringUtils;
+
 /**
  * This class contains all carbon lock utilities
  */
@@ -121,8 +124,14 @@ public class CarbonLockUtil {
 final long segmentLockFilesPreservTime =
 CarbonProperties.getInstance().getSegmentLockFilesPreserveHours();
 AbsoluteTableIdentifier absoluteTableIdentifier = 
carbonTable.getAbsoluteTableIdentifier();
-    String lockFilesDir = CarbonTablePath
-        .getLockFilesDirPath(absoluteTableIdentifier.getTablePath());
+    String lockFilesDir = CarbonProperties.getInstance()
+        .getProperty(CarbonCommonConstants.LOCK_PATH, "");
+    if (StringUtils.isEmpty(lockFilesDir)) {
+      lockFilesDir =
+          CarbonTablePath.getLockFilesDirPath(absoluteTableIdentifier.getTablePath());
+    } else {
+      lockFilesDir = CarbonTablePath.getLockFilesDirPath(
+          CarbonLockFactory.getLockpath(carbonTable.getTableInfo().getFactTable().getTableId()));
+    }
 CarbonFile[] files = FileFactory.getCarbonFile(lockFilesDir)
 .listFiles(new CarbonFileFilter() {
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/637a9746/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonFileInputFormat.java
--
diff --git 
a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonFileInputFor

[38/50] [abbrv] carbondata git commit: [CARBONDATA-2690][CarbonStore] implement RESTful API: create table, load data and select

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/4437920a/store/horizon/src/main/java/org/apache/carbondata/horizon/rest/model/view/LoadRequest.java
--
diff --git 
a/store/horizon/src/main/java/org/apache/carbondata/horizon/rest/model/view/LoadRequest.java
 
b/store/horizon/src/main/java/org/apache/carbondata/horizon/rest/model/view/LoadRequest.java
new file mode 100644
index 000..a3e9f1c
--- /dev/null
+++ 
b/store/horizon/src/main/java/org/apache/carbondata/horizon/rest/model/view/LoadRequest.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.horizon.rest.model.view;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.carbondata.horizon.rest.model.descriptor.LoadDescriptor;
+
+public class LoadRequest {
+
+  private String databaseName;
+  private String tableName;
+  private String inputPath;
+  private Map<String, String> options;
+  private boolean isOverwrite;
+
+  public LoadRequest() {
+  }
+
+  public LoadRequest(String databaseName, String tableName, String inputPaths,
+      Map<String, String> options, boolean isOverwrite) {
+    this.databaseName = databaseName;
+    this.tableName = tableName;
+    this.inputPath = inputPaths;
+    this.options = options;
+    this.isOverwrite = isOverwrite;
+  }
+
+  public String getDatabaseName() {
+    return databaseName;
+  }
+
+  public void setDatabaseName(String databaseName) {
+    this.databaseName = databaseName;
+  }
+
+  public String getTableName() {
+    return tableName;
+  }
+
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
+  }
+
+  public String getInputPath() {
+    return inputPath;
+  }
+
+  public void setInputPath(String inputPath) {
+    this.inputPath = inputPath;
+  }
+
+  public Map<String, String> getOptions() {
+    return options;
+  }
+
+  public void setOptions(Map<String, String> options) {
+    this.options = options;
+  }
+
+  public boolean isOverwrite() {
+    return isOverwrite;
+  }
+
+  public void setOverwrite(boolean overwrite) {
+    isOverwrite = overwrite;
+  }
+
+  public LoadDescriptor convertToDto() {
+    return new LoadDescriptor(databaseName, tableName, inputPath, options, isOverwrite);
+  }
+
+  public static class Builder {
+    private LoadRequest load;
+    private Map<String, String> options;
+
+    private Builder() {
+      load = new LoadRequest();
+      options = new HashMap<>();
+    }
+
+    public Builder databaseName(String databaseName) {
+      load.setDatabaseName(databaseName);
+      return this;
+    }
+
+    public Builder tableName(String tableName) {
+      load.setTableName(tableName);
+      return this;
+    }
+
+    public Builder overwrite(boolean isOverwrite) {
+      load.setOverwrite(isOverwrite);
+      return this;
+    }
+
+    public Builder inputPath(String inputPath) {
+      load.setInputPath(inputPath);
+      return this;
+    }
+
+    public Builder options(String key, String value) {
+      options.put(key, value);
+      return this;
+    }
+
+    public LoadRequest create() {
+      load.setOptions(options);
+      return load;
+    }
+  }
+
+  public static Builder builder() {
+    return new Builder();
+  }
+}
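A minimal sketch of using the builder above to assemble a load request; the database, table, path, and option values are illustrative:

import org.apache.carbondata.horizon.rest.model.view.LoadRequest;

public class LoadRequestExample {
  public static void main(String[] args) {
    LoadRequest request = LoadRequest.builder()
        .databaseName("default")
        .tableName("sales")
        .inputPath("hdfs://localhost:9000/data/sales.csv")
        .options("header", "true")  // accumulated into the options map by create()
        .overwrite(false)
        .create();
    System.out.println(request.getTableName());
  }
}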

http://git-wip-us.apache.org/repos/asf/carbondata/blob/4437920a/store/horizon/src/main/java/org/apache/carbondata/horizon/rest/model/view/SelectRequest.java
--
diff --git 
a/store/horizon/src/main/java/org/apache/carbondata/horizon/rest/model/view/SelectRequest.java
 
b/store/horizon/src/main/java/org/apache/carbondata/horizon/rest/model/view/SelectRequest.java
new file mode 100644
index 000..3d5b3df
--- /dev/null
+++ 
b/store/horizon/src/main/java/org/apache/carbondata/horizon/rest/model/view/SelectRequest.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a 

[28/50] [abbrv] carbondata git commit: [CARBONDATA-2714] Support merge index files for the segment

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/73419071/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithRowParser.scala
--
diff --git 
a/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithRowParser.scala
 
b/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithRowParser.scala
index ae99800..d34f7a2 100644
--- 
a/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithRowParser.scala
+++ 
b/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithRowParser.scala
@@ -32,12 +32,9 @@ import org.apache.spark.sql.test.util.QueryTest
 import org.scalatest.BeforeAndAfterAll
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.metadata.CarbonMetadata
-import org.apache.carbondata.core.metadata.schema.table.CarbonTable
 import org.apache.carbondata.core.statusmanager.{FileFormat, SegmentStatus}
 import org.apache.carbondata.core.util.CarbonProperties
 import org.apache.carbondata.core.util.path.CarbonTablePath
-import org.apache.carbondata.core.writer.CarbonIndexFileMergeWriter
 
 case class FileElement(school: Array[String], age: Integer)
 case class StreamData(id: Integer, name: String, city: String, salary: 
java.lang.Float,
@@ -411,53 +408,6 @@ class TestStreamingTableWithRowParser extends QueryTest 
with BeforeAndAfterAll {
 }
 
   }
-  test("query on stream table with dictionary, sort_columns, with merge index 
applied") {
-executeStreamingIngest(
-  tableName = "stream_table_with_mi",
-  batchNums = 2,
-  rowNumsEachBatch = 25,
-  intervalOfSource = 5,
-  intervalOfIngest = 5,
-  continueSeconds = 20,
-  generateBadRecords = true,
-  badRecordAction = "force",
-  autoHandoff = false
-)
-val carbonTable: CarbonTable = CarbonMetadata.getInstance
-  .getCarbonTable("streaming1", "stream_table_with_mi")
-new CarbonIndexFileMergeWriter(carbonTable)
-  .mergeCarbonIndexFilesOfSegment("1", carbonTable.getTablePath, false, 
String.valueOf(System.currentTimeMillis()))
-// non-filter
-val result = sql("select * from streaming1.stream_table_with_mi order by 
id, name").collect()
-assert(result != null)
-assert(result.length == 55)
-// check one row of streaming data
-assert(result(1).isNullAt(0))
-assert(result(1).getString(1) == "name_6")
-// check one row of batch loading
-assert(result(50).getInt(0) == 10001)
-assert(result(50).getString(1) == "batch_1")
-
-// filter
-checkAnswer(
-  sql("select * from stream_table_with_mi where id = 1"),
-  Seq(Row(1, "name_1", "city_1", 1.0, BigDecimal.valueOf(0.01), 80.01, 
Date.valueOf("1990-01-01"), Timestamp.valueOf("2010-01-01 10:01:01.0"), 
Timestamp.valueOf("2010-01-01 10:01:01.0"
-
-checkAnswer(
-  sql("select * from stream_table_with_mi where id > 49 and id < 
10002"),
-  Seq(Row(50, "name_50", "city_50", 50.0, BigDecimal.valueOf(0.01), 
80.01, Date.valueOf("1990-01-01"), Timestamp.valueOf("2010-01-01 10:01:01.0"), 
Timestamp.valueOf("2010-01-01 10:01:01.0")),
-Row(10001, "batch_1", "city_1", 0.1, BigDecimal.valueOf(0.01), 
80.01, Date.valueOf("1990-01-01"), Timestamp.valueOf("2010-01-01 10:01:01.0"), 
Timestamp.valueOf("2010-01-01 10:01:01.0"
-
-checkAnswer(
-  sql("select * from stream_table_with_mi where id between 50 and 
10001"),
-  Seq(Row(50, "name_50", "city_50", 50.0, BigDecimal.valueOf(0.01), 
80.01, Date.valueOf("1990-01-01"), Timestamp.valueOf("2010-01-01 10:01:01.0"), 
Timestamp.valueOf("2010-01-01 10:01:01.0")),
-Row(10001, "batch_1", "city_1", 0.1, BigDecimal.valueOf(0.01), 
80.01, Date.valueOf("1990-01-01"), Timestamp.valueOf("2010-01-01 10:01:01.0"), 
Timestamp.valueOf("2010-01-01 10:01:01.0"
-
-checkAnswer(
-  sql("select * from stream_table_with_mi where name in 
('name_9','name_10', 'name_11', 'name_12') and id <> 10 and id not in (11, 
12)"),
-  Seq(Row(9, "name_9", "city_9", 9.0, BigDecimal.valueOf(0.04), 80.04, 
Date.valueOf("1990-01-04"), Timestamp.valueOf("2010-01-04 10:01:01.0"), 
Timestamp.valueOf("2010-01-04 10:01:01.0"
-
-  }
 
   test("query on stream table with dictionary, sort_columns and complex 
column") {
 executeStreamingIngest(

http://git-wip-us.apache.org/repos/asf/carbondata/blob/73419071/integration/spark2/src/test/scala/org/apache/spark/sql/CarbonGetTableDetailComandTestCase.scala
--
diff --git 
a/integration/spark2/src/test/scala/org/apache/spark/sql/CarbonGetTableDetailComandTestCase.scala
 
b/integration/spark2/src/test/scala/org/apache/spark/sql/CarbonGetTableDetailComandTestCase.scala
index c6c647c..7ef86a5 100644
--- 
a/integration/spark2/src/test/scala/org/a

[50/50] [abbrv] carbondata git commit: [REBASE] Rebasing with master branch and Fixing rebase conflict

2018-07-17 Thread jackylk
[REBASE] Rebasing with master branch and Fixing rebase conflict


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/239a6cad
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/239a6cad
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/239a6cad

Branch: refs/heads/carbonstore
Commit: 239a6cadb14dfb1dc5dbfcc58f69c5675cbcbbf2
Parents: d4a28a2
Author: Jacky Li 
Authored: Wed Jul 18 10:14:43 2018 +0800
Committer: Jacky Li 
Committed: Wed Jul 18 10:14:54 2018 +0800

--
 .../carbondata/core/metadata/schema/table/TableSchemaBuilder.java | 3 ++-
 .../main/scala/org/apache/spark/sql/CarbonSessionBuilder.scala| 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/239a6cad/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
 
b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
index 80f0aa5..8edf73b 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
@@ -139,7 +139,8 @@ public class TableSchemaBuilder {
   String localdictionaryThreshold = 
localDictionaryThreshold.equalsIgnoreCase("0") ?
   CarbonCommonConstants.LOCAL_DICTIONARY_THRESHOLD_DEFAULT :
   localDictionaryThreshold;
-  tableProperties.put(CarbonCommonConstants.LOCAL_DICTIONARY_THRESHOLD, 
localdictionaryThreshold);
+  tableProperties.put(
+  CarbonCommonConstants.LOCAL_DICTIONARY_THRESHOLD, 
localdictionaryThreshold);
   for (int index = 0; index < allColumns.size(); index++) {
 ColumnSchema colSchema = allColumns.get(index);
 if (colSchema.getDataType() == DataTypes.STRING

http://git-wip-us.apache.org/repos/asf/carbondata/blob/239a6cad/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSessionBuilder.scala
--
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSessionBuilder.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSessionBuilder.scala
index 29bcbaa..47b5c8c 100644
--- 
a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSessionBuilder.scala
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSessionBuilder.scala
@@ -20,8 +20,8 @@ package org.apache.spark.sql
 import java.io.File
 
 import org.apache.hadoop.conf.Configuration
-import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}
 import org.apache.spark.{SparkConf, SparkContext}
+import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}
 import org.apache.spark.sql.SparkSession.Builder
 import org.apache.spark.sql.profiler.Profiler
 import org.apache.spark.util.Utils



[17/50] [abbrv] carbondata git commit: [CARBONDATA-2648] Fixed NPE issue with legacy store when CACHE_LEVEL is Blocklet

2018-07-17 Thread jackylk
[CARBONDATA-2648] Fixed NPE issue with legacy store when CACHE_LEVEL is Blocklet

Things done as part of this PR:

Fixed Null pointer exception when store is of <= 1.1 version and DataMap is of 
type BlockletDataMap.
Added clearing of SegmentProperties cache holder from executor
Problem 1:
Null pointer exception thrown when store is of <= 1.1 version and DataMap is of 
type BlockletDataMap.

Analysis:
In BlockletDataMap the schema is created to include blockletInfo when adding
rows to unsafe memory, but in the legacy store the blocklet info is not
written. This led to a null pointer exception while calculating the row size
during the unsafe add.

Solution:
For the legacy store, always call the superclass (BlockDataMap) methods.

This closes #2499


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/bd02656a
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/bd02656a
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/bd02656a

Branch: refs/heads/carbonstore
Commit: bd02656abc559040111c636c909541c8441b8132
Parents: 1fd3703
Author: manishgupta88 
Authored: Thu Jul 12 21:35:45 2018 +0530
Committer: ravipesala 
Committed: Fri Jul 13 13:58:46 2018 +0530

--
 .../core/datamap/DistributableDataMapFormat.java  |  4 
 .../indexstore/BlockletDataMapIndexStore.java |  9 +
 .../TableBlockIndexUniqueIdentifierWrapper.java   | 18 ++
 .../indexstore/blockletindex/BlockletDataMap.java | 14 +-
 .../core/indexstore/row/DataMapRow.java   |  5 +
 5 files changed, 45 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/bd02656a/core/src/main/java/org/apache/carbondata/core/datamap/DistributableDataMapFormat.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datamap/DistributableDataMapFormat.java
 
b/core/src/main/java/org/apache/carbondata/core/datamap/DistributableDataMapFormat.java
index 762d89c..007541d 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/datamap/DistributableDataMapFormat.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/datamap/DistributableDataMapFormat.java
@@ -26,6 +26,7 @@ import java.util.List;
 import org.apache.carbondata.core.datamap.dev.DataMap;
 import org.apache.carbondata.core.datamap.dev.expr.DataMapDistributableWrapper;
 import org.apache.carbondata.core.datamap.dev.expr.DataMapExprWrapper;
+import 
org.apache.carbondata.core.datastore.block.SegmentPropertiesAndSchemaHolder;
 import org.apache.carbondata.core.indexstore.ExtendedBlocklet;
 import org.apache.carbondata.core.indexstore.PartitionSpec;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
@@ -97,6 +98,9 @@ public class DistributableDataMapFormat extends 
FileInputFormat

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bd02656a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDataMapIndexStore.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDataMapIndexStore.java
 
b/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDataMapIndexStore.java
index 33e624d..d84f977 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDataMapIndexStore.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDataMapIndexStore.java
@@ -93,7 +93,7 @@ public class BlockletDataMapIndexStore
   carbonDataFileBlockMetaInfoMapping);
   BlockDataMap blockletDataMap =
   loadAndGetDataMap(identifier, indexFileStore, blockMetaInfoMap,
-  identifierWrapper.getCarbonTable());
+  identifierWrapper.getCarbonTable(), 
identifierWrapper.isAddTableBlockToUnsafe());
   dataMaps.add(blockletDataMap);
   blockletDataMapIndexWrapper = new 
BlockletDataMapIndexWrapper(dataMaps);
 } else {
@@ -108,7 +108,8 @@ public class BlockletDataMapIndexStore
 carbonDataFileBlockMetaInfoMapping);
 BlockDataMap blockletDataMap =
 loadAndGetDataMap(blockIndexUniqueIdentifier, indexFileStore, 
blockMetaInfoMap,
-identifierWrapper.getCarbonTable());
+identifierWrapper.getCarbonTable(),
+identifierWrapper.isAddTableBlockToUnsafe());
 dataMaps.add(blockletDataMap);
   }
   blockletDataMapIndexWrapper = new 
BlockletDataMapIndexWrapper(dataMaps);
@@ -251,7 +252,7 @@ public class BlockletDataMapIndexStore
*/
   private BlockDataMap loadAndGetDataMap(TableBlockIndexUniqueIdentifier 
identifier,
   SegmentIndexFileStore indexFileStore, Map 
blockMetaInfoMa

[34/50] [abbrv] carbondata git commit: [CARBONDATA-2724][DataMap]Unsupported create datamap on table with V1 or V2 format data

2018-07-17 Thread jackylk
[CARBONDATA-2724][DataMap]Unsupported create datamap on table with V1 or V2 
format data

Block creating a datamap on a carbon table with V1 or V2 format.
Currently the version info is read from the carbon data file.

This closes #2488


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/a1628978
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/a1628978
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/a1628978

Branch: refs/heads/carbonstore
Commit: a162897862c92947ea8fd63713b7dbe6098f3b13
Parents: 81038f5
Author: ndwangsen 
Authored: Wed Jul 11 17:41:25 2018 +0800
Committer: xuchuanyin 
Committed: Tue Jul 17 23:35:50 2018 +0800

--
 .../apache/carbondata/core/util/CarbonUtil.java | 51 
 .../datamap/CarbonCreateDataMapCommand.scala|  8 ++-
 2 files changed, 58 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/a1628978/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java 
b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
index 9796696..642fe8e 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
@@ -88,6 +88,7 @@ import org.apache.carbondata.core.util.path.CarbonTablePath;
 import org.apache.carbondata.format.BlockletHeader;
 import org.apache.carbondata.format.DataChunk2;
 import org.apache.carbondata.format.DataChunk3;
+import org.apache.carbondata.format.FileHeader;
 
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
@@ -3184,4 +3185,54 @@ public final class CarbonUtil {
 }
 return columnLocalDictGenMap;
   }
+
+  /**
+   * This method gets the carbon file format version
+   *
+   * @param carbonTable carbon table
+   */
+  public static ColumnarFormatVersion getFormatVersion(CarbonTable carbonTable)
+      throws IOException {
+    String storePath = null;
+    // if the carbon table supports flat folder
+    boolean supportFlatFolder = carbonTable.isSupportFlatFolder();
+    if (supportFlatFolder) {
+      storePath = carbonTable.getTablePath();
+    } else {
+      // get the valid segments
+      SegmentStatusManager segmentStatusManager =
+          new SegmentStatusManager(carbonTable.getAbsoluteTableIdentifier());
+      SegmentStatusManager.ValidAndInvalidSegmentsInfo validAndInvalidSegmentsInfo =
+          segmentStatusManager.getValidAndInvalidSegments();
+      List<Segment> validSegments = validAndInvalidSegmentsInfo.getValidSegments();
+      CarbonProperties carbonProperties = CarbonProperties.getInstance();
+      if (validSegments.isEmpty()) {
+        return carbonProperties.getFormatVersion();
+      }
+      storePath = carbonTable.getSegmentPath(validSegments.get(0).getSegmentNo());
+    }
+
+    CarbonFile[] carbonFiles = FileFactory
+        .getCarbonFile(storePath)
+        .listFiles(new CarbonFileFilter() {
+          @Override
+          public boolean accept(CarbonFile file) {
+            if (file == null) {
+              return false;
+            }
+            return file.getName().endsWith("carbondata");
+          }
+        });
+    if (carbonFiles == null || carbonFiles.length < 1) {
+      return CarbonProperties.getInstance().getFormatVersion();
+    }
+
+    CarbonFile carbonFile = carbonFiles[0];
+    // get the carbon file header
+    CarbonHeaderReader headerReader = new CarbonHeaderReader(carbonFile.getCanonicalPath());
+    FileHeader fileHeader = headerReader.readHeader();
+    int version = fileHeader.getVersion();
+    return ColumnarFormatVersion.valueOf((short) version);
+  }
+}
http://git-wip-us.apache.org/repos/asf/carbondata/blob/a1628978/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
--
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
index 7600160..336793e 100644
--- 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
@@ -26,9 +26,10 @@ import 
org.apache.carbondata.common.exceptions.sql.{MalformedCarbonCommandExcept
 import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.datamap.{DataMapProvider, 
DataMapStore

[29/50] [abbrv] carbondata git commit: [CARBONDATA-2714] Support merge index files for the segment

2018-07-17 Thread jackylk
[CARBONDATA-2714] Support merge index files for the segment

Problem:
The first-time query of carbon becomes very slow because many small
carbonindex files are read and cached to the driver on the first query.
Many carbonindex files are created in the following case: loading data in a
large cluster. For example, if the cluster size is 100 nodes, then each load
creates 100 index files per segment, so after 100 loads the number of
carbonindex files becomes 10,000. Reading all these files from the driver is
slow because of the many namenode calls and IO operations.
Solution:
Merge the carbonindex files in two levels so that we can reduce the IO calls
to the namenode and improve read performance.
Merge within a segment:
Merge the carbonindex files into a single file immediately after the load
completes within the segment. The merged file is named a .carbonindexmerge
file. It is not a true data merge but a simple file merge, so the current
structure of the carbonindex files does not change. While reading, we just
read one file instead of many carbonindex files within the segment.

This closes #2482


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/73419071
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/73419071
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/73419071

Branch: refs/heads/carbonstore
Commit: 73419071a308085be73c4a98fda57be241299fba
Parents: 6c5abdd
Author: dhatchayani 
Authored: Tue Jul 10 20:00:41 2018 +0530
Committer: ravipesala 
Committed: Mon Jul 16 17:50:15 2018 +0530

--
 .../core/constants/CarbonCommonConstants.java   |  13 +-
 .../indexstore/BlockletDataMapIndexStore.java   |  12 +-
 .../core/metadata/SegmentFileStore.java |  63 +++-
 .../core/util/BlockletDataMapUtil.java  |   2 +-
 .../sdv/generated/MergeIndexTestCase.scala  |  24 +-
 .../CarbonIndexFileMergeTestCase.scala  | 353 +++
 .../dataload/TestGlobalSortDataLoad.scala   |   4 +-
 .../StandardPartitionTableCleanTestCase.scala   |   2 +-
 .../StandardPartitionTableLoadingTestCase.scala |   5 +
 .../carbondata/events/AlterTableEvents.scala|  11 +
 .../carbondata/spark/util/CommonUtil.scala  |  59 +++-
 .../apache/spark/rdd/CarbonMergeFilesRDD.scala  | 100 ++
 .../scala/org/apache/spark/sql/CarbonEnv.scala  |  15 +-
 .../sql/events/MergeIndexEventListener.scala| 180 ++
 .../CarbonAlterTableCompactionCommand.scala |  23 +-
 .../sql/test/Spark2TestQueryExecutor.scala  |   1 -
 .../partition/TestAlterPartitionTable.scala |  24 +-
 .../TestStreamingTableWithRowParser.scala   |  50 ---
 .../CarbonGetTableDetailComandTestCase.scala|   4 +-
 .../processing/merger/CompactionType.java   |   1 +
 20 files changed, 751 insertions(+), 195 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/73419071/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
 
b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index 3e2843c..e7e074d 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -1394,9 +1394,6 @@ public final class CarbonCommonConstants {
   public static final String CARBON_SQLASTBUILDER_CLASSNAME =
   "spark.carbon.sqlastbuilder.classname";
 
-  public static final String CARBON_COMMON_LISTENER_REGISTER_CLASSNAME =
-  "spark.carbon.common.listener.register.classname";
-
   @CarbonProperty
   public static final String CARBON_LEASE_RECOVERY_RETRY_COUNT =
   "carbon.lease.recovery.retry.count";
@@ -1871,6 +1868,16 @@ public final class CarbonCommonConstants {
*/
   public static final String CACHE_LEVEL_DEFAULT_VALUE = "BLOCK";
 
+  /**
+   * It is internal configuration and used only for test purpose.
+   * It will merge the carbon index files with in the segment to single 
segment.
+   */
+  @CarbonProperty
+  public static final String CARBON_MERGE_INDEX_IN_SEGMENT =
+  "carbon.merge.index.in.segment";
+
+  public static final String CARBON_MERGE_INDEX_IN_SEGMENT_DEFAULT = "true";
+
   private CarbonCommonConstants() {
   }
 }
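A minimal sketch of exercising the merge from code, mirroring the API used by the test cases in this patch: the property toggles the automatic merge after load, and CarbonIndexFileMergeWriter merges a segment's index files explicitly. The segment number is illustrative.

import org.apache.carbondata.core.constants.CarbonCommonConstants;
import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
import org.apache.carbondata.core.util.CarbonProperties;
import org.apache.carbondata.core.writer.CarbonIndexFileMergeWriter;

public class MergeIndexExample {
  static void mergeSegment(CarbonTable carbonTable) throws Exception {
    // disable the automatic merge (internal, test-only property) ...
    CarbonProperties.getInstance()
        .addProperty(CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT, "false");
    // ... and merge the index files of segment "0" manually
    new CarbonIndexFileMergeWriter(carbonTable).mergeCarbonIndexFilesOfSegment(
        "0", carbonTable.getTablePath(), false, String.valueOf(System.currentTimeMillis()));
  }
}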

http://git-wip-us.apache.org/repos/asf/carbondata/blob/73419071/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDataMapIndexStore.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDataMapIndexStore.java
 
b/core/src/main/java/org/apache/carbondat

[41/50] [abbrv] carbondata git commit: [CARBONDATA-2690][CarbonStore] implement RESTful API: create table, load data and select

2018-07-17 Thread jackylk
[CARBONDATA-2690][CarbonStore] implement RESTful API: create table, load data 
and select

This PR adds:
1. Basic framework
Rewrite the carbon store's Master, Worker and Scheduler code in Java.

2. RESTful API
Support creating a table using a file meta store.
Support loading data into a table on a single worker.
Support selecting data with a filter.

This closes #2440


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/4437920a
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/4437920a
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/4437920a

Branch: refs/heads/carbonstore
Commit: 4437920a3174921d6397c4f596ff941e2cd0faa0
Parents: d9b40bf
Author: QiangCai 
Authored: Tue Jul 3 20:21:18 2018 +0800
Committer: Jacky Li 
Committed: Wed Jul 18 10:10:15 2018 +0800

--
 .../schema/table/CarbonTableBuilder.java|9 +-
 .../schema/table/TableSchemaBuilder.java|   27 +-
 .../expression/conditional/ListExpression.java  |8 +-
 .../conditional/NotEqualsExpression.java|7 +-
 .../scan/expression/logical/OrExpression.java   |2 +-
 .../expression/logical/RangeExpression.java |2 +-
 .../carbondata/core/scan/model/QueryModel.java  |3 +-
 dev/findbugs-exclude.xml|4 +
 dev/javastyle-suppressions.xml  |2 +
 .../carbondata/hadoop/CarbonRecordReader.java   |1 +
 .../hadoop/util/CarbonInputFormatUtil.java  |   20 +-
 .../preaggregate/TestPreAggregateDrop.scala |3 +-
 .../TestPreAggregateTableSelection.scala|8 +-
 .../carbondata/store/SparkCarbonStore.scala |   40 +-
 pom.xml |6 +
 .../loading/model/CarbonLoadModelBuilder.java   |   16 +-
 .../processing/util/CarbonLoaderUtil.java   |2 +-
 store/conf/log4j.properties |   10 +
 store/conf/store.conf   |   10 +
 store/core/pom.xml  |8 +-
 .../apache/carbondata/store/conf/StoreConf.java |  185 +++
 .../exception/ExecutionTimeoutException.java|   22 +
 .../store/exception/StoreException.java |   29 +
 .../store/exception/WorkerTooBusyException.java |   26 +
 .../apache/carbondata/store/master/Master.java  |  522 
 .../carbondata/store/rpc/QueryService.java  |   33 -
 .../carbondata/store/rpc/RegistryService.java   |4 +-
 .../carbondata/store/rpc/ServiceFactory.java|4 +-
 .../carbondata/store/rpc/StoreService.java  |   40 +
 .../store/rpc/impl/IndexedRecordReader.java |   22 +
 .../store/rpc/impl/QueryServiceImpl.java|   56 -
 .../store/rpc/impl/RegistryServiceImpl.java |4 +-
 .../store/rpc/impl/RequestHandler.java  |  105 +-
 .../store/rpc/impl/StoreServiceImpl.java|   78 ++
 .../store/rpc/model/BaseResponse.java   |   69 ++
 .../store/rpc/model/LoadDataRequest.java|   60 +
 .../store/rpc/model/QueryResponse.java  |   21 +-
 .../store/rpc/model/RegisterWorkerRequest.java  |4 +
 .../carbondata/store/scheduler/Schedulable.java |   74 ++
 .../carbondata/store/scheduler/Scheduler.java   |  136 +++
 .../apache/carbondata/store/util/StoreUtil.java |  132 +++
 .../apache/carbondata/store/worker/Worker.java  |  166 +++
 .../org/apache/carbondata/store/Master.scala|  283 -
 .../org/apache/carbondata/store/Scheduler.scala |  147 ---
 .../org/apache/carbondata/store/Worker.scala|  113 --
 .../carbondata/store/SchedulerSuite.scala   |  155 ---
 store/horizon/pom.xml   |   95 ++
 store/horizon/src/main/anltr/Expression.g4  |  163 +++
 .../horizon/antlr/ANTLRNoCaseStringStream.java  |   38 +
 .../carbondata/horizon/antlr/FilterVisitor.java |  215 
 .../horizon/antlr/gen/Expression.tokens |   51 +
 .../antlr/gen/ExpressionBaseVisitor.java|  168 +++
 .../horizon/antlr/gen/ExpressionLexer.java  |  228 
 .../horizon/antlr/gen/ExpressionLexer.tokens|   51 +
 .../horizon/antlr/gen/ExpressionParser.java | 1117 ++
 .../horizon/antlr/gen/ExpressionVisitor.java|  162 +++
 .../horizon/rest/controller/Horizon.java|   36 +
 .../rest/controller/HorizonController.java  |   92 ++
 .../rest/model/descriptor/LoadDescriptor.java   |   81 ++
 .../rest/model/descriptor/SelectDescriptor.java |   88 ++
 .../rest/model/descriptor/TableDescriptor.java  |   90 ++
 .../rest/model/validate/RequestValidator.java   |   70 ++
 .../rest/model/view/CreateTableRequest.java |  174 +++
 .../horizon/rest/model/view/FieldRequest.java   |  114 ++
 .../horizon/rest/model/view/LoadRequest.java|  132 +++
 .../horizon/rest/model/view/SelectRequest.java  |  130 ++
 .../horizon/rest/model/view/SelectResponse.java |   49 +
 .../horizon/rest/service/HorizonService.java|  162 +++
 .../carbondata/horizon/FilterParseTest.jav

[14/50] [abbrv] carbondata git commit: [CARBONDATA-2719] Block update and delete on table having datamaps

2018-07-17 Thread jackylk
[CARBONDATA-2719] Block update and delete on table having datamaps

Update and delete operations need to be blocked on tables which have datamaps.

This closes #2483


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/56e7dad7
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/56e7dad7
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/56e7dad7

Branch: refs/heads/carbonstore
Commit: 56e7dad7b18b6d5946ccdc49c0d264384225d231
Parents: 84102a2
Author: ndwangsen 
Authored: Wed Jul 11 11:52:09 2018 +0800
Committer: xuchuanyin 
Committed: Fri Jul 13 09:50:56 2018 +0800

--
 .../lucene/LuceneFineGrainDataMapSuite.scala|  8 ++--
 .../iud/DeleteCarbonTableTestCase.scala | 44 +++
 .../TestInsertAndOtherCommandConcurrent.scala   | 12 +++--
 .../iud/UpdateCarbonTableTestCase.scala | 46 
 .../spark/sql/hive/CarbonAnalysisRules.scala| 37 +++-
 5 files changed, 138 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/56e7dad7/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
--
diff --git 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
index fd55145..657a3eb 100644
--- 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
+++ 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
@@ -641,15 +641,15 @@ class LuceneFineGrainDataMapSuite extends QueryTest with 
BeforeAndAfterAll {
 assert(ex4.getMessage.contains("alter table drop column is not supported"))
 
 sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE datamap_test7 
OPTIONS('header'='false')")
-val ex5 = intercept[MalformedCarbonCommandException] {
+val ex5 = intercept[UnsupportedOperationException] {
   sql("UPDATE datamap_test7 d set(d.city)=('luc') where 
d.name='n10'").show()
 }
-assert(ex5.getMessage.contains("update operation is not supported"))
+assert(ex5.getMessage.contains("Update operation is not supported"))
 
-val ex6 = intercept[MalformedCarbonCommandException] {
+val ex6 = intercept[UnsupportedOperationException] {
   sql("delete from datamap_test7 where name = 'n10'").show()
 }
-assert(ex6.getMessage.contains("delete operation is not supported"))
+assert(ex6.getMessage.contains("Delete operation is not supported"))
   }
 
   test("test lucene fine grain multiple data map on table") {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/56e7dad7/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
--
diff --git 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
index 64aae1d..de93229 100644
--- 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
+++ 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
@@ -298,6 +298,50 @@ class DeleteCarbonTableTestCase extends QueryTest with 
BeforeAndAfterAll {
 
   }
 
+  test("block deleting records from table which has preaggregate datamap") {
+sql("drop table if exists test_dm_main")
+sql("drop table if exists test_dm_main_preagg1")
+
+sql("create table test_dm_main (a string, b string, c string) stored by 
'carbondata'")
+sql("insert into test_dm_main select 'aaa','bbb','ccc'")
+sql("insert into test_dm_main select 'bbb','bbb','ccc'")
+sql("insert into test_dm_main select 'ccc','bbb','ccc'")
+
+sql(
+  "create datamap preagg1 on table test_dm_main using 'preaggregate' as 
select" +
+  " a,sum(b) from test_dm_main group by a")
+
+assert(intercept[UnsupportedOperationException] {
+  sql("delete from test_dm_main_preagg1 where test_dm_main_a = 'bbb'")
+}.getMessage.contains("Delete operation is not supported for pre-aggregate 
table"))
+assert(intercept[UnsupportedOperationException] {
+  sql("delete from test_dm_main where a = 'ccc'")
+}.getMessage.contains("Delete operation is not supported for tables which 
have a pre-aggregate table"))
+
+sql("drop table if exist

[22/50] [abbrv] carbondata git commit: [CARBONDATA-2729][file-format] Schema Compatibility problem between version 1.3.0 and 1.4.0

2018-07-17 Thread jackylk
[CARBONDATA-2729][file-format] Schema Compatibility problem between version 
1.3.0 and 1.4.0

Problem:
In TableSchema the field name schemaEvalution was changed to schemaEvolution,
and in DataMapSchema the field name className was changed to providerName.
Because of this, the current Carbon version and version 1.4.0 cannot properly
de-serialize a schema created with an older Carbon version.
Solution:
To de-serialize an old schema that has a different field name, the alternate
values of the SerializedName annotation can be used.
For reference, see gson PR #699.

This closes #2498


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/bc12de00
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/bc12de00
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/bc12de00

Branch: refs/heads/carbonstore
Commit: bc12de004b654d3f61c836672f3ff857bb3e9a97
Parents: 3df2fd0
Author: mohammadshahidkhan 
Authored: Mon Jul 2 19:51:03 2018 +0530
Committer: Jacky Li 
Committed: Sat Jul 14 14:50:08 2018 +0800

--
 core/pom.xml|   2 +-
 .../metadata/schema/table/DataMapSchema.java|   4 +
 .../core/metadata/schema/table/TableSchema.java |   5 +
 .../metadata/schema/table/TableInfoTest.java| 203 +--
 integration/spark-common/pom.xml|   2 +-
 5 files changed, 193 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/bc12de00/core/pom.xml
--
diff --git a/core/pom.xml b/core/pom.xml
index 71fa66c..decb5d0 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -48,7 +48,7 @@
 
   com.google.code.gson
   gson
-  2.3.1
+  2.4
 
 
   org.apache.hadoop

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bc12de00/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DataMapSchema.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DataMapSchema.java
 
b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DataMapSchema.java
index e373fae..a48b03c 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DataMapSchema.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DataMapSchema.java
@@ -33,6 +33,7 @@ import 
org.apache.carbondata.core.metadata.schema.datamap.DataMapProperty;
 import static 
org.apache.carbondata.core.constants.CarbonCommonConstants.INDEX_COLUMNS;
 
 import com.google.gson.Gson;
+import com.google.gson.annotations.SerializedName;
 import org.apache.commons.lang.StringUtils;
 
 /**
@@ -49,6 +50,9 @@ public class DataMapSchema implements Serializable, Writable {
* 1. Index DataMap: provider name is class name of implementation class of 
DataMapFactory
* 2. OLAP DataMap: provider name is one of the {@link 
DataMapClassProvider#shortName}
*/
+  // in the old version the field name for providerName was className, so to de-serialize
+  // old schemas the old field name is provided as an alternate field name using the annotation
+  @SerializedName(value = "providerName", alternate = "className")
   protected String providerName;
 
   /**

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bc12de00/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchema.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchema.java
 
b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchema.java
index b53a9d8..4425697 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchema.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchema.java
@@ -34,6 +34,8 @@ import 
org.apache.carbondata.core.metadata.schema.datamap.DataMapProperty;
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
 import org.apache.carbondata.core.util.CarbonUtil;
 
+import com.google.gson.annotations.SerializedName;
+
 /**
  * Persisting the table information
  */
@@ -62,6 +64,9 @@ public class TableSchema implements Serializable, Writable {
   /**
* History of schema evolution of this table
*/
+  // in the old version the field name for schemaEvolution was schemaEvalution, so to de-serialize
+  // old schemas the old field name is provided as an alternate field name using the annotation
+  @SerializedName(value = "schemaEvolution", alternate = "schemaEvalution")
   private SchemaEvolution schemaEvolution;
 
   /**

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bc12de00/core/src/test/java/org/apache/carbond

[42/50] [abbrv] carbondata git commit: [CARBONDATA-2613] Support csv based carbon table

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/2009009a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAddSegmentCommand.scala
--
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAddSegmentCommand.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAddSegmentCommand.scala
new file mode 100644
index 000..e7f6c7f
--- /dev/null
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAddSegmentCommand.scala
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution.command.management
+
+import java.util.UUID
+
+import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
+import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
+import org.apache.spark.sql.execution.command.AtomicRunnableCommand
+import org.apache.spark.sql.hive.CarbonRelation
+import org.apache.spark.util.FileUtils
+
+import org.apache.carbondata.common.logging.LogServiceFactory
+import org.apache.carbondata.core.datamap.status.DataMapStatusManager
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable
+import org.apache.carbondata.core.mutate.CarbonUpdateUtil
+import org.apache.carbondata.core.statusmanager.{FileFormat, 
LoadMetadataDetails, SegmentStatus, SegmentStatusManager}
+import org.apache.carbondata.core.util.CarbonUtil
+import org.apache.carbondata.core.util.path.CarbonTablePath
+import org.apache.carbondata.events.{OperationContext, OperationListenerBus}
+import 
org.apache.carbondata.processing.loading.events.LoadEvents.LoadMetadataEvent
+import org.apache.carbondata.processing.loading.model.{CarbonDataLoadSchema, 
CarbonLoadModel}
+import org.apache.carbondata.processing.util.CarbonLoaderUtil
+
+/**
+ * support `alter table tableName add segment location 'path'` command.
+ * It will create a segment and map the path of the data files to the segment's storage
+ */
+case class CarbonAddSegmentCommand(
+dbNameOp: Option[String],
+tableName: String,
+filePathFromUser: String,
+var operationContext: OperationContext = new OperationContext) extends 
AtomicRunnableCommand {
+  private val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
+  var carbonTable: CarbonTable = _
+
+  override def processMetadata(sparkSession: SparkSession): Seq[Row] = {
+val dbName = CarbonEnv.getDatabaseName(dbNameOp)(sparkSession)
+carbonTable = {
+  val relation = CarbonEnv.getInstance(sparkSession).carbonMetastore
+.lookupRelation(Option(dbName), 
tableName)(sparkSession).asInstanceOf[CarbonRelation]
+  if (relation == null) {
+LOGGER.error(s"Add segment failed due to table $dbName.$tableName not 
found")
+throw new NoSuchTableException(dbName, tableName)
+  }
+  relation.carbonTable
+}
+
+if (carbonTable.isHivePartitionTable) {
+  LOGGER.error("Ignore hive partition table for now")
+}
+
+operationContext.setProperty("isOverwrite", false)
+if (CarbonUtil.hasAggregationDataMap(carbonTable)) {
+  val loadMetadataEvent = new LoadMetadataEvent(carbonTable, false)
+  OperationListenerBus.getInstance().fireEvent(loadMetadataEvent, 
operationContext)
+}
+Seq.empty
+  }
+
+  // will just map external files to segment metadata
+  override def processData(sparkSession: SparkSession): Seq[Row] = {
+// clean up invalid segment before creating a new entry
+SegmentStatusManager.deleteLoadsAndUpdateMetadata(carbonTable, false, null)
+val currentLoadMetadataDetails = SegmentStatusManager.readLoadMetadata(
+  CarbonTablePath.getMetadataPath(carbonTable.getTablePath))
+val newSegmentId = 
SegmentStatusManager.createNewSegmentId(currentLoadMetadataDetails).toString
+// create new segment folder in carbon store
+CarbonLoaderUtil.checkAndCreateCarbonDataLocation(newSegmentId, 
carbonTable)
+
+val factFilePath = FileUtils.getPaths(filePathFromUser)
+
+val uuid = if (carbonTable.isChildDataMap) {
+  Option(operationContext.getProperty("uuid"))

[15/50] [abbrv] carbondata git commit: [CARBONDATA-2734] Fix struct of date issue in create table

2018-07-17 Thread jackylk
[CARBONDATA-2734] Fix struct of date issue in create table

problem: Struct of date is currently not supported in the create table flow,
as the date datatype check is missing during parsing.
Hence the child date column was not appended with the parent name, leading to
a StringIndexOutOfBoundsException.

solution: Handle the date DataType as a complex child during column
formation.

This closes #2494


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/18381e3d
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/18381e3d
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/18381e3d

Branch: refs/heads/carbonstore
Commit: 18381e3db164802258b28b794c15d45cbf687f2f
Parents: 56e7dad
Author: ajantha-bhat 
Authored: Wed Jul 11 17:28:45 2018 +0530
Committer: ravipesala 
Committed: Fri Jul 13 11:58:45 2018 +0530

--
 .../testsuite/complexType/TestComplexDataType.scala  | 15 +++
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala  |  6 ++
 2 files changed, 21 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/18381e3d/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
--
diff --git 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
index 276ed30..1068ba2 100644
--- 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
+++ 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
@@ -667,4 +667,19 @@ class TestComplexDataType extends QueryTest with 
BeforeAndAfterAll {
 checkAnswer(sql("select a.b from test where id=3 or 
a.c=3"),Seq(Row(5),Row(2)))
   }
 
+  /* test struct of date*/
+  test("test struct complex type with date") {
+var backupdateFormat = CarbonProperties.getInstance().getProperty(
+  CarbonCommonConstants.CARBON_DATE_FORMAT, 
CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
+CarbonProperties.getInstance()
+  .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
+CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
+sql("DROP TABLE IF EXISTS test")
+sql("create table test(a struct) stored by 'carbondata'")
+sql("insert into test select '1992-02-19' ")
+checkAnswer(sql("select * from test "), 
Row(Row(java.sql.Date.valueOf("1992-02-19"))))
+CarbonProperties.getInstance()
+  .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
+backupdateFormat)
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/18381e3d/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
--
diff --git 
a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
 
b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index 0a0b49f..44adff3 100644
--- 
a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ 
b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -1254,6 +1254,10 @@ abstract class CarbonDDLSqlParser extends 
AbstractCarbonSparkSQLParser {
 Field(field.column, Some("Timestamp"), field.name, Some(null),
   field.parent, field.storeType, field.schemaOrdinal,
   field.precision, field.scale, field.rawSchema, field.columnComment)
+  case "date" =>
+Field(field.column, Some("Date"), field.name, Some(null),
+  field.parent, field.storeType, field.schemaOrdinal,
+  field.precision, field.scale, field.rawSchema, field.columnComment)
   case "numeric" => Field(field.column, Some("Numeric"), field.name, 
Some(null), field.parent,
 field.storeType, field.schemaOrdinal, field.precision, field.scale, 
field.rawSchema,
 field.columnComment)
@@ -1326,6 +1330,8 @@ abstract class CarbonDDLSqlParser extends 
AbstractCarbonSparkSQLParser {
 Some(parentName + "." + field.name.getOrElse(None)), Some(null), 
parentName)
   case "Timestamp" => Field(parentName + "." + field.column, 
Some("Timestamp"),
 Some(parentName + "." + field.name.getOrElse(None)), Some(null), 
parentName)
+  case "Date" => Field(parentName + "." + field.column, Some("Date"),
+Some(parentName + "." + field.name.getOrElse(None)), Some(null), 
parentName)
  

[25/50] [abbrv] carbondata git commit: [CARBONDATA-2693][BloomDataMap]Fix bug for alter rename is renaming the existing table on which bloomfilter datamp exists

2018-07-17 Thread jackylk
[CARBONDATA-2693][BloomDataMap] Fix bug in alter table rename for an existing
table on which a bloomfilter datamap exists

Fix a bug where alter table rename does not correctly handle an existing table
on which a bloomfilter datamap exists

This closes #2452


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/cdee81d4
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/cdee81d4
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/cdee81d4

Branch: refs/heads/carbonstore
Commit: cdee81d4dbdd22ed728f2bd898ed888ed25a8774
Parents: 75a602d
Author: ndwangsen 
Authored: Thu Jul 5 16:51:39 2018 +0800
Committer: Jacky Li 
Committed: Sun Jul 15 21:44:53 2018 +0800

--
 .../core/datamap/DataMapStoreManager.java   |  51 -
 .../core/metadata/schema/table/CarbonTable.java |  24 +++
 .../table/DiskBasedDMSchemaStorageProvider.java |   3 +-
 .../bloom/BloomCoarseGrainDataMapFactory.java   |  21 +-
 .../TestRenameTableWithDataMap.scala| 192 +++
 .../preaaggregate/PreAggregateListeners.scala   |   5 -
 .../schema/CarbonAlterTableRenameCommand.scala  |  22 ++-
 7 files changed, 306 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/cdee81d4/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
 
b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
index 9a7d1c1..8ce302b 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
@@ -50,6 +50,9 @@ import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.core.util.CarbonSessionInfo;
 import org.apache.carbondata.core.util.ThreadLocalSessionInfo;
 
+import static 
org.apache.carbondata.core.metadata.schema.datamap.DataMapClassProvider.MV;
+import static 
org.apache.carbondata.core.metadata.schema.datamap.DataMapClassProvider.PREAGGREGATE;
+
 /**
  * It maintains all the DataMaps in it.
  */
@@ -125,9 +128,8 @@ public final class DataMapStoreManager {
 if (dataMapSchemas != null) {
   for (DataMapSchema dataMapSchema : dataMapSchemas) {
 RelationIdentifier identifier = dataMapSchema.getParentTables().get(0);
-if (dataMapSchema.isIndexDataMap() && identifier.getTableName()
-.equals(carbonTable.getTableName()) && identifier.getDatabaseName()
-.equals(carbonTable.getDatabaseName())) {
+if (dataMapSchema.isIndexDataMap() && identifier.getTableId()
+.equals(carbonTable.getTableId())) {
   dataMaps.add(getDataMap(carbonTable, dataMapSchema));
 }
   }
@@ -173,6 +175,49 @@ public final class DataMapStoreManager {
   }
 
   /**
+   * Update the datamap schema after table rename
+   * This should be invoked after changing table name
+   * @param dataMapSchemaList
+   * @param newTableName
+   */
+  public void updateDataMapSchema(List<DataMapSchema> dataMapSchemaList,
+  String newTableName) throws IOException {
+List<DataMapSchema> newDataMapSchemas = new ArrayList<>();
+for (DataMapSchema dataMapSchema : dataMapSchemaList) {
+  RelationIdentifier relationIdentifier = 
dataMapSchema.getRelationIdentifier();
+  String dataBaseName =  relationIdentifier.getDatabaseName();
+  String tableId = relationIdentifier.getTableId();
+  String providerName = dataMapSchema.getProviderName();
+  // for a preaggregate datamap, do not modify the schema
+  if (providerName.equalsIgnoreCase(PREAGGREGATE.toString())) {
+continue;
+  }
+  // for an mv datamap, do not modify the relationIdentifier
+  if (!providerName.equalsIgnoreCase(MV.toString())) {
+RelationIdentifier newRelationIdentifier = new 
RelationIdentifier(dataBaseName,
+newTableName, tableId);
+dataMapSchema.setRelationIdentifier(newRelationIdentifier);
+  }
+  List<RelationIdentifier> newParentTables = new ArrayList<>();
+  List<RelationIdentifier> parentTables = dataMapSchema.getParentTables();
+  for (RelationIdentifier identifier : parentTables) {
+RelationIdentifier newParentTableIdentifier = new RelationIdentifier(
+identifier.getDatabaseName(), newTableName, 
identifier.getTableId());
+newParentTables.add(newParentTableIdentifier);
+  }
+  dataMapSchema.setParentTables(newParentTables);
+  newDataMapSchemas.add(dataMapSchema);
+  // first drop the old schema
+  String dataMapName = dataMapSchema.getDataMapName();
+  dropDataMapSchema(dataMapName);
+}
+// save new datamap schema to storage
+for (DataMapSchema newDataMapSc

[44/50] [abbrv] carbondata git commit: [CARBONDATA-2705][CarbonStore] CarbonStore Java API and Implementation

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/85cdc404/store/horizon/src/main/java/org/apache/carbondata/horizon/rest/model/descriptor/SelectDescriptor.java
--
diff --git 
a/store/horizon/src/main/java/org/apache/carbondata/horizon/rest/model/descriptor/SelectDescriptor.java
 
b/store/horizon/src/main/java/org/apache/carbondata/horizon/rest/model/descriptor/SelectDescriptor.java
deleted file mode 100644
index e10dc84..000
--- 
a/store/horizon/src/main/java/org/apache/carbondata/horizon/rest/model/descriptor/SelectDescriptor.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.horizon.rest.model.descriptor;
-
-public class SelectDescriptor {
-
-  private String id;
-  private String databaseName;
-  private String tableName;
-  private String[] projection;
-  private String filter;
-  private int limit;
-
-  public SelectDescriptor() {
-  }
-
-  public SelectDescriptor(String databaseName, String tableName, String[] 
projection, String filter,
-  int limit) {
-this.databaseName = databaseName;
-this.tableName = tableName;
-this.projection = projection;
-this.filter = filter;
-this.limit = limit;
-  }
-
-  public String getDatabaseName() {
-return databaseName;
-  }
-
-  public void setDatabaseName(String databaseName) {
-this.databaseName = databaseName;
-  }
-
-  public String getTableName() {
-return tableName;
-  }
-
-  public void setTableName(String tableName) {
-this.tableName = tableName;
-  }
-
-  public String[] getProjection() {
-return projection;
-  }
-
-  public void setProjection(String[] projection) {
-this.projection = projection;
-  }
-
-  public String getFilter() {
-return filter;
-  }
-
-  public void setFilter(String filter) {
-this.filter = filter;
-  }
-
-  public int getLimit() {
-return limit;
-  }
-
-  public void setLimit(int limit) {
-this.limit = limit;
-  }
-
-  public String getId() {
-return id;
-  }
-
-  public void setId(String id) {
-this.id = id;
-  }
-}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/85cdc404/store/horizon/src/main/java/org/apache/carbondata/horizon/rest/model/descriptor/TableDescriptor.java
--
diff --git 
a/store/horizon/src/main/java/org/apache/carbondata/horizon/rest/model/descriptor/TableDescriptor.java
 
b/store/horizon/src/main/java/org/apache/carbondata/horizon/rest/model/descriptor/TableDescriptor.java
deleted file mode 100644
index db1ce98..000
--- 
a/store/horizon/src/main/java/org/apache/carbondata/horizon/rest/model/descriptor/TableDescriptor.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.horizon.rest.model.descriptor;
-
-import java.util.Map;
-
-import org.apache.carbondata.sdk.file.Schema;
-
-public class TableDescriptor {
-
-  private boolean ifNotExists;
-  private String database;
-  private String name;
-  private Schema schema;
-  private Map properties;
-  private String comment;
-
-  public TableDescriptor(boolean ifNotExists, String database, String name, 
Schema schema,
-  Map properties, String comment) {
-this.ifNotExists = ifNotExists;
-this.database = database;
-this.name = name;
-this.schema = schema;
-this.properties = properties;
-  

[36/50] [abbrv] carbondata git commit: [CARBONDATA-2609] Change RPC implementation to Hadoop RPC framework

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/d9b40bf9/store/core/src/main/scala/org/apache/carbondata/store/Master.scala
--
diff --git a/store/core/src/main/scala/org/apache/carbondata/store/Master.scala 
b/store/core/src/main/scala/org/apache/carbondata/store/Master.scala
new file mode 100644
index 000..2109251
--- /dev/null
+++ b/store/core/src/main/scala/org/apache/carbondata/store/Master.scala
@@ -0,0 +1,283 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.store
+
+import java.io.IOException
+import java.net.{BindException, InetAddress}
+import java.util.{List => JList, Map => JMap, Objects, Random, UUID}
+import java.util.concurrent.{ExecutionException, Future, TimeoutException, 
TimeUnit}
+import java.util.concurrent.atomic.AtomicBoolean
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable.ArrayBuffer
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.ipc.RPC
+import org.apache.hadoop.mapred.JobConf
+import org.apache.hadoop.mapreduce.Job
+
+import org.apache.carbondata.common.annotations.InterfaceAudience
+import org.apache.carbondata.common.logging.LogServiceFactory
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.datastore.block.Distributable
+import org.apache.carbondata.core.datastore.impl.FileFactory
+import org.apache.carbondata.core.datastore.row.CarbonRow
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable
+import org.apache.carbondata.core.scan.expression.Expression
+import org.apache.carbondata.core.util.CarbonProperties
+import org.apache.carbondata.hadoop.CarbonMultiBlockSplit
+import org.apache.carbondata.hadoop.api.CarbonInputFormat
+import org.apache.carbondata.hadoop.util.CarbonInputFormatUtil
+import org.apache.carbondata.processing.util.CarbonLoaderUtil
+import org.apache.carbondata.store.rpc.{RegistryService, ServiceFactory}
+import org.apache.carbondata.store.rpc.impl.{RegistryServiceImpl, Status}
+import org.apache.carbondata.store.rpc.model._
+
+/**
+ * Master of CarbonSearch.
+ * It provides a Registry service for worker to register.
+ * And it provides search API to fire RPC call to workers.
+ */
+@InterfaceAudience.Internal
+private[store] class Master {
+  private val LOG = 
LogServiceFactory.getLogService(this.getClass.getCanonicalName)
+
+  // worker host address map to EndpointRef
+
+  private val random = new Random
+
+  private var registryServer: RPC.Server = _
+
+  private val scheduler: Scheduler = new Scheduler
+
+  def buildServer(serverHost: String, serverPort: Int): RPC.Server = {
+val hadoopConf = FileFactory.getConfiguration
+val builder = new RPC.Builder(hadoopConf)
+builder
+  .setBindAddress(serverHost)
+  .setPort(serverPort)
+  .setProtocol(classOf[RegistryService])
+  .setInstance(new RegistryServiceImpl(this))
+  .build
+  }
+
+  /** start service and listen on port passed in constructor */
+  def startService(): Unit = {
+if (registryServer == null) {
+  LOG.info("Start search mode master thread")
+  val isStarted: AtomicBoolean = new AtomicBoolean(false)
+  new Thread(new Runnable {
+override def run(): Unit = {
+  val hostAddress = InetAddress.getLocalHost.getHostAddress
+  var port = CarbonProperties.getSearchMasterPort
+  var exception: BindException = null
+  var numTry = 100  // we will try to create the service at most 100 times
+  do {
+try {
+  LOG.info(s"building registry-service on $hostAddress:$port")
+  registryServer = buildServer(hostAddress, port)
+  numTry = 0
+} catch {
+  case e: BindException =>
+// port is occupied, increase the port number and try again
+exception = e
+LOG.error(s"start registry-service failed: ${e.getMessage}")
+port = port + 1
+numTry = numTry - 1
+}
+  } while (numTry > 0)
+  if (registryServer == null) {
+// we have tried many times, but stil

[20/50] [abbrv] carbondata git commit: [CARBONDATA-2722] [CARBONDATA-2721] JsonWriter issue fixes

2018-07-17 Thread jackylk
[CARBONDATA-2722] [CARBONDATA-2721] JsonWriter issue fixes

[CARBONDATA-2722][SDK] [JsonWriter] NPE when schema and data are not of same 
length or Data is null.

problem: Null data is not handled in the json object to carbon row conversion.

solution: add a null check when the object is fetched from the json map.
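
A minimal sketch of that null check (plain Java, not the actual JsonRowParser
code; the field names mirror the test data below):

import java.util.HashMap;
import java.util.Map;

public class JsonNullCheckExample {
  public static void main(String[] args) {
    Map<String, Object> jsonNodeMap = new HashMap<>();
    jsonNodeMap.put("stringField", null);
    jsonNodeMap.put("intField", 26);

    Object value = jsonNodeMap.get("stringField");
    // without this check, value.toString() would throw a NullPointerException
    String converted = (value == null) ? null : value.toString();
    System.out.println(converted); // prints "null"
  }
}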

[CARBONDATA-2721][SDK] [JsonWriter] Json writer is writing only first element 
of an array and discarding the rest of the elements.

problem: converting a json object to a carbon row array object is based on the
array children count instead of the array element count.
Hence, as an array will always have one child, only one element is filled.

solution: use array element count instead of array children count

This closes #2485


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/653efee0
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/653efee0
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/653efee0

Branch: refs/heads/carbonstore
Commit: 653efee0283701d928562379f91e5df8fec73c24
Parents: 637a974
Author: ajantha-bhat 
Authored: Mon Jul 9 18:45:30 2018 +0530
Committer: ravipesala 
Committed: Fri Jul 13 20:49:56 2018 +0530

--
 .../jsonFiles/data/PrimitiveTypeWithNull.json   |  4 +
 .../jsonFiles/data/StructOfAllTypes.json|  2 +-
 .../jsonFiles/data/allPrimitiveType.json|  2 +-
 ...tNonTransactionalCarbonTableJsonWriter.scala | 88 +---
 .../loading/parser/impl/JsonRowParser.java  | 51 
 5 files changed, 115 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/653efee0/integration/spark-common-test/src/test/resources/jsonFiles/data/PrimitiveTypeWithNull.json
--
diff --git 
a/integration/spark-common-test/src/test/resources/jsonFiles/data/PrimitiveTypeWithNull.json
 
b/integration/spark-common-test/src/test/resources/jsonFiles/data/PrimitiveTypeWithNull.json
new file mode 100644
index 000..a5cd3d7
--- /dev/null
+++ 
b/integration/spark-common-test/src/test/resources/jsonFiles/data/PrimitiveTypeWithNull.json
@@ -0,0 +1,4 @@
+{
+   "stringField": null,
+   "intField": 26
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/653efee0/integration/spark-common-test/src/test/resources/jsonFiles/data/StructOfAllTypes.json
--
diff --git 
a/integration/spark-common-test/src/test/resources/jsonFiles/data/StructOfAllTypes.json
 
b/integration/spark-common-test/src/test/resources/jsonFiles/data/StructOfAllTypes.json
index 9806325..3beab07 100644
--- 
a/integration/spark-common-test/src/test/resources/jsonFiles/data/StructOfAllTypes.json
+++ 
b/integration/spark-common-test/src/test/resources/jsonFiles/data/StructOfAllTypes.json
@@ -5,7 +5,7 @@
"longField": 12345678,
"doubleField": 123400.78,
"boolField": true,
-   "FloorNum": [ 1, 2],
+   "FloorNum": [1,2,3,4,5,6],
"FloorString": [ "abc", "def"],
"FloorLong": [ 1234567, 2345678],
"FloorDouble": [ 1.0, 2.0, 33.33],

http://git-wip-us.apache.org/repos/asf/carbondata/blob/653efee0/integration/spark-common-test/src/test/resources/jsonFiles/data/allPrimitiveType.json
--
diff --git 
a/integration/spark-common-test/src/test/resources/jsonFiles/data/allPrimitiveType.json
 
b/integration/spark-common-test/src/test/resources/jsonFiles/data/allPrimitiveType.json
index 86648c3..6d81ec7 100644
--- 
a/integration/spark-common-test/src/test/resources/jsonFiles/data/allPrimitiveType.json
+++ 
b/integration/spark-common-test/src/test/resources/jsonFiles/data/allPrimitiveType.json
@@ -1,5 +1,5 @@
 {
-   "stringField": "ajantha",
+   "stringField": "ajantha\"bhat\"",
"intField": 26,
"shortField": 26,
"longField": 1234567,

http://git-wip-us.apache.org/repos/asf/carbondata/blob/653efee0/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableJsonWriter.scala
--
diff --git 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableJsonWriter.scala
 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableJsonWriter.scala
index 299c966..ff5c062 100644
--- 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableJsonWriter.scala
+++ 
b/integration/spark-common-test/src/tes

[35/50] [abbrv] carbondata git commit: [CARBONDATA-2609] Change RPC implementation to Hadoop RPC framework

2018-07-17 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/d9b40bf9/store/search/src/main/scala/org/apache/spark/rpc/Scheduler.scala
--
diff --git a/store/search/src/main/scala/org/apache/spark/rpc/Scheduler.scala 
b/store/search/src/main/scala/org/apache/spark/rpc/Scheduler.scala
deleted file mode 100644
index 26208d0..000
--- a/store/search/src/main/scala/org/apache/spark/rpc/Scheduler.scala
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.rpc
-
-import java.io.IOException
-import java.util.concurrent.atomic.AtomicInteger
-
-import scala.collection.mutable
-import scala.concurrent.Future
-import scala.reflect.ClassTag
-import scala.util.Random
-
-import org.apache.carbondata.common.logging.LogServiceFactory
-import org.apache.carbondata.core.util.CarbonProperties
-
-/**
- * [[org.apache.spark.rpc.Master]] uses Scheduler to pick a Worker to send 
request
- */
-private[rpc] class Scheduler {
-  // mapping of worker IP address to worker instance
-  private val workers = mutable.Map[String, Schedulable]()
-  private val random = new Random()
-
-  private val LOG = 
LogServiceFactory.getLogService(this.getClass.getCanonicalName)
-
-  /**
-   * Pick a Worker according to the address and workload of the Worker
-   * Invoke the RPC and return Future result
-   */
-  def sendRequestAsync[T: ClassTag](
-  splitAddress: String,
-  request: Any): (Schedulable, Future[T]) = {
-require(splitAddress != null)
-if (workers.isEmpty) {
-  throw new IOException("No worker is available")
-}
-var worker = pickWorker(splitAddress)
-
-// check whether worker exceed max workload, if exceeded, pick next worker
-val maxWorkload = CarbonProperties.getMaxWorkloadForWorker(worker.cores)
-var numTry = workers.size
-do {
-  if (worker.workload.get() >= maxWorkload) {
-LOG.info(s"worker ${worker.address}:${worker.port} reach limit, 
re-select worker...")
-worker = pickNextWorker(worker)
-numTry = numTry - 1
-  } else {
-numTry = -1
-  }
-} while (numTry > 0)
-if (numTry == 0) {
-  // tried so many times and still not able to find Worker
-  throw new WorkerTooBusyException(
-s"All workers are busy, number of workers: ${workers.size}, workload 
limit: $maxWorkload")
-}
-LOG.info(s"sending search request to worker 
${worker.address}:${worker.port}")
-val future = worker.ref.ask(request)
-worker.workload.incrementAndGet()
-(worker, future)
-  }
-
-  private def pickWorker[T: ClassTag](splitAddress: String) = {
-try {
-  workers(splitAddress)
-} catch {
-  case e: NoSuchElementException =>
-// no local worker available, choose one worker randomly
-pickRandomWorker()
-}
-  }
-
-  /** pick a worker randomly */
-  private def pickRandomWorker() = {
-val index = random.nextInt(workers.size)
-workers.toSeq(index)._2
-  }
-
-  /** pick the next worker of the input worker in the [[Scheduler.workers]] */
-  private def pickNextWorker(worker: Schedulable) = {
-val index = workers.zipWithIndex.find { case ((address, w), index) =>
-  w == worker
-}.get._2
-if (index == workers.size - 1) {
-  workers.toSeq.head._2
-} else {
-  workers.toSeq(index + 1)._2
-}
-  }
-
-  /** A new searcher is trying to register, add it to the map and connect to 
this searcher */
-  def addWorker(address: String, schedulable: Schedulable): Unit = {
-require(schedulable != null)
-require(address.equals(schedulable.address))
-workers(address) = schedulable
-  }
-
-  def removeWorker(address: String): Unit = {
-workers.remove(address)
-  }
-
-  def getAllWorkers: Iterator[(String, Schedulable)] = workers.iterator
-}
-
-/**
- * Represent a Worker which [[Scheduler]] can send
- * Search request on it
- * @param id Worker ID, a UUID string
- * @param cores, number of cores in Worker
- * @param ref RPC endpoint reference
- * @param workload number of outstanding request sent to Worker
- */
-private[rpc] class Schedulable(
-val id: String,
-val address: String,
-val port: 

[26/50] [abbrv] carbondata git commit: [CARBONDATA-2704] Index file size in describe formatted command is not updated correctly with the segment file

2018-07-17 Thread jackylk
[CARBONDATA-2704] Index file size in describe formatted command is not updated 
correctly with the segment file

Problem:
The describe formatted command does not show the correct index files size after
the index files are merged.
Solution:
The segment file should be updated with the actual index files size of that
segment after the index files are merged.
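
A minimal sketch of the size computation (plain java.io, not CarbonUtil; the
file extensions below assume CarbonData's usual index file naming):

import java.io.File;

public class IndexSizeExample {
  // sum the sizes of all index files in a segment folder, including a
  // merge index file if one exists
  static long carbonIndexSize(File segmentDir) {
    long total = 0L;
    File[] files = segmentDir.listFiles();
    if (files == null) {
      return total;
    }
    for (File file : files) {
      String name = file.getName();
      if (name.endsWith(".carbonindex") || name.endsWith(".carbonindexmerge")) {
        total += file.length();
      }
    }
    return total;
  }

  public static void main(String[] args) {
    System.out.println(carbonIndexSize(new File(args[0])));
  }
}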

This closes #2462


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/eb604fdb
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/eb604fdb
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/eb604fdb

Branch: refs/heads/carbonstore
Commit: eb604fdb73983dfe9396d488a51907d90ed51d3e
Parents: cdee81d
Author: dhatchayani 
Authored: Mon Jul 9 11:19:51 2018 +0530
Committer: manishgupta88 
Committed: Sun Jul 15 20:34:32 2018 +0530

--
 .../core/metadata/SegmentFileStore.java |  4 +-
 .../apache/carbondata/core/util/CarbonUtil.java | 48 ---
 .../core/writer/CarbonIndexFileMergeWriter.java | 17 +++
 .../CarbonIndexFileMergeTestCase.scala  | 50 
 .../spark/rdd/CarbonDataRDDFactory.scala|  4 +-
 5 files changed, 96 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/eb604fdb/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java 
b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
index 3d3b245..ce79e65 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
@@ -281,7 +281,7 @@ public class SegmentFileStore {
* @throws IOException
*/
   public static boolean updateSegmentFile(String tablePath, String segmentId, 
String segmentFile,
-  String tableId) throws IOException {
+  String tableId, SegmentFileStore segmentFileStore) throws IOException {
 boolean status = false;
 String tableStatusPath = CarbonTablePath.getTableStatusFilePath(tablePath);
 if (!FileFactory.isFileExist(tableStatusPath)) {
@@ -308,6 +308,8 @@ public class SegmentFileStore {
   // if the segments is in the list of marked for delete then update 
the status.
   if (segmentId.equals(detail.getLoadName())) {
 detail.setSegmentFile(segmentFile);
+detail.setIndexSize(String.valueOf(CarbonUtil
+.getCarbonIndexSize(segmentFileStore, 
segmentFileStore.getLocationMap(;
 break;
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/eb604fdb/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java 
b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
index e87e52c..9796696 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
@@ -2647,23 +2647,7 @@ public final class CarbonUtil {
   fileStore.readIndexFiles();
  Map<String, List<String>> indexFilesMap = fileStore.getIndexFilesMap();
   // get the size of carbonindex file
-  for (Map.Entry<String, SegmentFileStore.FolderDetails> entry : locationMap.entrySet()) {
-SegmentFileStore.FolderDetails folderDetails = entry.getValue();
-Set carbonindexFiles = folderDetails.getFiles();
-String mergeFileName = folderDetails.getMergeFileName();
-if (null != mergeFileName) {
-  String mergeIndexPath =
-  fileStore.getTablePath() + entry.getKey() + 
CarbonCommonConstants.FILE_SEPARATOR
-  + mergeFileName;
-  carbonIndexSize += 
FileFactory.getCarbonFile(mergeIndexPath).getSize();
-}
-for (String indexFile : carbonindexFiles) {
-  String indexPath =
-  fileStore.getTablePath() + entry.getKey() + 
CarbonCommonConstants.FILE_SEPARATOR
-  + indexFile;
-  carbonIndexSize += FileFactory.getCarbonFile(indexPath).getSize();
-}
-  }
+  carbonIndexSize = getCarbonIndexSize(fileStore, locationMap);
  for (Map.Entry<String, List<String>> entry : indexFilesMap.entrySet()) {
 // get the size of carbondata files
 for (String blockFile : entry.getValue()) {
@@ -2676,6 +2660,36 @@ public final class CarbonUtil {
 return dataAndIndexSize;
   }
 
+  /**
+   * Calculate the index files size of the segment
+   *
+   * @param fileStore
+   * @param locationMap
+   * @return
+   */
+  public static long getCarbonIndexSize(SegmentFileStore fileStore,
+  Map loca

carbondata git commit: [CARBONDATA-2714][Merge Index] Fixed block dataMap cache refresh issue after creation of merge index file

2018-07-17 Thread ravipesala
Repository: carbondata
Updated Branches:
  refs/heads/master 4612e0031 -> a26be1b18


[CARBONDATA-2714][Merge Index] Fixed block dataMap cache refresh issue after 
creation of merge index file

Things handled as part of this PR

Fixed block dataMap cache refresh issue after creation of merge index file
Problem:
The block DataMap cache is not refreshed after creation of the merge index
file, due to which queries still look for the old index files and fail.

Analysis:
Merge index file creation involves modification of the segment file. If a query
is executed before the merge index file is created, the cache will be loaded.
Once the merge index file is created, the index file entries are removed from
the segment file and a merge index file entry is added. In this process the
cache is not refreshed, and the tableIdentifiers created still have the
mergeIndexFileName as null.

Fix:
After updating the table status file, clear the dataMap cache for all
segmentIds on which the dataMap is being created
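
A minimal sketch of the invalidation pattern described in the fix (generic
Java, not DataMapStoreManager itself):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class SegmentCacheExample {
  // cached per-segment metadata, keyed by segment id
  private final Map<String, Object> cacheBySegmentId = new ConcurrentHashMap<>();

  // called after the segment file is rewritten (for example after merge index
  // creation) so the next query reloads fresh entries instead of using stale
  // ones that still point at the pre-merge index files
  void onSegmentFileUpdated(String segmentId) {
    cacheBySegmentId.remove(segmentId);
  }
}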

This closes #2515


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/a26be1b1
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/a26be1b1
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/a26be1b1

Branch: refs/heads/master
Commit: a26be1b181f952d860050e65b6cf1ad85d0bfea5
Parents: 4612e00
Author: manishgupta88 
Authored: Tue Jul 17 14:24:54 2018 +0530
Committer: ravipesala 
Committed: Wed Jul 18 10:30:33 2018 +0530

--
 .../core/metadata/SegmentFileStore.java | 29 +++-
 .../core/writer/CarbonIndexFileMergeWriter.java |  2 +-
 .../CarbonIndexFileMergeTestCase.scala  | 47 +++-
 .../spark/rdd/CarbonDataRDDFactory.scala|  2 +-
 .../sql/events/MergeIndexEventListener.scala| 17 ++-
 5 files changed, 91 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/a26be1b1/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java 
b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
index 3d08a2d..9681e37 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
@@ -36,7 +36,9 @@ import java.util.Set;
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datamap.DataMapStoreManager;
 import org.apache.carbondata.core.datamap.Segment;
+import org.apache.carbondata.core.datamap.TableDataMap;
 import org.apache.carbondata.core.datastore.filesystem.CarbonFile;
 import org.apache.carbondata.core.datastore.filesystem.CarbonFileFilter;
 import org.apache.carbondata.core.datastore.impl.FileFactory;
@@ -280,9 +282,10 @@ public class SegmentFileStore {
* @return boolean which determines whether status update is done or not.
* @throws IOException
*/
-  public static boolean updateSegmentFile(String tablePath, String segmentId, 
String segmentFile,
-  String tableId, SegmentFileStore segmentFileStore) throws IOException {
+  public static boolean updateSegmentFile(CarbonTable carbonTable, String 
segmentId,
+  String segmentFile, String tableId, SegmentFileStore segmentFileStore) 
throws IOException {
 boolean status = false;
+String tablePath = carbonTable.getTablePath();
 String tableStatusPath = CarbonTablePath.getTableStatusFilePath(tablePath);
 if (!FileFactory.isFileExist(tableStatusPath)) {
   return status;
@@ -316,6 +319,8 @@ public class SegmentFileStore {
 
 SegmentStatusManager
 .writeLoadDetailsIntoFile(tableStatusPath, 
listOfLoadFolderDetailsArray);
+// clear dataMap cache for the segmentId for which the table status 
file is getting updated
+clearBlockDataMapCache(carbonTable, segmentId);
 status = true;
   } else {
 LOGGER.error(
@@ -333,6 +338,26 @@ public class SegmentFileStore {
 return status;
   }
 
+  /**
+   * After updating the table status file, clear the dataMap cache for all
+   * segmentIds on which a dataMap is being created, because flows like merge
+   * index file creation involve modification of the segment file; once the
+   * segment file is modified, the cache for that segment needs to be cleared,
+   * otherwise the old, stale cache will be used
+   *
+   * @param carbonTable
+   * @param segmentId
+   */
+  public static void clearBlockDataMapCache(CarbonTable carbonTable, String 
segmentId) {
+TableDataMap defaultDataMap 

carbondata git commit: [CARBONDATA-2738]Block Preaggregate, Compaction, Dictionary Exclude/Include for child columns for Complex datatype

2018-07-17 Thread kunalkapoor
Repository: carbondata
Updated Branches:
  refs/heads/master a26be1b18 -> 0c9a60e01


[CARBONDATA-2738]Block Preaggregate, Compaction, Dictionary Exclude/Include
for child columns for Complex datatype

Block Preaggregate, Compaction, and Dictionary Exclude/Include for child
columns of Complex datatypes, and block Update on Complex datatype columns

This closes #2501


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/0c9a60e0
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/0c9a60e0
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/0c9a60e0

Branch: refs/heads/master
Commit: 0c9a60e017211a27033eb770c416cf3ac303d9d4
Parents: a26be1b
Author: Indhumathi27 
Authored: Thu Jul 12 21:11:11 2018 +0530
Committer: kunal642 
Committed: Wed Jul 18 10:45:52 2018 +0530

--
 .../complexType/TestComplexDataType.scala   | 176 ++-
 ...ataWithMalformedCarbonCommandException.scala |  12 +-
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala |  14 +-
 .../sql/events/MergeIndexEventListener.scala|   6 +-
 .../CarbonAlterTableCompactionCommand.scala |   6 +
 .../CarbonProjectForUpdateCommand.scala |  10 +-
 .../preaaggregate/PreAggregateUtil.scala|  10 +-
 .../strategy/StreamingTableStrategy.scala   |   2 +-
 .../spark/sql/optimizer/CarbonIUDRule.scala |   9 +-
 9 files changed, 224 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/0c9a60e0/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
--
diff --git 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
index 45a9c7a..6470648 100644
--- 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
+++ 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
@@ -4,10 +4,11 @@ import java.sql.Timestamp
 
 import scala.collection.mutable
 
-import org.apache.spark.sql.Row
+import org.apache.spark.sql.{AnalysisException, Row}
 import org.apache.spark.sql.test.util.QueryTest
 import org.scalatest.BeforeAndAfterAll
 
+import 
org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.util.CarbonProperties
 
@@ -18,6 +19,16 @@ import org.apache.carbondata.core.util.CarbonProperties
 
 class TestComplexDataType extends QueryTest with BeforeAndAfterAll {
 
+  override def beforeAll(): Unit = {
+sql("DROP TABLE IF EXISTS table1")
+sql("DROP TABLE IF EXISTS test")
+  }
+
+  override def afterAll(): Unit = {
+sql("DROP TABLE IF EXISTS table1")
+sql("DROP TABLE IF EXISTS test")
+  }
+
   test("test Projection PushDown for Struct - Integer type") {
 sql("DROP TABLE IF EXISTS table1")
 sql(
@@ -712,5 +723,166 @@ class TestComplexDataType extends QueryTest with 
BeforeAndAfterAll {
 checkAnswer(sql("select 
a.b,id,a.c,person.detail[0],d.e,d.f,person.detail[1],id from 
test"),Seq(Row(2,1,3,5,3,2,6,1)))
 checkAnswer(sql("select 
a.b,id,a.c,person.detail[0],d.e,d.f,person.detail[1],id,1,a.b from 
test"),Seq(Row(2,1,3,5,3,2,6,1,1,2)))
   }
-  
+
+  test("test block Update for complex datatype") {
+sql("DROP TABLE IF EXISTS test")
+sql("create table test(id int,a struct,d array) stored 
by 'carbondata'")
+sql("insert into test values(1,'2$3',4)")
+val structException = intercept[UnsupportedOperationException](
+sql("update test set(a.b)=(4) where id=1").show(false))
+assertResult("Unsupported operation on Complex data 
type")(structException.getMessage)
+val arrayException = intercept[UnsupportedOperationException](
+sql("update test set(a)=(4) where id=1").show(false))
+assertResult("Unsupported operation on Complex data 
type")(arrayException.getMessage)
+  }
+
+  test("test block partition column") {
+sql("DROP TABLE IF EXISTS test")
+val arrayException = intercept[AnalysisException](
+sql("""
+  | CREATE TABLE IF NOT EXISTS test
+  | (
+  | id Int,
+  | vin string,
+  | logdate Timestamp,
+  | phonenumber Long,
+  | country array<string>,
+  | salary Int
+  | )
+  | PARTITIONED BY (area array<string>)
+  | STORED BY 'carbondata'
+""".stripMargin))
+assertResult("Cannot use array for partition 

carbondata git commit: [CARBONDATA-2738]Update documentation for Complex datatype

2018-07-17 Thread kunalkapoor
Repository: carbondata
Updated Branches:
  refs/heads/master 0c9a60e01 -> 6ca03f6b7


[CARBONDATA-2738]Update documentation for Complex datatype

Update documentation for Complex datatype

This closes #2502


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/6ca03f6b
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/6ca03f6b
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/6ca03f6b

Branch: refs/heads/master
Commit: 6ca03f6b7a573638fb8b5ff2da07b624843c3af5
Parents: 0c9a60e
Author: Indhumathi27 
Authored: Fri Jul 13 11:56:25 2018 +0530
Committer: kunal642 
Committed: Wed Jul 18 11:53:35 2018 +0530

--
 docs/data-management-on-carbondata.md  | 7 +++
 docs/datamap/preaggregate-datamap-guide.md | 1 +
 2 files changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/6ca03f6b/docs/data-management-on-carbondata.md
--
diff --git a/docs/data-management-on-carbondata.md 
b/docs/data-management-on-carbondata.md
index 7e171aa..750dc20 100644
--- a/docs/data-management-on-carbondata.md
+++ b/docs/data-management-on-carbondata.md
@@ -52,6 +52,7 @@ This tutorial is going to introduce all commands and data 
operations on CarbonDa
  ```
  TBLPROPERTIES ('DICTIONARY_INCLUDE'='column1, column2')
 ```
+NOTE: Dictionary Include/Exclude for complex child columns is not 
supported.
 
- **Inverted Index Configuration**
 
@@ -75,6 +76,7 @@ This tutorial is going to introduce all commands and data 
operations on CarbonDa
  OR
  TBLPROPERTIES ('SORT_COLUMNS'='')
  ```
+ NOTE: Sort_Columns for Complex datatype columns is not supported.
 
- **Sort Scope Configuration**

@@ -290,6 +292,7 @@ This tutorial is going to introduce all commands and data 
operations on CarbonDa
  ```
  ALTER TABLE carbon ADD COLUMNS (a1 INT, b1 STRING) 
TBLPROPERTIES('DEFAULT.VALUE.a1'='10')
  ```
+  NOTE: Adding Complex datatype columns is not supported.
 
- **DROP COLUMNS**

@@ -306,6 +309,7 @@ This tutorial is going to introduce all commands and data 
operations on CarbonDa
  
  ALTER TABLE carbon DROP COLUMNS (c1,d1)
  ```
 NOTE: Dropping a Complex child column is not supported.
 
- **CHANGE DATA TYPE**

@@ -656,6 +660,7 @@ This tutorial is going to introduce all commands and data 
operations on CarbonDa
   ```
   UPDATE t3 SET (t3_country, t3_salary) = (SELECT t5_country, t5_salary FROM 
t5 FULL JOIN t3 u WHERE u.t3_id = t5_id and t5_id=6) WHERE t3_id >6
   ```
+   NOTE: Updating Complex datatype columns is not supported.
 
 ### DELETE
 
@@ -725,6 +730,7 @@ This tutorial is going to introduce all commands and data 
operations on CarbonDa
   ```
   ALTER TABLE table_name COMPACT 'CUSTOM' WHERE SEGMENT.ID IN (2,3,4)
   ```
+  NOTE: Compaction is unsupported for tables containing Complex columns.
   
 
   - **CLEAN SEGMENTS AFTER Compaction**
@@ -765,6 +771,7 @@ This tutorial is going to introduce all commands and data 
operations on CarbonDa
   PARTITIONED BY (productCategory STRING, productBatch STRING)
   STORED BY 'carbondata'
   ```
+   NOTE: Hive partition is not supported on complex datatype columns.

  Load Data Using Static Partition 
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6ca03f6b/docs/datamap/preaggregate-datamap-guide.md
--
diff --git a/docs/datamap/preaggregate-datamap-guide.md 
b/docs/datamap/preaggregate-datamap-guide.md
index ce7cbcc..d85f527 100644
--- a/docs/datamap/preaggregate-datamap-guide.md
+++ b/docs/datamap/preaggregate-datamap-guide.md
@@ -244,6 +244,7 @@ is not supported:
 change datatype command, CarbonData will check whether it will impact the 
pre-aggregate table, if 
  not, the operation is allowed, otherwise operation will be rejected by 
throwing exception.   
 3. Partition management command: `ALTER TABLE ADD/DROP PARTITION`
+4. Complex datatypes for preaggregate are not supported.

 However, there is still a way to support these operations on the main table; in 
the current CarbonData 
 release, the user can do the following:



carbondata git commit: [CARBONDATA-2656] Presto vector stream readers performance Enhancement

2018-07-17 Thread chenliang613
Repository: carbondata
Updated Branches:
  refs/heads/master 6ca03f6b7 -> a4c2ef5f8


[CARBONDATA-2656] Presto vector stream readers performance Enhancement

Eliminate the extra iteration over the CarbonColumnVectorImpl vector array by
extending it in the StreamReaders, which fill the carbon-core vector data
(value by value) directly into the Presto block; when the block builder is
called, the block is returned to Presto.
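
The idea, reduced to a toy sketch (the builder type below is invented, not the
Presto SPI): append each decoded value straight into the output builder rather
than buffering it in an intermediate array and copying it in a second pass.

import java.util.ArrayList;
import java.util.List;

public class DirectFillExample {
  // stand-in for a Presto block builder
  static class LongColumnBuilder {
    private final List<Long> values = new ArrayList<>();
    void writeLong(long v) { values.add(v); }
    List<Long> build() { return values; }
  }

  public static void main(String[] args) {
    long[] decoded = {1L, 2L, 3L};   // values produced by the column scan
    LongColumnBuilder builder = new LongColumnBuilder();
    for (long v : decoded) {
      builder.writeLong(v);          // single pass, no intermediate copy
    }
    System.out.println(builder.build());
  }
}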

This closes #2412


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/a4c2ef5f
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/a4c2ef5f
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/a4c2ef5f

Branch: refs/heads/master
Commit: a4c2ef5f833373b4d3bfb6dc4a9fb1c166ae0ed4
Parents: 6ca03f6
Author: sv71294 
Authored: Tue Jun 5 17:44:58 2018 +0530
Committer: chenliang613 
Committed: Wed Jul 18 14:36:09 2018 +0800

--
 .../carbondata/presto/CarbonVectorBatch.java|  89 +---
 .../carbondata/presto/CarbondataPageSource.java |  95 ++--
 .../presto/CarbondataPageSourceProvider.java|  18 +-
 .../PrestoCarbonVectorizedRecordReader.java |  25 ++-
 .../presto/readers/AbstractStreamReader.java|  66 --
 .../presto/readers/BooleanStreamReader.java |  93 +++-
 .../readers/DecimalSliceStreamReader.java   | 219 +--
 .../presto/readers/DoubleStreamReader.java  |  94 +++-
 .../presto/readers/IntegerStreamReader.java |  90 +++-
 .../presto/readers/LongStreamReader.java|  87 +++-
 .../presto/readers/ObjectStreamReader.java  |  56 ++---
 .../readers/PrestoVectorBlockBuilder.java   |  28 +++
 .../presto/readers/ShortStreamReader.java   |  87 +++-
 .../presto/readers/SliceStreamReader.java   | 105 -
 .../carbondata/presto/readers/StreamReader.java |  43 
 .../presto/readers/StreamReaders.java   |  98 -
 .../presto/readers/TimestampStreamReader.java   |  75 ---
 .../CarbonDictionaryDecodeReadSupport.scala |   6 +-
 18 files changed, 461 insertions(+), 913 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/a4c2ef5f/integration/presto/src/main/java/org/apache/carbondata/presto/CarbonVectorBatch.java
--
diff --git 
a/integration/presto/src/main/java/org/apache/carbondata/presto/CarbonVectorBatch.java
 
b/integration/presto/src/main/java/org/apache/carbondata/presto/CarbonVectorBatch.java
index b6caaa3..6a4cc0d 100644
--- 
a/integration/presto/src/main/java/org/apache/carbondata/presto/CarbonVectorBatch.java
+++ 
b/integration/presto/src/main/java/org/apache/carbondata/presto/CarbonVectorBatch.java
@@ -20,50 +20,81 @@ import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Set;
 
+import org.apache.carbondata.core.cache.dictionary.Dictionary;
+import org.apache.carbondata.core.metadata.datatype.DataType;
+import org.apache.carbondata.core.metadata.datatype.DataTypes;
+import org.apache.carbondata.core.metadata.datatype.DecimalType;
 import org.apache.carbondata.core.metadata.datatype.StructField;
 import 
org.apache.carbondata.core.scan.result.vector.impl.CarbonColumnVectorImpl;
+import org.apache.carbondata.presto.readers.BooleanStreamReader;
+import org.apache.carbondata.presto.readers.DecimalSliceStreamReader;
+import org.apache.carbondata.presto.readers.DoubleStreamReader;
+import org.apache.carbondata.presto.readers.IntegerStreamReader;
+import org.apache.carbondata.presto.readers.LongStreamReader;
+import org.apache.carbondata.presto.readers.ObjectStreamReader;
+import org.apache.carbondata.presto.readers.ShortStreamReader;
+import org.apache.carbondata.presto.readers.SliceStreamReader;
+import org.apache.carbondata.presto.readers.TimestampStreamReader;
+
+import com.facebook.presto.spi.block.SliceArrayBlock;
 
 public class CarbonVectorBatch {
 
-  private static final int DEFAULT_BATCH_SIZE =  4 * 1024;
+  private static final int DEFAULT_BATCH_SIZE = 4 * 1024;
 
-  private final StructField[] schema;
   private final int capacity;
-  private int numRows;
   private final CarbonColumnVectorImpl[] columns;
-
   // True if the row is filtered.
   private final boolean[] filteredRows;
-
   // Column indices that cannot have null values.
   private final Set<Integer> nullFilteredColumns;
-
+  private int numRows;
   // Total number of rows that have been filtered.
   private int numRowsFiltered = 0;
 
-
-  private CarbonVectorBatch(StructField[] schema, int maxRows) {
-this.schema = schema;
+  private CarbonVectorBatch(StructField[] schema, 
CarbonDictionaryDecodeReadSupport readSupport,
+  int maxRows) {
 this.capacity = maxRows;
 this.columns = new CarbonColumnVectorImpl[schema.length];
 this.nullFilteredColumns = ne