carbondata git commit: [CARBONDATA-2217]fix drop partition for non existing partition and set FactTimeStamp during compaction for partition table
Repository: carbondata Updated Branches: refs/heads/master f74d1efac -> aa910ddb2 [CARBONDATA-2217]fix drop partition for non existing partition and set FactTimeStamp during compaction for partition table Problem: 1) when drop partition is fired for a column which does not exist, it throws a null pointer exception 2) select * is not working when the clean files operation is fired after a second level of compaction; it sometimes throws an exception 3) a new segment is getting created for all the segments if any one partition is dropped Solution: 1) have a null check if the column does not exist 2) give a different timestamp for fact files during compaction to avoid deletion of files during clean files 3) a new segment file should be written only for the partition which is dropped, and not for all the partitions 4) This PR also contains a fix for creating a pre-aggregate table with the same name as one already created in another database This closes #2017 Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/aa910ddb Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/aa910ddb Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/aa910ddb Branch: refs/heads/master Commit: aa910ddb2460d7fa18ff594859391eb888b585b9 Parents: f74d1ef Author: akashrn5 Authored: Wed Feb 28 17:28:43 2018 +0530 Committer: ravipesala Committed: Fri Mar 2 19:09:36 2018 +0530 -- .../core/metadata/SegmentFileStore.java | 24 + .../preaggregate/TestPreAggCreateCommand.scala | 27 ++-- .../StandardPartitionGlobalSortTestCase.scala | 13 ++ .../spark/rdd/CarbonDataRDDFactory.scala| 3 +++ .../spark/rdd/CarbonTableCompactor.scala| 3 +++ .../datamap/CarbonCreateDataMapCommand.scala| 2 +- .../management/CarbonLoadDataCommand.scala | 21 ++- 7 files changed, 73 insertions(+), 20 deletions(-) -- http://git-wip-us.apache.org/repos/asf/carbondata/blob/aa910ddb/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java -- 
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java index b5f5a25..1902ab9 100644 --- a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java +++ b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java @@ -371,19 +371,23 @@ public class SegmentFileStore { } Path path = new Path(location); // Update the status to delete if path equals - for (PartitionSpec spec : partitionSpecs) { -if (path.equals(spec.getLocation())) { - entry.getValue().setStatus(SegmentStatus.MARKED_FOR_DELETE.getMessage()); - updateSegment = true; - break; + if (null != partitionSpecs) { +for (PartitionSpec spec : partitionSpecs) { + if (path.equals(spec.getLocation())) { + entry.getValue().setStatus(SegmentStatus.MARKED_FOR_DELETE.getMessage()); +updateSegment = true; +break; + } } } } -String writePath = CarbonTablePath.getSegmentFilesLocation(tablePath); -writePath = -writePath + CarbonCommonConstants.FILE_SEPARATOR + segment.getSegmentNo() + "_" + uniqueId -+ CarbonTablePath.SEGMENT_EXT; -writeSegmentFile(segmentFile, writePath); +if (updateSegment) { + String writePath = CarbonTablePath.getSegmentFilesLocation(tablePath); + writePath = + writePath + CarbonCommonConstants.FILE_SEPARATOR + segment.getSegmentNo() + "_" + uniqueId + + CarbonTablePath.SEGMENT_EXT; + writeSegmentFile(segmentFile, writePath); +} // Check whether we can completly remove the segment. 
boolean deleteSegment = true; for (Map.Entry entry : segmentFile.getLocationMap().entrySet()) { http://git-wip-us.apache.org/repos/asf/carbondata/blob/aa910ddb/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala -- diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala index 1e59a80..8b71a31 100644 --- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala +++ b/integration/spark-common-test/src/test/scala/org/apache/carbonda
carbondata git commit: [CARBONDATA-2217]fix drop partition for non existing partition and set FactTimeStamp during compaction for partition table
Repository: carbondata Updated Branches: refs/heads/branch-1.3 b360f9084 -> b9a6b6865 [CARBONDATA-2217]fix drop partition for non existing partition and set FactTimeStamp during compaction for partition table Problem: 1) when drop partition is fired for a column which does not exist, it throws a null pointer exception 2) select * is not working when the clean files operation is fired after a second level of compaction; it sometimes throws an exception 3) a new segment is getting created for all the segments if any one partition is dropped Solution: 1) have a null check if the column does not exist 2) give a different timestamp for fact files during compaction to avoid deletion of files during clean files 3) a new segment file should be written only for the partition which is dropped, and not for all the partitions 4) This PR also contains a fix for creating a pre-aggregate table with the same name as one already created in another database This closes #2017 Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/b9a6b686 Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/b9a6b686 Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/b9a6b686 Branch: refs/heads/branch-1.3 Commit: b9a6b68658fd0f7f408102374b3ef31dcfe44cea Parents: b360f90 Author: akashrn5 Authored: Wed Feb 28 17:28:43 2018 +0530 Committer: ravipesala Committed: Fri Mar 2 19:10:24 2018 +0530 -- .../core/metadata/SegmentFileStore.java | 24 + .../preaggregate/TestPreAggCreateCommand.scala | 27 ++-- .../StandardPartitionGlobalSortTestCase.scala | 13 ++ .../spark/rdd/CarbonDataRDDFactory.scala| 3 +++ .../spark/rdd/CarbonTableCompactor.scala| 3 +++ .../datamap/CarbonCreateDataMapCommand.scala| 2 +- .../management/CarbonLoadDataCommand.scala | 21 ++- 7 files changed, 73 insertions(+), 20 deletions(-) -- 
http://git-wip-us.apache.org/repos/asf/carbondata/blob/b9a6b686/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java -- diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java index b5f5a25..1902ab9 100644 --- a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java +++ b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java @@ -371,19 +371,23 @@ public class SegmentFileStore { } Path path = new Path(location); // Update the status to delete if path equals - for (PartitionSpec spec : partitionSpecs) { -if (path.equals(spec.getLocation())) { - entry.getValue().setStatus(SegmentStatus.MARKED_FOR_DELETE.getMessage()); - updateSegment = true; - break; + if (null != partitionSpecs) { +for (PartitionSpec spec : partitionSpecs) { + if (path.equals(spec.getLocation())) { + entry.getValue().setStatus(SegmentStatus.MARKED_FOR_DELETE.getMessage()); +updateSegment = true; +break; + } } } } -String writePath = CarbonTablePath.getSegmentFilesLocation(tablePath); -writePath = -writePath + CarbonCommonConstants.FILE_SEPARATOR + segment.getSegmentNo() + "_" + uniqueId -+ CarbonTablePath.SEGMENT_EXT; -writeSegmentFile(segmentFile, writePath); +if (updateSegment) { + String writePath = CarbonTablePath.getSegmentFilesLocation(tablePath); + writePath = + writePath + CarbonCommonConstants.FILE_SEPARATOR + segment.getSegmentNo() + "_" + uniqueId + + CarbonTablePath.SEGMENT_EXT; + writeSegmentFile(segmentFile, writePath); +} // Check whether we can completly remove the segment. 
boolean deleteSegment = true; for (Map.Entry entry : segmentFile.getLocationMap().entrySet()) { http://git-wip-us.apache.org/repos/asf/carbondata/blob/b9a6b686/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala -- diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala index 1e59a80..8b71a31 100644 --- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala +++ b/integration/spark-common-test/src/test/scala/org/apache/