[CARBONDATA-2734] Update is not working on the table which has a segment file present

This fixes IUD (insert/update/delete) operations on flat-folder tables, where carbon files sit directly under the table path.

This closes #2503


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/78a7371c
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/78a7371c
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/78a7371c

Branch: refs/heads/branch-1.4
Commit: 78a7371c4d55c69474c4bafda2c99c0536431dc4
Parents: c144e3d
Author: ravipesala <ravi.pes...@gmail.com>
Authored: Fri Jul 13 13:15:15 2018 +0530
Committer: ravipesala <ravi.pes...@gmail.com>
Committed: Tue Jul 31 00:10:41 2018 +0530

----------------------------------------------------------------------
 .../core/mutate/CarbonUpdateUtil.java           |  4 ++--
 .../executor/impl/AbstractQueryExecutor.java    |  5 +++--
 .../SegmentUpdateStatusManager.java             | 12 +++++------
 .../apache/carbondata/core/util/CarbonUtil.java | 19 ++++++++++++++---
 .../FlatFolderTableLoadingTestCase.scala        | 21 +++++++++++++++++++
 .../iud/DeleteCarbonTableTestCase.scala         | 22 +++++++++++---------
 .../iud/UpdateCarbonTableTestCase.scala         | 13 ++++++++++++
 .../command/mutation/DeleteExecution.scala      | 14 +++++++------
 8 files changed, 81 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/78a7371c/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java b/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java
index d0a204c..4a8d2e8 100644
--- a/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java
@@ -81,10 +81,10 @@ public class CarbonUpdateUtil {
   /**
    * Returns block path from tuple id
    */
-  public static String getTableBlockPath(String tid, String tablePath, boolean isPartitionTable) {
+  public static String getTableBlockPath(String tid, String tablePath, boolean isStandardTable) {
     String partField = getRequiredFieldFromTID(tid, TupleIdEnum.PART_ID);
    // If it has segment file then partfield can be appended directly to table path
-    if (isPartitionTable) {
+    if (!isStandardTable) {
      return tablePath + CarbonCommonConstants.FILE_SEPARATOR + partField.replace("#", "/");
     }
     String part = CarbonTablePath.addPartPrefix(partField);
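
To make the dispatch concrete, here is a minimal standalone sketch of the two layouts this branch distinguishes (helper names and literal separators are illustrative, not CarbonData API; the Fact/Part0/Segment_0 shape and the '#'-to-'/' expansion are both visible in the test assertions further down):

// Sketch only: mirrors the branch in getTableBlockPath above.
public class BlockPathSketch {

  // Standard table: blocks live under <tablePath>/Fact/Part<partField>/Segment_<segmentId>.
  static String standardBlockDir(String tablePath, String partField, String segmentId) {
    return tablePath + "/Fact/Part" + partField + "/Segment_" + segmentId;
  }

  // Flat-folder or partition table: the PART_ID field of the tuple id already encodes
  // the folder relative to the table path, with '#' standing in for '/'.
  static String nonStandardBlockDir(String tablePath, String partField) {
    return tablePath + "/" + partField.replace("#", "/");
  }

  public static void main(String[] args) {
    // Prints .../dest_tuple/Fact/Part0/Segment_0, as asserted in DeleteCarbonTableTestCase.
    System.out.println(standardBlockDir("/store/iud_db.db/dest_tuple", "0", "0"));
    // Prints .../dest_tuple_part/c3=aa for a hypothetical partitioned tuple id.
    System.out.println(nonStandardBlockDir("/store/iud_db.db/dest_tuple_part", "c3=aa"));
  }
}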

http://git-wip-us.apache.org/repos/asf/carbondata/blob/78a7371c/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
index 180ca4d..910ae3e 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
@@ -344,11 +344,12 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
             queryModel.getProjectionDimensions(), tableBlockDimensions,
            segmentProperties.getComplexDimensions(), queryModel.getProjectionMeasures().size(),
             queryModel.getTable().getTableInfo().isTransactionalTable());
+    boolean isStandardTable = CarbonUtil.isStandardCarbonTable(queryModel.getTable());
     String blockId = CarbonUtil
        .getBlockId(queryModel.getAbsoluteTableIdentifier(), filePath, segment.getSegmentNo(),
             queryModel.getTable().getTableInfo().isTransactionalTable(),
-            queryModel.getTable().isHivePartitionTable());
-    if (queryModel.getTable().isHivePartitionTable()) {
+            isStandardTable);
+    if (!isStandardTable) {
      blockExecutionInfo.setBlockId(CarbonTablePath.getShortBlockIdForPartitionTable(blockId));
     } else {
       blockExecutionInfo.setBlockId(CarbonTablePath.getShortBlockId(blockId));

http://git-wip-us.apache.org/repos/asf/carbondata/blob/78a7371c/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
index 55381fb..0c2098a 100644
--- a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
@@ -69,21 +69,22 @@ public class SegmentUpdateStatusManager {
   private LoadMetadataDetails[] segmentDetails;
   private SegmentUpdateDetails[] updateDetails;
   private Map<String, SegmentUpdateDetails> blockAndDetailsMap;
-  private boolean isPartitionTable;
+  private boolean isStandardTable;
 
   public SegmentUpdateStatusManager(CarbonTable table,
       LoadMetadataDetails[] segmentDetails) {
     this.identifier = table.getAbsoluteTableIdentifier();
+    this.isStandardTable = CarbonUtil.isStandardCarbonTable(table);
    // current it is used only for read function scenarios, as file update always requires to work
     // on latest file status.
     this.segmentDetails = segmentDetails;
     updateDetails = readLoadMetadata();
-    isPartitionTable = table.isHivePartitionTable();
     populateMap();
   }
 
   public SegmentUpdateStatusManager(CarbonTable table) {
     this.identifier = table.getAbsoluteTableIdentifier();
+    this.isStandardTable = CarbonUtil.isStandardCarbonTable(table);
    // current it is used only for read function scenarios, as file update always requires to work
     // on latest file status.
     if (!table.getTableInfo().isTransactionalTable()) {
@@ -98,7 +99,6 @@ public class SegmentUpdateStatusManager {
     } else {
       updateDetails = new SegmentUpdateDetails[0];
     }
-    isPartitionTable = table.isHivePartitionTable();
     populateMap();
   }
 
@@ -250,9 +250,9 @@ public class SegmentUpdateStatusManager {
    */
  public String[] getDeleteDeltaFilePath(String blockFilePath, String segmentId) throws Exception {
    String blockId =
-        CarbonUtil.getBlockId(identifier, blockFilePath, segmentId, true, isPartitionTable);
+        CarbonUtil.getBlockId(identifier, blockFilePath, segmentId, true, isStandardTable);
     String tupleId;
-    if (isPartitionTable) {
+    if (!isStandardTable) {
       tupleId = CarbonTablePath.getShortBlockIdForPartitionTable(blockId);
     } else {
       tupleId = CarbonTablePath.getShortBlockId(blockId);
@@ -272,7 +272,7 @@ public class SegmentUpdateStatusManager {
             + CarbonCommonConstants.FACT_FILE_EXT);
 
     String blockPath;
-    if (isPartitionTable) {
+    if (!isStandardTable) {
      blockPath = identifier.getTablePath() + CarbonCommonConstants.FILE_SEPARATOR
          + CarbonUpdateUtil.getRequiredFieldFromTID(tupleId, TupleIdEnum.PART_ID)
          .replace("#", "/") + CarbonCommonConstants.FILE_SEPARATOR + completeBlockName;
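
A worked example of the non-standard branch above, as a standalone sketch; the "part-" prefix and ".carbondata" extension are assumed expansions of addDataPartPrefix and FACT_FILE_EXT:

// Sketch only: resolves a tuple id's PART_ID and BLOCK_ID fields to the physical
// block file of a flat-folder/partition table, mirroring the branch above.
public class DeltaBlockPathSketch {

  static String nonStandardBlockFile(String tablePath, String partId, String blockId) {
    // completeBlockName = addDataPartPrefix(blockId + FACT_FILE_EXT), assumed to expand so:
    String completeBlockName = "part-" + blockId + ".carbondata";
    return tablePath + "/" + partId.replace("#", "/") + "/" + completeBlockName;
  }

  public static void main(String[] args) {
    // Hypothetical tuple-id fields for a partitioned table.
    System.out.println(
        nonStandardBlockFile("/store/db/t", "c3=aa", "0-0_batchno0-0-0-1531468800000"));
    // -> /store/db/t/c3=aa/part-0-0_batchno0-0-0-1531468800000.carbondata
  }
}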

http://git-wip-us.apache.org/repos/asf/carbondata/blob/78a7371c/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
index 642fe8e..3eb6aae 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
@@ -2906,17 +2906,17 @@ public final class CarbonUtil {
    * @param identifier
    * @param filePath
    * @param segmentId
-   * @param isTransactionalTable
+   * @param isStandardTable
    * @return
    */
  public static String getBlockId(AbsoluteTableIdentifier identifier, String filePath,
-      String segmentId, boolean isTransactionalTable, boolean isPartitionTable) {
+      String segmentId, boolean isTransactionalTable, boolean isStandardTable) {
     String blockId;
    String blockName = filePath.substring(filePath.lastIndexOf("/") + 1, filePath.length());
     String tablePath = identifier.getTablePath();
 
     if (filePath.startsWith(tablePath)) {
-      if (!isTransactionalTable || !isPartitionTable) {
+      if (!isTransactionalTable || isStandardTable) {
         blockId = "Part0" + CarbonCommonConstants.FILE_SEPARATOR + "Segment_" 
+ segmentId
             + CarbonCommonConstants.FILE_SEPARATOR + blockName;
       } else {
@@ -3235,4 +3235,17 @@ public final class CarbonUtil {
     int version = fileHeader.getVersion();
     return ColumnarFormatVersion.valueOf((short)version);
   }
+
+  /**
+   * Checks whether the table is a standard table, i.e. its table path has the
+   * Fact/Part0/Segment_ tail for all carbon files. In the other cases carbon files are present
+   * directly under the table path or under a tablepath/partition folder.
+   * TODO: read the segment file and the corresponding index file to get the correct carbondata
+   * file instead of using this check.
+   * @param table
+   * @return
+   */
+  public static boolean isStandardCarbonTable(CarbonTable table) {
+    return !(table.isSupportFlatFolder() || table.isHivePartitionTable());
+  }
 }
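
The new check only combines two existing table flags; a standalone truth-table sketch (plain booleans instead of a CarbonTable):

// Sketch only: same predicate as CarbonUtil.isStandardCarbonTable above.
public class StandardTableCheckSketch {

  static boolean isStandardCarbonTable(boolean supportsFlatFolder, boolean isHivePartitionTable) {
    return !(supportsFlatFolder || isHivePartitionTable);
  }

  public static void main(String[] args) {
    System.out.println(isStandardCarbonTable(false, false)); // true:  Fact/Part0/Segment_<n> layout
    System.out.println(isStandardCarbonTable(true, false));  // false: files directly under table path
    System.out.println(isStandardCarbonTable(false, true));  // false: files under partition folders
  }
}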

http://git-wip-us.apache.org/repos/asf/carbondata/blob/78a7371c/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/flatfolder/FlatFolderTableLoadingTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/flatfolder/FlatFolderTableLoadingTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/flatfolder/FlatFolderTableLoadingTestCase.scala
index d786d10..9a60978 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/flatfolder/FlatFolderTableLoadingTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/flatfolder/FlatFolderTableLoadingTestCase.scala
@@ -106,6 +106,27 @@ class FlatFolderTableLoadingTestCase extends QueryTest with BeforeAndAfterAll {
 
   }
 
+  test("merge index flat folder issue") {
+    sql("drop table if exists t1")
+    sql("create table t1(c1 int,c2 string,c3 float,c4 date) stored by 
'carbondata' TBLPROPERTIES('flat_folder'='true')")
+    sql("insert into t1 select 1,'a',1001,'1999-01-02'")
+    sql("insert into t1 select 2,'b',20.01,'1998-01-02'")
+    sql("insert into t1 select 3,'c',30.01,'1997-01-02'")
+    sql("insert into t1 select 4,'d',40.01,'1996-01-02'")
+    sql("insert into t1 select 5,'d',40.01,'1996-01-02'")
+    sql("delete from table t1 where segment.id in(1)")
+    val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default", "t1")
+    assert(FileFactory.getCarbonFile(carbonTable.getTablePath).listFiles().filter(_.getName.endsWith(CarbonTablePath.MERGE_INDEX_FILE_EXT)).length == 5)
+    sql("clean files for table t1")
+    assert(FileFactory.getCarbonFile(carbonTable.getTablePath).listFiles().filter(_.getName.endsWith(CarbonTablePath.MERGE_INDEX_FILE_EXT)).length == 4)
+    sql("Alter table t1 compact 'minor'")
+    sql("show segments for table t1").show()
+    assert(FileFactory.getCarbonFile(carbonTable.getTablePath).listFiles().filter(_.getName.endsWith(CarbonTablePath.MERGE_INDEX_FILE_EXT)).length == 5)
+    sql("clean files for table t1")
+    assert(FileFactory.getCarbonFile(carbonTable.getTablePath).listFiles().filter(_.getName.endsWith(CarbonTablePath.MERGE_INDEX_FILE_EXT)).length == 1)
+    sql("drop table if exists t1")
+  }
+
   override def afterAll = {
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
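
The assertions above work by counting merge index files directly under the table path, which flat_folder makes possible. A plain-Java equivalent of that count; the ".carbonindexmerge" extension is an assumed value of CarbonTablePath.MERGE_INDEX_FILE_EXT:

import java.io.File;

// Sketch only: the same directory-listing count the Scala test performs.
public class MergeIndexCountSketch {

  static long countMergeIndexFiles(String tablePath) {
    File[] files = new File(tablePath).listFiles();
    if (files == null) {
      return 0;
    }
    long count = 0;
    for (File f : files) {
      if (f.getName().endsWith(".carbonindexmerge")) {
        count++;
      }
    }
    return count;
  }

  public static void main(String[] args) {
    System.out.println(countMergeIndexFiles("/store/default/t1")); // hypothetical table path
  }
}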

http://git-wip-us.apache.org/repos/asf/carbondata/blob/78a7371c/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
index 8280693..39582f0 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
@@ -24,6 +24,7 @@ import org.apache.spark.sql.test.util.QueryTest
 import org.apache.spark.sql.{CarbonEnv, Row, SaveMode}
 import org.scalatest.BeforeAndAfterAll
 
+import org.apache.carbondata.core.datamap.Segment
 import org.apache.carbondata.core.datastore.filesystem.{CarbonFile, CarbonFileFilter}
 import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.mutate.CarbonUpdateUtil
@@ -246,15 +247,15 @@ class DeleteCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
     val dataframe = sql("select getTupleId() as tupleId from iud_db.dest_tuple")
     val listOfTupleId = dataframe.collect().map(df => df.get(0).toString).sorted
     assert(
-      listOfTupleId(0).startsWith("0/0/0-0_batchno0-0-0-") && listOfTupleId(0).endsWith("/0/0/0"))
+      listOfTupleId(0).contains("0/0/0-0_batchno0-0-0-") && listOfTupleId(0).endsWith("/0/0/0"))
     assert(
-      listOfTupleId(1).startsWith("0/0/0-0_batchno0-0-0-") && listOfTupleId(1).endsWith("/0/0/1"))
+      listOfTupleId(1).contains("0/0/0-0_batchno0-0-0-") && listOfTupleId(1).endsWith("/0/0/1"))
     assert(
-      listOfTupleId(2).startsWith("0/0/0-0_batchno0-0-0-") && listOfTupleId(2).endsWith("/0/0/2"))
+      listOfTupleId(2).contains("0/0/0-0_batchno0-0-0-") && listOfTupleId(2).endsWith("/0/0/2"))
     assert(
-      listOfTupleId(3).startsWith("0/0/0-0_batchno0-0-0-") && listOfTupleId(3).endsWith("/0/0/3"))
+      listOfTupleId(3).contains("0/0/0-0_batchno0-0-0-") && listOfTupleId(3).endsWith("/0/0/3"))
     assert(
-      listOfTupleId(4).startsWith("0/0/0-0_batchno0-0-0-") && listOfTupleId(4).endsWith("/0/0/4"))
+      listOfTupleId(4).contains("0/0/0-0_batchno0-0-0-") && listOfTupleId(4).endsWith("/0/0/4"))
 
     val carbonTable_part = CarbonEnv.getInstance(Spark2TestQueryExecutor.spark).carbonMetastore
       .lookupRelation(Option("iud_db"), "dest_tuple_part")(Spark2TestQueryExecutor.spark)
@@ -270,7 +271,7 @@ class DeleteCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
       carbonDataFilename(0).getAbsolutePath,
       "0",
       carbonTable.isTransactionalTable,
-      carbonTable.isHivePartitionTable)
+      CarbonUtil.isStandardCarbonTable(carbonTable))
 
     assert(blockId.startsWith("Part0/Segment_0/part-0-0_batchno0-0-0-"))
     val carbonDataFilename_part = new File(carbonTable_part.getTablePath + "/c3=aa").listFiles()
@@ -279,17 +280,18 @@ class DeleteCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
       carbonDataFilename_part(0).getAbsolutePath,
       "0",
       carbonTable.isTransactionalTable,
-      carbonTable.isHivePartitionTable)
+      CarbonUtil.isStandardCarbonTable(carbonTable))
     assert(blockId_part.startsWith("Part0/Segment_0/part-0-100100000100001_batchno0-0-0-"))
-
+    val segment = Segment.getSegment("0", carbonTable.getTablePath)
     val tableBlockPath = CarbonUpdateUtil
       .getTableBlockPath(listOfTupleId(0),
         carbonTable.getTablePath,
-        carbonTable.isHivePartitionTable)
+        CarbonUtil.isStandardCarbonTable(carbonTable))
+    val segment_part = Segment.getSegment("0", carbonTable_part.getTablePath)
    val tableBlockPath_part = CarbonUpdateUtil
       .getTableBlockPath(listOfTupleId_part(0),
         carbonTable_part.getTablePath,
-        carbonTable_part.isHivePartitionTable)
+        CarbonUtil.isStandardCarbonTable(carbonTable_part))
     assert(tableBlockPath_part.endsWith("iud_db.db/dest_tuple_part/c3=aa"))
     assert(tableBlockPath.endsWith("iud_db.db/dest_tuple/Fact/Part0/Segment_0"))
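
The switch from startsWith to contains in these assertions suggests that with a segment file present the tuple id can carry a leading component before the block portion, so only the embedded block part and the trailing offsets stay stable. A small illustration with a hypothetical tuple id:

// Sketch only: why contains() is the safer assertion here.
public class TupleIdAssertSketch {

  public static void main(String[] args) {
    // Hypothetical tuple id with an extra leading component ("x").
    String tupleId = "x/0/0/0-0_batchno0-0-0-1531468800000/0/0/0";
    System.out.println(tupleId.startsWith("0/0/0-0_batchno0-0-0-")); // false
    System.out.println(tupleId.contains("0/0/0-0_batchno0-0-0-"));  // true
    System.out.println(tupleId.endsWith("/0/0/0"));                 // true
  }
}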
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/78a7371c/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
index 2cb2717..63a7b7b 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
@@ -759,6 +759,19 @@ class UpdateCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
     sql("drop table if exists test_dm_index")
   }
 
+  test("flat folder carbon table without alias in set columns with mulitple 
loads") {
+    sql("""drop table if exists iud.dest33_flat""")
+    sql("""create table iud.dest33_part (c1 string,c2 int,c5 string, c3 
string) STORED BY 'org.apache.carbondata.format' 
TBLPROPERTIES('flat_folder'='true')""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table 
iud.dest33_part""")
+    sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/IUD/dest.csv' INTO table 
iud.dest33_part""")
+    sql("""update iud.dest33_part d set (c3,c5 ) = (select s.c33 ,s.c55  from 
iud.source2 s where d.c1 = s.c11) where d.c1 = 'a'""").show()
+    checkAnswer(
+      sql("""select c3,c5 from iud.dest33_part where c1='a'"""),
+      Seq(Row("MGM","Disco"),Row("MGM","Disco"))
+    )
+    sql("""drop table if exists iud.dest33_part""")
+  }
+
   override def afterAll {
     sql("use default")
     sql("drop database  if exists iud cascade")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/78a7371c/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/mutation/DeleteExecution.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/mutation/DeleteExecution.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/mutation/DeleteExecution.scala
index df3b961..8633243 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/mutation/DeleteExecution.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/mutation/DeleteExecution.scala
@@ -41,6 +41,7 @@ import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
 import org.apache.carbondata.core.mutate.{CarbonUpdateUtil, DeleteDeltaBlockDetails, SegmentUpdateDetails, TupleIdEnum}
 import org.apache.carbondata.core.mutate.data.RowCountDetailsVO
 import org.apache.carbondata.core.statusmanager.{SegmentStatus, SegmentStatusManager, SegmentUpdateStatusManager}
+import org.apache.carbondata.core.util.CarbonUtil
 import org.apache.carbondata.core.util.path.CarbonTablePath
 import org.apache.carbondata.core.writer.CarbonDeleteDeltaWriterImpl
 import org.apache.carbondata.hadoop.api.{CarbonInputFormat, CarbonTableInputFormat}
@@ -111,7 +112,7 @@ object DeleteExecution {
 
     val metadataDetails = SegmentStatusManager.readTableStatusFile(
       CarbonTablePath.getTableStatusFilePath(carbonTable.getTablePath))
-
+    val isStandardTable = CarbonUtil.isStandardCarbonTable(carbonTable)
     val rowContRdd =
       sparkSession.sparkContext.parallelize(
         blockMappingVO.getCompleteBlockRowDetailVO.asScala.toSeq,
@@ -126,15 +127,15 @@ object DeleteExecution {
           while (records.hasNext) {
             val ((key), (rowCountDetailsVO, groupedRows)) = records.next
            val segmentId = key.substring(0, key.indexOf(CarbonCommonConstants.FILE_SEPARATOR))
-            val segmentFile =
-              metadataDetails.find(_.getLoadName.equals(segmentId)).get.getSegmentFile
+            val loadDetail =
+              metadataDetails.find(_.getLoadName.equals(segmentId)).get
             result = result ++
                      deleteDeltaFunc(index,
                        key,
                        groupedRows.toIterator,
                        timestamp,
                        rowCountDetailsVO,
-                       carbonTable.isHivePartitionTable)
+                       isStandardTable)
           }
           result
         }
@@ -222,7 +223,7 @@ object DeleteExecution {
         iter: Iterator[Row],
         timestamp: String,
         rowCountDetailsVO: RowCountDetailsVO,
-        isPartitionTable: Boolean
+        isStandardTable: Boolean
     ): Iterator[(SegmentStatus, (SegmentUpdateDetails, ExecutionErrors))] = {
 
       val result = new DeleteDelataResultImpl()
@@ -258,7 +259,8 @@ object DeleteExecution {
             countOfRows = countOfRows + 1
           }
 
-          val blockPath = CarbonUpdateUtil.getTableBlockPath(TID, tablePath, isPartitionTable)
+          val blockPath =
+            CarbonUpdateUtil.getTableBlockPath(TID, tablePath, isStandardTable)
           val completeBlockName = CarbonTablePath
            .addDataPartPrefix(CarbonUpdateUtil.getRequiredFieldFromTID(TID, TupleIdEnum.BLOCK_ID) +
                                CarbonCommonConstants.FACT_FILE_EXT)
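
For reference, the segment id used for the loadDetail lookup above is just the key prefix before the first file separator (CarbonCommonConstants.FILE_SEPARATOR, i.e. "/"); a minimal sketch with an assumed key shape:

// Sketch only: mirrors the key.substring(...) segment-id extraction above.
public class SegmentKeySketch {

  static String segmentIdOf(String key) {
    return key.substring(0, key.indexOf('/'));
  }

  public static void main(String[] args) {
    // Assumed key shape: "<segmentId>/<blockName>".
    System.out.println(segmentIdOf("0/part-0-0_batchno0-0-0-1531468800000")); // "0"
  }
}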
