GitHub user sounakr commented on a diff in the pull request:

    https://github.com/apache/incubator-carbondata/pull/604#discussion_r104346516

    --- Diff: integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala ---
    @@ -235,121 +238,141 @@ class CarbonMergerRDD[K, V](
         iter
       }
     
    -  override def getPreferredLocations(split: Partition): Seq[String] = {
    -    val theSplit = split.asInstanceOf[CarbonSparkPartition]
    -    theSplit.split.value.getLocations.filter(_ != "localhost")
    -  }
     
       override def getPartitions: Array[Partition] = {
         val startTime = System.currentTimeMillis()
         val absoluteTableIdentifier: AbsoluteTableIdentifier = new AbsoluteTableIdentifier(
           hdfsStoreLocation, new CarbonTableIdentifier(databaseName, factTableName, tableId)
         )
    -    val updateStatusManger: SegmentUpdateStatusManager = new SegmentUpdateStatusManager(
    +    val updateStatusManager: SegmentUpdateStatusManager = new SegmentUpdateStatusManager(
           absoluteTableIdentifier)
         val jobConf: JobConf = new JobConf(new Configuration)
         val job: Job = new Job(jobConf)
         val format = CarbonInputFormatUtil.createCarbonInputFormat(absoluteTableIdentifier, job)
         var defaultParallelism = sparkContext.defaultParallelism
         val result = new java.util.ArrayList[Partition](defaultParallelism)
    +    var partitionNo = 0
    +    var columnSize = 0
    +    var noOfBlocks = 0
     
         // mapping of the node and block list.
         var nodeBlockMapping: java.util.Map[String, java.util.List[Distributable]] = new
    -            java.util.HashMap[String, java.util.List[Distributable]]
    +        java.util.HashMap[String, java.util.List[Distributable]]
     
    -    var noOfBlocks = 0
         val taskInfoList = new java.util.ArrayList[Distributable]
         var carbonInputSplits = mutable.Seq[CarbonInputSplit]()
     
    -    var blocksOfLastSegment: List[TableBlockInfo] = null
    +    var splitsOfLastSegment: List[CarbonInputSplit] = null
    +    // map for keeping the relation of a task and its blocks.
    +    val taskIdMapping: java.util.Map[String, java.util.List[CarbonInputSplit]] = new
    +        java.util.HashMap[String, java.util.List[CarbonInputSplit]]
     
         // for each valid segment.
         for (eachSeg <- carbonMergerMapping.validSegments) {
    -      // map for keeping the relation of a task and its blocks.
    -      val taskIdMapping: java.util.Map[String, java.util.List[TableBlockInfo]] = new
    -            java.util.HashMap[String, java.util.List[TableBlockInfo]]
     
           // map for keeping the relation of a task and its blocks.
           job.getConfiguration.set(CarbonInputFormat.INPUT_SEGMENT_NUMBERS, eachSeg)
     
    +      val updateDetails: UpdateVO = updateStatusManager.getInvalidTimestampRange(eachSeg)
    +
           // get splits
           val splits = format.getSplits(job)
    -      carbonInputSplits ++:= splits.asScala.map(_.asInstanceOf[CarbonInputSplit])
    -
    -      val updateDetails: UpdateVO = updateStatusManger.getInvalidTimestampRange(eachSeg)
    -
    -      // take the blocks of one segment.
    -      val blocksOfOneSegment = carbonInputSplits.map(inputSplit =>
    -        new TableBlockInfo(inputSplit.getPath.toString,
    -          inputSplit.getStart, inputSplit.getSegmentId,
    -          inputSplit.getLocations, inputSplit.getLength, inputSplit.getVersion
    -        )
    -      )
    -        .filter(blockInfo => !CarbonUtil
    -          .isInvalidTableBlock(blockInfo, updateDetails, updateStatusManger))
     
           // keep on assigning till last one is reached.
    -      if (null != blocksOfOneSegment && blocksOfOneSegment.size > 0) {
    -        blocksOfLastSegment = blocksOfOneSegment.asJava
    +      if (null != splits && splits.size > 0) {
    +        splitsOfLastSegment = splits.asScala.map(_.asInstanceOf[CarbonInputSplit]).toList.asJava
           }
     
    -      // populate the task and its block mapping.
    -      blocksOfOneSegment.foreach(f = tableBlockInfo => {
    -        val taskNo = CarbonTablePath.DataFileUtil.getTaskNo(tableBlockInfo.getFilePath)
    -        val blockList = taskIdMapping.get(taskNo)
    -        if (null == blockList) {
    -          val blockListTemp = new java.util.ArrayList[TableBlockInfo]()
    -          blockListTemp.add(tableBlockInfo)
    -          taskIdMapping.put(taskNo, blockListTemp)
    -        }
    -        else {
    -          blockList.add(tableBlockInfo)
    -        }
    +      carbonInputSplits ++:= splits.asScala.map(_.asInstanceOf[CarbonInputSplit]).filter(entry => {
    +        val blockInfo = new TableBlockInfo(entry.getPath.toString,
    +          entry.getStart, entry.getSegmentId,
    +          entry.getLocations, entry.getLength, entry.getVersion
    +        )
    +        !CarbonUtil
    +          .isInvalidTableBlock(blockInfo, updateDetails, updateStatusManager)
           })
    -
    -      noOfBlocks += blocksOfOneSegment.size
    -      taskIdMapping.asScala.foreach(
    -        entry =>
    -          taskInfoList.add(new TableTaskInfo(entry._1, entry._2).asInstanceOf[Distributable])
    -      )
    -
         }
     
         // prepare the details required to extract the segment properties using last segment.
    -    if (null != carbonInputSplits && carbonInputSplits.nonEmpty) {
    -      // taking head as scala sequence is use and while adding it will add at first
    -      // so as we need to update the update the key of older segments with latest keygenerator
    -      // we need to take the top of the split
    -      val carbonInputSplit = carbonInputSplits.head
    +    if (null != splitsOfLastSegment && splitsOfLastSegment.size() > 0) {
    +      val carbonInputSplit = splitsOfLastSegment.get(0)
           var dataFileFooter: DataFileFooter = null
     
           try {
             dataFileFooter = CarbonUtil.readMetadatFile(
    -            CarbonInputSplit.getTableBlockInfo(carbonInputSplit))
    +          CarbonInputSplit.getTableBlockInfo(carbonInputSplit))
           } catch {
             case e: IOException =>
               logError("Exception in preparing the data file footer for compaction " + e.getMessage)
               throw e
           }
     
    -      carbonMergerMapping.maxSegmentColCardinality = dataFileFooter.getSegmentInfo
    -        .getColumnCardinality
    +      columnSize = dataFileFooter.getSegmentInfo.getColumnCardinality.size
           carbonMergerMapping.maxSegmentColumnSchemaList = dataFileFooter.getColumnInTable.asScala
             .toList
         }
     
    -    // val blocks = carbonInputSplits.map(_.asInstanceOf[Distributable]).asJava
    -    // send complete list of blocks to the mapping util.
    -    nodeBlockMapping = CarbonLoaderUtil.nodeBlockMapping(taskInfoList, -1)
    +    var cardinality = new Array[Int](columnSize)
    +
    +    carbonInputSplits.foreach(splits => {
    +      val taskNo = splits.taskId
    +      var dataFileFooter: DataFileFooter = null
    +
    +      val splitList = taskIdMapping.get(taskNo)
    +      noOfBlocks += 1
    +      if (null == splitList) {
    +        val splitTempList = new util.ArrayList[CarbonInputSplit]()
    +        splitTempList.add(splits)
    +        taskIdMapping.put(taskNo, splitTempList)
    +      } else {
    +        splitList.add(splits)
    +      }
    +
    +      // Check the cardinality of each columns and set the highest.
    +      try {
    +        dataFileFooter = CarbonUtil.readMetadatFile(
    +          CarbonInputSplit.getTableBlockInfo(splits))
    +      } catch {
    +        case e: IOException =>
    +          logError("Exception in preparing the data file footer for compaction " + e.getMessage)
    +          throw e
    +      }
    +
    +      // Calculate the Cardinality of the new segment
    +      var targetCardinality = dataFileFooter.getSegmentInfo.getColumnCardinality
    --- End diff --
    
    Done. Moved the logic of cardinality calculation to a function.
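    
    For reference, a minimal sketch of what such an extracted helper could look
    like (the name `mergeCardinality` and the wiring shown here are illustrative
    assumptions, not copied from the actual patch): for every column, keep the
    highest cardinality seen across all segment footers.
    
        // Hypothetical sketch only, not the exact code in the patch.
        // For each column, keep the larger of the running maximum and the
        // value read from the current segment's data file footer.
        def mergeCardinality(currentMax: Array[Int],
            segmentCardinality: Array[Int]): Array[Int] = {
          currentMax.zip(segmentCardinality).map { case (current, fromSegment) =>
            Math.max(current, fromSegment)
          }
        }
    
        // Usage inside the per-split loop (sketch):
        //   cardinality = mergeCardinality(cardinality,
        //     dataFileFooter.getSegmentInfo.getColumnCardinality)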

