manishnalla1994 commented on a change in pull request #3182: [CARBONDATA-3343] Compaction for Range Sort
URL: https://github.com/apache/carbondata/pull/3182#discussion_r280766094
 
 

 ##########
 File path: integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
 ##########
 @@ -589,9 +601,91 @@ class CarbonMergerRDD[K, V](
     // Change string type to support all types
     val sampleRdd = scanRdd
       .map(row => (row.get(0, sparkDataType), null))
+    val sortedRdd = sampleRdd.sortBy(_._1, true)(objectOrdering, classTag[AnyRef])
     val value = new DataSkewRangePartitioner(
-      defaultParallelism, sampleRdd, true)(objectOrdering, classTag[Object])
-    CarbonCompactionUtil.getRangesFromVals(value.rangeBounds, value.minMaxVals)
+      defaultParallelism, sortedRdd, true)(objectOrdering, classTag[Object])
+    if(minVal == null && maxVal == null) {
+      CarbonCompactionUtil
+        .getRangesFromVals(value.rangeBounds, value.rangeBounds(0), value.rangeBounds(1))
+    } else {
+      CarbonCompactionUtil.getRangesFromVals(value.rangeBounds, minVal, maxVal)
+    }
+  }
+
+  def getOverallMinMax(carbonInputSplits: mutable.Seq[CarbonInputSplit],
 
 Review comment:
   I removed this method anyway, as I found a different way of handling the ranges' min/max without reading the footer, since reading the footer would be a more costly operation.
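
   As a rough illustration of the idea described above (taking the range boundaries from the sampled, sorted keys instead of opening each data file footer), here is a minimal, self-contained Spark sketch. It uses only plain Spark APIs and made-up sample values; the names sampleRdd, sortedRdd, minVal and maxVal merely mirror the patch and do not reproduce CarbonData's DataSkewRangePartitioner logic.

   import org.apache.spark.sql.SparkSession

   object RangeMinMaxSketch {
     def main(args: Array[String]): Unit = {
       val spark = SparkSession.builder()
         .master("local[2]")
         .appName("range-minmax-sketch")
         .getOrCreate()
       val sc = spark.sparkContext

       // Stand-in for the sampled first sort column of the segments being compacted.
       val sampleRdd = sc.parallelize(Seq(42, 7, 99, 3, 57, 21)).map(v => (v, null))

       // Sorting the sampled keys up front (as the patch does before building the
       // partitioner) means the overall min and max can be taken from the sample
       // itself, so the file footers never have to be read just for the boundaries.
       val sortedRdd = sampleRdd.sortBy(_._1)
       val minVal = sortedRdd.first()._1
       val maxVal = sortedRdd.map(_._1).max()

       println(s"overall range: [$minVal, $maxVal]")
       spark.stop()
     }
   }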

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
