Repository: incubator-carbondata
Updated Branches:
  refs/heads/master 5a97fc795 -> ab9c6c083


Fix issue CARBONDATA-339: rename the hdfsLocation parameter of GlobalDictionaryUtil.generateGlobalDictionary to storePath


Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/64d4d6da
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/64d4d6da
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/64d4d6da

Branch: refs/heads/master
Commit: 64d4d6daaf6e8adede6cfffe94221d20f365631c
Parents: 5a97fc7
Author: hseagle <hsxup...@gmail.com>
Authored: Thu Oct 27 10:55:53 2016 +0800
Committer: hseagle <hsxup...@gmail.com>
Committed: Thu Oct 27 10:55:53 2016 +0800

----------------------------------------------------------------------
 .../carbondata/spark/util/GlobalDictionaryUtil.scala    | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/64d4d6da/integration/spark/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
index 09c32c8..a1f4400 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
@@ -742,13 +742,13 @@ object GlobalDictionaryUtil extends Logging {
    */
   def generateGlobalDictionary(sqlContext: SQLContext,
                                carbonLoadModel: CarbonLoadModel,
-                               hdfsLocation: String,
+                               storePath: String,
                                dataFrame: Option[DataFrame] = None): Unit = {
     try {
       val table = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable.getAbsoluteTableIdentifier
         .getCarbonTableIdentifier
       // create dictionary folder if not exists
-      val carbonTablePath = CarbonStorePath.getCarbonTablePath(hdfsLocation, table)
+      val carbonTablePath = CarbonStorePath.getCarbonTablePath(storePath, table)
       val dictfolderPath = carbonTablePath.getMetadataDirectoryPath
       // columns which need to generate global dictionary file
       val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
@@ -777,7 +777,7 @@ object GlobalDictionaryUtil extends Logging {
         if (colDictFilePath != null) {
           // generate predefined dictionary
           generatePredefinedColDictionary(colDictFilePath, table,
-            dimensions, carbonLoadModel, sqlContext, hdfsLocation, dictfolderPath)
+            dimensions, carbonLoadModel, sqlContext, storePath, dictfolderPath)
         }
         if (headers.length > df.columns.length) {
           val msg = "The number of columns in the file header do not match the number of " +
@@ -792,7 +792,7 @@ object GlobalDictionaryUtil extends Logging {
           // select column to push down pruning
           df = df.select(requireColumnNames.head, requireColumnNames.tail: _*)
          val model = createDictionaryLoadModel(carbonLoadModel, table, requireDimension,
-            hdfsLocation, dictfolderPath, false)
+            storePath, dictfolderPath, false)
           // combine distinct value in a block and partition by column
           val inputRDD = new CarbonBlockDistinctValuesCombineRDD(df.rdd, model)
             .partitionBy(new ColumnPartitioner(model.primDimensions.length))
@@ -815,7 +815,7 @@ object GlobalDictionaryUtil extends Logging {
               dimDataframe = dimDataframe.select(requireColumnNamesForDim.head,
                 requireColumnNamesForDim.tail: _*)
              val modelforDim = createDictionaryLoadModel(carbonLoadModel, table,
-                requireDimensionForDim, hdfsLocation, dictfolderPath, false)
+                requireDimensionForDim, storePath, dictfolderPath, false)
               val inputRDDforDim = new CarbonBlockDistinctValuesCombineRDD(
                 dimDataframe.rdd, modelforDim)
                .partitionBy(new ColumnPartitioner(modelforDim.primDimensions.length))
@@ -842,7 +842,7 @@ object GlobalDictionaryUtil extends Logging {
             pruneDimensions(dimensions, headers, headers)
           if (requireDimension.nonEmpty) {
            val model = createDictionaryLoadModel(carbonLoadModel, table, requireDimension,
-              hdfsLocation, dictfolderPath, false)
+              storePath, dictfolderPath, false)
             // check if dictionary files contains bad record
             val accumulator = sqlContext.sparkContext.accumulator(0)
             // read local dictionary file, and group by key
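
A minimal caller sketch (not part of this commit), in Scala, showing how the renamed parameter flows through: only the parameter name changes from hdfsLocation to storePath, the rest of the signature is untouched, and dataFrame still defaults to None. The CarbonLoadModel import path and the buildDictionaries helper below are assumptions for illustration, not code from this repository.

import org.apache.spark.sql.SQLContext
import org.apache.carbondata.spark.load.CarbonLoadModel  // assumed package for this code base
import org.apache.carbondata.spark.util.GlobalDictionaryUtil

object DictionaryLoadSketch {
  // Hypothetical helper: kicks off global dictionary generation for one data load.
  // storePath is the store location handed to CarbonStorePath.getCarbonTablePath
  // in the diff above; it replaces the former hdfsLocation parameter name.
  def buildDictionaries(sqlContext: SQLContext,
                        carbonLoadModel: CarbonLoadModel,
                        storePath: String): Unit = {
    // dataFrame is omitted, so its default None applies and dictionary values
    // are read from the load's input files rather than an in-memory DataFrame.
    GlobalDictionaryUtil.generateGlobalDictionary(sqlContext, carbonLoadModel, storePath)
  }
}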
