This is an automated email from the ASF dual-hosted git repository.

shaofengshi pushed a commit to branch 2.6.x
in repository https://gitbox.apache.org/repos/asf/kylin.git


The following commit(s) were added to refs/heads/2.6.x by this push:
     new ee91a28  KYLIN-4099: Use non-blocking RDD unpersist in Spark cubing job
ee91a28 is described below

commit ee91a28fec89a3cb18638f087393e3864982b3bf
Author: Liu Shaohui <liushao...@xiaomi.com>
AuthorDate: Fri Jul 19 12:49:10 2019 +0800

    KYLIN-4099: Use non-blocking RDD unpersist in Spark cubing job
---
 .../main/java/org/apache/kylin/engine/spark/SparkCubingByLayer.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkCubingByLayer.java b/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkCubingByLayer.java
index f3b0a13..cb1a33f 100644
--- a/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkCubingByLayer.java
+++ b/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkCubingByLayer.java
@@ -197,13 +197,13 @@ public class SparkCubingByLayer extends AbstractApplication implements Serializa
 
             allRDDs[level] = allRDDs[level - 1].flatMapToPair(flatMapFunction).reduceByKey(reducerFunction2, partition)
                     .persist(storageLevel);
-            allRDDs[level - 1].unpersist();
+            allRDDs[level - 1].unpersist(false);
             if (envConfig.isSparkSanityCheckEnabled() == true) {
                 sanityCheck(allRDDs[level], totalCount, level, cubeStatsReader, countMeasureIndex);
             }
             saveToHDFS(allRDDs[level], metaUrl, cubeName, cubeSegment, outputPath, level, job, envConfig);
         }
-        allRDDs[totalLevels].unpersist();
+        allRDDs[totalLevels].unpersist(false);
         logger.info("Finished on calculating all level cuboids.");
         logger.info("HDFS: Number of bytes written=" + 
jobListener.metrics.getBytesWritten());
         //HadoopUtil.deleteHDFSMeta(metaUrl);
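
For context on the change above: in the Spark 2.x RDD API that Kylin 2.6 builds on, RDD.unpersist() defaults to blocking=true, so the driver waits until every cached block has been removed from the executors before continuing. Passing false makes the call asynchronous, so the next cuboid level can start without stalling on cache cleanup. The following standalone Java sketch (hypothetical class and variable names, not Kylin code; assumes a local Spark 2.x setup) illustrates the difference:

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.storage.StorageLevel;

public class UnpersistDemo {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("UnpersistDemo").setMaster("local[2]");
        try (JavaSparkContext sc = new JavaSparkContext(conf)) {
            JavaRDD<Integer> cached = sc.parallelize(Arrays.asList(1, 2, 3, 4))
                    .persist(StorageLevel.MEMORY_AND_DISK_SER());
            cached.count(); // materialize the cached blocks

            // unpersist(true), the Spark 2.x default for unpersist(): the
            // driver blocks until all executors have dropped their blocks.
            // unpersist(false): only enqueues the removal request and
            // returns immediately, so subsequent stages are not delayed.
            cached.unpersist(false);
        }
    }
}

The trade-off is that with blocking=false the old level's blocks may linger briefly and compete for cache space with the next level's RDD, which is normally acceptable because Spark evicts cached blocks on demand.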
