This is an automated email from the ASF dual-hosted git repository.

shaofengshi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kylin.git


The following commit(s) were added to refs/heads/master by this push:
     new e4d3d12  KYLIN-4099: Using non-blocking RDD unpersist in spark cubing 
job
e4d3d12 is described below

commit e4d3d12911ce95da2c0d95e3d39c947e6e90a7d8
Author: Liu Shaohui <liushao...@xiaomi.com>
AuthorDate: Fri Jul 19 12:49:10 2019 +0800

    KYLIN-4099: Using non-blocking RDD unpersist in spark cubing job
---
 .../main/java/org/apache/kylin/engine/spark/SparkCubingByLayer.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkCubingByLayer.java
 
b/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkCubingByLayer.java
index 232a1c7..33f3e51 100644
--- 
a/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkCubingByLayer.java
+++ 
b/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkCubingByLayer.java
@@ -197,13 +197,13 @@ public class SparkCubingByLayer extends 
AbstractApplication implements Serializa
 
             allRDDs[level] = allRDDs[level - 
1].flatMapToPair(flatMapFunction).reduceByKey(reducerFunction2, partition)
                     .persist(storageLevel);
-            allRDDs[level - 1].unpersist();
+            allRDDs[level - 1].unpersist(false);
             if (envConfig.isSparkSanityCheckEnabled() == true) {
                 sanityCheck(allRDDs[level], totalCount, level, 
cubeStatsReader, countMeasureIndex);
             }
             saveToHDFS(allRDDs[level], metaUrl, cubeName, cubeSegment, 
outputPath, level, job, envConfig);
         }
-        allRDDs[totalLevels].unpersist();
+        allRDDs[totalLevels].unpersist(false);
         logger.info("Finished on calculating all level cuboids.");
         logger.info("HDFS: Number of bytes written=" + 
jobListener.metrics.getBytesWritten());
         //HadoopUtil.deleteHDFSMeta(metaUrl);

Reply via email to