This is an automated email from the ASF dual-hosted git repository.

shaofengshi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kylin.git


The following commit(s) were added to refs/heads/master by this push:
     new 926515b  KYLIN-4320 number of replicas of Cuboid files cannot be configured for Spark engine
926515b is described below

commit 926515bfc217167fe570c0cf21a39f54e5b5d1ff
Author: yaqian.zhang <598593...@qq.com>
AuthorDate: Tue Apr 21 11:57:00 2020 +0800

    KYLIN-4320 number of replicas of Cuboid files cannot be configured for Spark engine
---
 core-common/src/main/resources/kylin-defaults.properties                | 2 +-
 engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkUtil.java | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/core-common/src/main/resources/kylin-defaults.properties b/core-common/src/main/resources/kylin-defaults.properties
index 1dee90f..dc49ac4 100644
--- a/core-common/src/main/resources/kylin-defaults.properties
+++ b/core-common/src/main/resources/kylin-defaults.properties
@@ -331,7 +331,7 @@ kylin.engine.spark-conf.spark.eventLog.enabled=true
 kylin.engine.spark-conf.spark.eventLog.dir=hdfs\:///kylin/spark-history
 kylin.engine.spark-conf.spark.history.fs.logDirectory=hdfs\:///kylin/spark-history
 kylin.engine.spark-conf.spark.hadoop.yarn.timeline-service.enabled=false
-
+kylin.engine.spark-conf.spark.hadoop.dfs.replication=2
 ### Spark conf for specific job
 kylin.engine.spark-conf-mergedict.spark.executor.memory=6G
 kylin.engine.spark-conf-mergedict.spark.memory.fraction=0.2
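
The hunk above turns the previously hardcoded replication factor into a regular spark-conf passthrough property, so deployments can tune it in kylin.properties. A minimal sketch of such an override (the value 3 is an arbitrary illustration, not a recommended setting):

    kylin.engine.spark-conf.spark.hadoop.dfs.replication=3
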
diff --git a/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkUtil.java b/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkUtil.java
index fcd24f1..21c423f 100644
--- a/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkUtil.java
+++ b/engine-spark/src/main/java/org/apache/kylin/engine/spark/SparkUtil.java
@@ -138,7 +138,6 @@ public class SparkUtil {
     }
 
     public static void modifySparkHadoopConfiguration(SparkContext sc) throws Exception {
-        sc.hadoopConfiguration().set("dfs.replication", "2"); // cuboid intermediate files, replication=2
         sc.hadoopConfiguration().set("mapreduce.output.fileoutputformat.compress", "true");
         sc.hadoopConfiguration().set("mapreduce.output.fileoutputformat.compress.type", "BLOCK");
         sc.hadoopConfiguration().set("mapreduce.output.fileoutputformat.compress.codec", "org.apache.hadoop.io.compress.DefaultCodec"); // or org.apache.hadoop.io.compress.SnappyCodec
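
The removed line above hardcoded replication=2 for cuboid intermediate files. Dropping it is safe because Spark copies every spark.hadoop.* entry from the SparkConf into the context's Hadoop Configuration, which is where the new kylin-defaults.properties entry lands. A minimal standalone sketch of that passthrough (illustrative class and app names, not Kylin code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.spark.SparkConf;
    import org.apache.spark.SparkContext;

    public class ReplicationPassthroughSketch {
        public static void main(String[] args) {
            // Same effect as kylin.engine.spark-conf.spark.hadoop.dfs.replication=2
            // in kylin.properties: Kylin forwards the part after
            // "kylin.engine.spark-conf." as an ordinary Spark conf entry.
            SparkConf conf = new SparkConf()
                    .setMaster("local[1]")
                    .setAppName("replication-passthrough-sketch")
                    .set("spark.hadoop.dfs.replication", "2");
            SparkContext sc = new SparkContext(conf);

            // Spark strips the "spark.hadoop." prefix and copies the entry into
            // the Hadoop Configuration, so the old hardcoded set() is redundant.
            Configuration hadoopConf = sc.hadoopConfiguration();
            System.out.println(hadoopConf.get("dfs.replication")); // prints 2

            sc.stop();
        }
    }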
