[ https://issues.apache.org/jira/browse/SPARK-26405?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16728122#comment-16728122 ]
Hyukjin Kwon commented on SPARK-26405:
--------------------------------------

BTW, please don't report a JIRA with a title like "OOM". I had no idea what the JIRA was about.

> OOM
> ---
>
>                 Key: SPARK-26405
>                 URL: https://issues.apache.org/jira/browse/SPARK-26405
>             Project: Spark
>          Issue Type: Bug
>          Components: Java API, Scheduler, Shuffle, Spark Core, Spark Submit
>    Affects Versions: 2.2.0
>            Reporter: lu
>            Priority: Major
>
> Heap memory overflow occurred during user-portrait analysis; the analyzed data volume was about 10 million records.
> Spark worker memory: 4G
> The job is submitted with RestSubmissionClient.
> Both driver memory and executor memory: 4g
> Total executor cores: 6
> Spark cores: 2
> Cluster size: 3
>
> INFO worker.WorkerWatcher: Connecting to worker spark://Worker@192.168.44.181:45315
> Exception in thread "broadcast-exchange-3" java.lang.OutOfMemoryError: Not enough memory to build and broadcast the table to all worker nodes. As a workaround, you can either disable broadcast by setting spark.sql.autoBroadcastJoinThreshold to -1 or increase the spark driver memory by setting spark.driver.memory to a higher value
> at org.apache.spark.sql.execution.exchange.BroadcastExchangeExec$$anonfun$relationFuture$1$$anonfun$apply$1.apply(BroadcastExchangeExec.scala:102)
> at org.apache.spark.sql.execution.exchange.BroadcastExchangeExec$$anonfun$relationFuture$1$$anonfun$apply$1.apply(BroadcastExchangeExec.scala:73)
> at org.apache.spark.sql.execution.SQLExecution$.withExecutionId(SQLExecution.scala:103)
> at org.apache.spark.sql.execution.exchange.BroadcastExchangeExec$$anonfun$relationFuture$1.apply(BroadcastExchangeExec.scala:72)
> at org.apache.spark.sql.execution.exchange.BroadcastExchangeExec$$anonfun$relationFuture$1.apply(BroadcastExchangeExec.scala:72)
> at scala.concurrent.impl.Future$PromiseCompletingRunnable.liftedTree1$1(Future.scala:24)
> at scala.concurrent.impl.Future$PromiseCompletingRunnable.run(Future.scala:24)
> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
> at java.lang.Thread.run(Thread.java:745)
> Exception in thread "main" java.lang.reflect.InvocationTargetException
> at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
> at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
> at java.lang.reflect.Method.invoke(Method.java:498)
> at org.apache.spark.deploy.worker.DriverWrapper$.main(DriverWrapper.scala:58)
> at org.apache.spark.deploy.worker.DriverWrapper.main(DriverWrapper.scala)
> Caused by: java.util.concurrent.TimeoutException: Futures timed out after [300 seconds]
> at scala.concurrent.impl.Promise$DefaultPromise.ready(Promise.scala:219)
> at scala.concurrent.impl.Promise$DefaultPromise.result(Promise.scala:223)
> at org.apache.spark.util.ThreadUtils$.awaitResult(ThreadUtils.scala:201)
> at org.apache.spark.sql.execution.exchange.BroadcastExchangeExec.doExecuteBroadcast(BroadcastExchangeExec.scala:123)
> at org.apache.spark.sql.execution.InputAdapter.doExecuteBroadcast(WholeStageCodegenExec.scala:248)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeBroadcast$1.apply(SparkPlan.scala:127)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeBroadcast$1.apply(SparkPlan.scala:127)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:138)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:135)
> at org.apache.spark.sql.execution.SparkPlan.executeBroadcast(SparkPlan.scala:126)
> at org.apache.spark.sql.execution.joins.BroadcastHashJoinExec.prepareBroadcast(BroadcastHashJoinExec.scala:98)
> at org.apache.spark.sql.execution.joins.BroadcastHashJoinExec.codegenInner(BroadcastHashJoinExec.scala:197)
> at org.apache.spark.sql.execution.joins.BroadcastHashJoinExec.doConsume(BroadcastHashJoinExec.scala:82)
> at org.apache.spark.sql.execution.CodegenSupport$class.consume(WholeStageCodegenExec.scala:155)
> at org.apache.spark.sql.execution.ProjectExec.consume(basicPhysicalOperators.scala:36)
> at org.apache.spark.sql.execution.ProjectExec.doConsume(basicPhysicalOperators.scala:68)
> at org.apache.spark.sql.execution.CodegenSupport$class.consume(WholeStageCodegenExec.scala:155)
> at org.apache.spark.sql.execution.FilterExec.consume(basicPhysicalOperators.scala:88)
> at org.apache.spark.sql.execution.FilterExec.doConsume(basicPhysicalOperators.scala:209)
> at org.apache.spark.sql.execution.CodegenSupport$class.consume(WholeStageCodegenExec.scala:155)
> at org.apache.spark.sql.execution.SerializeFromObjectExec.consume(objects.scala:107)
> at org.apache.spark.sql.execution.SerializeFromObjectExec.doConsume(objects.scala:129)
> at org.apache.spark.sql.execution.CodegenSupport$class.consume(WholeStageCodegenExec.scala:155)
> at org.apache.spark.sql.execution.InputAdapter.consume(WholeStageCodegenExec.scala:235)
> at org.apache.spark.sql.execution.InputAdapter.doProduce(WholeStageCodegenExec.scala:263)
> at org.apache.spark.sql.execution.CodegenSupport$$anonfun$produce$1.apply(WholeStageCodegenExec.scala:85)
> at org.apache.spark.sql.execution.CodegenSupport$$anonfun$produce$1.apply(WholeStageCodegenExec.scala:80)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:138)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:135)
> at org.apache.spark.sql.execution.CodegenSupport$class.produce(WholeStageCodegenExec.scala:80)
> at org.apache.spark.sql.execution.InputAdapter.produce(WholeStageCodegenExec.scala:235)
> at org.apache.spark.sql.execution.SerializeFromObjectExec.doProduce(objects.scala:120)
> at org.apache.spark.sql.execution.CodegenSupport$$anonfun$produce$1.apply(WholeStageCodegenExec.scala:85)
> at org.apache.spark.sql.execution.CodegenSupport$$anonfun$produce$1.apply(WholeStageCodegenExec.scala:80)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:138)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:135)
> at org.apache.spark.sql.execution.CodegenSupport$class.produce(WholeStageCodegenExec.scala:80)
> at org.apache.spark.sql.execution.SerializeFromObjectExec.produce(objects.scala:107)
> at org.apache.spark.sql.execution.FilterExec.doProduce(basicPhysicalOperators.scala:128)
> at org.apache.spark.sql.execution.CodegenSupport$$anonfun$produce$1.apply(WholeStageCodegenExec.scala:85)
> at org.apache.spark.sql.execution.CodegenSupport$$anonfun$produce$1.apply(WholeStageCodegenExec.scala:80)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:138)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:135)
> at org.apache.spark.sql.execution.CodegenSupport$class.produce(WholeStageCodegenExec.scala:80)
> at org.apache.spark.sql.execution.FilterExec.produce(basicPhysicalOperators.scala:88)
> at org.apache.spark.sql.execution.ProjectExec.doProduce(basicPhysicalOperators.scala:46)
> at org.apache.spark.sql.execution.CodegenSupport$$anonfun$produce$1.apply(WholeStageCodegenExec.scala:85)
> at org.apache.spark.sql.execution.CodegenSupport$$anonfun$produce$1.apply(WholeStageCodegenExec.scala:80)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:138)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:135)
> at org.apache.spark.sql.execution.CodegenSupport$class.produce(WholeStageCodegenExec.scala:80)
> at org.apache.spark.sql.execution.ProjectExec.produce(basicPhysicalOperators.scala:36)
> at org.apache.spark.sql.execution.joins.BroadcastHashJoinExec.doProduce(BroadcastHashJoinExec.scala:77)
> at org.apache.spark.sql.execution.CodegenSupport$$anonfun$produce$1.apply(WholeStageCodegenExec.scala:85)
> at org.apache.spark.sql.execution.CodegenSupport$$anonfun$produce$1.apply(WholeStageCodegenExec.scala:80)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:138)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:135)
> at org.apache.spark.sql.execution.CodegenSupport$class.produce(WholeStageCodegenExec.scala:80)
> at org.apache.spark.sql.execution.joins.BroadcastHashJoinExec.produce(BroadcastHashJoinExec.scala:38)
> at org.apache.spark.sql.execution.ProjectExec.doProduce(basicPhysicalOperators.scala:46)
> at org.apache.spark.sql.execution.CodegenSupport$$anonfun$produce$1.apply(WholeStageCodegenExec.scala:85)
> at org.apache.spark.sql.execution.CodegenSupport$$anonfun$produce$1.apply(WholeStageCodegenExec.scala:80)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:138)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:135)
> at org.apache.spark.sql.execution.CodegenSupport$class.produce(WholeStageCodegenExec.scala:80)
> at org.apache.spark.sql.execution.ProjectExec.produce(basicPhysicalOperators.scala:36)
> at org.apache.spark.sql.execution.WholeStageCodegenExec.doCodeGen(WholeStageCodegenExec.scala:331)
> at org.apache.spark.sql.execution.WholeStageCodegenExec.doExecute(WholeStageCodegenExec.scala:372)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:117)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:117)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:138)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:135)
> at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:116)
> at org.apache.spark.sql.execution.columnar.InMemoryRelation.buildBuffers(InMemoryRelation.scala:91)
> at org.apache.spark.sql.execution.columnar.InMemoryRelation.<init>(InMemoryRelation.scala:86)
> at org.apache.spark.sql.execution.columnar.InMemoryRelation$.apply(InMemoryRelation.scala:42)
> at org.apache.spark.sql.execution.CacheManager$$anonfun$cacheQuery$1.apply(CacheManager.scala:100)
> at org.apache.spark.sql.execution.CacheManager.writeLock(CacheManager.scala:68)
> at org.apache.spark.sql.execution.CacheManager.cacheQuery(CacheManager.scala:92)
> at org.apache.spark.sql.Dataset.persist(Dataset.scala:2513)
> at org.apache.spark.sql.Dataset.cache(Dataset.scala:2523)
> at cfca.xfraud.habit.util.PersonFeatureUtil.getHabitDev(PersonFeatureUtil.scala:142)
> at cfca.xfraud.habit.strategy.MajorVersionStrategy.getPersonStrategyDF(MajorVersionStrategy.scala:39)
> at cfca.xfraud.habit.handler.MajorVersionHandler.extractFeature1(MajorVersionHandler.scala:59)
> at cfca.xfraud.habit.HabitMain$.main(HabitMain.scala:21)
> at cfca.xfraud.habit.HabitMain.main(HabitMain.scala)
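For anyone hitting the same failure: the two workarounds named in the OutOfMemoryError message are Spark configs, and the "Futures timed out after [300 seconds]" in the Caused-by matches the default of spark.sql.broadcastTimeout. A minimal sketch of applying them at session creation (the builder and app name below are illustrative, not the reporter's actual code; note that spark.driver.memory must be set before the driver JVM starts, e.g. via spark-submit --driver-memory, so it cannot be applied in the builder):

{code:scala}
import org.apache.spark.sql.SparkSession

// Illustrative session setup; only the two SQL configs come from the
// error message and the timeout shown in the stack trace.
val spark = SparkSession.builder()
  .appName("habit-analysis-example") // hypothetical name
  // Workaround from the error message: -1 disables automatic broadcast
  // joins, so the large table is no longer built on the driver.
  .config("spark.sql.autoBroadcastJoinThreshold", "-1")
  // The 300-second future timeout is spark.sql.broadcastTimeout's default;
  // raising it only helps if the broadcast can actually fit in memory.
  .config("spark.sql.broadcastTimeout", "600")
  .getOrCreate()
{code}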
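Separately, the driver-side trace bottoms out in Dataset.cache() at PersonFeatureUtil.scala:142, which forces the whole plan, including the broadcast, to execute. If memory stays tight after the configs above, one option is an explicit serialized, disk-spilling storage level; a sketch, where df stands in for the reporter's DataFrame:

{code:scala}
import org.apache.spark.storage.StorageLevel

// df is a placeholder for the DataFrame cached in getHabitDev.
// Dataset.cache() already defaults to MEMORY_AND_DISK in Spark 2.x;
// the serialized variant trades CPU for a smaller on-heap footprint.
df.persist(StorageLevel.MEMORY_AND_DISK_SER)
{code}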