[ https://issues.apache.org/jira/browse/SPARK-36668?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
liuzruzi updated SPARK-36668: ----------------------------- Attachment: spark-test.tar > py4j.protocol.Py4JJavaError: java.util.NoSuchElementException: None.get > ----------------------------------------------------------------------- > > Key: SPARK-36668 > URL: https://issues.apache.org/jira/browse/SPARK-36668 > Project: Spark > Issue Type: Bug > Components: Java API > Affects Versions: 2.4.5 > Environment: python3.6 > spark2.4.5 > hive3.1 > Reporter: liuzruzi > Priority: Major > Attachments: spark-test.tar > > > 随机森林算子训练出的模型,进行推理报错 (Translation: a model trained by the random-forest operator throws an error during inference) > File > "/srv//application_1630214655126_410542/container_e26_1630214655126_410542_02_000001/py4j-0.10.7-src.zip/py4j/protocol.py", > line 328, in get_return_value > py4j.protocol.Py4JJavaError: An error occurred while calling o602.load. > : java.util.NoSuchElementException: None.get > at scala.None$.get(Option.scala:347) > at scala.None$.get(Option.scala:345) > at > org.apache.spark.sql.execution.FileSourceScanExec.needsUnsafeRowConversion$lzycompute(DataSourceScanExec.scala:179) > at > org.apache.spark.sql.execution.FileSourceScanExec.needsUnsafeRowConversion(DataSourceScanExec.scala:177) > at > org.apache.spark.sql.execution.ColumnarBatchScan$class.produceRows(ColumnarBatchScan.scala:167) > at > org.apache.spark.sql.execution.ColumnarBatchScan$class.doProduce(ColumnarBatchScan.scala:85) > at > org.apache.spark.sql.execution.FileSourceScanExec.doProduce(DataSourceScanExec.scala:160) > at > org.apache.spark.sql.execution.CodegenSupport$$anonfun$produce$1.apply(WholeStageCodegenExec.scala:90) > at > org.apache.spark.sql.execution.CodegenSupport$$anonfun$produce$1.apply(WholeStageCodegenExec.scala:85) > at > org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:201) > at > org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151) > at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:198) > at > 
org.apache.spark.sql.execution.CodegenSupport$class.produce(WholeStageCodegenExec.scala:85) > at > org.apache.spark.sql.execution.FileSourceScanExec.produce(DataSourceScanExec.scala:160) > at > org.apache.spark.sql.execution.WholeStageCodegenExec.doCodeGen(WholeStageCodegenExec.scala:492) > at > org.apache.spark.sql.execution.WholeStageCodegenExec.doExecute(WholeStageCodegenExec.scala:546) > at > org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:177) > at > org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:173) > at > org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:201) > at > org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151) > at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:198) > at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:173) > at > org.apache.spark.sql.execution.DeserializeToObjectExec.doExecute(objects.scala:89) > at > org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:177) > at > org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:173) > at > org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:201) > at > org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151) > at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:198) > at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:173) > at > org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:93) > at > org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:91) > at org.apache.spark.sql.Dataset.rdd$lzycompute(Dataset.scala:3061) > at org.apache.spark.sql.Dataset.rdd(Dataset.scala:3059) > at > org.apache.spark.ml.tree.EnsembleModelReadWrite$.loadImpl(treeModels.scala:450) > at > 
org.apache.spark.ml.classification.GBTClassificationModel$GBTClassificationModelReader.load(GBTClassifier.scala:420) > at > org.apache.spark.ml.classification.GBTClassificationModel$GBTClassificationModelReader.load(GBTClassifier.scala:411) > at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) > at > sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) > at > sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) > at java.lang.reflect.Method.invoke(Method.java:498) > at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244) > at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357) > at py4j.Gateway.invoke(Gateway.java:282) > at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132) > at py4j.commands.CallCommand.execute(CallCommand.java:79) > at py4j.GatewayConnection.run(GatewayConnection.java:238) > at java.lang.Thread.run(Thread.java:748) -- This message was sent by Atlassian Jira (v8.3.4#803005) --------------------------------------------------------------------- To unsubscribe, e-mail: issues-unsubscribe@spark.apache.org For additional commands, e-mail: issues-help@spark.apache.org