[ https://issues.apache.org/jira/browse/SPARK-15710?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15313193#comment-15313193 ]
Xin Wu commented on SPARK-15710:
--------------------------------

Hmm... after another rebase of master, it seems that the problem is gone, even for PySpark:
{code}
>>> spark.sql("CREATE DATABASE IF NOT EXISTS test2")
16/06/02 15:16:10 WARN ObjectStore: Failed to get database test2, returning NoSuchObjectException
DataFrame[]
>>> spark.sql("USE test2")
DataFrame[]
>>> df = spark.createDataFrame([
...     (0, "a", 10),
...     (1, "b", 11),
...     (2, "c", 12),
...     (3, "a", 14),
...     (4, "a", 17),
...     (5, "c", 18)
... ], ["id", "category", "age"])
>>> df.write.saveAsTable('test6', mode='overwrite')
Jun 2, 2016 3:14:01 PM WARNING: org.apache.parquet.hadoop.MemoryManager: Total allocation exceeds 95.00% (906,992,000 bytes) of heap memory
Scaling row group sizes to 96.54% for 7 writers
Jun 2, 2016 3:16:43 PM INFO: org.apache.parquet.hadoop.ParquetOutputFormat: Parq16/06/02 15:16:43 WARN HiveMetaStore: Location: file:/Users/xinwu/spark/spark-warehouse/test2.db/test6 specified for non-external table:test6
>>> spark.sql("SELECT * FROM test6 WHERE id = 2").take(1)
[Row(id=2, category=u'c', age=12)]
>>> spark.sql("SELECT * FROM test6 WHERE id = 2").show()
+---+--------+---+
| id|category|age|
+---+--------+---+
|  2|       c| 12|
+---+--------+---+
{code}

> Exception with WHERE clause in SQL for non-default Hive database
> ----------------------------------------------------------------
>
>                  Key: SPARK-15710
>                  URL: https://issues.apache.org/jira/browse/SPARK-15710
>              Project: Spark
>           Issue Type: Bug
>           Components: SQL
>     Affects Versions: 2.0.0
>          Environment: databricks community edition 2.0 preview
>             Reporter: Igor Fridman
>
> The following code throws an exception only with a non-default database. If I
> use the 'default' database, it works.
> {code}
> spark.sql("CREATE DATABASE IF NOT EXISTS test")
> spark.sql("USE test")
> df = spark.createDataFrame([
>     (0, "a", 10),
>     (1, "b", 11),
>     (2, "c", 12),
>     (3, "a", 14),
>     (4, "a", 17),
>     (5, "c", 18)
> ], ["id", "category", "age"])
> df.write.saveAsTable('test', mode='overwrite')
> spark.sql("SELECT * FROM test WHERE id = 2").take(1)
> {code}
> {code}
> ---------------------------------------------------------------------------
> Py4JJavaError                             Traceback (most recent call last)
> <ipython-input-14-7617766e134d> in <module>()
>      13 df.write.saveAsTable('test', mode='overwrite')
>      14
> ---> 15 spark.sql("SELECT * FROM test WHERE id = 2").take(1)
> /databricks/spark/python/pyspark/sql/dataframe.py in take(self, num)
>     333         with SCCallSiteSync(self._sc) as css:
>     334             port = self._sc._jvm.org.apache.spark.sql.execution.python.EvaluatePython.takeAndServe(
> --> 335                 self._jdf, num)
>     336             return list(_load_from_socket(port, BatchedSerializer(PickleSerializer())))
>     337
> /databricks/spark/python/lib/py4j-0.10.1-src.zip/py4j/java_gateway.py in __call__(self, *args)
>     931         answer = self.gateway_client.send_command(command)
>     932         return_value = get_return_value(
> --> 933             answer, self.gateway_client, self.target_id, self.name)
>     934
>     935         for temp_arg in temp_args:
> /databricks/spark/python/pyspark/sql/utils.py in deco(*a, **kw)
>      61     def deco(*a, **kw):
>      62         try:
> ---> 63             return f(*a, **kw)
>      64         except py4j.protocol.Py4JJavaError as e:
>      65             s = e.java_exception.toString()
> /databricks/spark/python/lib/py4j-0.10.1-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
>     310                 raise Py4JJavaError(
>     311                     "An error occurred while calling {0}{1}{2}.\n".
> --> 312                     format(target_id, ".", name), value)
>     313             else:
>     314                 raise Py4JError(
> Py4JJavaError: An error occurred while calling z:org.apache.spark.sql.execution.python.EvaluatePython.takeAndServe.
> : java.lang.ClassNotFoundException: org.apache.parquet.filter2.predicate.ValidTypeMap$FullTypeDescriptor
> 	at java.net.URLClassLoader.findClass(URLClassLoader.java:381)
> 	at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
> 	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:331)
> 	at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
> 	at java.lang.Class.forName0(Native Method)
> 	at java.lang.Class.forName(Class.java:264)
> 	at org.apache.spark.sql.execution.datasources.parquet.ParquetFilters$.relaxParquetValidTypeMap$lzycompute(ParquetFilters.scala:321)
> 	at org.apache.spark.sql.execution.datasources.parquet.ParquetFilters$.relaxParquetValidTypeMap(ParquetFilters.scala:319)
> 	at org.apache.spark.sql.execution.datasources.parquet.ParquetFilters$.createFilter(ParquetFilters.scala:231)
> 	at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$$anonfun$7.apply(ParquetFileFormat.scala:309)
> 	at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$$anonfun$7.apply(ParquetFileFormat.scala:309)
> 	at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:251)
> 	at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:251)
> 	at scala.collection.immutable.List.foreach(List.scala:318)
> 	at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:251)
> 	at scala.collection.AbstractTraversable.flatMap(Traversable.scala:105)
> 	at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat.buildReader(ParquetFileFormat.scala:309)
> 	at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat.buildReaderWithPartitionValues(ParquetFileFormat.scala:268)
> 	at org.apache.spark.sql.execution.datasources.FileSourceStrategy$.apply(FileSourceStrategy.scala:112)
> 	at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:59)
> 	at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:59)
> 	at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
> 	at org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:60)
> 	at org.apache.spark.sql.catalyst.planning.QueryPlanner.planLater(QueryPlanner.scala:55)
> 	at org.apache.spark.sql.execution.SparkStrategies$SpecialLimits$.apply(SparkStrategies.scala:55)
> 	at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:59)
> 	at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:59)
> 	at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
> 	at org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:60)
> 	at org.apache.spark.sql.execution.QueryExecution.sparkPlan$lzycompute(QueryExecution.scala:77)
> 	at org.apache.spark.sql.execution.QueryExecution.sparkPlan(QueryExecution.scala:75)
> 	at org.apache.spark.sql.execution.QueryExecution.executedPlan$lzycompute(QueryExecution.scala:82)
> 	at org.apache.spark.sql.execution.QueryExecution.executedPlan(QueryExecution.scala:82)
> 	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:55)
> 	at org.apache.spark.sql.Dataset.withNewExecutionId(Dataset.scala:2447)
> 	at org.apache.spark.sql.execution.python.EvaluatePython$.takeAndServe(EvaluatePython.scala:39)
> 	at org.apache.spark.sql.execution.python.EvaluatePython.takeAndServe(EvaluatePython.scala)
> 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
> 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
> 	at java.lang.reflect.Method.invoke(Method.java:497)
> 	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:237)
> 	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
> 	at py4j.Gateway.invoke(Gateway.java:280)
> 	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:128)
> 	at py4j.commands.CallCommand.execute(CallCommand.java:79)
> 	at py4j.GatewayConnection.run(GatewayConnection.java:211)
> 	at java.lang.Thread.run(Thread.java:745)
> {code}
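
For anyone still hitting this on a 2.0.0 preview build, one possible workaround (a sketch only, untested in this environment, and assuming the failure really is confined to the Parquet filter-pushdown path the trace shows entering ParquetFilters.createFilter) is to disable pushdown before running the query:

{code}
# Workaround sketch, not verified here: the ClassNotFoundException is thrown
# while translating the WHERE clause into pushed-down Parquet filters
# (ParquetFilters.createFilter in the stack trace). Disabling the standard
# spark.sql.parquet.filterPushdown setting keeps the planner off that code path.
spark.conf.set("spark.sql.parquet.filterPushdown", "false")

spark.sql("USE test")
# The predicate is now evaluated by Spark after the scan instead of inside the
# Parquet reader, so more row groups are read, but the missing Parquet class
# should never be loaded.
spark.sql("SELECT * FROM test WHERE id = 2").take(1)
{code}

The missing class (org.apache.parquet.filter2.predicate.ValidTypeMap$FullTypeDescriptor) likely points at mismatched parquet-mr versions on the classpath, so a consistent Parquet jar, not the configuration change above, would be the real fix.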