[ https://issues.apache.org/jira/browse/SPARK-9465?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14990909#comment-14990909 ]

Xin Wu commented on SPARK-9465:
-------------------------------

I cannot reproduce the issue on 1.5.1 or 1.6.0.

{code}
scala> sqlContext.sql("create table parquet_table stored as parquet as select 1")
scala> sqlContext.sql("select * from parquet_table").show
+---+
|_c0|
+---+
|  1|
+---+
scala> val df = sqlContext.sql("select * from parquet_a")
scala> df.show
+---+
|_c0|
+---+
|  2|
+---+
scala> df.write.mode(SaveMode.Overwrite).saveAsTable("parquet_table")
scala> sqlContext.sql("select * from parquet_table").show
+---+
|_c0|
+---+
|  2|
+---+

{code}
The data from `parquet_a` overwrites the data in `parquet_table`, and the recreated table can be read afterwards without error.
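
For reference, the transcript above assumes an existing `parquet_a` table holding a single row with the value 2; its creation is not shown in the session, but a minimal (hypothetical) setup along the same lines would be:

{code}
// assumed setup, not part of the session above:
// create parquet_a as a Parquet table containing the single value 2
scala> sqlContext.sql("create table parquet_a stored as parquet as select 2")
{code}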

> Could not read parquet table after recreating it with the same table name
> -------------------------------------------------------------------------
>
>                 Key: SPARK-9465
>                 URL: https://issues.apache.org/jira/browse/SPARK-9465
>             Project: Spark
>          Issue Type: Bug
>          Components: SQL
>    Affects Versions: 1.4.1
>            Reporter: StanZhai
>
> I'm using Spark SQL in Spark 1.4.1. I encounter an error when reading a Parquet 
> table after recreating it. The error can be reproduced as follows: 
> {code}
> // hc is an instance of HiveContext 
> hc.sql("select * from b").show()         // this is ok, and b is a parquet table 
> val df = hc.sql("select * from a") 
> df.write.mode(SaveMode.Overwrite).saveAsTable("b") 
> hc.sql("select * from b").show()         // got error 
> {code}
> The error is: 
> {code}
> java.io.FileNotFoundException: File does not exist: /user/hive/warehouse/test.db/b/part-r-00004-3abcbb07-e20a-4b5e-a6e5-59356c3d3149.gz.parquet
>         at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:65)
>         at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:55)
>         at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getBlockLocationsUpdateTimes(FSNamesystem.java:1716)
>         at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getBlockLocationsInt(FSNamesystem.java:1659)
>         at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getBlockLocations(FSNamesystem.java:1639)
>         at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getBlockLocations(FSNamesystem.java:1613)
>         at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.getBlockLocations(NameNodeRpcServer.java:497)
>         at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.getBlockLocations(ClientNamenodeProtocolServerSideTranslatorPB.java:322)
>         at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
>         at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:585)
>         at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:928)
>         at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2013)
>         at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2009)
>         at java.security.AccessController.doPrivileged(Native Method)
>         at javax.security.auth.Subject.doAs(Subject.java:415)
>         at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1548)
>         at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2007)
>         at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
>         at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
>         at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
>         at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
>         at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:106)
>         at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:73)
>         at org.apache.hadoop.hdfs.DFSClient.callGetBlockLocations(DFSClient.java:1144)
>         at org.apache.hadoop.hdfs.DFSClient.getLocatedBlocks(DFSClient.java:1132)
>         at org.apache.hadoop.hdfs.DFSClient.getBlockLocations(DFSClient.java:1182)
>         at org.apache.hadoop.hdfs.DistributedFileSystem$1.doCall(DistributedFileSystem.java:218)
>         at org.apache.hadoop.hdfs.DistributedFileSystem$1.doCall(DistributedFileSystem.java:214)
>         at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
>         at org.apache.hadoop.hdfs.DistributedFileSystem.getFileBlockLocations(DistributedFileSystem.java:214)
>         at org.apache.hadoop.hdfs.DistributedFileSystem.getFileBlockLocations(DistributedFileSystem.java:206)
>         at org.apache.spark.sql.parquet.FilteringParquetRowInputFormat$$anonfun$getTaskSideSplits$1.apply(ParquetTableOperations.scala:625)
>         at org.apache.spark.sql.parquet.FilteringParquetRowInputFormat$$anonfun$getTaskSideSplits$1.apply(ParquetTableOperations.scala:621)
>         at scala.collection.Iterator$class.foreach(Iterator.scala:727)
>         at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
>         at scala.collection.IterableLike$class.foreach(IterableLike.scala:72)
>         at scala.collection.AbstractIterable.foreach(Iterable.scala:54)
>         at org.apache.spark.sql.parquet.FilteringParquetRowInputFormat.getTaskSideSplits(ParquetTableOperations.scala:621)
>         at org.apache.spark.sql.parquet.FilteringParquetRowInputFormat.getSplits(ParquetTableOperations.scala:511)
>         at parquet.hadoop.ParquetInputFormat.getSplits(ParquetInputFormat.java:245)
>         at org.apache.spark.sql.parquet.FilteringParquetRowInputFormat.getSplits(ParquetTableOperations.scala:464)
>         at org.apache.spark.sql.parquet.ParquetRelation2$$anonfun$buildScan$1$$anon$1.getPartitions(newParquet.scala:305)
>         at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
>         at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
>         at scala.Option.getOrElse(Option.scala:120)
>         at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
>         at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32)
>         at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
>         at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
>         at scala.Option.getOrElse(Option.scala:120)
>         at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
>         at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32)
>         at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
>         at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
>         at scala.Option.getOrElse(Option.scala:120)
>         at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
>         at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32)
>         at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
>         at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
>         at scala.Option.getOrElse(Option.scala:120)
>         at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
>         at org.apache.spark.SparkContext.runJob(SparkContext.scala:1781)
>         at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:885)
>         at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:147)
>         at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:108)
>         at org.apache.spark.rdd.RDD.withScope(RDD.scala:286)
>         at org.apache.spark.rdd.RDD.collect(RDD.scala:884)
>         at cn.zhaishidan.test.service.SparkHiveService.formatDF(SparkHiveService.scala:64)
>         at cn.zhaishidan.test.service.SparkHiveService.query(SparkHiveService.scala:78)
>         at cn.zhaishidan.test.api.DatabaseApi$$anonfun$query$1.apply(DatabaseApi.scala:41)
>         at cn.zhaishidan.test.api.DatabaseApi$$anonfun$query$1.apply(DatabaseApi.scala:32)
>         at cn.zhaishidan.test.web.JettyUtils$$anon$1.getOrPost(JettyUtils.scala:81)
>         at cn.zhaishidan.test.web.JettyUtils$$anon$1.doGet(JettyUtils.scala:115)
>         at javax.servlet.http.HttpServlet.service(HttpServlet.java:735)
>         at javax.servlet.http.HttpServlet.service(HttpServlet.java:848)
>         at org.eclipse.jetty.servlet.ServletHolder.handle(ServletHolder.java:684)
>         at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:501)
>         at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1086)
>         at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:428)
>         at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1020)
>         at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:135)
>         at org.eclipse.jetty.server.handler.HandlerList.handle(HandlerList.java:52)
>         at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:116)
>         at org.eclipse.jetty.server.Server.handle(Server.java:370)
>         at org.eclipse.jetty.server.AbstractHttpConnection.handleRequest(AbstractHttpConnection.java:494)
>         at org.eclipse.jetty.server.AbstractHttpConnection.headerComplete(AbstractHttpConnection.java:971)
>         at org.eclipse.jetty.server.AbstractHttpConnection$RequestHandler.headerComplete(AbstractHttpConnection.java:1033)
>         at org.eclipse.jetty.http.HttpParser.parseNext(HttpParser.java:644)
>         at org.eclipse.jetty.http.HttpParser.parseAvailable(HttpParser.java:235)
>         at org.eclipse.jetty.server.AsyncHttpConnection.handle(AsyncHttpConnection.java:82)
>         at org.eclipse.jetty.io.nio.SelectChannelEndPoint.handle(SelectChannelEndPoint.java:667)
>         at org.eclipse.jetty.io.nio.SelectChannelEndPoint$1.run(SelectChannelEndPoint.java:52)
>         at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:608)
>         at org.eclipse.jetty.util.thread.QueuedThreadPool$3.run(QueuedThreadPool.java:543)
>         at java.lang.Thread.run(Thread.java:745)
> Caused by: org.apache.hadoop.ipc.RemoteException(java.io.FileNotFoundException): File does not exist: /user/hive/warehouse/test.db/b/part-r-00004-3abcbb07-e20a-4b5e-a6e5-59356c3d3149.gz.parquet
>         at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:65)
>         at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:55)
>         at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getBlockLocationsUpdateTimes(FSNamesystem.java:1716)
>         at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getBlockLocationsInt(FSNamesystem.java:1659)
>         at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getBlockLocations(FSNamesystem.java:1639)
>         at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getBlockLocations(FSNamesystem.java:1613)
>         at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.getBlockLocations(NameNodeRpcServer.java:497)
>         at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.getBlockLocations(ClientNamenodeProtocolServerSideTranslatorPB.java:322)
>         at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
>         at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:585)
>         at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:928)
>         at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2013)
>         at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2009)
>         at java.security.AccessController.doPrivileged(Native Method)
>         at javax.security.auth.Subject.doAs(Subject.java:415)
>         at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1548)
>         at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2007)
>         at org.apache.hadoop.ipc.Client.call(Client.java:1410)
>         at org.apache.hadoop.ipc.Client.call(Client.java:1363)
>         at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:206)
>         at com.sun.proxy.$Proxy23.getBlockLocations(Unknown Source)
>         at sun.reflect.GeneratedMethodAccessor22.invoke(Unknown Source)
>         at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>         at java.lang.reflect.Method.invoke(Method.java:606)
>         at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:190)
>         at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:103)
>         at com.sun.proxy.$Proxy23.getBlockLocations(Unknown Source)
>         at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getBlockLocations(ClientNamenodeProtocolTranslatorPB.java:219)
>         at org.apache.hadoop.hdfs.DFSClient.callGetBlockLocations(DFSClient.java:1142)
>         ... 71 more 
> {code}
> I've set spark.sql.parquet.cacheMetadata=false, so this error is confusing to me. 
> I also found that the error can be avoided by executing "refresh table test.`b`" 
> after recreating the table `b`. 
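> For illustration, a minimal sketch of that workaround in the same session (the `test` 
> database name is taken from the warehouse path in the stack trace; nothing else is assumed): 
> {code}
> val df = hc.sql("select * from a")
> df.write.mode(SaveMode.Overwrite).saveAsTable("b")
> // refresh Spark's cached metadata for b before reading it again;
> // the HiveContext.refreshTable("b") API should have the same effect
> hc.sql("refresh table test.`b`")
> hc.sql("select * from b").show()   // no longer fails with FileNotFoundException
> {code}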
> For now, to avoid the error, I've modified 
> "spark/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala" 
> at line 323, changing the code: 
> {code}
> val paths = Seq(metastoreRelation.hiveQlTable.getDataLocation.toString) 
> val cached = getCached(tableIdentifier, paths, metastoreSchema, None) 
> val parquetRelation = cached.getOrElse { 
>   val created = LogicalRelation( 
>     new ParquetRelation2(paths.toArray, None, None, parquetOptions)(hive)) 
>   cachedDataSourceTables.put(tableIdentifier, created) 
>   created 
> } 
> parquetRelation 
> {code}
> to 
> {code}
> val paths = Seq(metastoreRelation.hiveQlTable.getDataLocation.toString) 
> val parquetRelation = LogicalRelation( 
>     new ParquetRelation2(paths.toArray, None, None, parquetOptions)(hive)) 
> parquetRelation 
> {code}
> It's working fine for me now. I think this is a bug in the Parquet relation cache.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
