[ https://issues.apache.org/jira/browse/CARBONDATA-593?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

DEEPAK MEHRA updated CARBONDATA-593:
------------------------------------
    Description: 
The select command does not seem to work in carbon-spark-shell: a runtime error is thrown when the show method is invoked on a select query. It fails with
java.io.FileNotFoundException: File does not exist: /home/hduser/software/spark-1.6.2-bin-hadoop2.6/carbon.store/mydb/demo/Fact/Part0/Segment_0


Queries executed:

scala> cc.sql("create table demo(id int, name string, age int, city string) stored by 'carbondata'").show

scala> cc.sql("LOAD DATA inpath 'hdfs://hadoop-master:54311/data/employee.csv' INTO table demo")

scala> cc.sql("select * from demo").show

Result: it throws java.lang.RuntimeException: File Not Found
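
The failing path is a local-filesystem location, yet the stack trace below shows it being resolved through HDFS's DistributedFileSystem, so the carbon store path and fs.defaultFS appear to point at different filesystems. A minimal diagnostic sketch (an assumption, run in the same spark-shell session where sc is already in scope) to confirm where the segment path resolves:

import org.apache.hadoop.fs.Path

// Hypothetical check: resolve the segment path against the active
// Hadoop configuration and report which filesystem claims it and
// whether the directory exists there.
val segmentPath = new Path("/home/hduser/software/spark-1.6.2-bin-hadoop2.6/carbon.store/mydb/demo/Fact/Part0/Segment_0")
val fs = segmentPath.getFileSystem(sc.hadoopConfiguration)
println(s"resolved by: ${fs.getUri}")         // hdfs://hadoop-master:54311 here, not file:///
println(s"exists: ${fs.exists(segmentPath)}")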


scala> cc.sql("select * from mydb.demo").show
INFO  04-01 16:29:22,157 - main Query [SELECT * FROM MYDB.DEMO]
INFO  04-01 16:29:22,160 - Parsing command: select * from mydb.demo
INFO  04-01 16:29:22,161 - Parse Completed
INFO  04-01 16:29:22,162 - Parsing command: select * from mydb.demo
INFO  04-01 16:29:22,163 - Parse Completed
INFO  04-01 16:29:22,163 - 0: get_table : db=mydb tbl=demo
INFO  04-01 16:29:22,163 - ugi=hduser   ip=unknown-ip-addr      cmd=get_table : db=mydb tbl=demo
INFO  04-01 16:29:22,195 - main Starting to optimize plan
java.io.FileNotFoundException: File does not exist: /home/hduser/software/spark-1.6.2-bin-hadoop2.6/carbon.store/mydb/demo/Fact/Part0/Segment_0
        at org.apache.hadoop.hdfs.DistributedFileSystem$17.doCall(DistributedFileSystem.java:1110)
        at org.apache.hadoop.hdfs.DistributedFileSystem$17.doCall(DistributedFileSystem.java:1102)
        at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
        at org.apache.hadoop.hdfs.DistributedFileSystem.getFileStatus(DistributedFileSystem.java:1102)
        at org.apache.hadoop.fs.FileSystem.resolvePath(FileSystem.java:747)
        at org.apache.hadoop.hdfs.DistributedFileSystem$15.<init>(DistributedFileSystem.java:726)
        at org.apache.hadoop.hdfs.DistributedFileSystem.listLocatedStatus(DistributedFileSystem.java:717)
        at org.apache.hadoop.fs.FileSystem.listLocatedStatus(FileSystem.java:1780)
        at org.apache.carbondata.hadoop.CarbonInputFormat.getFileStatusOfSegments(CarbonInputFormat.java:559)
        at org.apache.carbondata.hadoop.CarbonInputFormat.listStatus(CarbonInputFormat.java:519)
        at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.getSplits(FileInputFormat.java:340)
        at org.apache.carbondata.hadoop.CarbonInputFormat.getSplitsInternal(CarbonInputFormat.java:251)
        at org.apache.carbondata.hadoop.CarbonInputFormat.getTableBlockInfo(CarbonInputFormat.java:372)
        at org.apache.carbondata.hadoop.CarbonInputFormat.getSegmentAbstractIndexs(CarbonInputFormat.java:402)
        at org.apache.carbondata.hadoop.CarbonInputFormat.getDataBlocksOfSegment(CarbonInputFormat.java:325)
        at org.apache.carbondata.hadoop.CarbonInputFormat.getSplits(CarbonInputFormat.java:288)
        at org.apache.carbondata.hadoop.CarbonInputFormat.getSplits(CarbonInputFormat.java:237)
        at org.apache.carbondata.spark.rdd.CarbonScanRDD.getPartitions(CarbonScanRDD.scala:82)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
        at scala.Option.getOrElse(Option.scala:120)
        at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
        at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
        at scala.Option.getOrElse(Option.scala:120)
        at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
        at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
        at scala.Option.getOrElse(Option.scala:120)
        at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
        at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
        at scala.Option.getOrElse(Option.scala:120)
        at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
        at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
        at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
        at scala.Option.getOrElse(Option.scala:120)
        at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
        at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:190)
        at org.apache.spark.sql.execution.Limit.executeCollect(basicOperators.scala:165)
        at org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:174)
        at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1499)
        at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1499)
        at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)
        at org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:2086)
        at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$execute$1(DataFrame.scala:1498)
        at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$collect(DataFrame.scala:1505)
        at org.apache.spark.sql.DataFrame$$anonfun$head$1.apply(DataFrame.scala:1375)
        at org.apache.spark.sql.DataFrame$$anonfun$head$1.apply(DataFrame.scala:1374)
        at org.apache.spark.sql.DataFrame.withCallback(DataFrame.scala:2099)
        at org.apache.spark.sql.DataFrame.head(DataFrame.scala:1374)
        at org.apache.spark.sql.DataFrame.take(DataFrame.scala:1456)
        at org.apache.spark.sql.DataFrame.showString(DataFrame.scala:170)
        at org.apache.spark.sql.DataFrame.show(DataFrame.scala:350)
        at org.apache.spark.sql.DataFrame.show(DataFrame.scala:311)
        at org.apache.spark.sql.DataFrame.show(DataFrame.scala:319)
        at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:31)
        at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:36)
        at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:38)
        at $iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:40)
        at $iwC$$iwC$$iwC$$iwC.<init>(<console>:42)
        at $iwC$$iwC$$iwC.<init>(<console>:44)
        at $iwC$$iwC.<init>(<console>:46)
        at $iwC.<init>(<console>:48)
        at <init>(<console>:50)
        at .<init>(<console>:54)
        at .<clinit>(<console>)
        at .<init>(<console>:7)
        at .<clinit>(<console>)
        at $print(<console>)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.apache.spark.repl.SparkIMain$ReadEvalPrint.call(SparkIMain.scala:1065)
        at org.apache.spark.repl.SparkIMain$Request.loadAndRun(SparkIMain.scala:1346)
        at org.apache.spark.repl.SparkIMain.loadAndRunReq$1(SparkIMain.scala:840)
        at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:871)
        at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:819)
        at org.apache.spark.repl.SparkILoop.reallyInterpret$1(SparkILoop.scala:857)
        at org.apache.spark.repl.SparkILoop.interpretStartingWith(SparkILoop.scala:902)
        at org.apache.spark.repl.SparkILoop.command(SparkILoop.scala:814)
        at org.apache.spark.repl.SparkILoop.processLine$1(SparkILoop.scala:657)
        at org.apache.spark.repl.SparkILoop.innerLoop$1(SparkILoop.scala:665)
        at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$loop(SparkILoop.scala:670)
        at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply$mcZ$sp(SparkILoop.scala:997)
        at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
        at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
        at scala.tools.nsc.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:135)
        at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$process(SparkILoop.scala:945)
        at org.apache.spark.repl.SparkILoop.process(SparkILoop.scala:1059)
        at org.apache.spark.repl.Main$.main(Main.scala:31)
        at org.apache.spark.repl.Main.main(Main.scala)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:731)
        at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
        at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
        at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
        at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
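
Every frame beneath the exception goes through org.apache.hadoop.hdfs.DistributedFileSystem, which suggests the default local carbon.store location is being looked up on HDFS. One possible workaround (a sketch only, assuming this release exposes the CarbonContext(sc, storePath) constructor; not verified as the actual fix) is to build the context with an explicit HDFS store path before creating and loading the table:

scala> import org.apache.spark.sql.CarbonContext
scala> // hypothetical store path on HDFS, matching fs.defaultFS, so that
scala> // segment lookups and data loads agree on where Segment_0 lives
scala> val cc = new CarbonContext(sc, "hdfs://hadoop-master:54311/carbon.store")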





  was:
The select command does not seem to work in carbon-spark-shell: a runtime error is thrown when the show method is invoked on a select query. It fails with
java.io.FileNotFoundException: File does not exist: /home/hduser/software/spark-1.6.2-bin-hadoop2.6/carbon.store/mydb/demo/Fact/Part0/Segment_0

Query executed: cc.sql("select * from tablename").show
Result: it throws java.lang.RuntimeException: File Not Found

> Select command seems not to work on carbon-spark-shell; it throws a runtime error when show is invoked on a select query
> -------------------------------------------------------------------------------------------------------------------------------------
>
>                 Key: CARBONDATA-593
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-593
>             Project: CarbonData
>          Issue Type: Bug
>          Components: sql
>    Affects Versions: 1.0.0-incubating
>            Reporter: DEEPAK MEHRA
>            Priority: Minor
>



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
