[ 
https://issues.apache.org/jira/browse/CARBONDATA-854?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15955500#comment-15955500
 ] 

Sanoj MG commented on CARBONDATA-854:
-------------------------------------

I am working on this. Can this be assigned to me?

> Carbondata with Datastax / Cassandra
> ------------------------------------
>
>                 Key: CARBONDATA-854
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-854
>             Project: CarbonData
>          Issue Type: Improvement
>          Components: spark-integration
>    Affects Versions: 1.1.0-incubating
>         Environment: Datastax DSE 5.0 ( DSE analytics )
>            Reporter: Sanoj MG
>            Priority: Minor
>             Fix For: 1.1.0-incubating
>
>
> I am trying to get CarbonData working in a Datastax DSE 5.0 cluster.
> An exception is thrown while trying to create a CarbonData table from the
> Spark shell. Below are the steps:
> scala> import com.datastax.spark.connector._
> scala> import org.apache.spark.sql.SaveMode
> scala> import org.apache.spark.sql.CarbonContext
> scala> import org.apache.spark.sql.types._
> scala> val cc = new CarbonContext(sc, "cfs://127.0.0.1/opt/CarbonStore")
> scala> val df = cc.read.parquet("file:///home/cassandra/testdata-30day/cassandra/zone.parquet")
> scala> df.write.format("carbondata").option("tableName", "zone").option("compress", "true").option("TempCSV", "false").mode(SaveMode.Overwrite).save()
> The write above then throws the exception below, and the CarbonData table is not created:
> java.io.FileNotFoundException: /opt/CarbonStore/default/zone/Metadata/schema (No such file or directory)
>         at java.io.FileOutputStream.open0(Native Method)
>         at java.io.FileOutputStream.open(FileOutputStream.java:270)
>         at java.io.FileOutputStream.<init>(FileOutputStream.java:213)
>         at java.io.FileOutputStream.<init>(FileOutputStream.java:133)
>         at org.apache.carbondata.core.datastore.impl.FileFactory.getDataOutputStream(FileFactory.java:207)
>         at org.apache.carbondata.core.writer.ThriftWriter.open(ThriftWriter.java:84)
>         at org.apache.spark.sql.hive.CarbonMetastore.createTableFromThrift(CarbonMetastore.scala:293)
>         at org.apache.spark.sql.execution.command.CreateTable.run(carbonTableSchema.scala:163)
>         at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult$lzycompute(commands.scala:58)
>         at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult(commands.scala:56)
>         at org.apache.spark.sql.execution.ExecutedCommand.doExecute(commands.scala:70)
>         at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:132)
>         at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:130)
>         at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
>         at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:130)
>         at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:55)
>         at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:55)
>         at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:145)
>         at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:130)
>         at org.apache.spark.sql.CarbonContext.sql(CarbonContext.scala:139)
>         at org.apache.carbondata.spark.CarbonDataFrameWriter.saveAsCarbonFile(CarbonDataFrameWriter.scala:39)
>         at org.apache.spark.sql.CarbonSource.createRelation(CarbonDatasourceRelation.scala:109)
>         at org.apache.spark.sql.execution.datasources.ResolvedDataSource$.apply(ResolvedDataSource.scala:222)
>         at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:148)
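> For reference, the same JDK exception can be reproduced outside CarbonData with a minimal sketch (assuming /opt/CarbonStore/default/zone/Metadata does not exist on the local disk): java.io.FileOutputStream fails with "No such file or directory" when the parent directory of the target file is missing, which is consistent with the cfs:// store path being treated as a plain local path here.
> scala> import java.io.FileOutputStream
> scala> // Throws java.io.FileNotFoundException when the Metadata parent
> scala> // directory is absent on the local filesystem.
> scala> new FileOutputStream("/opt/CarbonStore/default/zone/Metadata/schema")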



