Hi

Please check whether you have write permission for the directory Constants.METASTORE_DB points to.
You can use "chmod" to grant the permission.
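
For example, a quick check from Scala before creating the session (a minimal
sketch; it assumes Constants.METASTORE_DB and Constants.CARBON_FILES are plain
local filesystem paths):

    import java.nio.file.{Files, Paths}

    // Verify that every directory the session writes to exists and is writable;
    // if the check fails, fix the permissions with chmod (or chown).
    for (dir <- Seq(Constants.METASTORE_DB, Constants.CARBON_FILES)) {
      val p = Paths.get(dir)
      if (!Files.exists(p)) Files.createDirectories(p)
      require(Files.isWritable(p), s"No write permission on $dir")
    }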

Regards
Liang


xm_zzc wrote
> Hi all:
>   Please help. I ran a CarbonData demo program directly from Eclipse, which I
> copied from
> carbondata-examples-spark2/src/main/scala/org/apache/carbondata/examples/CarbonSessionExample.scala,
> but this error occurred:
> Exception in thread "main" java.io.FileNotFoundException:
> file:/data/carbon_data/default/carbon_table/Metadata/schema.write 
>       at java.io.FileOutputStream.open0(Native Method)
>       at java.io.FileOutputStream.open(FileOutputStream.java:270)
>       at java.io.FileOutputStream.<init>(FileOutputStream.java:213)
>       at java.io.FileOutputStream.<init>(FileOutputStream.java:101)
>       at org.apache.carbondata.core.datastore.impl.FileFactory.getDataOutputStream(FileFactory.java:188)
>       at org.apache.carbondata.core.fileoperations.AtomicFileOperationsImpl.openForWrite(AtomicFileOperationsImpl.java:61)
>       at org.apache.carbondata.core.writer.ThriftWriter.open(ThriftWriter.java:97)
>       at org.apache.spark.sql.hive.CarbonMetastore.createSchemaThriftFile(CarbonMetastore.scala:412)
>       at org.apache.spark.sql.hive.CarbonMetastore.createTableFromThrift(CarbonMetastore.scala:380)
>       at org.apache.spark.sql.execution.command.CreateTable.run(carbonTableSchema.scala:166)
>       at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:58)
>       at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:56)
>       at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:74)
>       at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:114)
>       at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:114)
>       at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:135)
>       at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
>       at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:132)
>       at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:113)
>       at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:87)
>       at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:87)
>       at org.apache.spark.sql.Dataset.<init>(Dataset.scala:185)
>       at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:64)
>       at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:592)
>       at cn.xm.zzc.carbontest.FirstCarbonData$.main(FirstCarbonData.scala:51)
>       at cn.xm.zzc.carbontest.FirstCarbonData.main(FirstCarbonData.scala)
> 
> However, I found a file named 'schema.write' in the path
> '/data/carbon_data/default/carbon_table/Metadata/', and its size is 0.
> 
> My program:
> 
> val warehouseLocation = Constants.SPARK_WAREHOUSE
>     val storeLocation = Constants.CARBON_FILES
>     
>     //CarbonProperties.getInstance()
>     //  .addProperty("carbon.storelocation", storeLocation)
>     CarbonProperties.getInstance()
>       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
>     
>     import org.apache.spark.sql.CarbonSession._
>     val spark = SparkSession
>       .builder()
>       .appName("FirstCarbonData")
>       .master("local")
>       .config("spark.sql.warehouse.dir", warehouseLocation)
>       //.config("javax.jdo.option.ConnectionURL",
>       // 
> s"jdbc:derby:;databaseName=${Constants.METASTORE_DB};create=true")
>       //.enableHiveSupport()
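>       // getOrCreateCarbonSession(storePath, metaStorePath): the first argument
>       // is the CarbonData store location, the second the metastore DB directory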
>       .getOrCreateCarbonSession(storeLocation, Constants.METASTORE_DB)
>     
>     spark.sql("DROP TABLE IF EXISTS carbon_table")
> 
>     // Create table
>     spark.sql(
>       s"""
>          | CREATE TABLE carbon_table(
>          |    shortField short,
>          |    intField int,
>          |    bigintField long,
>          |    doubleField double,
>          |    stringField string,
>          |    timestampField timestamp,
>          |    decimalField decimal(18,2),
>          |    dateField date,
>          |    charField char(5),
>          |    floatField float,
>          |    complexData array<string>
>          | )
>          | STORED BY 'carbondata'
>          | TBLPROPERTIES('DICTIONARY_INCLUDE'='dateField, charField')
>        """.stripMargin)
> 
>     val path =
> "/home/myubuntu/Works/workspace_latest/incubator-carbondata/examples/spark2/src/main/resources/data.csv"
> 
>     // scalastyle:off
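>     // FILEHEADER maps the CSV columns to the table columns;
>     // COMPLEX_DELIMITER_LEVEL_1 ('#') separates elements of the array column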
>     spark.sql(
>       s"""
>          | LOAD DATA LOCAL INPATH '$path'
>          | INTO TABLE carbon_table
>          | options('FILEHEADER'='shortField,intField,bigintField,doubleField,stringField,timestampField,decimalField,dateField,charField,floatField,complexData','COMPLEX_DELIMITER_LEVEL_1'='#')
>        """.stripMargin)
>     // scalastyle:on
> 
>     spark.sql("""
>              SELECT *
>              FROM carbon_table
>              where stringfield = 'spark' and decimalField > 40
>               """).show
> 
>     spark.sql("""
>              SELECT *
>              FROM carbon_table where length(stringField) = 5
>               """).show
> 
>     spark.sql("""
>              SELECT *
>              FROM carbon_table where date_format(dateField, "yyyy-MM-dd")
> = "2015-07-23"
>               """).show
> 
>     spark.sql("""
>              select count(stringField) from carbon_table
>               """.stripMargin).show
> 
>     spark.sql("""
>            SELECT sum(intField), stringField
>            FROM carbon_table
>            GROUP BY stringField
>               """).show
> 
>     spark.sql(
>       """
>         |select t1.*, t2.*
>         |from carbon_table t1, carbon_table t2
>         |where t1.stringField = t2.stringField
>       """.stripMargin).show
> 
>     spark.sql(
>       """
>         |with t1 as (
>         |select * from carbon_table
>         |union all
>         |select * from carbon_table
>         |)
>         |select t1.*, t2.*
>         |from t1, carbon_table t2
>         |where t1.stringField = t2.stringField
>       """.stripMargin).show
> 
>     spark.sql("""
>              SELECT *
>              FROM carbon_table
>              where stringfield = 'spark' and floatField > 2.8
>               """).show
> 
>     // Drop table
>     // spark.sql("DROP TABLE IF EXISTS carbon_table")
>     
>     spark.stop()



