[ 
https://issues.apache.org/jira/browse/CARBONDATA-643?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Ravindra Pesala updated CARBONDATA-643:
---------------------------------------
    Assignee: Manohar Vanam

> Passing 'ALL_DICTIONARY_PATH' in the load query throws a null pointer
> exception.
> ----------------------------------------------------------------------------------------------
>
>                 Key: CARBONDATA-643
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-643
>             Project: CarbonData
>          Issue Type: Bug
>          Components: data-load
>    Affects Versions: 1.0.0-incubating
>            Reporter: Payal
>            Assignee: Manohar Vanam
>            Priority: Minor
>         Attachments: 7000_UniqData.csv
>
>
> Passing 'ALL_DICTIONARY_PATH' in the load query throws a null pointer
> exception.
> //CREATE TABLE
> CREATE TABLE uniq_include_dictionary (CUST_ID int,CUST_NAME 
> String,ACTIVE_EMUI_VERSION string, DOB timestamp, DOJ timestamp, 
> BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10), 
> DECIMAL_COLUMN2 decimal(36,10),Double_COLUMN1 double, Double_COLUMN2 
> double,INTEGER_COLUMN1 int) STORED BY 'org.apache.carbondata.format' 
> TBLPROPERTIES('DICTIONARY_INCLUDE'='CUST_ID,Double_COLUMN2,DECIMAL_COLUMN2');
> //LOAD QUERY
>  LOAD DATA INPATH 'hdfs://localhost:54311/payal/7000_UniqData.csv' into table 
> uniq_include_dictionary OPTIONS('DELIMITER'=',' , 
> 'QUOTECHAR'='"','BAD_RECORDS_LOGGER_ENABLE'='TRUE', 
> 'BAD_RECORDS_ACTION'='FORCE','FILEHEADER'='CUST_ID,CUST_NAME,ACTIVE_EMUI_VERSION,DOB,DOJ,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1','SINGLE_PASS'='false','ALL_DICTIONARY_PATH'='hdfs://localhost:54311/opt/alldictionary/data.dictionary');
> Error: java.lang.NullPointerException (state=,code=0)
> LOGS
> INFO  16-01 16:56:37,624 - Running query 'LOAD DATA INPATH 
> 'hdfs://localhost:54311/payal/7000_UniqData.csv' into table 
> uniq_include_dictionary OPTIONS('DELIMITER'=',' , 
> 'QUOTECHAR'='"','BAD_RECORDS_LOGGER_ENABLE'='TRUE', 
> 'BAD_RECORDS_ACTION'='FORCE','FILEHEADER'='CUST_ID,CUST_NAME,ACTIVE_EMUI_VERSION,DOB,DOJ,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1','SINGLE_PASS'='false','MULTILINE'='true','ALL_DICTIONARY_PATH'='hdfs://localhost:54311/opt/alldictionary/data.dictionary')'
>  with 17ec3816-91e3-462e-aa9b-a7ae76405564
> INFO  16-01 16:56:37,625 - pool-26-thread-34 Query [LOAD DATA INPATH 
> 'HDFS://LOCALHOST:54311/PAYAL/7000_UNIQDATA.CSV' INTO TABLE 
> UNIQ_INCLUDE_DICTIONARY OPTIONS('DELIMITER'=',' , 
> 'QUOTECHAR'='"','BAD_RECORDS_LOGGER_ENABLE'='TRUE', 
> 'BAD_RECORDS_ACTION'='FORCE','FILEHEADER'='CUST_ID,CUST_NAME,ACTIVE_EMUI_VERSION,DOB,DOJ,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,DOUBLE_COLUMN1,DOUBLE_COLUMN2,INTEGER_COLUMN1','SINGLE_PASS'='FALSE','MULTILINE'='TRUE','ALL_DICTIONARY_PATH'='HDFS://LOCALHOST:54311/OPT/ALLDICTIONARY/DATA.DICTIONARY')]
> INFO  16-01 16:56:37,641 - Successfully able to get the table metadata file 
> lock
> INFO  16-01 16:56:37,644 - pool-26-thread-34 Initiating Direct Load for the 
> Table : (default.uniq_include_dictionary)
> INFO  16-01 16:56:37,644 - pool-26-thread-34 Generate global dictionary from 
> dictionary files!
> ERROR 16-01 16:56:37,645 - pool-26-thread-34 Exception occured:File does not 
> exist: hdfs://localhost:54311/opt/alldictionary/data.dictionary
> ERROR 16-01 16:56:37,645 - pool-26-thread-34 generate global dictionary failed
> java.lang.NullPointerException
>     at 
> org.apache.carbondata.core.datastorage.store.filesystem.AbstractDFSCarbonFile.getName(AbstractDFSCarbonFile.java:83)
>     at 
> org.apache.carbondata.spark.util.GlobalDictionaryUtil$.validateAllDictionaryPath(GlobalDictionaryUtil.scala:649)
>     at 
> org.apache.carbondata.spark.util.GlobalDictionaryUtil$.generateGlobalDictionary(GlobalDictionaryUtil.scala:743)
>     at 
> org.apache.spark.sql.execution.command.LoadTable.run(carbonTableSchema.scala:569)
>     at 
> org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult$lzycompute(commands.scala:58)
>     at 
> org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult(commands.scala:56)
>     at 
> org.apache.spark.sql.execution.ExecutedCommand.doExecute(commands.scala:70)
>     at 
> org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:132)
>     at 
> org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:130)
>     at 
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
>     at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:130)
>     at 
> org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:55)
>     at 
> org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:55)
>     at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:145)
>     at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:130)
>     at org.apache.spark.sql.CarbonContext.sql(CarbonContext.scala:139)
>     at 
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:211)
>     at 
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:154)
>     at 
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:151)
>     at java.security.AccessController.doPrivileged(Native Method)
>     at javax.security.auth.Subject.doAs(Subject.java:422)
>     at 
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1628)
>     at 
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:164)
>     at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
>     at java.util.concurrent.FutureTask.run(FutureTask.java:266)
>     at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>     at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>     at java.lang.Thread.run(Thread.java:745)
> ERROR 16-01 16:56:37,645 - pool-26-thread-34 
> java.lang.NullPointerException (same stack trace as above)
> AUDIT 16-01 16:56:37,645 - [sid-vostro-3546][hduser][Thread-263]Dataload 
> failure for default.uniq_include_dictionary. Please check the logs
> INFO  16-01 16:56:37,646 - pool-26-thread-34 Successfully deleted the lock 
> file /tmp/default/uniq_include_dictionary/meta.lock
> INFO  16-01 16:56:37,646 - Table MetaData Unlocked Successfully after data 
> load
> ERROR 16-01 16:56:37,646 - Error executing query, currentState RUNNING, 
> java.lang.NullPointerException (same stack trace as above)
> ERROR 16-01 16:56:37,646 - Error running hive query: 
> org.apache.hive.service.cli.HiveSQLException: java.lang.NullPointerException
>     at 
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:246)
>     at 
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:154)
>     at 
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:151)
>     at java.security.AccessController.doPrivileged(Native Method)
>     at javax.security.auth.Subject.doAs(Subject.java:422)
>     at 
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1628)
>     at 
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:164)
>     at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
>     at java.util.concurrent.FutureTask.run(FutureTask.java:266)
>     at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>     at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>     at java.lang.Thread.run(Thread.java:745)
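>
> The "Exception occured:File does not exist" line followed immediately by the
> NPE in AbstractDFSCarbonFile.getName (line 83) points at the likely root
> cause: the file wrapper appears to swallow the I/O error while resolving the
> path and is left holding a null FileStatus, which getName() then
> dereferences. A simplified Java reconstruction of that suspected path (class
> shape and field names are inferred from the stack trace and log output, not
> copied from the CarbonData source):
>
> import java.io.IOException;
> import org.apache.hadoop.conf.Configuration;
> import org.apache.hadoop.fs.FileStatus;
> import org.apache.hadoop.fs.FileSystem;
> import org.apache.hadoop.fs.Path;
>
> // Hypothetical sketch of the failing path, not the actual class.
> public class DfsCarbonFileSketch {
>   private FileStatus fileStatus; // stays null when the path cannot be resolved
>
>   public DfsCarbonFileSketch(String filePath) {
>     Path path = new Path(filePath);
>     try {
>       FileSystem fs = path.getFileSystem(new Configuration());
>       fileStatus = fs.getFileStatus(path); // FileNotFoundException for a missing path
>     } catch (IOException e) {
>       // Only logged, matching "Exception occured:File does not exist";
>       // execution continues with fileStatus == null.
>       System.err.println("Exception occured:" + e.getMessage());
>     }
>   }
>
>   public String getName() {
>     // NullPointerException here when the constructor failed to resolve
>     // the path (cf. AbstractDFSCarbonFile.java:83 in the trace).
>     return fileStatus.getPath().getName();
>   }
> }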
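>
> Validating the path before any CarbonFile method is called would turn the
> NPE into an actionable error message. A minimal caller-side guard, only a
> sketch of the idea rather than the actual patch (the helper name is
> hypothetical):
>
> import java.io.FileNotFoundException;
> import java.io.IOException;
> import org.apache.hadoop.conf.Configuration;
> import org.apache.hadoop.fs.FileSystem;
> import org.apache.hadoop.fs.Path;
>
> final class AllDictionaryPathGuard {
>   // Throws a descriptive exception instead of letting a null FileStatus
>   // propagate into later calls such as getName().
>   static void requireExists(String allDictionaryPath) throws IOException {
>     Path path = new Path(allDictionaryPath);
>     FileSystem fs = path.getFileSystem(new Configuration());
>     if (!fs.exists(path)) {
>       throw new FileNotFoundException(
>           "ALL_DICTIONARY_PATH does not exist: " + allDictionaryPath);
>     }
>   }
> }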



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
