[ 
https://issues.apache.org/jira/browse/CARBONDATA-3827?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Prasanna Ravichandran updated CARBONDATA-3827:
----------------------------------------------
    Description: 
This issue is seen with opensource jars. Spark 2.4.5 & Carbon 2.0.

Merge DDL is not working as per the mentioned syntax as in CARBONDATA-3597

Test queries: 
 drop table if exists uniqdata1;
 CREATE TABLE uniqdata1 (cust_id int,cust_name String,active_emui_version 
string, dob timestamp, doj timestamp, bigint_column1 bigint,bigint_column2 
bigint,decimal_column1 decimal(30,10), decimal_column2 
decimal(36,36),double_column1 double, double_column2 double,integer_column1 
int) stored as carbondata;
 load data inpath 'hdfs://hacluster/user/prasanna/2000_UniqData.csv' into table 
uniqdata1 
options('fileheader'='cust_id,cust_name,active_emui_version,dob,doj,bigint_column1,bigint_column2,decimal_column1,decimal_column2,double_column1,double_column2,integer_column1','bad_records_action'='force');
 drop table if exists uniqdata;
 CREATE TABLE uniqdata (cust_id int,cust_name String,active_emui_version 
string, dob timestamp, doj timestamp, bigint_column1 bigint,bigint_column2 
bigint,decimal_column1 decimal(30,10), decimal_column2 
decimal(36,36),double_column1 double, double_column2 double,integer_column1 
int) stored as carbondata;
 load data inpath 'hdfs://hacluster/user/prasanna/2000_UniqData.csv' into table 
uniqdata 
options('fileheader'='cust_id,cust_name,active_emui_version,dob,doj,bigint_column1,bigint_column2,decimal_column1,decimal_column2,double_column1,double_column2,integer_column1','bad_records_action'='force');
  
 merge into uniqdata1 as a using uniqdata as b on a.cust_id=b.cust_id; --not 
working, getting parse exception;

 >merge into uniqdata1 as a using uniqdata as b on a.cust_id=b.cust_id;
Error: org.apache.spark.sql.AnalysisException: == Parser1: 
org.apache.spark.sql.parser.CarbonExtensionSpark2SqlParser ==
[1.1] failure: identifier matching regex (?i)EXPLAIN expected
merge into uniqdata1 as a using uniqdata as b on a.cust_id=b.cust_id
^;
== Parser2: org.apache.spark.sql.execution.SparkSqlParser ==
mismatched input 'merge' expecting \{'(', 'SELECT', 'FROM', 'ADD', 'DESC', 
'EMPOWER', 'WITH', 'VALUES', 'CREATE', 'TABLE', 'INSERT', 'DELETE', 'DESCRIBE', 
'EXPLAIN', 'SHOW', 'USE', 'DROP', 'ALTER', 'MAP', 'SET', 'RESET', 'START', 
'COMMIT', 'ROLLBACK', 'REDUCE', 'REFRESH', 'CLEAR', 'CACHE', 'UNCACHE', 'DFS', 
'TRUNCATE', 'ANALYZE', 'LIST', 'REVOKE', 'GRANT', 'LOCK', 'UNLOCK', 'MSCK', 
'EXPORT', 'IMPORT', 'LOAD', 'HEALTHCHECK'}(line 1, pos 0)
== SQL ==
merge into uniqdata1 as a using uniqdata as b on a.cust_id=b.cust_id
^^^; (state=,code=0)

 

 

 

 

  was:
This issue is seen with opensource jars. Spark 2.4.5 & Carbon 2.0.

Merge DDL is not working as per the mentioned syntax as in CARBONDATA-3597

Test queries: 
 drop table if exists uniqdata1;
 CREATE TABLE uniqdata1 (cust_id int,cust_name String,active_emui_version 
string, dob timestamp, doj timestamp, bigint_column1 bigint,bigint_column2 
bigint,decimal_column1 decimal(30,10), decimal_column2 
decimal(36,36),double_column1 double, double_column2 double,integer_column1 
int) stored as carbondata;
 load data inpath 'hdfs://hacluster/user/prasanna/2000_UniqData.csv' into table 
uniqdata1 
options('fileheader'='cust_id,cust_name,active_emui_version,dob,doj,bigint_column1,bigint_column2,decimal_column1,decimal_column2,double_column1,double_column2,integer_column1','bad_records_action'='force');
 drop table if exists uniqdata;
 CREATE TABLE uniqdata (cust_id int,cust_name String,active_emui_version 
string, dob timestamp, doj timestamp, bigint_column1 bigint,bigint_column2 
bigint,decimal_column1 decimal(30,10), decimal_column2 
decimal(36,36),double_column1 double, double_column2 double,integer_column1 
int) stored as carbondata;
 load data inpath 'hdfs://hacluster/user/prasanna/2000_UniqData.csv' into table 
uniqdata 
options('fileheader'='cust_id,cust_name,active_emui_version,dob,doj,bigint_column1,bigint_column2,decimal_column1,decimal_column2,double_column1,double_column2,integer_column1','bad_records_action'='force');
  
 merge into uniqdata1 as a using uniqdata as b on a.cust_id=b.cust_id; --not 
working;

 
 Attached the screenshot for your reference.

 

!image-2020-05-18-21-30-31-344.png!

 

 

 

 


> Merge DDL is not working as per the mentioned syntax.
> -----------------------------------------------------
>
>                 Key: CARBONDATA-3827
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-3827
>             Project: CarbonData
>          Issue Type: Bug
>            Reporter: Prasanna Ravichandran
>            Priority: Major
>
> This issue is seen with opensource jars. Spark 2.4.5 & Carbon 2.0.
> Merge DDL is not working as per the mentioned syntax as in CARBONDATA-3597
> Test queries: 
>  drop table if exists uniqdata1;
>  CREATE TABLE uniqdata1 (cust_id int,cust_name String,active_emui_version 
> string, dob timestamp, doj timestamp, bigint_column1 bigint,bigint_column2 
> bigint,decimal_column1 decimal(30,10), decimal_column2 
> decimal(36,36),double_column1 double, double_column2 double,integer_column1 
> int) stored as carbondata;
>  load data inpath 'hdfs://hacluster/user/prasanna/2000_UniqData.csv' into 
> table uniqdata1 
> options('fileheader'='cust_id,cust_name,active_emui_version,dob,doj,bigint_column1,bigint_column2,decimal_column1,decimal_column2,double_column1,double_column2,integer_column1','bad_records_action'='force');
>  drop table if exists uniqdata;
>  CREATE TABLE uniqdata (cust_id int,cust_name String,active_emui_version 
> string, dob timestamp, doj timestamp, bigint_column1 bigint,bigint_column2 
> bigint,decimal_column1 decimal(30,10), decimal_column2 
> decimal(36,36),double_column1 double, double_column2 double,integer_column1 
> int) stored as carbondata;
>  load data inpath 'hdfs://hacluster/user/prasanna/2000_UniqData.csv' into 
> table uniqdata 
> options('fileheader'='cust_id,cust_name,active_emui_version,dob,doj,bigint_column1,bigint_column2,decimal_column1,decimal_column2,double_column1,double_column2,integer_column1','bad_records_action'='force');
>   
>  merge into uniqdata1 as a using uniqdata as b on a.cust_id=b.cust_id; --not 
> working, getting parse exception;
>  >merge into uniqdata1 as a using uniqdata as b on a.cust_id=b.cust_id;
> Error: org.apache.spark.sql.AnalysisException: == Parser1: 
> org.apache.spark.sql.parser.CarbonExtensionSpark2SqlParser ==
> [1.1] failure: identifier matching regex (?i)EXPLAIN expected
> merge into uniqdata1 as a using uniqdata as b on a.cust_id=b.cust_id
> ^;
> == Parser2: org.apache.spark.sql.execution.SparkSqlParser ==
> mismatched input 'merge' expecting \{'(', 'SELECT', 'FROM', 'ADD', 'DESC', 
> 'EMPOWER', 'WITH', 'VALUES', 'CREATE', 'TABLE', 'INSERT', 'DELETE', 
> 'DESCRIBE', 'EXPLAIN', 'SHOW', 'USE', 'DROP', 'ALTER', 'MAP', 'SET', 'RESET', 
> 'START', 'COMMIT', 'ROLLBACK', 'REDUCE', 'REFRESH', 'CLEAR', 'CACHE', 
> 'UNCACHE', 'DFS', 'TRUNCATE', 'ANALYZE', 'LIST', 'REVOKE', 'GRANT', 'LOCK', 
> 'UNLOCK', 'MSCK', 'EXPORT', 'IMPORT', 'LOAD', 'HEALTHCHECK'}(line 1, pos 0)
> == SQL ==
> merge into uniqdata1 as a using uniqdata as b on a.cust_id=b.cust_id
> ^^^; (state=,code=0)
>  
>  
>  
>  



--
This message was sent by Atlassian Jira
(v8.3.4#803005)

Reply via email to