anubhav tarar created CARBONDATA-1192:
-----------------------------------------

             Summary: Unable to select data from more than one table in Hive
                 Key: CARBONDATA-1192
                 URL: https://issues.apache.org/jira/browse/CARBONDATA-1192
             Project: CarbonData
          Issue Type: Bug
          Components: hive-integration
    Affects Versions: 1.2.0
         Environment: Hive 1.2, Spark 2.1
            Reporter: anubhav tarar
            Assignee: anubhav tarar
             Fix For: 1.2.0


Inside the Spark shell:
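
(The `carbon` session used below is assumed to have been created in the usual CarbonData-on-Spark-2.1 way; the builder call and store path are not part of this report, so the following is only an illustrative sketch.)

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.CarbonSession._

// Illustrative setup only -- the store path is an assumption, not taken from this report
val carbon = SparkSession.builder()
  .config(sc.getConf)
  .getOrCreateCarbonSession("hdfs://localhost:54310/user1/carbonstore")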

carbon.sql("DROP TABLE IF EXISTS CUSTOMER ")

    carbon.sql("CREATE TABLE CUSTOMER ( C_CUSTKEY INT ,\n C_NAME STRING ,\n 
C_ADDRESS STRING ,\n " +
           "C_NATIONKEY INT ,\n C_PHONE STRING ,\n C_ACCTBAL DECIMAL(15,2) ,\n 
C_MKTSEGMENT " +
           "STRING ,\n C_COMMENT STRING ) STORED BY 'carbondata' ")

    carbon.sql("LOAD DATA INPATH \"hdfs://localhost:54310/user1/customer.csv\" 
INTO TABLE customer " +
              "OPTIONS('DELIMITER'='|' , 'QUOTECHAR'='\"' , 
'FILEHEADER'='C_CUSTKEY,C_NAME," +
              
"C_ADDRESS,C_NATIONKEY,C_PHONE,C_ACCTBAL,C_MKTSEGMENT,C_COMMENT')")

 carbon.sql("DROP TABLE IF EXISTS ORDERS ")

carbon.sql("CREATE TABLE ORDERS ( O_ORDERKEY INT ,O_CUSTKEY INT ,O_ORDERSTATUS 
STRING ,O_TOTALPRICE DECIMAL(15,2) , O_ORDERDATE TIMESTAMP , O_ORDERPRIORITY 
STRING , O_CLERK STRING , O_SHIPPRIORITY INT , O_COMMENT STRING ) STORED BY 
'carbondata' ")

    carbon.sql("LOAD DATA INPATH 'hdfs://localhost:54310/user1/orders.csv' INTO 
TABLE orders " +
              "OPTIONS('DELIMITER'='|' , 
'QUOTECHAR'='\"','FILEHEADER'='O_ORDERKEY,O_CUSTKEY," +
              
"O_ORDERSTATUS,O_TOTALPRICE,O_ORDERDATE,O_ORDERPRIORITY,O_CLERK,O_SHIPPRIORITY,"
 +
              "O_COMMENT')")

Read the data from the Hive shell:
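
(It is assumed, not stated in this report, that the two Carbon tables are exposed to Hive through the carbondata-hive module before querying, roughly as sketched below; the jar path and store location are illustrative only, and the same mapping would be repeated for ORDERS.)

-- illustrative Hive-side registration, not part of the original report
ADD JAR /path/to/carbondata-hive.jar;
ALTER TABLE customer SET FILEFORMAT
  INPUTFORMAT "org.apache.carbondata.hive.MapredCarbonInputFormat"
  OUTPUTFORMAT "org.apache.carbondata.hive.MapredCarbonOutputFormat"
  SERDE "org.apache.carbondata.hive.CarbonHiveSerDe";
ALTER TABLE customer SET LOCATION 'hdfs://localhost:54310/user1/carbonstore/default/customer';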

hive> select o_custkey,c_custkey from orders,customer limit 2;
Warning: Shuffle Join JOIN[4][tables = [orders, customer]] in Stage 
'Stage-1:MAPRED' is a cross product
Query ID = hduser_20170619125257_d889efa9-261f-436e-9489-fd15d6b76beb
Total jobs = 1
Stage-1 is selected by condition resolver.
Launching Job 1 out of 1
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
  set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
  set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
  set mapreduce.job.reduces=<number>
Job running in-process (local Hadoop)
2017-06-19 12:53:01,987 Stage-1 map = 0%,  reduce = 0%
2017-06-19 12:53:49,113 Stage-1 map = 38%,  reduce = 0%
2017-06-19 12:53:51,127 Stage-1 map = 100%,  reduce = 0%
Ended Job = job_local1708233203_0001 with errors
Error during job, obtaining debugging information...
Job Tracking URL: http://localhost:8080/
FAILED: Execution Error, return code 2 from 
org.apache.hadoop.hive.ql.exec.mr.MapRedTask
MapReduce Jobs Launched: 
Stage-Stage-1:  HDFS Read: 12033731 HDFS Write: 0 FAIL
Total MapReduce CPU Time Spent: 0 msec

--
This message was sent by Atlassian JIRA
(v6.4.14#64029)
