Hi,

My Steps:

### HIVE
CREATE TABLE CUSTOMER (
C_CUSTKEY    BIGINT,
C_NAME       VARCHAR(25),
C_ADDRESS    VARCHAR(40),
C_NATIONKEY  BIGINT,
C_PHONE      VARCHAR(15),
C_ACCTBAL    DECIMAL,
C_MKTSEGMENT VARCHAR(10),
C_COMMENT    VARCHAR(117)
) ROW FORMAT SERDE 'com.bizo.hive.serde.csv.CSVSerde';
LOAD DATA LOCAL INPATH '/usr/local/pdgf/output/CUSTOMER.csv' INTO TABLE CUSTOMER;

CREATE TABLE ORDERS (
O_ORDERKEY       BIGINT,
O_CUSTKEY        BIGINT,
O_ORDERSTATUS    STRING,
O_TOTALPRICE     DECIMAL,
O_ORDERDATE      STRING,
O_ORDERPRIORITY  VARCHAR(15),
O_CLERK          VARCHAR(15),
O_SHIPPRIORITY   INT,
O_COMMENT        VARCHAR(79)
) ROW FORMAT SERDE 'com.bizo.hive.serde.csv.CSVSerde';
LOAD DATA LOCAL INPATH '/usr/local/pdgf/output/ORDERS.csv' INTO TABLE ORDERS;

CREATE TABLE LINEITEM (
L_ORDERKEY       BIGINT,
L_PARTKEY        BIGINT,
L_SUPPKEY        BIGINT,
L_LINENUMBER     INT,
L_QUANTITY       DECIMAL,
L_EXTENDEDPRICE  DECIMAL,
L_DISCOUNT       DECIMAL,
L_TAX            DECIMAL,
L_SHIPDATE       STRING,
L_COMMITDATE     STRING,
L_RECEIPTDATE    STRING,
L_RETURNFLAG     STRING,
L_LINESTATUS     STRING,
L_SHIPINSTRUCT   VARCHAR(25),
L_SHIPMODE       VARCHAR(10),
L_COMMENT        VARCHAR(44)
) ROW FORMAT SERDE 'com.bizo.hive.serde.csv.CSVSerde';
LOAD DATA LOCAL INPATH '/usr/local/pdgf/output/LINEITEM.csv' INTO TABLE LINEITEM;
… (same for other tables)
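
Following up on Michael's point below: as far as I can tell, this CSVSerde hands every column back as a string no matter what types the DDL declares. Hive converts silently, but Spark SQL trusts the declared DECIMAL type, which would explain the cast failure. One workaround I can think of is to declare the CSV-backed table with STRING columns only and copy it into a properly typed table. A sketch for CUSTOMER (the CUSTOMER_CSV and CUSTOMER_TYPED names are just examples):

-- Stage the raw CSV with every column as STRING, i.e. what the SerDe
-- actually produces.
CREATE TABLE CUSTOMER_CSV (
C_CUSTKEY    STRING,
C_NAME       STRING,
C_ADDRESS    STRING,
C_NATIONKEY  STRING,
C_PHONE      STRING,
C_ACCTBAL    STRING,
C_MKTSEGMENT STRING,
C_COMMENT    STRING
) ROW FORMAT SERDE 'com.bizo.hive.serde.csv.CSVSerde';
LOAD DATA LOCAL INPATH '/usr/local/pdgf/output/CUSTOMER.csv' INTO TABLE CUSTOMER_CSV;

-- Copy into a table with the intended types, so downstream readers
-- (including Spark SQL) see real BIGINT/DECIMAL values instead of strings.
CREATE TABLE CUSTOMER_TYPED (
C_CUSTKEY    BIGINT,
C_NAME       VARCHAR(25),
C_ADDRESS    VARCHAR(40),
C_NATIONKEY  BIGINT,
C_PHONE      VARCHAR(15),
C_ACCTBAL    DECIMAL,
C_MKTSEGMENT VARCHAR(10),
C_COMMENT    VARCHAR(117)
);
INSERT OVERWRITE TABLE CUSTOMER_TYPED
SELECT CAST(C_CUSTKEY AS BIGINT),
       CAST(C_NAME AS VARCHAR(25)),
       CAST(C_ADDRESS AS VARCHAR(40)),
       CAST(C_NATIONKEY AS BIGINT),
       CAST(C_PHONE AS VARCHAR(15)),
       CAST(C_ACCTBAL AS DECIMAL),
       CAST(C_MKTSEGMENT AS VARCHAR(10)),
       CAST(C_COMMENT AS VARCHAR(117))
FROM CUSTOMER_CSV;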


hive> add jar /hadoop/hive/csv-serde-1.1.2-0.11.0-all.jar;
hive> select l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue,
        o_orderdate, o_shippriority
      from customer c
      join orders o on c.c_mktsegment = 'BUILDING' and c.c_custkey = o.o_custkey
      join lineitem l on l.l_orderkey = o.o_orderkey
      where o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15'
      group by l_orderkey, o_orderdate, o_shippriority
      order by revenue desc, o_orderdate
      limit 10;

MapReduce Total cumulative CPU time: 25 seconds 110 msec
Ended Job = job_1414101999739_0004
MapReduce Jobs Launched: 
Job 0: Map: 26  Reduce: 7   Cumulative CPU: 378.14 sec   HDFS Read: 6502040850 HDFS Write: 173752818 SUCCESS
Job 1: Map: 100  Reduce: 27   Cumulative CPU: 1376.06 sec   HDFS Read: 26273646797 HDFS Write: 183687996 SUCCESS
Job 2: Map: 3  Reduce: 1   Cumulative CPU: 32.25 sec   HDFS Read: 183694290 HDFS Write: 183706480 SUCCESS
Job 3: Map: 1  Reduce: 1   Cumulative CPU: 25.11 sec   HDFS Read: 183707750 HDFS Write: 349 SUCCESS
Total MapReduce CPU Time Spent: 30 minutes 11 seconds 560 msec



### Run the same SQL in Spark
scala> val sqlContext = new org.apache.spark.sql.hive.HiveContext(sc)
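
(The SerDe jar has to be visible on the Spark side as well, otherwise the tables cannot be deserialized at all. If I remember correctly, the HiveContext accepts the same statement Hive does, issued through sqlContext.sql:)

ADD JAR /hadoop/hive/csv-serde-1.1.2-0.11.0-all.jar;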
scala> sqlContext.sql("""
         select l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue,
           o_orderdate, o_shippriority
         from customer c
         join orders o on c.c_mktsegment = 'BUILDING' and c.c_custkey = o.o_custkey
         join lineitem l on l.l_orderkey = o.o_orderkey
         where o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15'
         group by l_orderkey, o_orderdate, o_shippriority
         order by revenue desc, o_orderdate
         limit 10""").collect().foreach(println)

java.lang.ClassCastException: java.lang.String cannot be cast to scala.math.BigDecimal
        scala.math.Numeric$BigDecimalIsFractional$.minus(Numeric.scala:182)
        org.apache.spark.sql.catalyst.expressions.Subtract$$anonfun$eval$3.apply(arithmetic.scala:64)
        org.apache.spark.sql.catalyst.expressions.Subtract$$anonfun$eval$3.apply(arithmetic.scala:64)
        org.apache.spark.sql.catalyst.expressions.Expression.n2(Expression.scala:114)
        org.apache.spark.sql.catalyst.expressions.Subtract.eval(arithmetic.scala:64)
        org.apache.spark.sql.catalyst.expressions.Expression.n2(Expression.scala:108)
        org.apache.spark.sql.catalyst.expressions.Multiply.eval(arithmetic.scala:70)
        org.apache.spark.sql.catalyst.expressions.Coalesce.eval(nullFunctions.scala:47)
        org.apache.spark.sql.catalyst.expressions.Expression.n2(Expression.scala:108)
        org.apache.spark.sql.catalyst.expressions.Add.eval(arithmetic.scala:58)
        org.apache.spark.sql.catalyst.expressions.MutableLiteral.update(literals.scala:69)
        org.apache.spark.sql.catalyst.expressions.SumFunction.update(aggregates.scala:433)
        org.apache.spark.sql.execution.Aggregate$$anonfun$execute$1$$anonfun$7.apply(Aggregate.scala:167)
        org.apache.spark.sql.execution.Aggregate$$anonfun$execute$1$$anonfun$7.apply(Aggregate.scala:151)
        org.apache.spark.rdd.RDD$$anonfun$13.apply(RDD.scala:596)
        org.apache.spark.rdd.RDD$$anonfun$13.apply(RDD.scala:596)
        org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262)
        org.apache.spark.rdd.RDD.iterator(RDD.scala:229)
        org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262)
        org.apache.spark.rdd.RDD.iterator(RDD.scala:229)
        org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:68)
        org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
        org.apache.spark.scheduler.Task.run(Task.scala:54)
        org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:177)
        java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        java.lang.Thread.run(Thread.java:745)
Driver stacktrace:
        at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1185)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1174)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1173)
        at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
        at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
        at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1173)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:688)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:688)
        at scala.Option.foreach(Option.scala:236)
        at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:688)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessActor$$anonfun$receive$2.applyOrElse(DAGScheduler.scala:1391)
        at akka.actor.ActorCell.receiveMessage(ActorCell.scala:498)
        at akka.actor.ActorCell.invoke(ActorCell.scala:456)
        at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:237)
        at akka.dispatch.Mailbox.run(Mailbox.scala:219)
        at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:386)
        at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
        at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
        at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
        at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
14/10/25 06:50:15 WARN TaskSetManager: Lost task 7.3 in stage 5.0 (TID 575, m137.emblocsoft.net): TaskKilled (killed intentionally)
14/10/25 06:50:15 WARN TaskSetManager: Lost task 24.2 in stage 5.0 (TID 560, m137.emblocsoft.net): TaskKilled (killed intentionally)
14/10/25 06:50:15 WARN TaskSetManager: Lost task 22.2 in stage 5.0 (TID 561, m137.emblocsoft.net): TaskKilled (killed intentionally)
14/10/25 06:50:15 WARN TaskSetManager: Lost task 20.2 in stage 5.0 (TID 564, m137.emblocsoft.net): TaskKilled (killed intentionally)
14/10/25 06:50:15 WARN TaskSetManager: Lost task 13.2 in stage 5.0 (TID 562, m137.emblocsoft.net): TaskKilled (killed intentionally)
14/10/25 06:50:15 WARN TaskSetManager: Lost task 27.2 in stage 5.0 (TID 565, m137.emblocsoft.net): TaskKilled (killed intentionally)
14/10/25 06:50:15 WARN TaskSetManager: Lost task 34.2 in stage 5.0 (TID 568, m137.emblocsoft.net): TaskKilled (killed intentionally)
14/10/25 06:50:15 INFO TaskSchedulerImpl: Removed TaskSet 5.0, whose tasks have all completed, from pool
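
One more thing I plan to try, in case it helps anyone: since the SerDe apparently delivers strings, forcing the conversion explicitly in the query might sidestep the cast failure. I am not sure whether Catalyst keeps the cast when it already believes the column is DECIMAL, so treat this only as a sketch:

-- same query, but with explicit string-to-decimal casts on the two
-- columns used in the arithmetic
select l_orderkey,
       sum(cast(l_extendedprice as decimal) * (1 - cast(l_discount as decimal))) as revenue,
       o_orderdate, o_shippriority
from customer c
join orders o on c.c_mktsegment = 'BUILDING' and c.c_custkey = o.o_custkey
join lineitem l on l.l_orderkey = o.o_orderkey
where o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15'
group by l_orderkey, o_orderdate, o_shippriority
order by revenue desc, o_orderdate
limit 10;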

Regards
Arthur


On 24 Oct, 2014, at 6:56 am, Michael Armbrust <mich...@databricks.com> wrote:

> Can you show the DDL for the table?  It looks like the SerDe might be saying 
> it will produce a decimal type but is actually producing a string.
> 
> On Thu, Oct 23, 2014 at 3:17 PM, arthur.hk.c...@gmail.com 
> <arthur.hk.c...@gmail.com> wrote:
> Hi
> 
> My Spark is 1.1.0 and my Hive is 0.12.0. I ran the same query in both: the
> HiveQL version succeeds in Hive, while the same query fails in Spark SQL.
> 
> 
> hive> select l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue,
>         o_orderdate, o_shippriority
>       from customer c
>       join orders o on c.c_mktsegment = 'BUILDING' and c.c_custkey = o.o_custkey
>       join lineitem l on l.l_orderkey = o.o_orderkey
>       where o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15'
>       group by l_orderkey, o_orderdate, o_shippriority
>       order by revenue desc, o_orderdate
>       limit 10;
> Ended Job = job_1414067367860_0011
> MapReduce Jobs Launched: 
> Job 0: Map: 1  Reduce: 1   Cumulative CPU: 2.0 sec   HDFS Read: 261 HDFS Write: 96 SUCCESS
> Job 1: Map: 1  Reduce: 1   Cumulative CPU: 0.88 sec   HDFS Read: 458 HDFS Write: 0 SUCCESS
> Total MapReduce CPU Time Spent: 2 seconds 880 msec
> OK
> Time taken: 38.771 seconds
> 
> 
> scala> sqlContext.sql("""
>          select l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue,
>            o_orderdate, o_shippriority
>          from customer c
>          join orders o on c.c_mktsegment = 'BUILDING' and c.c_custkey = o.o_custkey
>          join lineitem l on l.l_orderkey = o.o_orderkey
>          where o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15'
>          group by l_orderkey, o_orderdate, o_shippriority
>          order by revenue desc, o_orderdate
>          limit 10""").collect().foreach(println)
> org.apache.spark.SparkException: Job aborted due to stage failure: Task 14 in stage 5.0 failed 4 times, most recent failure: Lost task 14.3 in stage 5.0 (TID 568, m34): java.lang.ClassCastException: java.lang.String cannot be cast to scala.math.BigDecimal
> [same stack trace as above; snipped]
> 
> 
> Regards
> Arthur
> 
