[ https://issues.apache.org/jira/browse/CARBONDATA-1726?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Chetan Bhat updated CARBONDATA-1726:
------------------------------------
    Description: 
Steps:
// prepare csv file for batch loading
cd /srv/spark2.2Bigdata/install/hadoop/datanode/bin

// generate streamSample.csv

100000001,batch_1,city_1,0.1,school_1:school_11$20
100000002,batch_2,city_2,0.2,school_2:school_22$30
100000003,batch_3,city_3,0.3,school_3:school_33$40
100000004,batch_4,city_4,0.4,school_4:school_44$50
100000005,batch_5,city_5,0.5,school_5:school_55$60
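
// Note (not in the original report): the command used to produce streamSample.csv is not shown.
// A minimal Scala sketch that writes the five rows above, using the same java.io.PrintWriter
// import that appears later in this repro:
import java.io.{File, PrintWriter}

val writer = new PrintWriter(new File("streamSample.csv"))
try {
  Seq(
    "100000001,batch_1,city_1,0.1,school_1:school_11$20",
    "100000002,batch_2,city_2,0.2,school_2:school_22$30",
    "100000003,batch_3,city_3,0.3,school_3:school_33$40",
    "100000004,batch_4,city_4,0.4,school_4:school_44$50",
    "100000005,batch_5,city_5,0.5,school_5:school_55$60"
  ).foreach(writer.println)  // one CSV row per line, no header
} finally {
  writer.close()
}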

// put to hdfs /tmp/streamSample.csv
./hadoop fs -put streamSample.csv /tmp

// spark-beeline
cd /srv/spark2.2Bigdata/install/spark/sparkJdbc
bin/spark-submit --master yarn-client --executor-memory 10G --executor-cores 5 \
  --driver-memory 5G --num-executors 3 \
  --class org.apache.carbondata.spark.thriftserver.CarbonThriftServer \
  /srv/spark2.2Bigdata/install/spark/sparkJdbc/carbonlib/carbondata_2.11-1.3.0-SNAPSHOT-shade-hadoop2.7.2.jar \
  "hdfs://hacluster/user/sparkhive/warehouse"

bin/beeline -u jdbc:hive2://10.18.98.34:23040

CREATE TABLE stream_table(
id INT,
name STRING,
city STRING,
salary FLOAT
)
STORED BY 'carbondata'
TBLPROPERTIES('streaming'='true', 'sort_columns'='name');

LOAD DATA LOCAL INPATH 'hdfs://hacluster/chetan/streamSample.csv' INTO TABLE stream_table OPTIONS('HEADER'='false');

// spark-shell 
cd /srv/spark2.2Bigdata/install/spark/sparkJdbc
bin/spark-shell --master yarn-client

import java.io.{File, PrintWriter}
import java.net.ServerSocket

import org.apache.spark.sql.{CarbonEnv, SparkSession}
import org.apache.spark.sql.hive.CarbonRelation
import org.apache.spark.sql.streaming.{ProcessingTime, StreamingQuery}

import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.core.util.path.{CarbonStorePath, CarbonTablePath}

CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")

import org.apache.spark.sql.CarbonSession._

val carbonSession = SparkSession.
  builder().
  appName("StreamExample").
  config("spark.sql.warehouse.dir", "hdfs://hacluster/user/sparkhive/warehouse").
  config("javax.jdo.option.ConnectionURL", "jdbc:mysql://10.18.98.34:3306/sparksql?characterEncoding=UTF-8").
  config("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver").
  config("javax.jdo.option.ConnectionPassword", "huawei").
  config("javax.jdo.option.ConnectionUserName", "sparksql").
  getOrCreateCarbonSession()
   
carbonSession.sparkContext.setLogLevel("ERROR")
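
// Aside (not in the original repro): before running the scan, metadata-only commands can help
// localize the failure, since they do not ship tasks to executors. If these succeed while the
// select below fails, the problem is on the executor side (deserialization/classpath), not in
// the table metadata. SHOW SEGMENTS is standard CarbonData SQL; the LIMIT value is arbitrary.
carbonSession.sql("describe formatted stream_table").show(100, false)
carbonSession.sql("show segments for table stream_table limit 10").show()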

carbonSession.sql("select * from stream_table").show
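
// Context (hedged, not part of the failing repro): the streaming imports at the top
// (ServerSocket, ProcessingTime, StreamingQuery) come from CarbonData's StreamExample but are
// never used here; the select runs after only the batch LOAD. A minimal sketch of how such a
// streaming table would be fed, following the CarbonData streaming guide and reusing the
// ProcessingTime import above; the socket host/port and checkpoint path are hypothetical:
val qry = carbonSession.readStream
  .format("socket")                      // toy text source, one CSV row per line
  .option("host", "localhost")
  .option("port", 9099)
  .load()
  .writeStream
  .format("carbondata")                  // CarbonData streaming sink
  .trigger(ProcessingTime("5 seconds"))  // micro-batch trigger interval
  .option("checkpointLocation", "hdfs://hacluster/tmp/stream_table_cp")
  .option("dbName", "default")
  .option("tableName", "stream_table")
  .start()
// qry is a StreamingQuery; call qry.stop() to end ingestion.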

Issue: The select query from spark-shell does not execute successfully after data is loaded into the streaming table.
The issue still occurs when the executor and driver cores and memory are increased while launching the spark-shell; since the "unread block data" error below is thrown while an executor deserializes its task, the failure likely stems from a serialization or classpath mismatch between driver and executors rather than from resource limits.
bin/spark-shell --master yarn-client --executor-memory 10G --executor-cores 5 --driver-memory 5G --num-executors 3
scala> import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.constants.CarbonCommonConstants

scala> import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.core.util.CarbonProperties

scala> import org.apache.carbondata.core.util.path.{CarbonStorePath, CarbonTablePath}
import org.apache.carbondata.core.util.path.{CarbonStorePath, CarbonTablePath}

scala>

scala> CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
res29: org.apache.carbondata.core.util.CarbonProperties = org.apache.carbondata.core.util.CarbonProperties@67b056e7

scala>

scala> import org.apache.spark.sql.CarbonSession._
import org.apache.spark.sql.CarbonSession._

scala>

scala> val carbonSession = SparkSession.
     |   builder().
     |   appName("StreamExample").
     |   config("spark.sql.warehouse.dir", "hdfs://hacluster/user/sparkhive/warehouse").
     |   config("javax.jdo.option.ConnectionURL", "jdbc:mysql://10.18.98.34:3306/sparksql?characterEncoding=UTF-8").
     |   config("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver").
     |   config("javax.jdo.option.ConnectionPassword", "huawei").
     |   config("javax.jdo.option.ConnectionUserName", "sparksql").
     |   getOrCreateCarbonSession()
carbonSession: org.apache.spark.sql.SparkSession = org.apache.spark.sql.CarbonSession@1d0590bc

scala>
     | carbonSession.sparkContext.setLogLevel("ERROR")

scala> carbonSession.sql("select * from stream_table").show
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 25.0 failed 4 times, most recent failure: Lost task 0.3 in stage 25.0 (TID 65, BLR1000014269, executor 8): java.lang.IllegalStateException: unread block data
        at java.io.ObjectInputStream$BlockDataInputStream.setBlockDataMode(ObjectInputStream.java:2424)
        at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1383)
        at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:1993)
        at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:1918)
        at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1801)
        at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1351)
        at java.io.ObjectInputStream.readObject(ObjectInputStream.java:371)
        at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:75)
        at org.apache.spark.serializer.JavaSerializerInstance.deserialize(JavaSerializer.scala:114)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:258)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)

Driver stacktrace:
  at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1435)
  at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1423)
  at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1422)
  at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
  at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
  at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1422)
  at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:802)
  at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:802)
  at scala.Option.foreach(Option.scala:257)
  at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:802)
  at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1650)
  at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1605)
  at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1594)
  at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
  at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:628)
  at org.apache.spark.SparkContext.runJob(SparkContext.scala:1918)
  at org.apache.spark.SparkContext.runJob(SparkContext.scala:1931)
  at org.apache.spark.SparkContext.runJob(SparkContext.scala:1944)
  at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:333)
  at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:38)
  at org.apache.spark.sql.Dataset$$anonfun$org$apache$spark$sql$Dataset$$execute$1$1.apply(Dataset.scala:2371)
  at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:57)
  at org.apache.spark.sql.Dataset.withNewExecutionId(Dataset.scala:2765)
  at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$execute$1(Dataset.scala:2370)
  at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collect(Dataset.scala:2377)
  at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2113)
  at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2112)
  at org.apache.spark.sql.Dataset.withTypedCallback(Dataset.scala:2795)
  at org.apache.spark.sql.Dataset.head(Dataset.scala:2112)
  at org.apache.spark.sql.Dataset.take(Dataset.scala:2327)
  at org.apache.spark.sql.Dataset.showString(Dataset.scala:248)
  at org.apache.spark.sql.Dataset.show(Dataset.scala:636)
  at org.apache.spark.sql.Dataset.show(Dataset.scala:595)
  at org.apache.spark.sql.Dataset.show(Dataset.scala:604)
  ... 50 elided
Caused by: java.lang.IllegalStateException: unread block data
  at java.io.ObjectInputStream$BlockDataInputStream.setBlockDataMode(ObjectInputStream.java:2424)
  at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1383)
  at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:1993)
  at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:1918)
  at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1801)
  at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1351)
  at java.io.ObjectInputStream.readObject(ObjectInputStream.java:371)
  at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:75)
  at org.apache.spark.serializer.JavaSerializerInstance.deserialize(JavaSerializer.scala:114)
  at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:258)
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
  at java.lang.Thread.run(Thread.java:745)



Expected: The select query from spark-shell should execute successfully after data is loaded into the streaming table.



> Carbon1.3.0-Streaming - Select query from spark-shell does not execute successfully for streaming table load
> ------------------------------------------------------------------------------------------------------------
>
>                 Key: CARBONDATA-1726
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-1726
>             Project: CarbonData
>          Issue Type: Bug
>          Components: data-query
>    Affects Versions: 1.3.0
>         Environment: 3 node ant cluster SUSE 11 SP4
>            Reporter: Chetan Bhat
>            Priority: Blocker
>              Labels: Functional


