[ https://issues.apache.org/jira/browse/CARBONDATA-4235?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Chetan Bhat updated CARBONDATA-4235:
------------------------------------
Description: 

*Queries –*

drop table if exists test_rename;
CREATE TABLE test_rename (str1 struct<a:int>, str2 struct<a:struct<b:int>>, str3 struct<a:struct<b:struct<c:int>>> comment 'struct', intfield int, arr1 array<int>, arr2 array<array<int>>, arr3 array<string>, arr4 array<struct<a:int>> comment 'array') STORED AS carbondata;
insert into test_rename values (named_struct('a', 2), named_struct('a', named_struct('b', 2)), named_struct('a', named_struct('b', named_struct('c', 2))), 1, array(1,2,3), array(array(1,2),array(3,4)), array('hello','world'), array(named_struct('a',45)));
ALTER TABLE test_rename ADD COLUMNS(arr_1 ARRAY<int>);
alter table test_rename change str2 str22 struct<a:struct<b:int>>;
select str22 from test_rename;
select str22.a from test_rename;
select str22.a.b from test_rename;

Issue: after an alter add column, when the user performs a rename operation on a struct column, the select operation on the struct column gives a NULL value and selecting children of the struct gives an error.
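As an aid to triage, a reduced repro sketch (an assumption, not part of the original report: only the ADD COLUMNS followed by the struct rename appears to be needed to trigger the bug, and the table name test_rename_min is hypothetical):

drop table if exists test_rename_min;
CREATE TABLE test_rename_min (str2 struct<a:struct<b:int>>) STORED AS carbondata;
insert into test_rename_min values (named_struct('a', named_struct('b', 2)));
ALTER TABLE test_rename_min ADD COLUMNS(arr_1 ARRAY<int>);
alter table test_rename_min change str2 str22 struct<a:struct<b:int>>;
-- if the assumption holds, this select should show the same NULL/exception behaviour
select str22, str22.a, str22.a.b from test_rename_min;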
*Issue 1: Exception trace on executing the query –*

0: jdbc:hive2://vm2:22550/> select str22.a.b from test_rename;
INFO : Execution ID: 2465
Error: org.apache.hive.service.cli.HiveSQLException: Error running query: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 1100.0 failed 4 times, most recent failure: Lost task 0.3 in stage 1100.0 (TID 10353) (vm1 executor 5): java.nio.BufferUnderflowException
  at java.nio.HeapByteBuffer.get(HeapByteBuffer.java:155)
  at org.apache.carbondata.core.scan.complextypes.PrimitiveQueryType.getDataObject(PrimitiveQueryType.java:166)
  at org.apache.carbondata.core.scan.complextypes.PrimitiveQueryType.getDataObject(PrimitiveQueryType.java:147)
  at org.apache.carbondata.core.scan.complextypes.PrimitiveQueryType.getDataBasedOnColumn(PrimitiveQueryType.java:141)
  at org.apache.carbondata.core.scan.complextypes.StructQueryType.getDataBasedOnColumn(StructQueryType.java:160)
  at org.apache.carbondata.core.scan.complextypes.StructQueryType.getDataBasedOnColumn(StructQueryType.java:160)
  at org.apache.carbondata.core.scan.collector.impl.DictionaryBasedResultCollector.fillRow(DictionaryBasedResultCollector.java:316)
  at org.apache.carbondata.core.scan.collector.impl.DictionaryBasedResultCollector.fillDimensionData(DictionaryBasedResultCollector.java:288)
  at org.apache.carbondata.core.scan.collector.impl.DictionaryBasedResultCollector.collectResultInRow(DictionaryBasedResultCollector.java:159)
  at org.apache.carbondata.core.scan.processor.DataBlockIterator.next(DataBlockIterator.java:110)
  at org.apache.carbondata.core.scan.result.iterator.DetailQueryResultIterator.getBatchResult(DetailQueryResultIterator.java:58)
  at org.apache.carbondata.core.scan.result.iterator.DetailQueryResultIterator.next(DetailQueryResultIterator.java:50)
  at org.apache.carbondata.core.scan.result.iterator.DetailQueryResultIterator.next(DetailQueryResultIterator.java:32)
  at org.apache.carbondata.core.scan.result.iterator.ChunkRowIterator.hasNext(ChunkRowIterator.java:56)
  at org.apache.carbondata.hadoop.CarbonRecordReader.nextKeyValue(CarbonRecordReader.java:127)
  at org.apache.carbondata.spark.rdd.CarbonScanRDD$$anon$1.hasNext(CarbonScanRDD.scala:557)
  at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
  at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
  at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
  at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
  at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:755)
  at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:345)
  at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:897)
  at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:897)
  at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
  at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
  at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
  at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
  at org.apache.spark.scheduler.Task.run(Task.scala:131)
  at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:499)
  at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1554)
  at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:502)
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
  at java.lang.Thread.run(Thread.java:748)
Driver stacktrace:
  at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:396)
  at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.$anonfun$run$3(SparkExecuteStatementOperation.scala:281)
  at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
  at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
  at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
  at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
  at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:281)
  at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:268)
  at java.security.AccessController.doPrivileged(Native Method)
  at javax.security.auth.Subject.doAs(Subject.java:422)
  at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1761)
  at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2.run(SparkExecuteStatementOperation.scala:295)
  at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
  at java.util.concurrent.FutureTask.run(FutureTask.java:266)
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
  at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 1100.0 failed 4 times, most recent failure: Lost task 0.3 in stage 1100.0 (TID 10353) (vm1 executor 5): java.nio.BufferUnderflowException
  at java.nio.HeapByteBuffer.get(HeapByteBuffer.java:155)
  at org.apache.carbondata.core.scan.complextypes.PrimitiveQueryType.getDataObject(PrimitiveQueryType.java:166)
  at org.apache.carbondata.core.scan.complextypes.PrimitiveQueryType.getDataObject(PrimitiveQueryType.java:147)
  at org.apache.carbondata.core.scan.complextypes.PrimitiveQueryType.getDataBasedOnColumn(PrimitiveQueryType.java:141)
  at org.apache.carbondata.core.scan.complextypes.StructQueryType.getDataBasedOnColumn(StructQueryType.java:160)
  at org.apache.carbondata.core.scan.complextypes.StructQueryType.getDataBasedOnColumn(StructQueryType.java:160)
  at org.apache.carbondata.core.scan.collector.impl.DictionaryBasedResultCollector.fillRow(DictionaryBasedResultCollector.java:316)
  at org.apache.carbondata.core.scan.collector.impl.DictionaryBasedResultCollector.fillDimensionData(DictionaryBasedResultCollector.java:288)
  at org.apache.carbondata.core.scan.collector.impl.DictionaryBasedResultCollector.collectResultInRow(DictionaryBasedResultCollector.java:159)
  at org.apache.carbondata.core.scan.processor.DataBlockIterator.next(DataBlockIterator.java:110)
  at org.apache.carbondata.core.scan.result.iterator.DetailQueryResultIterator.getBatchResult(DetailQueryResultIterator.java:58)
  at org.apache.carbondata.core.scan.result.iterator.DetailQueryResultIterator.next(DetailQueryResultIterator.java:50)
  at org.apache.carbondata.core.scan.result.iterator.DetailQueryResultIterator.next(DetailQueryResultIterator.java:32)
  at org.apache.carbondata.core.scan.result.iterator.ChunkRowIterator.hasNext(ChunkRowIterator.java:56)
  at org.apache.carbondata.hadoop.CarbonRecordReader.nextKeyValue(CarbonRecordReader.java:127)
  at org.apache.carbondata.spark.rdd.CarbonScanRDD$$anon$1.hasNext(CarbonScanRDD.scala:557)
  at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
  at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
  at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
  at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
  at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:755)
  at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:345)
  at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:897)
  at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:897)
  at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
  at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
  at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
  at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
  at org.apache.spark.scheduler.Task.run(Task.scala:131)
  at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:499)
  at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1554)
  at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:502)
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
  at java.lang.Thread.run(Thread.java:748)
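The deepest failing frame is PrimitiveQueryType.getDataObject reading from a java.nio ByteBuffer; HeapByteBuffer.get throws BufferUnderflowException when more bytes are requested than remain in the buffer, which suggests the read schema for the renamed column no longer matches the bytes stored in the data page. A diagnostic sketch (these checks are an assumption, not from the original report) to confirm the problem is isolated to the renamed struct:

describe test_rename;
-- assumption: columns untouched by the rename should still read correctly
select str1, str3, intfield from test_rename;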
*Issue 2: NULL value in select query*

0: jdbc:hive2://vm2:22550/> select str22.a from test_rename;
INFO : Execution ID: 2464
+-------+
|   a   |
+-------+
| NULL  |
+-------+
1 row selected (0.764 seconds)

Expected: Both select queries must execute successfully and return the correct output.
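For reference, given the inserted row the expected results would be as follows (the values follow from the insert statement; the struct display format is an assumption based on beeline's usual rendering):

select str22 from test_rename;     -- expected: {"a":{"b":2}}
select str22.a from test_rename;   -- expected: {"b":2}
select str22.a.b from test_rename; -- expected: 2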
> after alter add column when user does rename operation, the select operation
> on struct type gives null value and children of struct gives error
> ------------------------------------------------------------------------------------------------------------------------------------------------
>
>                 Key: CARBONDATA-4235
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-4235
>             Project: CarbonData
>          Issue Type: Bug
>          Components: sql
>    Affects Versions: 2.2.0
>         Environment: Spark 3.1.1, Spark 2.4.5
>            Reporter: Chetan Bhat
>            Priority: Minor
--
This message was sent by Atlassian Jira
(v8.3.4#803005)