This is an automated email from the ASF dual-hosted git repository.
dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 239deef7823 HIVE-29098 : Iceberg: Fix data loss post migrating a
partitioned table with default partition to iceberg (#5993)
239deef7823 is described below
commit 239deef78235062a1182642f70044f21648ece11
Author: Vikram Ahuja <[email protected]>
AuthorDate: Fri Jul 25 14:56:23 2025 +0530
HIVE-29098 : Iceberg: Fix data loss post migrating a partitioned table with
default partition to iceberg (#5993)
This reverts the code changes from HIVE-27367: Iceberg: Encode partition
key values to avoid unnecessary partition keys split in DataFiles.fillFromPath
---
.../org/apache/iceberg/mr/hive/HiveTableUtil.java | 20 +-
.../positive/alter_multi_part_table_to_iceberg.q | 95 +-
.../queries/positive/alter_part_table_to_iceberg.q | 61 +
.../alter_multi_part_table_to_iceberg.q.out | 1320 ++++++++++++++++++++
.../positive/alter_part_table_to_iceberg.q.out | 837 ++++++++++++-
5 files changed, 2311 insertions(+), 22 deletions(-)
diff --git
a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveTableUtil.java
b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveTableUtil.java
index 24f070eb8d5..2e5dec6aa04 100644
---
a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveTableUtil.java
+++
b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveTableUtil.java
@@ -32,7 +32,6 @@
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
-import java.util.stream.IntStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
@@ -45,7 +44,6 @@
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
-import org.apache.hadoop.hive.metastore.utils.FileUtils;
import org.apache.hadoop.hive.ql.io.IOConstants;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.iceberg.AppendFiles;
@@ -54,7 +52,6 @@
import org.apache.iceberg.MetricsConfig;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.SerializableTable;
-import org.apache.iceberg.StructLike;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.Transaction;
@@ -158,21 +155,8 @@ private static List<DataFile>
getDataFiles(RemoteIterator<LocatedFileStatus> fil
if (fileName.startsWith(".") || fileName.startsWith("_") ||
fileName.endsWith("metadata.json")) {
continue;
}
- partitionKeys.replaceAll((key, value) ->
FileUtils.escapePathName(value));
-
- int[] stringFields = IntStream.range(0, spec.javaClasses().length)
- .filter(i ->
spec.javaClasses()[i].isAssignableFrom(String.class)).toArray();
-
- dataFiles.addAll(Lists.transform(
- TableMigrationUtil.listPartition(partitionKeys,
fileStatus.getPath().toString(), format, spec,
- conf, metricsConfig, nameMapping),
- dataFile -> {
- StructLike structLike = dataFile.partition();
- for (int pos : stringFields) {
- structLike.set(pos,
FileUtils.unescapePathName(structLike.get(pos, String.class)));
- }
- return dataFile;
- }));
+ dataFiles.addAll(TableMigrationUtil.listPartition(partitionKeys,
fileStatus.getPath().toString(), format, spec,
+ conf, metricsConfig, nameMapping));
}
return dataFiles;
}
diff --git
a/iceberg/iceberg-handler/src/test/queries/positive/alter_multi_part_table_to_iceberg.q
b/iceberg/iceberg-handler/src/test/queries/positive/alter_multi_part_table_to_iceberg.q
index f8b8c329f0b..e94a24628ba 100644
---
a/iceberg/iceberg-handler/src/test/queries/positive/alter_multi_part_table_to_iceberg.q
+++
b/iceberg/iceberg-handler/src/test/queries/positive/alter_multi_part_table_to_iceberg.q
@@ -62,4 +62,97 @@ explain alter table tbl_avro convert to iceberg;
alter table tbl_avro convert to iceberg;
describe formatted tbl_avro;
select * from tbl_avro order by a;
-drop table tbl_avro;
\ No newline at end of file
+drop table tbl_avro;
+
+drop table if exists tbl_orc_mixed;
+create external table tbl_orc_mixed(a int) partitioned by (b double, c int, d
string) stored as orc;
+describe formatted tbl_orc_mixed;
+insert into table tbl_orc_mixed partition (b=1.1, c=2, d='one') values (1),
(2), (3);
+insert into table tbl_orc_mixed partition (b=2.22, c=1, d='two') values (4),
(5);
+insert into table tbl_orc_mixed partition (b=3.333, c=3, d='three') values
(6), (7), (8);
+insert into table tbl_orc_mixed partition (b=4.4444, c=4, d='four') values (9);
+insert into table tbl_orc_mixed values (10, '', '', '');
+insert into table tbl_orc_mixed values (11, null, null, null);
+insert into table tbl_orc_mixed values (12, NULL, NULL, NULL);
+insert into table tbl_orc_mixed values (13, '', -2, '');
+insert into table tbl_orc_mixed values (14, null, null, 'random');
+insert into table tbl_orc_mixed values (15, -0.11, NULL, NULL);
+select count(*) from tbl_orc_mixed;
+select * from tbl_orc_mixed order by a;
+explain alter table tbl_orc_mixed convert to iceberg;
+alter table tbl_orc_mixed convert to iceberg;
+describe formatted tbl_orc_mixed;
+select count(*) from tbl_orc_mixed;
+select * from tbl_orc_mixed order by a;
+insert into table tbl_orc_mixed partition (b=5.55555, c = 5, d = 'five')
values (16);
+insert into table tbl_orc_mixed values (17, '', '', '');
+insert into table tbl_orc_mixed values (18, null, null, null);
+insert into table tbl_orc_mixed values (19, NULL, NULL, NULL);
+insert into table tbl_orc_mixed values (20, '', -3, '');
+insert into table tbl_orc_mixed values (21, null, null, 'part');
+insert into table tbl_orc_mixed values (22, -0.234, NULL, NULL);
+select count(*) from tbl_orc_mixed;
+select * from tbl_orc_mixed order by a;
+drop table tbl_orc_mixed;
+
+drop table if exists tbl_parquet_mixed;
+create external table tbl_parquet_mixed(a int) partitioned by (b double, c
int, d string) stored as parquet;
+describe formatted tbl_parquet_mixed;
+insert into table tbl_parquet_mixed partition (b=1.1, c=2, d='one') values
(1), (2), (3);
+insert into table tbl_parquet_mixed partition (b=2.22, c=1, d='two') values
(4), (5);
+insert into table tbl_parquet_mixed partition (b=3.333, c=3, d='three') values
(6), (7), (8);
+insert into table tbl_parquet_mixed partition (b=4.4444, c=4, d='four') values
(9);
+insert into table tbl_parquet_mixed values (10, '', '', '');
+insert into table tbl_parquet_mixed values (11, null, null, null);
+insert into table tbl_parquet_mixed values (12, NULL, NULL, NULL);
+insert into table tbl_parquet_mixed values (13, '', -2, '');
+insert into table tbl_parquet_mixed values (14, null, null, 'random');
+insert into table tbl_parquet_mixed values (15, -0.11, NULL, NULL);
+select count(*) from tbl_parquet_mixed;
+select * from tbl_parquet_mixed order by a;
+explain alter table tbl_parquet_mixed convert to iceberg;
+alter table tbl_parquet_mixed convert to iceberg;
+describe formatted tbl_parquet_mixed;
+select count(*) from tbl_parquet_mixed;
+select * from tbl_parquet_mixed order by a;
+insert into table tbl_parquet_mixed partition (b=5.55555, c = 5, d = 'five')
values (16);
+insert into table tbl_parquet_mixed values (17, '', '', '');
+insert into table tbl_parquet_mixed values (18, null, null, null);
+insert into table tbl_parquet_mixed values (19, NULL, NULL, NULL);
+insert into table tbl_parquet_mixed values (20, '', -3, '');
+insert into table tbl_parquet_mixed values (21, null, null, 'part');
+insert into table tbl_parquet_mixed values (22, -0.234, NULL, NULL);
+select count(*) from tbl_parquet_mixed;
+select * from tbl_parquet_mixed order by a;
+drop table tbl_parquet_mixed;
+
+drop table if exists tbl_avro_mixed;
+create external table tbl_avro_mixed(a int) partitioned by (b double, c int, d
string) stored as avro;
+describe formatted tbl_avro_mixed;
+insert into table tbl_avro_mixed partition (b=1.1, c=2, d='one') values (1),
(2), (3);
+insert into table tbl_avro_mixed partition (b=2.22, c=1, d='two') values (4),
(5);
+insert into table tbl_avro_mixed partition (b=3.333, c=3, d='three') values
(6), (7), (8);
+insert into table tbl_avro_mixed partition (b=4.4444, c=4, d='four') values
(9);
+insert into table tbl_avro_mixed values (10, '', '', '');
+insert into table tbl_avro_mixed values (11, null, null, null);
+insert into table tbl_avro_mixed values (12, NULL, NULL, NULL);
+insert into table tbl_avro_mixed values (13, '', -2, '');
+insert into table tbl_avro_mixed values (14, null, null, 'random');
+insert into table tbl_avro_mixed values (15, -0.11, NULL, NULL);
+select count(*) from tbl_avro_mixed;
+select * from tbl_avro_mixed order by a;
+explain alter table tbl_avro_mixed convert to iceberg;
+alter table tbl_avro_mixed convert to iceberg;
+describe formatted tbl_avro_mixed;
+select count(*) from tbl_avro_mixed;
+select * from tbl_avro_mixed order by a;
+insert into table tbl_avro_mixed partition (b=5.55555, c = 5, d = 'five')
values (16);
+insert into table tbl_avro_mixed values (17, '', '', '');
+insert into table tbl_avro_mixed values (18, null, null, null);
+insert into table tbl_avro_mixed values (19, NULL, NULL, NULL);
+insert into table tbl_avro_mixed values (20, '', -3, '');
+insert into table tbl_avro_mixed values (21, null, null, 'part');
+insert into table tbl_avro_mixed values (22, -0.234, NULL, NULL);
+select count(*) from tbl_avro_mixed;
+select * from tbl_avro_mixed order by a;
+drop table tbl_avro_mixed;
\ No newline at end of file
diff --git
a/iceberg/iceberg-handler/src/test/queries/positive/alter_part_table_to_iceberg.q
b/iceberg/iceberg-handler/src/test/queries/positive/alter_part_table_to_iceberg.q
index e2de004407b..d70c851ecb8 100644
---
a/iceberg/iceberg-handler/src/test/queries/positive/alter_part_table_to_iceberg.q
+++
b/iceberg/iceberg-handler/src/test/queries/positive/alter_part_table_to_iceberg.q
@@ -34,13 +34,74 @@ insert into table tbl_parquet partition (b='one') values
(1), (2), (3);
insert into table tbl_parquet partition (b='two') values (4), (5);
insert into table tbl_parquet partition (b='three') values (6), (7), (8);
insert into table tbl_parquet partition (b='four') values (9);
+insert into table tbl_parquet values (10, '');
+insert into table tbl_parquet values (11, null);
+insert into table tbl_parquet values (12, NULL);
+select count(*) from tbl_parquet;
select * from tbl_parquet order by a;
explain alter table tbl_parquet convert to iceberg;
alter table tbl_parquet convert to iceberg;
describe formatted tbl_parquet;
+select count(*) from tbl_parquet;
+select * from tbl_parquet order by a;
+insert into table tbl_parquet partition (b='five') values (13);
+insert into table tbl_parquet values (14, '');
+insert into table tbl_parquet values (15, null);
+insert into table tbl_parquet values (16, NULL);
+select count(*) from tbl_parquet;
select * from tbl_parquet order by a;
drop table tbl_parquet;
+drop table if exists tbl_parquet_int;
+create external table tbl_parquet_int(a int) partitioned by (b int) stored as
parquet;
+describe formatted tbl_parquet_int;
+insert into table tbl_parquet_int partition (b=1) values (1), (2), (3);
+insert into table tbl_parquet_int partition (b=2) values (4), (5);
+insert into table tbl_parquet_int partition (b=3) values (6), (7), (8);
+insert into table tbl_parquet_int partition (b=4) values (9);
+insert into table tbl_parquet_int values (10, '');
+insert into table tbl_parquet_int values (11, null);
+insert into table tbl_parquet_int values (12, NULL);
+select count(*) from tbl_parquet_int;
+select * from tbl_parquet_int order by a;
+explain alter table tbl_parquet_int convert to iceberg;
+alter table tbl_parquet_int convert to iceberg;
+describe formatted tbl_parquet_int;
+select count(*) from tbl_parquet_int;
+select * from tbl_parquet_int order by a;
+insert into table tbl_parquet_int partition (b=5) values (13);
+insert into table tbl_parquet_int values (14, '');
+insert into table tbl_parquet_int values (15, null);
+insert into table tbl_parquet_int values (16, NULL);
+select count(*) from tbl_parquet_int;
+select * from tbl_parquet_int order by a;
+drop table tbl_parquet_int;
+
+drop table if exists tbl_parquet_double;
+create external table tbl_parquet_double(a int) partitioned by (b double)
stored as parquet;
+describe formatted tbl_parquet_double;
+insert into table tbl_parquet_double partition (b=1.1) values (1), (2), (3);
+insert into table tbl_parquet_double partition (b=2.22) values (4), (5);
+insert into table tbl_parquet_double partition (b=3.333) values (6), (7), (8);
+insert into table tbl_parquet_double partition (b=4.4444) values (9);
+insert into table tbl_parquet_double values (10, '');
+insert into table tbl_parquet_double values (11, null);
+insert into table tbl_parquet_double values (12, NULL);
+select count(*) from tbl_parquet_double;
+select * from tbl_parquet_double order by a;
+explain alter table tbl_parquet_double convert to iceberg;
+alter table tbl_parquet_double convert to iceberg;
+describe formatted tbl_parquet_double;
+select count(*) from tbl_parquet_double;
+select * from tbl_parquet_double order by a;
+insert into table tbl_parquet_double partition (b=5.55555) values (13);
+insert into table tbl_parquet_double values (14, '');
+insert into table tbl_parquet_double values (15, null);
+insert into table tbl_parquet_double values (16, NULL);
+select count(*) from tbl_parquet_double;
+select * from tbl_parquet_double order by a;
+drop table tbl_parquet_double;
+
drop table if exists tbl_avro;
create external table tbl_avro(a int) partitioned by (b string) stored as avro;
describe formatted tbl_avro;
diff --git
a/iceberg/iceberg-handler/src/test/results/positive/alter_multi_part_table_to_iceberg.q.out
b/iceberg/iceberg-handler/src/test/results/positive/alter_multi_part_table_to_iceberg.q.out
index 768f4c574a2..c4c1a87209a 100644
---
a/iceberg/iceberg-handler/src/test/results/positive/alter_multi_part_table_to_iceberg.q.out
+++
b/iceberg/iceberg-handler/src/test/results/positive/alter_multi_part_table_to_iceberg.q.out
@@ -808,3 +808,1323 @@ POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@tbl_avro
POSTHOOK: Output: database:default
POSTHOOK: Output: default@tbl_avro
+PREHOOK: query: drop table if exists tbl_orc_mixed
+PREHOOK: type: DROPTABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: drop table if exists tbl_orc_mixed
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: database:default
+PREHOOK: query: create external table tbl_orc_mixed(a int) partitioned by (b
double, c int, d string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: query: create external table tbl_orc_mixed(a int) partitioned by (b
double, c int, d string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl_orc_mixed
+PREHOOK: query: describe formatted tbl_orc_mixed
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tbl_orc_mixed
+POSTHOOK: query: describe formatted tbl_orc_mixed
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tbl_orc_mixed
+# col_name data_type comment
+a int
+
+# Partition Information
+# col_name data_type comment
+b double
+c int
+d string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: EXTERNAL_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ EXTERNAL TRUE
+ bucketing_version 2
+ numFiles 0
+ numPartitions 0
+ numRows 0
+ rawDataSize 0
+ totalSize #Masked#
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table tbl_orc_mixed partition (b=1.1, c=2,
d='one') values (1), (2), (3)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_orc_mixed@b=1.1/c=2/d=one
+POSTHOOK: query: insert into table tbl_orc_mixed partition (b=1.1, c=2,
d='one') values (1), (2), (3)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_orc_mixed@b=1.1/c=2/d=one
+POSTHOOK: Lineage: tbl_orc_mixed PARTITION(b=1.1,c=2,d=one).a SCRIPT []
+PREHOOK: query: insert into table tbl_orc_mixed partition (b=2.22, c=1,
d='two') values (4), (5)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_orc_mixed@b=2.22/c=1/d=two
+POSTHOOK: query: insert into table tbl_orc_mixed partition (b=2.22, c=1,
d='two') values (4), (5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_orc_mixed@b=2.22/c=1/d=two
+POSTHOOK: Lineage: tbl_orc_mixed PARTITION(b=2.22,c=1,d=two).a SCRIPT []
+PREHOOK: query: insert into table tbl_orc_mixed partition (b=3.333, c=3,
d='three') values (6), (7), (8)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_orc_mixed@b=3.333/c=3/d=three
+POSTHOOK: query: insert into table tbl_orc_mixed partition (b=3.333, c=3,
d='three') values (6), (7), (8)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_orc_mixed@b=3.333/c=3/d=three
+POSTHOOK: Lineage: tbl_orc_mixed PARTITION(b=3.333,c=3,d=three).a SCRIPT []
+PREHOOK: query: insert into table tbl_orc_mixed partition (b=4.4444, c=4,
d='four') values (9)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_orc_mixed@b=4.4444/c=4/d=four
+POSTHOOK: query: insert into table tbl_orc_mixed partition (b=4.4444, c=4,
d='four') values (9)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_orc_mixed@b=4.4444/c=4/d=four
+POSTHOOK: Lineage: tbl_orc_mixed PARTITION(b=4.4444,c=4,d=four).a SCRIPT []
+PREHOOK: query: insert into table tbl_orc_mixed values (10, '', '', '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: query: insert into table tbl_orc_mixed values (10, '', '', '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: Output:
default@tbl_orc_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_orc_mixed
PARTITION(b=__HIVE_DEFAULT_PARTITION__,c=__HIVE_DEFAULT_PARTITION__,d=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: insert into table tbl_orc_mixed values (11, null, null, null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: query: insert into table tbl_orc_mixed values (11, null, null, null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: Output:
default@tbl_orc_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_orc_mixed
PARTITION(b=__HIVE_DEFAULT_PARTITION__,c=__HIVE_DEFAULT_PARTITION__,d=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: insert into table tbl_orc_mixed values (12, NULL, NULL, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: query: insert into table tbl_orc_mixed values (12, NULL, NULL, NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: Output:
default@tbl_orc_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_orc_mixed
PARTITION(b=__HIVE_DEFAULT_PARTITION__,c=__HIVE_DEFAULT_PARTITION__,d=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: insert into table tbl_orc_mixed values (13, '', -2, '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: query: insert into table tbl_orc_mixed values (13, '', -2, '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: Output:
default@tbl_orc_mixed@b=__HIVE_DEFAULT_PARTITION__/c=-2/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_orc_mixed
PARTITION(b=__HIVE_DEFAULT_PARTITION__,c=-2,d=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: insert into table tbl_orc_mixed values (14, null, null,
'random')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: query: insert into table tbl_orc_mixed values (14, null, null,
'random')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: Output:
default@tbl_orc_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=random
+POSTHOOK: Lineage: tbl_orc_mixed
PARTITION(b=__HIVE_DEFAULT_PARTITION__,c=__HIVE_DEFAULT_PARTITION__,d=random).a
SCRIPT []
+PREHOOK: query: insert into table tbl_orc_mixed values (15, -0.11, NULL, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: query: insert into table tbl_orc_mixed values (15, -0.11, NULL, NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: Output:
default@tbl_orc_mixed@b=-0.11/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_orc_mixed
PARTITION(b=-0.11,c=__HIVE_DEFAULT_PARTITION__,d=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: select count(*) from tbl_orc_mixed
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_orc_mixed
+PREHOOK: Input:
default@tbl_orc_mixed@b=-0.11/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input: default@tbl_orc_mixed@b=1.1/c=2/d=one
+PREHOOK: Input: default@tbl_orc_mixed@b=2.22/c=1/d=two
+PREHOOK: Input: default@tbl_orc_mixed@b=3.333/c=3/d=three
+PREHOOK: Input: default@tbl_orc_mixed@b=4.4444/c=4/d=four
+PREHOOK: Input:
default@tbl_orc_mixed@b=__HIVE_DEFAULT_PARTITION__/c=-2/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input:
default@tbl_orc_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input:
default@tbl_orc_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=random
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_orc_mixed
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_orc_mixed
+POSTHOOK: Input:
default@tbl_orc_mixed@b=-0.11/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input: default@tbl_orc_mixed@b=1.1/c=2/d=one
+POSTHOOK: Input: default@tbl_orc_mixed@b=2.22/c=1/d=two
+POSTHOOK: Input: default@tbl_orc_mixed@b=3.333/c=3/d=three
+POSTHOOK: Input: default@tbl_orc_mixed@b=4.4444/c=4/d=four
+POSTHOOK: Input:
default@tbl_orc_mixed@b=__HIVE_DEFAULT_PARTITION__/c=-2/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input:
default@tbl_orc_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input:
default@tbl_orc_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=random
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+15
+PREHOOK: query: select * from tbl_orc_mixed order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_orc_mixed
+PREHOOK: Input:
default@tbl_orc_mixed@b=-0.11/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input: default@tbl_orc_mixed@b=1.1/c=2/d=one
+PREHOOK: Input: default@tbl_orc_mixed@b=2.22/c=1/d=two
+PREHOOK: Input: default@tbl_orc_mixed@b=3.333/c=3/d=three
+PREHOOK: Input: default@tbl_orc_mixed@b=4.4444/c=4/d=four
+PREHOOK: Input:
default@tbl_orc_mixed@b=__HIVE_DEFAULT_PARTITION__/c=-2/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input:
default@tbl_orc_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input:
default@tbl_orc_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=random
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from tbl_orc_mixed order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_orc_mixed
+POSTHOOK: Input:
default@tbl_orc_mixed@b=-0.11/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input: default@tbl_orc_mixed@b=1.1/c=2/d=one
+POSTHOOK: Input: default@tbl_orc_mixed@b=2.22/c=1/d=two
+POSTHOOK: Input: default@tbl_orc_mixed@b=3.333/c=3/d=three
+POSTHOOK: Input: default@tbl_orc_mixed@b=4.4444/c=4/d=four
+POSTHOOK: Input:
default@tbl_orc_mixed@b=__HIVE_DEFAULT_PARTITION__/c=-2/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input:
default@tbl_orc_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input:
default@tbl_orc_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=random
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 1.1 2 one
+2 1.1 2 one
+3 1.1 2 one
+4 2.22 1 two
+5 2.22 1 two
+6 3.333 3 three
+7 3.333 3 three
+8 3.333 3 three
+9 4.4444 4 four
+10 NULL NULL __HIVE_DEFAULT_PARTITION__
+11 NULL NULL __HIVE_DEFAULT_PARTITION__
+12 NULL NULL __HIVE_DEFAULT_PARTITION__
+13 NULL -2 __HIVE_DEFAULT_PARTITION__
+14 NULL NULL random
+15 -0.11 NULL __HIVE_DEFAULT_PARTITION__
+PREHOOK: query: explain alter table tbl_orc_mixed convert to iceberg
+PREHOOK: type: ALTERTABLE_CONVERT
+PREHOOK: Input: default@tbl_orc_mixed
+POSTHOOK: query: explain alter table tbl_orc_mixed convert to iceberg
+POSTHOOK: type: ALTERTABLE_CONVERT
+POSTHOOK: Input: default@tbl_orc_mixed
+Stage-0
+ Convert operation{"table
name:":"default.tbl_orc_mixed","spec:":"AlterTableConvertSpec{ConvertTo=iceberg,
TBLProperties={}}"}
+
+PREHOOK: query: alter table tbl_orc_mixed convert to iceberg
+PREHOOK: type: ALTERTABLE_CONVERT
+PREHOOK: Input: default@tbl_orc_mixed
+POSTHOOK: query: alter table tbl_orc_mixed convert to iceberg
+POSTHOOK: type: ALTERTABLE_CONVERT
+POSTHOOK: Input: default@tbl_orc_mixed
+POSTHOOK: Output: default@tbl_orc_mixed
+PREHOOK: query: describe formatted tbl_orc_mixed
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tbl_orc_mixed
+POSTHOOK: query: describe formatted tbl_orc_mixed
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tbl_orc_mixed
+# col_name data_type comment
+a int
+b double
+c int
+d string
+
+# Partition Transform Information
+# col_name transform_type
+b IDENTITY
+c IDENTITY
+d IDENTITY
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: EXTERNAL_TABLE
+Table Parameters:
+ EXTERNAL TRUE
+ MIGRATED_TO_ICEBERG true
+ bucketing_version 2
+ current-schema
{\"type\":\"struct\",\"schema-id\":0,\"fields\":[{\"id\":1,\"name\":\"a\",\"required\":false,\"type\":\"int\"},{\"id\":2,\"name\":\"b\",\"required\":false,\"type\":\"double\"},{\"id\":3,\"name\":\"c\",\"required\":false,\"type\":\"int\"},{\"id\":4,\"name\":\"d\",\"required\":false,\"type\":\"string\"}]}
+ current-snapshot-id #Masked#
+ current-snapshot-summary
{\"added-data-files\":\"10\",\"added-records\":\"15\",\"added-files-size\":\"#Masked#\",\"changed-partition-count\":\"8\",\"total-records\":\"15\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"10\",\"total-delete-files\":\"0\",\"total-position-deletes\":\"0\",\"total-equality-deletes\":\"0\",\"iceberg-version\":\"#Masked#\"}
+ current-snapshot-timestamp-ms #Masked#
+ default-partition-spec
{\"spec-id\":0,\"fields\":[{\"name\":\"b\",\"transform\":\"identity\",\"source-id\":2,\"field-id\":1000},{\"name\":\"c\",\"transform\":\"identity\",\"source-id\":3,\"field-id\":1001},{\"name\":\"d\",\"transform\":\"identity\",\"source-id\":4,\"field-id\":1002}]}
+ format-version 2
+ iceberg.orc.files.only true
+#### A masked pattern was here ####
+ metadata_location hdfs://### HDFS PATH ###
+ numFiles 10
+ numRows 15
+ parquet.compression zstd
+ previous_metadata_location hdfs://### HDFS PATH ###
+ schema.name-mapping.default [ {
+ \"field-id\" : 1,
+ \"names\" : [ \"a\" ]
+ }, {
+ \"field-id\" : 2,
+ \"names\" : [ \"b\" ]
+ }, {
+ \"field-id\" : 3,
+ \"names\" : [ \"c\" ]
+ }, {
+ \"field-id\" : 4,
+ \"names\" : [ \"d\" ]
+ } ]
+ snapshot-count 1
+ storage_handler
org.apache.iceberg.mr.hive.HiveIcebergStorageHandler
+ table_type ICEBERG
+ totalSize #Masked#
+#### A masked pattern was here ####
+ uuid #Masked#
+ write.delete.mode merge-on-read
+ write.format.default orc
+ write.merge.mode merge-on-read
+ write.update.mode merge-on-read
+
+# Storage Information
+SerDe Library: org.apache.iceberg.mr.hive.HiveIcebergSerDe
+InputFormat: org.apache.iceberg.mr.hive.HiveIcebergInputFormat
+OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat
+Compressed: No
+Sort Columns: []
+PREHOOK: query: select count(*) from tbl_orc_mixed
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_orc_mixed
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_orc_mixed
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_orc_mixed
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+15
+PREHOOK: query: select * from tbl_orc_mixed order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_orc_mixed
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from tbl_orc_mixed order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_orc_mixed
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 1.1 2 one
+2 1.1 2 one
+3 1.1 2 one
+4 2.22 1 two
+5 2.22 1 two
+6 3.333 3 three
+7 3.333 3 three
+8 3.333 3 three
+9 4.4444 4 four
+10 NULL NULL NULL
+11 NULL NULL NULL
+12 NULL NULL NULL
+13 NULL -2 NULL
+14 NULL NULL random
+15 -0.11 NULL NULL
+PREHOOK: query: insert into table tbl_orc_mixed partition (b=5.55555, c = 5, d
= 'five') values (16)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_orc_mixed@d=five/b=5.55555/c=5
+POSTHOOK: query: insert into table tbl_orc_mixed partition (b=5.55555, c = 5,
d = 'five') values (16)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_orc_mixed@d=five/b=5.55555/c=5
+PREHOOK: query: insert into table tbl_orc_mixed values (17, '', '', '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: query: insert into table tbl_orc_mixed values (17, '', '', '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_orc_mixed
+PREHOOK: query: insert into table tbl_orc_mixed values (18, null, null, null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: query: insert into table tbl_orc_mixed values (18, null, null, null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_orc_mixed
+PREHOOK: query: insert into table tbl_orc_mixed values (19, NULL, NULL, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: query: insert into table tbl_orc_mixed values (19, NULL, NULL, NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_orc_mixed
+PREHOOK: query: insert into table tbl_orc_mixed values (20, '', -3, '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: query: insert into table tbl_orc_mixed values (20, '', -3, '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_orc_mixed
+PREHOOK: query: insert into table tbl_orc_mixed values (21, null, null, 'part')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: query: insert into table tbl_orc_mixed values (21, null, null,
'part')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_orc_mixed
+PREHOOK: query: insert into table tbl_orc_mixed values (22, -0.234, NULL, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: query: insert into table tbl_orc_mixed values (22, -0.234, NULL,
NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_orc_mixed
+PREHOOK: query: select count(*) from tbl_orc_mixed
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_orc_mixed
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_orc_mixed
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_orc_mixed
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+22
+PREHOOK: query: select * from tbl_orc_mixed order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_orc_mixed
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from tbl_orc_mixed order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_orc_mixed
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 1.1 2 one
+2 1.1 2 one
+3 1.1 2 one
+4 2.22 1 two
+5 2.22 1 two
+6 3.333 3 three
+7 3.333 3 three
+8 3.333 3 three
+9 4.4444 4 four
+10 NULL NULL NULL
+11 NULL NULL NULL
+12 NULL NULL NULL
+13 NULL -2 NULL
+14 NULL NULL random
+15 -0.11 NULL NULL
+16 5.55555 5 five
+17 NULL NULL
+18 NULL NULL NULL
+19 NULL NULL NULL
+20 NULL -3
+21 NULL NULL part
+22 -0.234 NULL NULL
+PREHOOK: query: drop table tbl_orc_mixed
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@tbl_orc_mixed
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl_orc_mixed
+POSTHOOK: query: drop table tbl_orc_mixed
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@tbl_orc_mixed
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl_orc_mixed
+PREHOOK: query: drop table if exists tbl_parquet_mixed
+PREHOOK: type: DROPTABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: drop table if exists tbl_parquet_mixed
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: database:default
+PREHOOK: query: create external table tbl_parquet_mixed(a int) partitioned by
(b double, c int, d string) stored as parquet
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: query: create external table tbl_parquet_mixed(a int) partitioned by
(b double, c int, d string) stored as parquet
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl_parquet_mixed
+PREHOOK: query: describe formatted tbl_parquet_mixed
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tbl_parquet_mixed
+POSTHOOK: query: describe formatted tbl_parquet_mixed
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tbl_parquet_mixed
+# col_name data_type comment
+a int
+
+# Partition Information
+# col_name data_type comment
+b double
+c int
+d string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: EXTERNAL_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ EXTERNAL TRUE
+ bucketing_version 2
+ numFiles 0
+ numPartitions 0
+ numRows 0
+ rawDataSize 0
+ totalSize #Masked#
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library:
org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe
+InputFormat:
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+OutputFormat:
org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table tbl_parquet_mixed partition (b=1.1, c=2,
d='one') values (1), (2), (3)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_mixed@b=1.1/c=2/d=one
+POSTHOOK: query: insert into table tbl_parquet_mixed partition (b=1.1, c=2,
d='one') values (1), (2), (3)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_mixed@b=1.1/c=2/d=one
+POSTHOOK: Lineage: tbl_parquet_mixed PARTITION(b=1.1,c=2,d=one).a SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_mixed partition (b=2.22, c=1,
d='two') values (4), (5)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_mixed@b=2.22/c=1/d=two
+POSTHOOK: query: insert into table tbl_parquet_mixed partition (b=2.22, c=1,
d='two') values (4), (5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_mixed@b=2.22/c=1/d=two
+POSTHOOK: Lineage: tbl_parquet_mixed PARTITION(b=2.22,c=1,d=two).a SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_mixed partition (b=3.333, c=3,
d='three') values (6), (7), (8)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_mixed@b=3.333/c=3/d=three
+POSTHOOK: query: insert into table tbl_parquet_mixed partition (b=3.333, c=3,
d='three') values (6), (7), (8)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_mixed@b=3.333/c=3/d=three
+POSTHOOK: Lineage: tbl_parquet_mixed PARTITION(b=3.333,c=3,d=three).a SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_mixed partition (b=4.4444, c=4,
d='four') values (9)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_mixed@b=4.4444/c=4/d=four
+POSTHOOK: query: insert into table tbl_parquet_mixed partition (b=4.4444, c=4,
d='four') values (9)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_mixed@b=4.4444/c=4/d=four
+POSTHOOK: Lineage: tbl_parquet_mixed PARTITION(b=4.4444,c=4,d=four).a SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_mixed values (10, '', '', '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: query: insert into table tbl_parquet_mixed values (10, '', '', '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: Output:
default@tbl_parquet_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_parquet_mixed
PARTITION(b=__HIVE_DEFAULT_PARTITION__,c=__HIVE_DEFAULT_PARTITION__,d=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_mixed values (11, null, null,
null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: query: insert into table tbl_parquet_mixed values (11, null, null,
null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: Output:
default@tbl_parquet_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_parquet_mixed
PARTITION(b=__HIVE_DEFAULT_PARTITION__,c=__HIVE_DEFAULT_PARTITION__,d=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_mixed values (12, NULL, NULL,
NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: query: insert into table tbl_parquet_mixed values (12, NULL, NULL,
NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: Output:
default@tbl_parquet_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_parquet_mixed
PARTITION(b=__HIVE_DEFAULT_PARTITION__,c=__HIVE_DEFAULT_PARTITION__,d=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_mixed values (13, '', -2, '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: query: insert into table tbl_parquet_mixed values (13, '', -2, '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: Output:
default@tbl_parquet_mixed@b=__HIVE_DEFAULT_PARTITION__/c=-2/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_parquet_mixed
PARTITION(b=__HIVE_DEFAULT_PARTITION__,c=-2,d=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_mixed values (14, null, null,
'random')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: query: insert into table tbl_parquet_mixed values (14, null, null,
'random')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: Output:
default@tbl_parquet_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=random
+POSTHOOK: Lineage: tbl_parquet_mixed
PARTITION(b=__HIVE_DEFAULT_PARTITION__,c=__HIVE_DEFAULT_PARTITION__,d=random).a
SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_mixed values (15, -0.11, NULL,
NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: query: insert into table tbl_parquet_mixed values (15, -0.11, NULL,
NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: Output:
default@tbl_parquet_mixed@b=-0.11/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_parquet_mixed
PARTITION(b=-0.11,c=__HIVE_DEFAULT_PARTITION__,d=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: select count(*) from tbl_parquet_mixed
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_mixed
+PREHOOK: Input:
default@tbl_parquet_mixed@b=-0.11/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input: default@tbl_parquet_mixed@b=1.1/c=2/d=one
+PREHOOK: Input: default@tbl_parquet_mixed@b=2.22/c=1/d=two
+PREHOOK: Input: default@tbl_parquet_mixed@b=3.333/c=3/d=three
+PREHOOK: Input: default@tbl_parquet_mixed@b=4.4444/c=4/d=four
+PREHOOK: Input:
default@tbl_parquet_mixed@b=__HIVE_DEFAULT_PARTITION__/c=-2/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input:
default@tbl_parquet_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input:
default@tbl_parquet_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=random
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_parquet_mixed
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_mixed
+POSTHOOK: Input:
default@tbl_parquet_mixed@b=-0.11/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input: default@tbl_parquet_mixed@b=1.1/c=2/d=one
+POSTHOOK: Input: default@tbl_parquet_mixed@b=2.22/c=1/d=two
+POSTHOOK: Input: default@tbl_parquet_mixed@b=3.333/c=3/d=three
+POSTHOOK: Input: default@tbl_parquet_mixed@b=4.4444/c=4/d=four
+POSTHOOK: Input:
default@tbl_parquet_mixed@b=__HIVE_DEFAULT_PARTITION__/c=-2/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input:
default@tbl_parquet_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input:
default@tbl_parquet_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=random
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+15
+PREHOOK: query: select * from tbl_parquet_mixed order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_mixed
+PREHOOK: Input:
default@tbl_parquet_mixed@b=-0.11/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input: default@tbl_parquet_mixed@b=1.1/c=2/d=one
+PREHOOK: Input: default@tbl_parquet_mixed@b=2.22/c=1/d=two
+PREHOOK: Input: default@tbl_parquet_mixed@b=3.333/c=3/d=three
+PREHOOK: Input: default@tbl_parquet_mixed@b=4.4444/c=4/d=four
+PREHOOK: Input:
default@tbl_parquet_mixed@b=__HIVE_DEFAULT_PARTITION__/c=-2/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input:
default@tbl_parquet_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input:
default@tbl_parquet_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=random
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from tbl_parquet_mixed order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_mixed
+POSTHOOK: Input:
default@tbl_parquet_mixed@b=-0.11/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input: default@tbl_parquet_mixed@b=1.1/c=2/d=one
+POSTHOOK: Input: default@tbl_parquet_mixed@b=2.22/c=1/d=two
+POSTHOOK: Input: default@tbl_parquet_mixed@b=3.333/c=3/d=three
+POSTHOOK: Input: default@tbl_parquet_mixed@b=4.4444/c=4/d=four
+POSTHOOK: Input:
default@tbl_parquet_mixed@b=__HIVE_DEFAULT_PARTITION__/c=-2/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input:
default@tbl_parquet_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input:
default@tbl_parquet_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=random
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 1.1 2 one
+2 1.1 2 one
+3 1.1 2 one
+4 2.22 1 two
+5 2.22 1 two
+6 3.333 3 three
+7 3.333 3 three
+8 3.333 3 three
+9 4.4444 4 four
+10 NULL NULL __HIVE_DEFAULT_PARTITION__
+11 NULL NULL __HIVE_DEFAULT_PARTITION__
+12 NULL NULL __HIVE_DEFAULT_PARTITION__
+13 NULL -2 __HIVE_DEFAULT_PARTITION__
+14 NULL NULL random
+15 -0.11 NULL __HIVE_DEFAULT_PARTITION__
+PREHOOK: query: explain alter table tbl_parquet_mixed convert to iceberg
+PREHOOK: type: ALTERTABLE_CONVERT
+PREHOOK: Input: default@tbl_parquet_mixed
+POSTHOOK: query: explain alter table tbl_parquet_mixed convert to iceberg
+POSTHOOK: type: ALTERTABLE_CONVERT
+POSTHOOK: Input: default@tbl_parquet_mixed
+Stage-0
+ Convert operation{"table
name:":"default.tbl_parquet_mixed","spec:":"AlterTableConvertSpec{ConvertTo=iceberg,
TBLProperties={}}"}
+
+PREHOOK: query: alter table tbl_parquet_mixed convert to iceberg
+PREHOOK: type: ALTERTABLE_CONVERT
+PREHOOK: Input: default@tbl_parquet_mixed
+POSTHOOK: query: alter table tbl_parquet_mixed convert to iceberg
+POSTHOOK: type: ALTERTABLE_CONVERT
+POSTHOOK: Input: default@tbl_parquet_mixed
+POSTHOOK: Output: default@tbl_parquet_mixed
+PREHOOK: query: describe formatted tbl_parquet_mixed
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tbl_parquet_mixed
+POSTHOOK: query: describe formatted tbl_parquet_mixed
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tbl_parquet_mixed
+# col_name data_type comment
+a int
+b double
+c int
+d string
+
+# Partition Transform Information
+# col_name transform_type
+b IDENTITY
+c IDENTITY
+d IDENTITY
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: EXTERNAL_TABLE
+Table Parameters:
+ EXTERNAL TRUE
+ MIGRATED_TO_ICEBERG true
+ bucketing_version 2
+ current-schema
{\"type\":\"struct\",\"schema-id\":0,\"fields\":[{\"id\":1,\"name\":\"a\",\"required\":false,\"type\":\"int\"},{\"id\":2,\"name\":\"b\",\"required\":false,\"type\":\"double\"},{\"id\":3,\"name\":\"c\",\"required\":false,\"type\":\"int\"},{\"id\":4,\"name\":\"d\",\"required\":false,\"type\":\"string\"}]}
+ current-snapshot-id #Masked#
+ current-snapshot-summary
{\"added-data-files\":\"10\",\"added-records\":\"15\",\"added-files-size\":\"#Masked#\",\"changed-partition-count\":\"8\",\"total-records\":\"15\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"10\",\"total-delete-files\":\"0\",\"total-position-deletes\":\"0\",\"total-equality-deletes\":\"0\",\"iceberg-version\":\"#Masked#\"}
+ current-snapshot-timestamp-ms #Masked#
+ default-partition-spec
{\"spec-id\":0,\"fields\":[{\"name\":\"b\",\"transform\":\"identity\",\"source-id\":2,\"field-id\":1000},{\"name\":\"c\",\"transform\":\"identity\",\"source-id\":3,\"field-id\":1001},{\"name\":\"d\",\"transform\":\"identity\",\"source-id\":4,\"field-id\":1002}]}
+ format-version 2
+ iceberg.orc.files.only false
+#### A masked pattern was here ####
+ metadata_location hdfs://### HDFS PATH ###
+ numFiles 10
+ numRows 15
+ parquet.compression zstd
+ previous_metadata_location hdfs://### HDFS PATH ###
+ schema.name-mapping.default [ {
+ \"field-id\" : 1,
+ \"names\" : [ \"a\" ]
+ }, {
+ \"field-id\" : 2,
+ \"names\" : [ \"b\" ]
+ }, {
+ \"field-id\" : 3,
+ \"names\" : [ \"c\" ]
+ }, {
+ \"field-id\" : 4,
+ \"names\" : [ \"d\" ]
+ } ]
+ snapshot-count 1
+ storage_handler
org.apache.iceberg.mr.hive.HiveIcebergStorageHandler
+ table_type ICEBERG
+ totalSize #Masked#
+#### A masked pattern was here ####
+ uuid #Masked#
+ write.delete.mode merge-on-read
+ write.format.default parquet
+ write.merge.mode merge-on-read
+ write.update.mode merge-on-read
+
+# Storage Information
+SerDe Library: org.apache.iceberg.mr.hive.HiveIcebergSerDe
+InputFormat: org.apache.iceberg.mr.hive.HiveIcebergInputFormat
+OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat
+Compressed: No
+Sort Columns: []
+PREHOOK: query: select count(*) from tbl_parquet_mixed
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_mixed
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_parquet_mixed
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_mixed
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+15
+PREHOOK: query: select * from tbl_parquet_mixed order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_mixed
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from tbl_parquet_mixed order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_mixed
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 1.1 2 one
+2 1.1 2 one
+3 1.1 2 one
+4 2.22 1 two
+5 2.22 1 two
+6 3.333 3 three
+7 3.333 3 three
+8 3.333 3 three
+9 4.4444 4 four
+10 NULL NULL NULL
+11 NULL NULL NULL
+12 NULL NULL NULL
+13 NULL -2 NULL
+14 NULL NULL random
+15 -0.11 NULL NULL
+PREHOOK: query: insert into table tbl_parquet_mixed partition (b=5.55555, c =
5, d = 'five') values (16)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_mixed@d=five/b=5.55555/c=5
+POSTHOOK: query: insert into table tbl_parquet_mixed partition (b=5.55555, c =
5, d = 'five') values (16)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_mixed@d=five/b=5.55555/c=5
+PREHOOK: query: insert into table tbl_parquet_mixed values (17, '', '', '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: query: insert into table tbl_parquet_mixed values (17, '', '', '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_mixed
+PREHOOK: query: insert into table tbl_parquet_mixed values (18, null, null,
null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: query: insert into table tbl_parquet_mixed values (18, null, null,
null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_mixed
+PREHOOK: query: insert into table tbl_parquet_mixed values (19, NULL, NULL,
NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: query: insert into table tbl_parquet_mixed values (19, NULL, NULL,
NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_mixed
+PREHOOK: query: insert into table tbl_parquet_mixed values (20, '', -3, '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: query: insert into table tbl_parquet_mixed values (20, '', -3, '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_mixed
+PREHOOK: query: insert into table tbl_parquet_mixed values (21, null, null,
'part')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: query: insert into table tbl_parquet_mixed values (21, null, null,
'part')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_mixed
+PREHOOK: query: insert into table tbl_parquet_mixed values (22, -0.234, NULL,
NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: query: insert into table tbl_parquet_mixed values (22, -0.234, NULL,
NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_mixed
+PREHOOK: query: select count(*) from tbl_parquet_mixed
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_mixed
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_parquet_mixed
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_mixed
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+22
+PREHOOK: query: select * from tbl_parquet_mixed order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_mixed
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from tbl_parquet_mixed order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_mixed
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 1.1 2 one
+2 1.1 2 one
+3 1.1 2 one
+4 2.22 1 two
+5 2.22 1 two
+6 3.333 3 three
+7 3.333 3 three
+8 3.333 3 three
+9 4.4444 4 four
+10 NULL NULL NULL
+11 NULL NULL NULL
+12 NULL NULL NULL
+13 NULL -2 NULL
+14 NULL NULL random
+15 -0.11 NULL NULL
+16 5.55555 5 five
+17 NULL NULL
+18 NULL NULL NULL
+19 NULL NULL NULL
+20 NULL -3
+21 NULL NULL part
+22 -0.234 NULL NULL
+PREHOOK: query: drop table tbl_parquet_mixed
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@tbl_parquet_mixed
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl_parquet_mixed
+POSTHOOK: query: drop table tbl_parquet_mixed
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@tbl_parquet_mixed
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl_parquet_mixed
+PREHOOK: query: drop table if exists tbl_avro_mixed
+PREHOOK: type: DROPTABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: drop table if exists tbl_avro_mixed
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: database:default
+PREHOOK: query: create external table tbl_avro_mixed(a int) partitioned by (b
double, c int, d string) stored as avro
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: query: create external table tbl_avro_mixed(a int) partitioned by (b
double, c int, d string) stored as avro
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl_avro_mixed
+PREHOOK: query: describe formatted tbl_avro_mixed
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tbl_avro_mixed
+POSTHOOK: query: describe formatted tbl_avro_mixed
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tbl_avro_mixed
+# col_name data_type comment
+a int
+
+# Partition Information
+# col_name data_type comment
+b double
+c int
+d string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: EXTERNAL_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ EXTERNAL TRUE
+ bucketing_version 2
+ numFiles 0
+ numPartitions 0
+ numRows 0
+ rawDataSize 0
+ totalSize #Masked#
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.avro.AvroSerDe
+InputFormat:
org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat
+OutputFormat:
org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table tbl_avro_mixed partition (b=1.1, c=2,
d='one') values (1), (2), (3)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_avro_mixed@b=1.1/c=2/d=one
+POSTHOOK: query: insert into table tbl_avro_mixed partition (b=1.1, c=2,
d='one') values (1), (2), (3)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_avro_mixed@b=1.1/c=2/d=one
+POSTHOOK: Lineage: tbl_avro_mixed PARTITION(b=1.1,c=2,d=one).a SCRIPT []
+PREHOOK: query: insert into table tbl_avro_mixed partition (b=2.22, c=1,
d='two') values (4), (5)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_avro_mixed@b=2.22/c=1/d=two
+POSTHOOK: query: insert into table tbl_avro_mixed partition (b=2.22, c=1,
d='two') values (4), (5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_avro_mixed@b=2.22/c=1/d=two
+POSTHOOK: Lineage: tbl_avro_mixed PARTITION(b=2.22,c=1,d=two).a SCRIPT []
+PREHOOK: query: insert into table tbl_avro_mixed partition (b=3.333, c=3,
d='three') values (6), (7), (8)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_avro_mixed@b=3.333/c=3/d=three
+POSTHOOK: query: insert into table tbl_avro_mixed partition (b=3.333, c=3,
d='three') values (6), (7), (8)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_avro_mixed@b=3.333/c=3/d=three
+POSTHOOK: Lineage: tbl_avro_mixed PARTITION(b=3.333,c=3,d=three).a SCRIPT []
+PREHOOK: query: insert into table tbl_avro_mixed partition (b=4.4444, c=4,
d='four') values (9)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_avro_mixed@b=4.4444/c=4/d=four
+POSTHOOK: query: insert into table tbl_avro_mixed partition (b=4.4444, c=4,
d='four') values (9)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_avro_mixed@b=4.4444/c=4/d=four
+POSTHOOK: Lineage: tbl_avro_mixed PARTITION(b=4.4444,c=4,d=four).a SCRIPT []
+PREHOOK: query: insert into table tbl_avro_mixed values (10, '', '', '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: query: insert into table tbl_avro_mixed values (10, '', '', '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: Output:
default@tbl_avro_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_avro_mixed
PARTITION(b=__HIVE_DEFAULT_PARTITION__,c=__HIVE_DEFAULT_PARTITION__,d=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: insert into table tbl_avro_mixed values (11, null, null, null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: query: insert into table tbl_avro_mixed values (11, null, null, null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: Output:
default@tbl_avro_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_avro_mixed
PARTITION(b=__HIVE_DEFAULT_PARTITION__,c=__HIVE_DEFAULT_PARTITION__,d=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: insert into table tbl_avro_mixed values (12, NULL, NULL, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: query: insert into table tbl_avro_mixed values (12, NULL, NULL, NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: Output:
default@tbl_avro_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_avro_mixed
PARTITION(b=__HIVE_DEFAULT_PARTITION__,c=__HIVE_DEFAULT_PARTITION__,d=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: insert into table tbl_avro_mixed values (13, '', -2, '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: query: insert into table tbl_avro_mixed values (13, '', -2, '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: Output:
default@tbl_avro_mixed@b=__HIVE_DEFAULT_PARTITION__/c=-2/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_avro_mixed
PARTITION(b=__HIVE_DEFAULT_PARTITION__,c=-2,d=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: insert into table tbl_avro_mixed values (14, null, null,
'random')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: query: insert into table tbl_avro_mixed values (14, null, null,
'random')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: Output:
default@tbl_avro_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=random
+POSTHOOK: Lineage: tbl_avro_mixed
PARTITION(b=__HIVE_DEFAULT_PARTITION__,c=__HIVE_DEFAULT_PARTITION__,d=random).a
SCRIPT []
+PREHOOK: query: insert into table tbl_avro_mixed values (15, -0.11, NULL, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: query: insert into table tbl_avro_mixed values (15, -0.11, NULL,
NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: Output:
default@tbl_avro_mixed@b=-0.11/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_avro_mixed
PARTITION(b=-0.11,c=__HIVE_DEFAULT_PARTITION__,d=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: select count(*) from tbl_avro_mixed
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_avro_mixed
+PREHOOK: Input:
default@tbl_avro_mixed@b=-0.11/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input: default@tbl_avro_mixed@b=1.1/c=2/d=one
+PREHOOK: Input: default@tbl_avro_mixed@b=2.22/c=1/d=two
+PREHOOK: Input: default@tbl_avro_mixed@b=3.333/c=3/d=three
+PREHOOK: Input: default@tbl_avro_mixed@b=4.4444/c=4/d=four
+PREHOOK: Input:
default@tbl_avro_mixed@b=__HIVE_DEFAULT_PARTITION__/c=-2/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input:
default@tbl_avro_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input:
default@tbl_avro_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=random
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_avro_mixed
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_avro_mixed
+POSTHOOK: Input:
default@tbl_avro_mixed@b=-0.11/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input: default@tbl_avro_mixed@b=1.1/c=2/d=one
+POSTHOOK: Input: default@tbl_avro_mixed@b=2.22/c=1/d=two
+POSTHOOK: Input: default@tbl_avro_mixed@b=3.333/c=3/d=three
+POSTHOOK: Input: default@tbl_avro_mixed@b=4.4444/c=4/d=four
+POSTHOOK: Input:
default@tbl_avro_mixed@b=__HIVE_DEFAULT_PARTITION__/c=-2/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input:
default@tbl_avro_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input:
default@tbl_avro_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=random
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+15
+PREHOOK: query: select * from tbl_avro_mixed order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_avro_mixed
+PREHOOK: Input:
default@tbl_avro_mixed@b=-0.11/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input: default@tbl_avro_mixed@b=1.1/c=2/d=one
+PREHOOK: Input: default@tbl_avro_mixed@b=2.22/c=1/d=two
+PREHOOK: Input: default@tbl_avro_mixed@b=3.333/c=3/d=three
+PREHOOK: Input: default@tbl_avro_mixed@b=4.4444/c=4/d=four
+PREHOOK: Input:
default@tbl_avro_mixed@b=__HIVE_DEFAULT_PARTITION__/c=-2/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input:
default@tbl_avro_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input:
default@tbl_avro_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=random
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from tbl_avro_mixed order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_avro_mixed
+POSTHOOK: Input:
default@tbl_avro_mixed@b=-0.11/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input: default@tbl_avro_mixed@b=1.1/c=2/d=one
+POSTHOOK: Input: default@tbl_avro_mixed@b=2.22/c=1/d=two
+POSTHOOK: Input: default@tbl_avro_mixed@b=3.333/c=3/d=three
+POSTHOOK: Input: default@tbl_avro_mixed@b=4.4444/c=4/d=four
+POSTHOOK: Input:
default@tbl_avro_mixed@b=__HIVE_DEFAULT_PARTITION__/c=-2/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input:
default@tbl_avro_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input:
default@tbl_avro_mixed@b=__HIVE_DEFAULT_PARTITION__/c=__HIVE_DEFAULT_PARTITION__/d=random
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 1.1 2 one
+2 1.1 2 one
+3 1.1 2 one
+4 2.22 1 two
+5 2.22 1 two
+6 3.333 3 three
+7 3.333 3 three
+8 3.333 3 three
+9 4.4444 4 four
+10 NULL NULL __HIVE_DEFAULT_PARTITION__
+11 NULL NULL __HIVE_DEFAULT_PARTITION__
+12 NULL NULL __HIVE_DEFAULT_PARTITION__
+13 NULL -2 __HIVE_DEFAULT_PARTITION__
+14 NULL NULL random
+15 -0.11 NULL __HIVE_DEFAULT_PARTITION__
+PREHOOK: query: explain alter table tbl_avro_mixed convert to iceberg
+PREHOOK: type: ALTERTABLE_CONVERT
+PREHOOK: Input: default@tbl_avro_mixed
+POSTHOOK: query: explain alter table tbl_avro_mixed convert to iceberg
+POSTHOOK: type: ALTERTABLE_CONVERT
+POSTHOOK: Input: default@tbl_avro_mixed
+Stage-0
+ Convert operation{"table
name:":"default.tbl_avro_mixed","spec:":"AlterTableConvertSpec{ConvertTo=iceberg,
TBLProperties={}}"}
+
+PREHOOK: query: alter table tbl_avro_mixed convert to iceberg
+PREHOOK: type: ALTERTABLE_CONVERT
+PREHOOK: Input: default@tbl_avro_mixed
+POSTHOOK: query: alter table tbl_avro_mixed convert to iceberg
+POSTHOOK: type: ALTERTABLE_CONVERT
+POSTHOOK: Input: default@tbl_avro_mixed
+POSTHOOK: Output: default@tbl_avro_mixed
+PREHOOK: query: describe formatted tbl_avro_mixed
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tbl_avro_mixed
+POSTHOOK: query: describe formatted tbl_avro_mixed
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tbl_avro_mixed
+# col_name data_type comment
+a int
+b double
+c int
+d string
+
+# Partition Transform Information
+# col_name transform_type
+b IDENTITY
+c IDENTITY
+d IDENTITY
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: EXTERNAL_TABLE
+Table Parameters:
+ EXTERNAL TRUE
+ MIGRATED_TO_ICEBERG true
+ bucketing_version 2
+ current-schema
{\"type\":\"struct\",\"schema-id\":0,\"fields\":[{\"id\":1,\"name\":\"a\",\"required\":false,\"type\":\"int\"},{\"id\":2,\"name\":\"b\",\"required\":false,\"type\":\"double\"},{\"id\":3,\"name\":\"c\",\"required\":false,\"type\":\"int\"},{\"id\":4,\"name\":\"d\",\"required\":false,\"type\":\"string\"}]}
+ current-snapshot-id #Masked#
+ current-snapshot-summary
{\"added-data-files\":\"10\",\"added-records\":\"15\",\"added-files-size\":\"#Masked#\",\"changed-partition-count\":\"8\",\"total-records\":\"15\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"10\",\"total-delete-files\":\"0\",\"total-position-deletes\":\"0\",\"total-equality-deletes\":\"0\",\"iceberg-version\":\"#Masked#\"}
+ current-snapshot-timestamp-ms #Masked#
+ default-partition-spec
{\"spec-id\":0,\"fields\":[{\"name\":\"b\",\"transform\":\"identity\",\"source-id\":2,\"field-id\":1000},{\"name\":\"c\",\"transform\":\"identity\",\"source-id\":3,\"field-id\":1001},{\"name\":\"d\",\"transform\":\"identity\",\"source-id\":4,\"field-id\":1002}]}
+ format-version 2
+ iceberg.orc.files.only false
+#### A masked pattern was here ####
+ metadata_location hdfs://### HDFS PATH ###
+ numFiles 10
+ numRows 15
+ parquet.compression zstd
+ previous_metadata_location hdfs://### HDFS PATH ###
+ schema.name-mapping.default [ {
+ \"field-id\" : 1,
+ \"names\" : [ \"a\" ]
+ }, {
+ \"field-id\" : 2,
+ \"names\" : [ \"b\" ]
+ }, {
+ \"field-id\" : 3,
+ \"names\" : [ \"c\" ]
+ }, {
+ \"field-id\" : 4,
+ \"names\" : [ \"d\" ]
+ } ]
+ snapshot-count 1
+ storage_handler
org.apache.iceberg.mr.hive.HiveIcebergStorageHandler
+ table_type ICEBERG
+ totalSize #Masked#
+#### A masked pattern was here ####
+ uuid #Masked#
+ write.delete.mode merge-on-read
+ write.format.default avro
+ write.merge.mode merge-on-read
+ write.update.mode merge-on-read
+
+# Storage Information
+SerDe Library: org.apache.iceberg.mr.hive.HiveIcebergSerDe
+InputFormat: org.apache.iceberg.mr.hive.HiveIcebergInputFormat
+OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat
+Compressed: No
+Sort Columns: []
+PREHOOK: query: select count(*) from tbl_avro_mixed
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_avro_mixed
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_avro_mixed
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_avro_mixed
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+15
+PREHOOK: query: select * from tbl_avro_mixed order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_avro_mixed
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from tbl_avro_mixed order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_avro_mixed
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 1.1 2 one
+2 1.1 2 one
+3 1.1 2 one
+4 2.22 1 two
+5 2.22 1 two
+6 3.333 3 three
+7 3.333 3 three
+8 3.333 3 three
+9 4.4444 4 four
+10 NULL NULL NULL
+11 NULL NULL NULL
+12 NULL NULL NULL
+13 NULL -2 NULL
+14 NULL NULL random
+15 -0.11 NULL NULL
+PREHOOK: query: insert into table tbl_avro_mixed partition (b=5.55555, c = 5,
d = 'five') values (16)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_avro_mixed@d=five/b=5.55555/c=5
+POSTHOOK: query: insert into table tbl_avro_mixed partition (b=5.55555, c = 5,
d = 'five') values (16)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_avro_mixed@d=five/b=5.55555/c=5
+PREHOOK: query: insert into table tbl_avro_mixed values (17, '', '', '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: query: insert into table tbl_avro_mixed values (17, '', '', '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_avro_mixed
+PREHOOK: query: insert into table tbl_avro_mixed values (18, null, null, null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: query: insert into table tbl_avro_mixed values (18, null, null, null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_avro_mixed
+PREHOOK: query: insert into table tbl_avro_mixed values (19, NULL, NULL, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: query: insert into table tbl_avro_mixed values (19, NULL, NULL, NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_avro_mixed
+PREHOOK: query: insert into table tbl_avro_mixed values (20, '', -3, '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: query: insert into table tbl_avro_mixed values (20, '', -3, '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_avro_mixed
+PREHOOK: query: insert into table tbl_avro_mixed values (21, null, null,
'part')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: query: insert into table tbl_avro_mixed values (21, null, null,
'part')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_avro_mixed
+PREHOOK: query: insert into table tbl_avro_mixed values (22, -0.234, NULL,
NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: query: insert into table tbl_avro_mixed values (22, -0.234, NULL,
NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_avro_mixed
+PREHOOK: query: select count(*) from tbl_avro_mixed
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_avro_mixed
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_avro_mixed
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_avro_mixed
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+22
+PREHOOK: query: select * from tbl_avro_mixed order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_avro_mixed
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from tbl_avro_mixed order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_avro_mixed
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 1.1 2 one
+2 1.1 2 one
+3 1.1 2 one
+4 2.22 1 two
+5 2.22 1 two
+6 3.333 3 three
+7 3.333 3 three
+8 3.333 3 three
+9 4.4444 4 four
+10 NULL NULL NULL
+11 NULL NULL NULL
+12 NULL NULL NULL
+13 NULL -2 NULL
+14 NULL NULL random
+15 -0.11 NULL NULL
+16 5.55555 5 five
+17 NULL NULL
+18 NULL NULL NULL
+19 NULL NULL NULL
+20 NULL -3
+21 NULL NULL part
+22 -0.234 NULL NULL
+PREHOOK: query: drop table tbl_avro_mixed
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@tbl_avro_mixed
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl_avro_mixed
+POSTHOOK: query: drop table tbl_avro_mixed
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@tbl_avro_mixed
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl_avro_mixed
diff --git
a/iceberg/iceberg-handler/src/test/results/positive/alter_part_table_to_iceberg.q.out
b/iceberg/iceberg-handler/src/test/results/positive/alter_part_table_to_iceberg.q.out
index f9addd2222c..e8b56f6ba92 100644
---
a/iceberg/iceberg-handler/src/test/results/positive/alter_part_table_to_iceberg.q.out
+++
b/iceberg/iceberg-handler/src/test/results/positive/alter_part_table_to_iceberg.q.out
@@ -307,9 +307,59 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@tbl_parquet@b=four
POSTHOOK: Lineage: tbl_parquet PARTITION(b=four).a SCRIPT []
+PREHOOK: query: insert into table tbl_parquet values (10, '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet
+POSTHOOK: query: insert into table tbl_parquet values (10, '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet
+POSTHOOK: Output: default@tbl_parquet@b=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_parquet PARTITION(b=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: insert into table tbl_parquet values (11, null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet
+POSTHOOK: query: insert into table tbl_parquet values (11, null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet
+POSTHOOK: Output: default@tbl_parquet@b=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_parquet PARTITION(b=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: insert into table tbl_parquet values (12, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet
+POSTHOOK: query: insert into table tbl_parquet values (12, NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet
+POSTHOOK: Output: default@tbl_parquet@b=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_parquet PARTITION(b=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: select count(*) from tbl_parquet
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet
+PREHOOK: Input: default@tbl_parquet@b=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Input: default@tbl_parquet@b=four
+PREHOOK: Input: default@tbl_parquet@b=one
+PREHOOK: Input: default@tbl_parquet@b=three
+PREHOOK: Input: default@tbl_parquet@b=two
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_parquet
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet
+POSTHOOK: Input: default@tbl_parquet@b=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Input: default@tbl_parquet@b=four
+POSTHOOK: Input: default@tbl_parquet@b=one
+POSTHOOK: Input: default@tbl_parquet@b=three
+POSTHOOK: Input: default@tbl_parquet@b=two
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+12
PREHOOK: query: select * from tbl_parquet order by a
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl_parquet
+PREHOOK: Input: default@tbl_parquet@b=__HIVE_DEFAULT_PARTITION__
PREHOOK: Input: default@tbl_parquet@b=four
PREHOOK: Input: default@tbl_parquet@b=one
PREHOOK: Input: default@tbl_parquet@b=three
@@ -318,6 +368,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
POSTHOOK: query: select * from tbl_parquet order by a
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl_parquet
+POSTHOOK: Input: default@tbl_parquet@b=__HIVE_DEFAULT_PARTITION__
POSTHOOK: Input: default@tbl_parquet@b=four
POSTHOOK: Input: default@tbl_parquet@b=one
POSTHOOK: Input: default@tbl_parquet@b=three
@@ -332,6 +383,9 @@ POSTHOOK: Output: hdfs://### HDFS PATH ###
7 three
8 three
9 four
+10 __HIVE_DEFAULT_PARTITION__
+11 __HIVE_DEFAULT_PARTITION__
+12 __HIVE_DEFAULT_PARTITION__
PREHOOK: query: explain alter table tbl_parquet convert to iceberg
PREHOOK: type: ALTERTABLE_CONVERT
PREHOOK: Input: default@tbl_parquet
@@ -374,15 +428,15 @@ Table Parameters:
bucketing_version 2
current-schema
{\"type\":\"struct\",\"schema-id\":0,\"fields\":[{\"id\":1,\"name\":\"a\",\"required\":false,\"type\":\"int\"},{\"id\":2,\"name\":\"b\",\"required\":false,\"type\":\"string\"}]}
current-snapshot-id #Masked#
- current-snapshot-summary
{\"added-data-files\":\"4\",\"added-records\":\"9\",\"added-files-size\":\"#Masked#\",\"changed-partition-count\":\"4\",\"total-records\":\"9\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"4\",\"total-delete-files\":\"0\",\"total-position-deletes\":\"0\",\"total-equality-deletes\":\"0\",\"iceberg-version\":\"#Masked#\"}
+ current-snapshot-summary
{\"added-data-files\":\"7\",\"added-records\":\"12\",\"added-files-size\":\"#Masked#\",\"changed-partition-count\":\"5\",\"total-records\":\"12\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"7\",\"total-delete-files\":\"0\",\"total-position-deletes\":\"0\",\"total-equality-deletes\":\"0\",\"iceberg-version\":\"#Masked#\"}
current-snapshot-timestamp-ms #Masked#
default-partition-spec
{\"spec-id\":0,\"fields\":[{\"name\":\"b\",\"transform\":\"identity\",\"source-id\":2,\"field-id\":1000}]}
format-version 2
iceberg.orc.files.only false
#### A masked pattern was here ####
metadata_location hdfs://### HDFS PATH ###
- numFiles 4
- numRows 9
+ numFiles 7
+ numRows 12
parquet.compression zstd
previous_metadata_location hdfs://### HDFS PATH ###
schema.name-mapping.default [ {
@@ -409,6 +463,76 @@ InputFormat:
org.apache.iceberg.mr.hive.HiveIcebergInputFormat
OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat
Compressed: No
Sort Columns: []
+PREHOOK: query: select count(*) from tbl_parquet
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_parquet
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+12
+PREHOOK: query: select * from tbl_parquet order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from tbl_parquet order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 one
+2 one
+3 one
+4 two
+5 two
+6 three
+7 three
+8 three
+9 four
+10 NULL
+11 NULL
+12 NULL
+PREHOOK: query: insert into table tbl_parquet partition (b='five') values (13)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet@b=five
+POSTHOOK: query: insert into table tbl_parquet partition (b='five') values (13)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet@b=five
+PREHOOK: query: insert into table tbl_parquet values (14, '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet
+POSTHOOK: query: insert into table tbl_parquet values (14, '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet
+PREHOOK: query: insert into table tbl_parquet values (15, null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet
+POSTHOOK: query: insert into table tbl_parquet values (15, null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet
+PREHOOK: query: insert into table tbl_parquet values (16, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet
+POSTHOOK: query: insert into table tbl_parquet values (16, NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet
+PREHOOK: query: select count(*) from tbl_parquet
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_parquet
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+16
PREHOOK: query: select * from tbl_parquet order by a
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl_parquet
@@ -426,6 +550,13 @@ POSTHOOK: Output: hdfs://### HDFS PATH ###
7 three
8 three
9 four
+10 NULL
+11 NULL
+12 NULL
+13 five
+14
+15 NULL
+16 NULL
PREHOOK: query: drop table tbl_parquet
PREHOOK: type: DROPTABLE
PREHOOK: Input: default@tbl_parquet
@@ -436,6 +567,706 @@ POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@tbl_parquet
POSTHOOK: Output: database:default
POSTHOOK: Output: default@tbl_parquet
+PREHOOK: query: drop table if exists tbl_parquet_int
+PREHOOK: type: DROPTABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: drop table if exists tbl_parquet_int
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: database:default
+PREHOOK: query: create external table tbl_parquet_int(a int) partitioned by (b
int) stored as parquet
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl_parquet_int
+POSTHOOK: query: create external table tbl_parquet_int(a int) partitioned by
(b int) stored as parquet
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl_parquet_int
+PREHOOK: query: describe formatted tbl_parquet_int
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tbl_parquet_int
+POSTHOOK: query: describe formatted tbl_parquet_int
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tbl_parquet_int
+# col_name data_type comment
+a int
+
+# Partition Information
+# col_name data_type comment
+b int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: EXTERNAL_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ EXTERNAL TRUE
+ bucketing_version 2
+ numFiles 0
+ numPartitions 0
+ numRows 0
+ rawDataSize 0
+ totalSize #Masked#
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library:
org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe
+InputFormat:
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+OutputFormat:
org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table tbl_parquet_int partition (b=1) values (1),
(2), (3)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_int@b=1
+POSTHOOK: query: insert into table tbl_parquet_int partition (b=1) values (1),
(2), (3)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_int@b=1
+POSTHOOK: Lineage: tbl_parquet_int PARTITION(b=1).a SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_int partition (b=2) values (4),
(5)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_int@b=2
+POSTHOOK: query: insert into table tbl_parquet_int partition (b=2) values (4),
(5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_int@b=2
+POSTHOOK: Lineage: tbl_parquet_int PARTITION(b=2).a SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_int partition (b=3) values (6),
(7), (8)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_int@b=3
+POSTHOOK: query: insert into table tbl_parquet_int partition (b=3) values (6),
(7), (8)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_int@b=3
+POSTHOOK: Lineage: tbl_parquet_int PARTITION(b=3).a SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_int partition (b=4) values (9)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_int@b=4
+POSTHOOK: query: insert into table tbl_parquet_int partition (b=4) values (9)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_int@b=4
+POSTHOOK: Lineage: tbl_parquet_int PARTITION(b=4).a SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_int values (10, '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_int
+POSTHOOK: query: insert into table tbl_parquet_int values (10, '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_int
+POSTHOOK: Output: default@tbl_parquet_int@b=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_parquet_int PARTITION(b=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_int values (11, null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_int
+POSTHOOK: query: insert into table tbl_parquet_int values (11, null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_int
+POSTHOOK: Output: default@tbl_parquet_int@b=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_parquet_int PARTITION(b=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_int values (12, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_int
+POSTHOOK: query: insert into table tbl_parquet_int values (12, NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_int
+POSTHOOK: Output: default@tbl_parquet_int@b=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_parquet_int PARTITION(b=__HIVE_DEFAULT_PARTITION__).a
SCRIPT []
+PREHOOK: query: select count(*) from tbl_parquet_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_int
+PREHOOK: Input: default@tbl_parquet_int@b=1
+PREHOOK: Input: default@tbl_parquet_int@b=2
+PREHOOK: Input: default@tbl_parquet_int@b=3
+PREHOOK: Input: default@tbl_parquet_int@b=4
+PREHOOK: Input: default@tbl_parquet_int@b=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_parquet_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_int
+POSTHOOK: Input: default@tbl_parquet_int@b=1
+POSTHOOK: Input: default@tbl_parquet_int@b=2
+POSTHOOK: Input: default@tbl_parquet_int@b=3
+POSTHOOK: Input: default@tbl_parquet_int@b=4
+POSTHOOK: Input: default@tbl_parquet_int@b=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+12
+PREHOOK: query: select * from tbl_parquet_int order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_int
+PREHOOK: Input: default@tbl_parquet_int@b=1
+PREHOOK: Input: default@tbl_parquet_int@b=2
+PREHOOK: Input: default@tbl_parquet_int@b=3
+PREHOOK: Input: default@tbl_parquet_int@b=4
+PREHOOK: Input: default@tbl_parquet_int@b=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from tbl_parquet_int order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_int
+POSTHOOK: Input: default@tbl_parquet_int@b=1
+POSTHOOK: Input: default@tbl_parquet_int@b=2
+POSTHOOK: Input: default@tbl_parquet_int@b=3
+POSTHOOK: Input: default@tbl_parquet_int@b=4
+POSTHOOK: Input: default@tbl_parquet_int@b=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 1
+2 1
+3 1
+4 2
+5 2
+6 3
+7 3
+8 3
+9 4
+10 NULL
+11 NULL
+12 NULL
+PREHOOK: query: explain alter table tbl_parquet_int convert to iceberg
+PREHOOK: type: ALTERTABLE_CONVERT
+PREHOOK: Input: default@tbl_parquet_int
+POSTHOOK: query: explain alter table tbl_parquet_int convert to iceberg
+POSTHOOK: type: ALTERTABLE_CONVERT
+POSTHOOK: Input: default@tbl_parquet_int
+Stage-0
+ Convert operation{"table
name:":"default.tbl_parquet_int","spec:":"AlterTableConvertSpec{ConvertTo=iceberg,
TBLProperties={}}"}
+
+PREHOOK: query: alter table tbl_parquet_int convert to iceberg
+PREHOOK: type: ALTERTABLE_CONVERT
+PREHOOK: Input: default@tbl_parquet_int
+POSTHOOK: query: alter table tbl_parquet_int convert to iceberg
+POSTHOOK: type: ALTERTABLE_CONVERT
+POSTHOOK: Input: default@tbl_parquet_int
+POSTHOOK: Output: default@tbl_parquet_int
+PREHOOK: query: describe formatted tbl_parquet_int
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tbl_parquet_int
+POSTHOOK: query: describe formatted tbl_parquet_int
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tbl_parquet_int
+# col_name data_type comment
+a int
+b int
+
+# Partition Transform Information
+# col_name transform_type
+b IDENTITY
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: EXTERNAL_TABLE
+Table Parameters:
+ EXTERNAL TRUE
+ MIGRATED_TO_ICEBERG true
+ bucketing_version 2
+ current-schema
{\"type\":\"struct\",\"schema-id\":0,\"fields\":[{\"id\":1,\"name\":\"a\",\"required\":false,\"type\":\"int\"},{\"id\":2,\"name\":\"b\",\"required\":false,\"type\":\"int\"}]}
+ current-snapshot-id #Masked#
+ current-snapshot-summary
{\"added-data-files\":\"7\",\"added-records\":\"12\",\"added-files-size\":\"#Masked#\",\"changed-partition-count\":\"5\",\"total-records\":\"12\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"7\",\"total-delete-files\":\"0\",\"total-position-deletes\":\"0\",\"total-equality-deletes\":\"0\",\"iceberg-version\":\"#Masked#\"}
+ current-snapshot-timestamp-ms #Masked#
+ default-partition-spec
{\"spec-id\":0,\"fields\":[{\"name\":\"b\",\"transform\":\"identity\",\"source-id\":2,\"field-id\":1000}]}
+ format-version 2
+ iceberg.orc.files.only false
+#### A masked pattern was here ####
+ metadata_location hdfs://### HDFS PATH ###
+ numFiles 7
+ numRows 12
+ parquet.compression zstd
+ previous_metadata_location hdfs://### HDFS PATH ###
+ schema.name-mapping.default [ {
+ \"field-id\" : 1,
+ \"names\" : [ \"a\" ]
+ }, {
+ \"field-id\" : 2,
+ \"names\" : [ \"b\" ]
+ } ]
+ snapshot-count 1
+ storage_handler
org.apache.iceberg.mr.hive.HiveIcebergStorageHandler
+ table_type ICEBERG
+ totalSize #Masked#
+#### A masked pattern was here ####
+ uuid #Masked#
+ write.delete.mode merge-on-read
+ write.format.default parquet
+ write.merge.mode merge-on-read
+ write.update.mode merge-on-read
+
+# Storage Information
+SerDe Library: org.apache.iceberg.mr.hive.HiveIcebergSerDe
+InputFormat: org.apache.iceberg.mr.hive.HiveIcebergInputFormat
+OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat
+Compressed: No
+Sort Columns: []
+PREHOOK: query: select count(*) from tbl_parquet_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_int
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_parquet_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_int
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+12
+PREHOOK: query: select * from tbl_parquet_int order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_int
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from tbl_parquet_int order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_int
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 1
+2 1
+3 1
+4 2
+5 2
+6 3
+7 3
+8 3
+9 4
+10 NULL
+11 NULL
+12 NULL
+PREHOOK: query: insert into table tbl_parquet_int partition (b=5) values (13)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_int@b=5
+POSTHOOK: query: insert into table tbl_parquet_int partition (b=5) values (13)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_int@b=5
+PREHOOK: query: insert into table tbl_parquet_int values (14, '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_int
+POSTHOOK: query: insert into table tbl_parquet_int values (14, '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_int
+PREHOOK: query: insert into table tbl_parquet_int values (15, null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_int
+POSTHOOK: query: insert into table tbl_parquet_int values (15, null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_int
+PREHOOK: query: insert into table tbl_parquet_int values (16, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_int
+POSTHOOK: query: insert into table tbl_parquet_int values (16, NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_int
+PREHOOK: query: select count(*) from tbl_parquet_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_int
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_parquet_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_int
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+16
+PREHOOK: query: select * from tbl_parquet_int order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_int
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from tbl_parquet_int order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_int
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 1
+2 1
+3 1
+4 2
+5 2
+6 3
+7 3
+8 3
+9 4
+10 NULL
+11 NULL
+12 NULL
+13 5
+14 NULL
+15 NULL
+16 NULL
+PREHOOK: query: drop table tbl_parquet_int
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@tbl_parquet_int
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl_parquet_int
+POSTHOOK: query: drop table tbl_parquet_int
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@tbl_parquet_int
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl_parquet_int
+PREHOOK: query: drop table if exists tbl_parquet_double
+PREHOOK: type: DROPTABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: drop table if exists tbl_parquet_double
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: database:default
+PREHOOK: query: create external table tbl_parquet_double(a int) partitioned by (b double) stored as parquet
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl_parquet_double
+POSTHOOK: query: create external table tbl_parquet_double(a int) partitioned by (b double) stored as parquet
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl_parquet_double
+PREHOOK: query: describe formatted tbl_parquet_double
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tbl_parquet_double
+POSTHOOK: query: describe formatted tbl_parquet_double
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tbl_parquet_double
+# col_name data_type comment
+a int
+
+# Partition Information
+# col_name data_type comment
+b double
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: EXTERNAL_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ EXTERNAL TRUE
+ bucketing_version 2
+ numFiles 0
+ numPartitions 0
+ numRows 0
+ rawDataSize 0
+ totalSize #Masked#
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe
+InputFormat: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table tbl_parquet_double partition (b=1.1) values (1), (2), (3)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_double@b=1.1
+POSTHOOK: query: insert into table tbl_parquet_double partition (b=1.1) values (1), (2), (3)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_double@b=1.1
+POSTHOOK: Lineage: tbl_parquet_double PARTITION(b=1.1).a SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_double partition (b=2.22) values (4), (5)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_double@b=2.22
+POSTHOOK: query: insert into table tbl_parquet_double partition (b=2.22) values (4), (5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_double@b=2.22
+POSTHOOK: Lineage: tbl_parquet_double PARTITION(b=2.22).a SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_double partition (b=3.333) values (6), (7), (8)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_double@b=3.333
+POSTHOOK: query: insert into table tbl_parquet_double partition (b=3.333) values (6), (7), (8)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_double@b=3.333
+POSTHOOK: Lineage: tbl_parquet_double PARTITION(b=3.333).a SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_double partition (b=4.4444) values (9)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_double@b=4.4444
+POSTHOOK: query: insert into table tbl_parquet_double partition (b=4.4444) values (9)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_double@b=4.4444
+POSTHOOK: Lineage: tbl_parquet_double PARTITION(b=4.4444).a SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_double values (10, '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_double
+POSTHOOK: query: insert into table tbl_parquet_double values (10, '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_double
+POSTHOOK: Output: default@tbl_parquet_double@b=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_parquet_double PARTITION(b=__HIVE_DEFAULT_PARTITION__).a SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_double values (11, null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_double
+POSTHOOK: query: insert into table tbl_parquet_double values (11, null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_double
+POSTHOOK: Output: default@tbl_parquet_double@b=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_parquet_double PARTITION(b=__HIVE_DEFAULT_PARTITION__).a SCRIPT []
+PREHOOK: query: insert into table tbl_parquet_double values (12, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_double
+POSTHOOK: query: insert into table tbl_parquet_double values (12, NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_double
+POSTHOOK: Output: default@tbl_parquet_double@b=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: tbl_parquet_double PARTITION(b=__HIVE_DEFAULT_PARTITION__).a SCRIPT []
+PREHOOK: query: select count(*) from tbl_parquet_double
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_double
+PREHOOK: Input: default@tbl_parquet_double@b=1.1
+PREHOOK: Input: default@tbl_parquet_double@b=2.22
+PREHOOK: Input: default@tbl_parquet_double@b=3.333
+PREHOOK: Input: default@tbl_parquet_double@b=4.4444
+PREHOOK: Input: default@tbl_parquet_double@b=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_parquet_double
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_double
+POSTHOOK: Input: default@tbl_parquet_double@b=1.1
+POSTHOOK: Input: default@tbl_parquet_double@b=2.22
+POSTHOOK: Input: default@tbl_parquet_double@b=3.333
+POSTHOOK: Input: default@tbl_parquet_double@b=4.4444
+POSTHOOK: Input: default@tbl_parquet_double@b=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+12
+PREHOOK: query: select * from tbl_parquet_double order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_double
+PREHOOK: Input: default@tbl_parquet_double@b=1.1
+PREHOOK: Input: default@tbl_parquet_double@b=2.22
+PREHOOK: Input: default@tbl_parquet_double@b=3.333
+PREHOOK: Input: default@tbl_parquet_double@b=4.4444
+PREHOOK: Input: default@tbl_parquet_double@b=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from tbl_parquet_double order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_double
+POSTHOOK: Input: default@tbl_parquet_double@b=1.1
+POSTHOOK: Input: default@tbl_parquet_double@b=2.22
+POSTHOOK: Input: default@tbl_parquet_double@b=3.333
+POSTHOOK: Input: default@tbl_parquet_double@b=4.4444
+POSTHOOK: Input: default@tbl_parquet_double@b=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 1.1
+2 1.1
+3 1.1
+4 2.22
+5 2.22
+6 3.333
+7 3.333
+8 3.333
+9 4.4444
+10 NULL
+11 NULL
+12 NULL
+PREHOOK: query: explain alter table tbl_parquet_double convert to iceberg
+PREHOOK: type: ALTERTABLE_CONVERT
+PREHOOK: Input: default@tbl_parquet_double
+POSTHOOK: query: explain alter table tbl_parquet_double convert to iceberg
+POSTHOOK: type: ALTERTABLE_CONVERT
+POSTHOOK: Input: default@tbl_parquet_double
+Stage-0
+  Convert operation{"table name:":"default.tbl_parquet_double","spec:":"AlterTableConvertSpec{ConvertTo=iceberg, TBLProperties={}}"}
+
+PREHOOK: query: alter table tbl_parquet_double convert to iceberg
+PREHOOK: type: ALTERTABLE_CONVERT
+PREHOOK: Input: default@tbl_parquet_double
+POSTHOOK: query: alter table tbl_parquet_double convert to iceberg
+POSTHOOK: type: ALTERTABLE_CONVERT
+POSTHOOK: Input: default@tbl_parquet_double
+POSTHOOK: Output: default@tbl_parquet_double
+PREHOOK: query: describe formatted tbl_parquet_double
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tbl_parquet_double
+POSTHOOK: query: describe formatted tbl_parquet_double
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tbl_parquet_double
+# col_name data_type comment
+a int
+b double
+
+# Partition Transform Information
+# col_name transform_type
+b IDENTITY
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: EXTERNAL_TABLE
+Table Parameters:
+ EXTERNAL TRUE
+ MIGRATED_TO_ICEBERG true
+ bucketing_version 2
+	current-schema      	{\"type\":\"struct\",\"schema-id\":0,\"fields\":[{\"id\":1,\"name\":\"a\",\"required\":false,\"type\":\"int\"},{\"id\":2,\"name\":\"b\",\"required\":false,\"type\":\"double\"}]}
+ current-snapshot-id #Masked#
+	current-snapshot-summary	{\"added-data-files\":\"7\",\"added-records\":\"12\",\"added-files-size\":\"#Masked#\",\"changed-partition-count\":\"5\",\"total-records\":\"12\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"7\",\"total-delete-files\":\"0\",\"total-position-deletes\":\"0\",\"total-equality-deletes\":\"0\",\"iceberg-version\":\"#Masked#\"}
+ current-snapshot-timestamp-ms #Masked#
+	default-partition-spec	{\"spec-id\":0,\"fields\":[{\"name\":\"b\",\"transform\":\"identity\",\"source-id\":2,\"field-id\":1000}]}
+ format-version 2
+ iceberg.orc.files.only false
+#### A masked pattern was here ####
+ metadata_location hdfs://### HDFS PATH ###
+ numFiles 7
+ numRows 12
+ parquet.compression zstd
+ previous_metadata_location hdfs://### HDFS PATH ###
+ schema.name-mapping.default [ {
+ \"field-id\" : 1,
+ \"names\" : [ \"a\" ]
+ }, {
+ \"field-id\" : 2,
+ \"names\" : [ \"b\" ]
+ } ]
+ snapshot-count 1
+ storage_handler
org.apache.iceberg.mr.hive.HiveIcebergStorageHandler
+ table_type ICEBERG
+ totalSize #Masked#
+#### A masked pattern was here ####
+ uuid #Masked#
+ write.delete.mode merge-on-read
+ write.format.default parquet
+ write.merge.mode merge-on-read
+ write.update.mode merge-on-read
+
+# Storage Information
+SerDe Library: org.apache.iceberg.mr.hive.HiveIcebergSerDe
+InputFormat: org.apache.iceberg.mr.hive.HiveIcebergInputFormat
+OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat
+Compressed: No
+Sort Columns: []
+PREHOOK: query: select count(*) from tbl_parquet_double
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_double
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_parquet_double
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_double
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+12
+PREHOOK: query: select * from tbl_parquet_double order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_double
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from tbl_parquet_double order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_double
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 1.1
+2 1.1
+3 1.1
+4 2.22
+5 2.22
+6 3.333
+7 3.333
+8 3.333
+9 4.4444
+10 NULL
+11 NULL
+12 NULL
+PREHOOK: query: insert into table tbl_parquet_double partition (b=5.55555) values (13)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_double@b=5.55555
+POSTHOOK: query: insert into table tbl_parquet_double partition (b=5.55555) values (13)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_double@b=5.55555
+PREHOOK: query: insert into table tbl_parquet_double values (14, '')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_double
+POSTHOOK: query: insert into table tbl_parquet_double values (14, '')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_double
+PREHOOK: query: insert into table tbl_parquet_double values (15, null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_double
+POSTHOOK: query: insert into table tbl_parquet_double values (15, null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_double
+PREHOOK: query: insert into table tbl_parquet_double values (16, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@tbl_parquet_double
+POSTHOOK: query: insert into table tbl_parquet_double values (16, NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@tbl_parquet_double
+PREHOOK: query: select count(*) from tbl_parquet_double
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_double
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from tbl_parquet_double
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_double
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+16
+PREHOOK: query: select * from tbl_parquet_double order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_parquet_double
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from tbl_parquet_double order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_parquet_double
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 1.1
+2 1.1
+3 1.1
+4 2.22
+5 2.22
+6 3.333
+7 3.333
+8 3.333
+9 4.4444
+10 NULL
+11 NULL
+12 NULL
+13 5.55555
+14 NULL
+15 NULL
+16 NULL
+PREHOOK: query: drop table tbl_parquet_double
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@tbl_parquet_double
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl_parquet_double
+POSTHOOK: query: drop table tbl_parquet_double
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@tbl_parquet_double
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl_parquet_double
PREHOOK: query: drop table if exists tbl_avro
PREHOOK: type: DROPTABLE
PREHOOK: Output: database:default