[hive] branch master updated: HIVE-27139: Log the details when hiveserver2.sh doing sanity check with the process id (#4116) (Zhihua Deng, reviewed by Sai Hemanth Gantasala)

2023-04-11 Thread dengzh
This is an automated email from the ASF dual-hosted git repository.

dengzh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 91cab2c635b HIVE-27139: Log the details when hiveserver2.sh doing sanity check with the process id (#4116) (Zhihua Deng, reviewed by Sai Hemanth Gantasala)
91cab2c635b is described below

commit 91cab2c635b853c499d98720a7443c549835162a
Author: dengzh 
AuthorDate: Wed Apr 12 10:16:59 2023 +0800

HIVE-27139: Log the details when hiveserver2.sh doing sanity check with the process id (#4116) (Zhihua Deng, reviewed by Sai Hemanth Gantasala)
---
 bin/ext/hiveserver2.sh |  3 ++-
 .../src/java/org/apache/hive/service/server/HiveServer2.java   | 10 ++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/bin/ext/hiveserver2.sh b/bin/ext/hiveserver2.sh
index ea95565c4eb..53b429cf9bd 100644
--- a/bin/ext/hiveserver2.sh
+++ b/bin/ext/hiveserver2.sh
@@ -28,6 +28,7 @@ before_start() {
   if [ -f $HIVESERVER2_PID ]; then
 if kill -0 $(cat $HIVESERVER2_PID) >/dev/null 2>&1; then
  echo "HiveServer2 running as process $(cat $HIVESERVER2_PID).  Stop it first."
+  ps $(cat $HIVESERVER2_PID)
   exit 1
 fi
   fi
@@ -54,7 +55,7 @@ hiveserver2() {
 commands=$(exec $HADOOP jar $JAR $CLASS -H | grep -v '-hiveconf' | awk '{print $1}')
 start_hiveserver2='Y'
 for i in "$@"; do
-  if [ $(echo "${commands[@]}" | grep -we "$i") != "" ]; then
+  if [ "$(echo "${commands[@]}" | grep -we $i)" != "" ]; then
 start_hiveserver2='N'
 break
   fi
diff --git a/service/src/java/org/apache/hive/service/server/HiveServer2.java b/service/src/java/org/apache/hive/service/server/HiveServer2.java
index 1409594d12d..5dde31e4020 100644
--- a/service/src/java/org/apache/hive/service/server/HiveServer2.java
+++ b/service/src/java/org/apache/hive/service/server/HiveServer2.java
@@ -19,6 +19,8 @@
 package org.apache.hive.service.server;
 
 import com.google.common.base.Joiner;
+
+import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -40,6 +42,7 @@ import org.apache.commons.cli.Option;
 import org.apache.commons.cli.OptionBuilder;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.concurrent.BasicThreadFactory;
 import org.apache.curator.framework.CuratorFramework;
@@ -923,6 +926,13 @@ public class HiveServer2 extends CompositeService {
 LOG.error("Error removing znode for this HiveServer2 instance from ZooKeeper.", e);
   }
 }
+String pidDir = StringUtils.defaultIfEmpty(System.getenv("HIVESERVER2_PID_DIR"),
+System.getenv("HIVE_CONF_DIR"));
+if (StringUtils.isNotEmpty(pidDir)) {
+  File pidFile = new File(pidDir, "hiveserver2.pid");
+  LOG.info("Deleting the tmp HiveServer2 pid file: {}", pidFile);
+  FileUtils.deleteQuietly(pidFile);
+}
 super.decommission();
   }
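
For reference, the cleanup added to decommission() above can be read in isolation. Below is a minimal standalone sketch of the same steps (assuming commons-lang3 and commons-io on the classpath, as in the patch's imports, and using a plain main() with System.out in place of the real service and its logger):

import java.io.File;

import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;

// Standalone sketch of the pid-file cleanup: resolve the pid directory from
// HIVESERVER2_PID_DIR, falling back to HIVE_CONF_DIR, then quietly delete
// hiveserver2.pid if a directory was resolved (deleteQuietly never throws).
public class PidFileCleanupSketch {
  public static void main(String[] args) {
    String pidDir = StringUtils.defaultIfEmpty(System.getenv("HIVESERVER2_PID_DIR"),
        System.getenv("HIVE_CONF_DIR"));
    if (StringUtils.isNotEmpty(pidDir)) {
      File pidFile = new File(pidDir, "hiveserver2.pid");
      System.out.println("Deleting the tmp HiveServer2 pid file: " + pidFile);
      FileUtils.deleteQuietly(pidFile);
    }
  }
}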
 



[hive] branch master updated: HIVE-27208: Iceberg: Add support for rename table. (#4185). (Ayush Saxena, reviewed by Denys Kuzmenko, Butao Zhang)

2023-04-11 Thread ayushsaxena
This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 02603b17e79 HIVE-27208: Iceberg: Add support for rename table. (#4185). (Ayush Saxena, reviewed by Denys Kuzmenko, Butao Zhang)
02603b17e79 is described below

commit 02603b17e7963879f4ed0af7352d986ad3694458
Author: Ayush Saxena 
AuthorDate: Wed Apr 12 02:32:03 2023 +0530

HIVE-27208: Iceberg: Add support for rename table. (#4185). (Ayush Saxena, reviewed by Denys Kuzmenko, Butao Zhang)
---
 .../main/java/org/apache/iceberg/mr/Catalogs.java  |  13 +
 .../iceberg/mr/hive/HiveIcebergMetaHook.java   |  10 +-
 .../src/test/queries/positive/iceberg_rename.q |  65 +
 .../src/test/results/positive/iceberg_rename.q.out | 265 +
 .../ql/ddl/table/AbstractAlterTableOperation.java  |   6 +-
 .../misc/rename/AlterTableRenameOperation.java |   6 +
 .../org/apache/hadoop/hive/ql/metadata/Hive.java   |  17 +-
 .../org/apache/hadoop/hive/ql/metadata/Table.java  |  13 +-
 .../apache/hadoop/hive/metastore/HiveMetaHook.java |   8 +
 .../hadoop/hive/metastore/HiveAlterHandler.java|   7 +-
 10 files changed, 400 insertions(+), 10 deletions(-)

diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/Catalogs.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/Catalogs.java
index c58e7d07fc5..ab75665aed0 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/Catalogs.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/Catalogs.java
@@ -222,6 +222,19 @@ public final class Catalogs {
 return new HadoopTables(conf).create(schema, spec, map, location);
   }
 
+  public static void renameTable(Configuration conf, Properties props, TableIdentifier to) {
+String catalogName = props.getProperty(InputFormatConfig.CATALOG_NAME);
+
+Optional<Catalog> catalog = loadCatalog(conf, catalogName);
+if (catalog.isPresent()) {
+  String name = props.getProperty(NAME);
+  Preconditions.checkNotNull(name, "Table identifier not set");
+  catalog.get().renameTable(TableIdentifier.parse(name), to);
+} else {
+  throw new RuntimeException("Rename from " + props.getProperty(NAME) + " to " + to + " failed");
+}
+  }
+
  static Optional<Catalog> loadCatalog(Configuration conf, String catalogName) {
 String catalogType = getCatalogType(conf, catalogName);
 if (NO_CATALOG_TYPE.equalsIgnoreCase(catalogType)) {
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
index 174022e5ea2..229e0490f5d 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
@@ -120,7 +120,7 @@ public class HiveIcebergMetaHook implements HiveMetaHook {
  static final EnumSet<AlterTableType> SUPPORTED_ALTER_OPS = EnumSet.of(
  AlterTableType.ADDCOLS, AlterTableType.REPLACE_COLUMNS, AlterTableType.RENAME_COLUMN,
  AlterTableType.ADDPROPS, AlterTableType.DROPPROPS, AlterTableType.SETPARTITIONSPEC,
-  AlterTableType.UPDATE_COLUMNS, AlterTableType.SETPARTITIONSPEC, AlterTableType.EXECUTE);
+  AlterTableType.UPDATE_COLUMNS, AlterTableType.RENAME, AlterTableType.EXECUTE);
  private static final List<String> MIGRATION_ALLOWED_SOURCE_FORMATS = ImmutableList.of(
   FileFormat.PARQUET.name().toLowerCase(),
   FileFormat.ORC.name().toLowerCase(),
@@ -319,6 +319,10 @@ public class HiveIcebergMetaHook implements HiveMetaHook {
   throws MetaException {
 catalogProperties = getCatalogProperties(hmsTable);
 setupAlterOperationType(hmsTable, context);
+if (AlterTableType.RENAME.equals(currentAlterTableOp)) {
+  catalogProperties.put(Catalogs.NAME, TableIdentifier.of(context.getProperties().get(OLD_DB_NAME),
+  context.getProperties().get(OLD_TABLE_NAME)).toString());
+}
 if (commitLock == null) {
  commitLock = new HiveCommitLock(conf, new CachedClientPool(conf, Maps.fromProperties(catalogProperties)),
  catalogProperties.getProperty(Catalogs.NAME), hmsTable.getDbName(), hmsTable.getTableName());
@@ -549,6 +553,10 @@ public class HiveIcebergMetaHook implements HiveMetaHook {
 case SETPARTITIONSPEC:
   IcebergTableUtil.updateSpec(conf, icebergTable);
   break;
+case RENAME:
+  Catalogs.renameTable(conf, catalogProperties, TableIdentifier.of(hmsTable.getDbName(),
+  hmsTable.getTableName()));
+  break;
   }
 }
   }
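
Taken together, the hook stashes the old identifier under Catalogs.NAME and hands the new identifier to Catalogs.renameTable. A minimal sketch of calling the new helper directly, outside the metahook, might look like the following (the database/table names and the "my_catalog" catalog name are placeholders; it assumes the iceberg-handler and its dependencies are on the classpath and that the property keys are accessible as used in the patch; renameTable throws if no catalog can be resolved):

import java.util.Properties;

import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.mr.Catalogs;
import org.apache.iceberg.mr.InputFormatConfig;

// Sketch: drive the new rename path directly. The source table is passed via the
// "name" property (Catalogs.NAME), mirroring what HiveIcebergMetaHook puts into
// catalogProperties in the alter-table flow above.
public class IcebergRenameSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    Properties props = new Properties();
    props.setProperty(Catalogs.NAME, TableIdentifier.of("db", "old_tbl").toString());
    props.setProperty(InputFormatConfig.CATALOG_NAME, "my_catalog"); // placeholder catalog name
    // Loads the named catalog and delegates to Catalog.renameTable(old, new);
    // throws a RuntimeException if the catalog cannot be resolved.
    Catalogs.renameTable(conf, props, TableIdentifier.of("db", "new_tbl"));
  }
}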
diff --git a/iceberg/iceberg-handler/src/test/queries/positive/iceberg_rename.q b/iceberg/iceberg-handler/src/test/queries/positive/iceberg_rename.q
new file mode 100644
index 000

[hive] branch master updated (a20828716e7 -> e397acd81ce)

2023-04-11 Thread abstractdog
This is an automated email from the ASF dual-hosted git repository.

abstractdog pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


from a20828716e7 HIVE-27223: Show Compactions failing with NPE. (#4204). (Ayush Saxena, reviewed by Laszlo Vegh)
 add e397acd81ce HIVE-26809 Upgrade ORC to 1.8.3 (#4121) (Zoltan Ratkai reviewed by Laszlo Bodor)

No new revisions were added by this update.

Summary of changes:
 beeline/pom.xml|5 +
 ...ery_iceberg_metadata_of_partitioned_table.q.out |4 +-
 .../hive/ql/txn/compactor/TestCompactor.java   |   12 +-
 .../ql/txn/compactor/TestCrudCompactorOnTez.java   |   48 +-
 .../llap/io/decode/OrcEncodedDataConsumer.java |   21 +-
 pom.xml|2 +-
 .../io/orc/encoded/EncodedTreeReaderFactory.java   | 1215 +---
 .../apache/hadoop/hive/ql/TestTxnNoBuckets.java|4 +-
 .../hive/ql/io/orc/TestInputOutputFormat.java  |6 +-
 .../apache/hadoop/hive/ql/io/orc/TestOrcFile.java  |2 +-
 .../beeline/materialized_view_create_rewrite.q.out |4 +-
 .../llap/acid_bloom_filter_orc_file_dump.q.out |8 +-
 .../clientpositive/llap/acid_no_buckets.q.out  |4 +-
 .../clientpositive/llap/acid_table_stats.q.out |   12 +-
 .../clientpositive/llap/alter_merge_orc.q.out  |   24 +-
 .../llap/alter_merge_stats_orc.q.out   |   22 +-
 .../clientpositive/llap/autoColumnStats_4.q.out|4 +-
 .../clientpositive/llap/autoColumnStats_6.q.out|2 +-
 .../columnStatsUpdateForStatsOptimizer_2.q.out |6 +-
 .../llap/column_table_stats_orc.q.out  |   20 +-
 ql/src/test/results/clientpositive/llap/ctas.q.out |2 +-
 .../clientpositive/llap/default_constraint.q.out   |   14 +-
 .../clientpositive/llap/deleteAnalyze.q.out|2 +-
 .../llap/dynpart_sort_opt_vectorization.q.out  |   16 +-
 .../llap/dynpart_sort_optimization2.q.out  |8 +-
 .../explainanalyze_acid_with_direct_insert.q.out   |   48 +-
 .../llap/insert_only_to_acid_convert.q.out |   12 +-
 .../insert_values_orig_table_use_metadata.q.out|8 +-
 .../results/clientpositive/llap/masking_mv.q.out   |4 +-
 .../clientpositive/llap/masking_mv_by_text.q.out   |2 +-
 .../llap/materialized_view_create.q.out|8 +-
 .../llap/materialized_view_create_acid.q.out   |2 +-
 .../llap/materialized_view_create_rewrite.q.out|4 +-
 .../llap/materialized_view_create_rewrite_10.q.out |8 +-
 .../llap/materialized_view_create_rewrite_4.q.out  |   12 +-
 .../llap/materialized_view_create_rewrite_5.q.out  |2 +-
 ...ized_view_create_rewrite_by_text_multi_db.q.out |4 +-
 .../materialized_view_create_rewrite_dummy.q.out   |4 +-
 ...materialized_view_create_rewrite_multi_db.q.out |4 +-
 ...erialized_view_create_rewrite_time_window.q.out |6 +-
 ...ialized_view_create_rewrite_time_window_2.q.out |6 +-
 .../llap/materialized_view_describe.q.out  |6 +-
 .../llap/materialized_view_drop.q.out  |6 +-
 .../llap/materialized_view_partition_cluster.q.out |6 +-
 .../llap/materialized_view_partitioned.q.out   |2 +-
 .../results/clientpositive/llap/orc_analyze.q.out  |   34 +-
 .../clientpositive/llap/orc_file_dump.q.out|  324 +++---
 .../clientpositive/llap/orc_llap_counters.q.out|   84 +-
 .../clientpositive/llap/orc_llap_counters1.q.out   |   10 +-
 .../clientpositive/llap/orc_llap_nonvector.q.out   |2 +-
 .../results/clientpositive/llap/orc_merge1.q.out   |   12 +-
 .../results/clientpositive/llap/orc_merge10.q.out  |   16 +-
 .../results/clientpositive/llap/orc_merge11.q.out  |   12 +-
 .../results/clientpositive/llap/orc_merge2.q.out   |2 +-
 .../results/clientpositive/llap/orc_merge3.q.out   |2 +-
 .../results/clientpositive/llap/orc_merge4.q.out   |6 +-
 .../clientpositive/llap/orc_ppd_basic.q.out|  100 +-
 .../llap/orc_ppd_schema_evol_3a.q.out  |   76 +-
 .../clientpositive/llap/schema_evol_stats.q.out|6 +-
 .../clientpositive/llap/sqlmerge_stats.q.out   |6 +-
 .../results/clientpositive/llap/stats_cbo_1.q.out  |4 +-
 .../clientpositive/llap/stats_histogram.q.out  |2 +-
 .../clientpositive/llap/stats_nonpart.q.out|2 +-
 .../results/clientpositive/llap/stats_part.q.out   |8 +-
 .../results/clientpositive/llap/stats_part2.q.out  |   28 +-
 .../llap/stats_part_multi_insert_acid.q.out|2 +-
 .../clientpositive/llap/stats_sizebug.q.out|4 +-
 .../llap/vectorization_short_regress.q.out |4 +-
 ql/src/test/results/clientpositive/row__id.q.out   |   18 +-
 .../tez/acid_vectorization_original_tez.q.out  |   24 +-
 .../results/clientpositive/tez/orc_merge12.q.out   |4 +-
 standalone-metastore/pom.xml   |2 +-
 72 files changed, 1119 insertions(+), 1296 deletions(-)

[hive] branch master updated (2da612d33d0 -> a20828716e7)

2023-04-11 Thread ayushsaxena
This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


from 2da612d33d0 HIVE-27228: Add missing upgrade SQL statements after CQ_NUMBER_OF_BUCKETS column being introduced in HIVE-26719 (Sourabh Badhya, reviewed by Stephen Carlin, Laszlo Vegh)
 add a20828716e7 HIVE-27223: Show Compactions failing with NPE. (#4204). (Ayush Saxena, reviewed by Laszlo Vegh)

No new revisions were added by this update.

Summary of changes:
 .../hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)



[hive] branch branch-3 updated (b089ba2f0cf -> d851e9cee06)

2023-04-11 Thread sankarh
This is an automated email from the ASF dual-hosted git repository.

sankarh pushed a change to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hive.git


from b089ba2f0cf HIVE-27200: Backport HIVE-24928 to branch-3 (#4175)
 add d851e9cee06 HIVE-27209: Backport HIVE-24569 - LLAP daemon leaks file descriptors/log4j appenders (Stamatis Zampetakis, reviewed by Jesus Camacho Rodriguez)

No new revisions were added by this update.

Summary of changes:
 .../hive/llap/daemon/impl/LlapConstants.java   |   7 -
 .../hadoop/hive/llap/daemon/impl/QueryTracker.java |   7 +-
 .../llap/log/LlapRandomAccessFileAppender.java | 183 +
 .../llap/log/LlapRoutingAppenderPurgePolicy.java   | 128 
 .../hadoop/hive/llap/log/LlapWrappedAppender.java  | 222 -
 .../hive/llap/log/Log4jQueryCompleteMarker.java|   2 +-
 .../main/resources/llap-daemon-log4j2.properties   |  20 +-
 7 files changed, 195 insertions(+), 374 deletions(-)
 create mode 100644 llap-server/src/java/org/apache/hadoop/hive/llap/log/LlapRandomAccessFileAppender.java
 delete mode 100644 llap-server/src/java/org/apache/hadoop/hive/llap/log/LlapRoutingAppenderPurgePolicy.java
 delete mode 100644 llap-server/src/java/org/apache/hadoop/hive/llap/log/LlapWrappedAppender.java



[hive] branch master updated: HIVE-27228: Add missing upgrade SQL statements after CQ_NUMBER_OF_BUCKETS column being introduced in HIVE-26719 (Sourabh Badhya, reviewed by Stephen Carlin, Laszlo Vegh)

2023-04-11 Thread veghlaci05
This is an automated email from the ASF dual-hosted git repository.

veghlaci05 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 2da612d33d0 HIVE-27228: Add missing upgrade SQL statements after CQ_NUMBER_OF_BUCKETS column being introduced in HIVE-26719 (Sourabh Badhya, reviewed by Stephen Carlin, Laszlo Vegh)
2da612d33d0 is described below

commit 2da612d33d00b419ef6f5162538a001384e3df07
Author: Sourabh Badhya <42867455+sourabhbad...@users.noreply.github.com>
AuthorDate: Tue Apr 11 12:43:34 2023 +0530

HIVE-27228: Add missing upgrade SQL statements after CQ_NUMBER_OF_BUCKETS column being introduced in HIVE-26719 (Sourabh Badhya, reviewed by Stephen Carlin, Laszlo Vegh)
---
 .../upgrade/hive/hive-schema-4.0.0.hive.sql|   2 +
 .../hive/upgrade-4.0.0-alpha-2-to-4.0.0.hive.sql   | 277 +
 .../test/results/clientpositive/llap/sysdb.q.out   |   5 +-
 3 files changed, 282 insertions(+), 2 deletions(-)

diff --git a/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql b/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
index 65e4e5eaf2a..689bb763389 100644
--- a/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
+++ b/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
@@ -1958,6 +1958,7 @@ CREATE OR REPLACE VIEW `COMPACTIONS`
   `C_INITIATOR_VERSION`,
   `C_CLEANER_START`,
   `C_POOL_NAME`,
+  `C_NUMBER_OF_BUCKETS`,
   `C_TBLPROPERTIES`
 ) AS
 SELECT DISTINCT
@@ -1986,6 +1987,7 @@ SELECT DISTINCT
   C_INITIATOR_VERSION,
   C_CLEANER_START,
   C_POOL_NAME,
+  C_NUMBER_OF_BUCKETS,
   C_TBLPROPERTIES
 FROM
   `sys`.`COMPACTIONS` C JOIN `sys`.`TBLS` T ON (C.`C_TABLE` = T.`TBL_NAME`)
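
Since the hunk above selects C_NUMBER_OF_BUCKETS from the sys-side COMPACTIONS view, the new column can be read over the regular HiveServer2 JDBC path. A minimal sketch (the JDBC URL and credentials are placeholders, it assumes the hive-jdbc driver is on the classpath, and it only selects columns visible in the hunk above):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Sketch: read the newly exposed C_NUMBER_OF_BUCKETS column alongside two
// neighbouring columns from the SYS.COMPACTIONS view over JDBC.
public class CompactionsViewProbe {
  public static void main(String[] args) throws Exception {
    String url = "jdbc:hive2://localhost:10000/default"; // placeholder HiveServer2 endpoint
    try (Connection conn = DriverManager.getConnection(url, "hive", "");
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery(
             "SELECT C_POOL_NAME, C_NUMBER_OF_BUCKETS, C_TBLPROPERTIES FROM SYS.COMPACTIONS")) {
      while (rs.next()) {
        System.out.printf("pool=%s buckets=%s props=%s%n",
            rs.getString(1), rs.getString(2), rs.getString(3));
      }
    }
  }
}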
diff --git a/metastore/scripts/upgrade/hive/upgrade-4.0.0-alpha-2-to-4.0.0.hive.sql b/metastore/scripts/upgrade/hive/upgrade-4.0.0-alpha-2-to-4.0.0.hive.sql
index 3a69db9e3ae..bf2e3a1b3df 100644
--- a/metastore/scripts/upgrade/hive/upgrade-4.0.0-alpha-2-to-4.0.0.hive.sql
+++ b/metastore/scripts/upgrade/hive/upgrade-4.0.0-alpha-2-to-4.0.0.hive.sql
@@ -2,5 +2,282 @@ SELECT 'Upgrading MetaStore schema from 4.0.0-alpha-2 to 4.0.0';
 
 USE SYS;
 
+-- HIVE-27228
+DROP TABLE IF EXISTS `COMPACTION_QUEUE`;
+CREATE EXTERNAL TABLE IF NOT EXISTS `COMPACTION_QUEUE` (
+  `CQ_ID` bigint,
+  `CQ_DATABASE` string,
+  `CQ_TABLE` string,
+  `CQ_PARTITION` string,
+  `CQ_STATE` string,
+  `CQ_TYPE` string,
+  `CQ_TBLPROPERTIES` string,
+  `CQ_WORKER_ID` string,
+  `CQ_ENQUEUE_TIME` bigint,
+  `CQ_START` bigint,
+  `CQ_RUN_AS` string,
+  `CQ_HIGHEST_WRITE_ID` bigint,
+  `CQ_HADOOP_JOB_ID` string,
+  `CQ_ERROR_MESSAGE` string,
+  `CQ_NEXT_TXN_ID` bigint,
+  `CQ_TXN_ID` bigint,
+  `CQ_COMMIT_TIME` bigint,
+  `CQ_INITIATOR_ID` string,
+  `CQ_INITIATOR_VERSION` string,
+  `CQ_WORKER_VERSION` string,
+  `CQ_CLEANER_START` bigint,
+  `CQ_POOL_NAME` string,
+  `CQ_NUMBER_OF_BUCKETS` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+  \"COMPACTION_QUEUE\".\"CQ_ID\",
+  \"COMPACTION_QUEUE\".\"CQ_DATABASE\",
+  \"COMPACTION_QUEUE\".\"CQ_TABLE\",
+  \"COMPACTION_QUEUE\".\"CQ_PARTITION\",
+  \"COMPACTION_QUEUE\".\"CQ_STATE\",
+  \"COMPACTION_QUEUE\".\"CQ_TYPE\",
+  \"COMPACTION_QUEUE\".\"CQ_TBLPROPERTIES\",
+  \"COMPACTION_QUEUE\".\"CQ_WORKER_ID\",
+  \"COMPACTION_QUEUE\".\"CQ_ENQUEUE_TIME\",
+  \"COMPACTION_QUEUE\".\"CQ_START\",
+  \"COMPACTION_QUEUE\".\"CQ_RUN_AS\",
+  \"COMPACTION_QUEUE\".\"CQ_HIGHEST_WRITE_ID\",
+  \"COMPACTION_QUEUE\".\"CQ_HADOOP_JOB_ID\",
+  \"COMPACTION_QUEUE\".\"CQ_ERROR_MESSAGE\",
+  \"COMPACTION_QUEUE\".\"CQ_NEXT_TXN_ID\",
+  \"COMPACTION_QUEUE\".\"CQ_TXN_ID\",
+  \"COMPACTION_QUEUE\".\"CQ_COMMIT_TIME\",
+  \"COMPACTION_QUEUE\".\"CQ_INITIATOR_ID\",
+  \"COMPACTION_QUEUE\".\"CQ_INITIATOR_VERSION\",
+  \"COMPACTION_QUEUE\".\"CQ_WORKER_VERSION\",
+  \"COMPACTION_QUEUE\".\"CQ_CLEANER_START\",
+  \"COMPACTION_QUEUE\".\"CQ_POOL_NAME\",
+  \"COMPACTION_QUEUE\".\"CQ_NUMBER_OF_BUCKETS\"
+FROM \"COMPACTION_QUEUE\"
+"
+);
+
+DROP TABLE IF EXISTS `COMPLETED_COMPACTIONS`;
+CREATE EXTERNAL TABLE IF NOT EXISTS `COMPLETED_COMPACTIONS` (
+  `CC_ID` bigint,
+  `CC_DATABASE` string,
+  `CC_TABLE` string,
+  `CC_PARTITION` string,
+  `CC_STATE` string,
+  `CC_TYPE` string,
+  `CC_TBLPROPERTIES` string,
+  `CC_WORKER_ID` string,
+  `CC_ENQUEUE_TIME` bigint,
+  `CC_START` bigint,
+  `CC_END` bigint,
+  `CC_RUN_AS` string,
+  `CC_HIGHEST_WRITE_ID` bigint,
+  `CC_HADOOP_JOB_ID` string,
+  `CC_ERROR_MESSAGE` string,
+  `CC_NEXT_TXN_ID` bigint,
+  `CC_TXN_ID` bigint,
+  `CC_COMMIT_TIME` bigint,
+  `CC_INITIATOR_ID` string,
+  `CC_INITIATOR_VERSION` string,
+  `CC_WORKER_VERSION` string,
+  `CC_POOL_NAME` string,
+  `CC_NUMBER_OF_BUCKETS` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.qu