This is an automated email from the ASF dual-hosted git repository.

dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 10ece7c7538 HIVE-27725: Remove redundant columns in TAB_COL_STATS and PART_COL_STATS tables (Wechar Yu, reviewed by Butao Zhang, Denys Kuzmenko, Zhihua Deng)
10ece7c7538 is described below

commit 10ece7c75386256e49cac3d41230ca24540430a5
Author: Wechar Yu <yuwq1...@gmail.com>
AuthorDate: Tue Apr 9 22:49:11 2024 +0800

    HIVE-27725: Remove redundant columns in TAB_COL_STATS and PART_COL_STATS tables (Wechar Yu, reviewed by Butao Zhang, Denys Kuzmenko, Zhihua Deng)
    
    Closes #4744
---
 .../upgrade/hive/hive-schema-4.1.0.hive.sql        |  10 --
 .../ql/ddl/table/info/desc/DescTableOperation.java |   5 +-
 ql/src/test/queries/clientpositive/sysdb.q         |   4 +-
 .../llap/constraints_explain_ddl.q.out             | 152 ++++++++++-----------
 .../test/results/clientpositive/llap/sysdb.q.out   |  23 ++--
 .../hadoop/hive/metastore/DirectSqlUpdatePart.java |  47 +++----
 .../apache/hadoop/hive/metastore/HMSHandler.java   |  52 ++-----
 .../hadoop/hive/metastore/MetaStoreDirectSql.java  | 119 ++++++++++------
 .../apache/hadoop/hive/metastore/ObjectStore.java  |  36 ++---
 .../hadoop/hive/metastore/StatObjectConverter.java |  34 ++---
 .../model/MPartitionColumnStatistics.java          |  36 -----
 .../metastore/model/MTableColumnStatistics.java    |  27 ----
 .../schematool/SchemaToolTaskMoveDatabase.java     |   2 -
 .../tools/schematool/SchemaToolTaskMoveTable.java  |   2 -
 .../jdbc/queries/FindColumnsWithStatsHandler.java  |  11 +-
 .../src/main/resources/package.jdo                 |  21 ---
 .../src/main/sql/derby/hive-schema-4.1.0.derby.sql |  11 +-
 .../sql/derby/upgrade-4.0.0-to-4.1.0.derby.sql     |  14 ++
 .../src/main/sql/mssql/hive-schema-4.1.0.mssql.sql |  17 +--
 .../sql/mssql/upgrade-4.0.0-to-4.1.0.mssql.sql     |  11 ++
 .../src/main/sql/mysql/hive-schema-4.1.0.mysql.sql |  11 +-
 .../sql/mysql/upgrade-4.0.0-to-4.1.0.mysql.sql     |   9 ++
 .../main/sql/oracle/hive-schema-4.1.0.oracle.sql   |  15 +-
 .../sql/oracle/upgrade-4.0.0-to-4.1.0.oracle.sql   |  11 ++
 .../sql/postgres/hive-schema-4.1.0.postgres.sql    |  23 +---
 .../postgres/upgrade-4.0.0-to-4.1.0.postgres.sql   |  11 ++
 .../hadoop/hive/metastore/TestObjectStore.java     |  48 ++++++-
 .../hadoop/hive/metastore/tools/BenchmarkTool.java |  10 ++
 .../hadoop/hive/metastore/tools/HMSBenchmarks.java |  48 +++++++
 .../hadoop/hive/metastore/tools/HMSClient.java     |  11 ++
 .../apache/hadoop/hive/metastore/tools/Util.java   |  31 +++++
 31 files changed, 446 insertions(+), 416 deletions(-)
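
In short, TAB_COL_STATS and PART_COL_STATS now carry only the TBL_ID / PART_ID foreign keys; the denormalized DB_NAME, TABLE_NAME and PARTITION_NAME columns are dropped, so queries that relied on them must join back to TBLS, PARTITIONS and DBS instead. A minimal sketch of the new query shape (same pattern as the updated sysdb.q below; sys-schema table and column names assumed):

    -- illustrative query against the sys database
    SELECT t.tbl_name, s.column_name, s.num_nulls, s.num_distincts
    FROM tab_col_stats s
    JOIN tbls t ON s.tbl_id = t.tbl_id
    JOIN dbs d ON t.db_id = d.db_id
    ORDER BY t.tbl_name, s.column_name
    LIMIT 10;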

diff --git a/metastore/scripts/upgrade/hive/hive-schema-4.1.0.hive.sql b/metastore/scripts/upgrade/hive/hive-schema-4.1.0.hive.sql
index 7a1cef3f97a..bd478dee30d 100644
--- a/metastore/scripts/upgrade/hive/hive-schema-4.1.0.hive.sql
+++ b/metastore/scripts/upgrade/hive/hive-schema-4.1.0.hive.sql
@@ -719,8 +719,6 @@ FROM
 
 CREATE EXTERNAL TABLE IF NOT EXISTS `TAB_COL_STATS` (
  `CS_ID` bigint,
- `DB_NAME` string,
- `TABLE_NAME` string,
  `COLUMN_NAME` string,
  `COLUMN_TYPE` string,
  `TBL_ID` bigint,
@@ -746,8 +744,6 @@ TBLPROPERTIES (
 "hive.sql.query" =
 "SELECT
  \"CS_ID\",
- \"DB_NAME\",
- \"TABLE_NAME\",
  \"COLUMN_NAME\",
  \"COLUMN_TYPE\",
  \"TBL_ID\",
@@ -771,9 +767,6 @@ FROM
 
 CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_STATS` (
  `CS_ID` bigint,
- `DB_NAME` string,
- `TABLE_NAME` string,
- `PARTITION_NAME` string,
  `COLUMN_NAME` string,
  `COLUMN_TYPE` string,
  `PART_ID` bigint,
@@ -799,9 +792,6 @@ TBLPROPERTIES (
 "hive.sql.query" =
 "SELECT
  \"CS_ID\",
- \"DB_NAME\",
- \"TABLE_NAME\",
- \"PARTITION_NAME\",
  \"COLUMN_NAME\",
  \"COLUMN_TYPE\",
  \"PART_ID\",
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java
index 940f80526d2..f6ec72bc609 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableOperation.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.common.TableName;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HMSHandler;
 import org.apache.hadoop.hive.metastore.StatObjectConverter;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
@@ -217,9 +216,7 @@ public class DescTableOperation extends DDLOperation<DescTableDesc> {
       }
     } else {
       List<String> partitions = new ArrayList<String>();
-      // The partition name is converted to lowercase before generating the stats. So we should use the same
-      // lower case name to get the stats.
-      String partName = HMSHandler.lowerCaseConvertPartName(part.getName());
+      String partName = part.getName();
       partitions.add(partName);
       cols.addAll(Hive.getFieldsFromDeserializer(desc.getColumnPath(), deserializer, context.getConf()));
       Map<String, List<ColumnStatisticsObj>> partitionColumnStatistics = context.getDb().getPartitionColumnStatistics(
diff --git a/ql/src/test/queries/clientpositive/sysdb.q b/ql/src/test/queries/clientpositive/sysdb.q
index 4ecd1881850..9370c44ddbf 100644
--- a/ql/src/test/queries/clientpositive/sysdb.q
+++ b/ql/src/test/queries/clientpositive/sysdb.q
@@ -102,9 +102,9 @@ select column_name, grantor, principal_name from tbl_col_privs order by column_n
 
 select grantor, principal_name from tbl_privs order by grantor, principal_name limit 5;
 
-select table_name, column_name, num_nulls, num_distincts from tab_col_stats order by table_name, column_name limit 10;
+select tbls.tbl_name, column_name, num_nulls, num_distincts from tab_col_stats inner join tbls on tab_col_stats.tbl_id=tbls.tbl_id join dbs on tbls.db_id=dbs.db_id order by tbls.tbl_name, column_name limit 10;
 
-select table_name, partition_name, column_name, num_nulls, num_distincts from part_col_stats order by table_name, partition_name, column_name limit 10;
+select tbls.tbl_name, partitions.part_name, column_name, num_nulls, num_distincts from part_col_stats inner join partitions on part_col_stats.part_id=partitions.part_id inner join tbls on partitions.tbl_id=tbls.tbl_id order by tbls.tbl_name, partitions.part_name, column_name limit 10;
 
 select schema_version from version order by schema_version limit 5;
 
diff --git a/ql/src/test/results/clientpositive/llap/constraints_explain_ddl.q.out b/ql/src/test/results/clientpositive/llap/constraints_explain_ddl.q.out
index d2daeff2d7f..6c01f90cdcb 100644
--- a/ql/src/test/results/clientpositive/llap/constraints_explain_ddl.q.out
+++ b/ql/src/test/results/clientpositive/llap/constraints_explain_ddl.q.out
@@ -901,58 +901,58 @@ ALTER TABLE `default`.`customer_removal_n0` ADD CONSTRAINT `#### A masked patter
 ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS 
SET('numRows'='1','rawDataSize'='22' );
 ALTER TABLE `default`.`dates_removal_n0` ADD CONSTRAINT `#### A masked pattern 
was here ####` PRIMARY KEY (`d_datekey`,`d_id`) DISABLE NOVALIDATE RELY;
 ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS 
SET('numRows'='2','rawDataSize'='102' );
-ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_address` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_address` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR 
IS SExMoAEA 
-ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_city` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN `c_city` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_custkey` SET('lowValue'='3','highValue'='3','numNulls'='0','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_custkey` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR 
IS SExMoAEBgq/rqgE= 
-ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_mktsegment` 
SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_mktsegment` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_name` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN `c_name` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_address` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_address` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR 
IS SExMoAEA 
+ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_city` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN `c_city` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_nation` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_nation` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR 
IS SExMoAEA 
-ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_phone` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_phone` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAEA 
 ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_region` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_region` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR 
IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN `d_date` 
SET('avgColLen'='0.0','maxColLen'='0','numNulls'='2','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN `d_date` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_phone` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_phone` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAEA 
+ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_mktsegment` 
SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_mktsegment` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_datekey` SET('lowValue'='3','highValue'='3','numNulls'='0','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN `d_datekey` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAEBgq/rqgE= 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_daynuminmonth` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_daynuminmonth` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_daynuminweek` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' 
);
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_daynuminweek` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_daynuminyear` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' 
);
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_daynuminyear` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_dayofweek` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='2','numDVs'='1' 
);
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_dayofweek` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_holidayfl` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_holidayfl` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN `d_id` 
SET('lowValue'='0','highValue'='1','numNulls'='0','numDVs'='2' );
 -- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN `d_id` BUT 
THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAICwfO+SIDgz8///////wE= 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_lastdayinmonthfl` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_lastdayinmonthfl` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_lastdayinweekfl` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_lastdayinweekfl` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN `d_date` 
SET('avgColLen'='0.0','maxColLen'='0','numNulls'='2','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN `d_date` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_dayofweek` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='2','numDVs'='1' 
);
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_dayofweek` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_month` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='2','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN `d_month` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN `d_year` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN `d_year` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_yearmonthnum` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' 
);
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_yearmonthnum` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_yearmonth` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='2','numDVs'='1' 
);
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_yearmonth` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_daynuminweek` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' 
);
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_daynuminweek` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_daynuminmonth` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_daynuminmonth` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_daynuminyear` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' 
);
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_daynuminyear` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_monthnuminyear` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_monthnuminyear` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_weeknuminyear` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_weeknuminyear` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_sellingseason` 
SET('avgColLen'='0.0','maxColLen'='0','numNulls'='2','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_sellingseason` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_lastdayinweekfl` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_lastdayinweekfl` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_lastdayinmonthfl` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_lastdayinmonthfl` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_holidayfl` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_holidayfl` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_weekdayfl` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_weekdayfl` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_weeknuminyear` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_weeknuminyear` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN `d_year` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN `d_year` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_yearmonth` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='2','numDVs'='1' 
);
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_yearmonth` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_yearmonthnum` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' 
);
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_yearmonthnum` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
 
 
 
@@ -1109,42 +1109,42 @@ TBLPROPERTIES (
 #### A masked pattern was here ####
 ALTER TABLE `default`.`dates_removal_n0` ADD CONSTRAINT `#### A masked pattern 
was here ####` PRIMARY KEY (`d_datekey`,`d_id`) DISABLE NOVALIDATE RELY;
 ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS 
SET('numRows'='2','rawDataSize'='102' );
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN `d_date` 
SET('avgColLen'='0.0','maxColLen'='0','numNulls'='2','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN `d_date` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_datekey` SET('lowValue'='3','highValue'='3','numNulls'='0','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN `d_datekey` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAEBgq/rqgE= 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_daynuminmonth` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_daynuminmonth` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_daynuminweek` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' 
);
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_daynuminweek` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_daynuminyear` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' 
);
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_daynuminyear` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_dayofweek` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='2','numDVs'='1' 
);
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_dayofweek` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_holidayfl` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_holidayfl` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN `d_id` 
SET('lowValue'='0','highValue'='1','numNulls'='0','numDVs'='2' );
 -- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN `d_id` BUT 
THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAICwfO+SIDgz8///////wE= 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_lastdayinmonthfl` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_lastdayinmonthfl` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_lastdayinweekfl` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_lastdayinweekfl` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN `d_date` 
SET('avgColLen'='0.0','maxColLen'='0','numNulls'='2','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN `d_date` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_dayofweek` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='2','numDVs'='1' 
);
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_dayofweek` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_month` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='2','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN `d_month` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN `d_year` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN `d_year` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_yearmonthnum` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' 
);
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_yearmonthnum` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_yearmonth` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='2','numDVs'='1' 
);
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_yearmonth` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_daynuminweek` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' 
);
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_daynuminweek` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_daynuminmonth` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_daynuminmonth` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_daynuminyear` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' 
);
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_daynuminyear` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_monthnuminyear` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_monthnuminyear` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_weeknuminyear` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_weeknuminyear` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_sellingseason` 
SET('avgColLen'='0.0','maxColLen'='0','numNulls'='2','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_sellingseason` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_lastdayinweekfl` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_lastdayinweekfl` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_lastdayinmonthfl` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_lastdayinmonthfl` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_holidayfl` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_holidayfl` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_weekdayfl` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_weekdayfl` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_weeknuminyear` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_weeknuminyear` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN `d_year` 
SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN `d_year` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_yearmonth` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='2','numDVs'='1' 
);
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_yearmonth` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
-ALTER TABLE `default`.`dates_removal_n0` UPDATE STATISTICS FOR COLUMN 
`d_yearmonthnum` SET('lowValue'='0','highValue'='0','numNulls'='2','numDVs'='1' 
);
--- BIT VECTORS PRESENT FOR `default`.`dates_removal_n0` FOR COLUMN 
`d_yearmonthnum` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
 
 
 
@@ -1312,22 +1312,22 @@ TBLPROPERTIES (
 #### A masked pattern was here ####
 ALTER TABLE `default`.`customer_removal_n0` ADD CONSTRAINT `#### A masked 
pattern was here ####` PRIMARY KEY (`c_custkey`) DISABLE NOVALIDATE RELY;
 ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS 
SET('numRows'='1','rawDataSize'='22' );
-ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_address` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_address` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR 
IS SExMoAEA 
-ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_city` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN `c_city` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_custkey` SET('lowValue'='3','highValue'='3','numNulls'='0','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_custkey` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR 
IS SExMoAEBgq/rqgE= 
-ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_mktsegment` 
SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_mktsegment` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_name` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN `c_name` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAEA 
+ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_address` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_address` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR 
IS SExMoAEA 
+ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_city` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN `c_city` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAEA 
 ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_nation` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_nation` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR 
IS SExMoAEA 
-ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_phone` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_phone` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAEA 
 ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_region` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_region` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR 
IS SExMoAEA 
+ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_phone` SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_phone` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAEA 
+ALTER TABLE `default`.`customer_removal_n0` UPDATE STATISTICS FOR COLUMN 
`c_mktsegment` 
SET('avgColLen'='0.0','maxColLen'='0','numNulls'='1','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`customer_removal_n0` FOR COLUMN 
`c_mktsegment` BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE 
BITVECTOR IS SExMoAEA 
 
 
 
@@ -2515,12 +2515,12 @@ TBLPROPERTIES (
 #### A masked pattern was here ####
 ALTER TABLE `default`.`tconst` UPDATE STATISTICS 
SET('numRows'='3','rawDataSize'='25' );
 ALTER TABLE `default`.`tconst` CHANGE COLUMN `i` `i` int CONSTRAINT `#### A 
masked pattern was here ####` NOT NULL DISABLE NOVALIDATE RELY;
-ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `d_year` 
SET('avgColLen'='4.0','maxColLen'='4','numNulls'='0','numDVs'='3' );
--- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `d_year` BUT THEY ARE 
NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAMDwrjb8gb/vrr2+f////8BgaCT+///////AQ== 
 ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `i` 
SET('lowValue'='1','highValue'='3','numNulls'='0','numDVs'='3' );
 -- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `i` BUT THEY ARE NOT 
SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAMDwfO+SMG7rGLC0vSOAw== 
 ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `j` 
SET('lowValue'='1','highValue'='3','numNulls'='1','numDVs'='2' );
 -- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `j` BUT THEY ARE NOT 
SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAICwfO+SMG7rGI= 
+ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `d_year` 
SET('avgColLen'='4.0','maxColLen'='4','numNulls'='0','numDVs'='3' );
+-- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `d_year` BUT THEY ARE 
NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAMDwrjb8gb/vrr2+f////8BgaCT+///////AQ== 
 
 
 
@@ -2644,12 +2644,12 @@ TBLPROPERTIES (
 #### A masked pattern was here ####
 ALTER TABLE `default`.`tconst` UPDATE STATISTICS 
SET('numRows'='3','rawDataSize'='25' );
 ALTER TABLE `default`.`tconst` CHANGE COLUMN `i` `i` int CONSTRAINT `#### A 
masked pattern was here ####` NOT NULL DISABLE NOVALIDATE RELY;
-ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `d_year` 
SET('avgColLen'='4.0','maxColLen'='4','numNulls'='0','numDVs'='3' );
--- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `d_year` BUT THEY ARE 
NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAMDwrjb8gb/vrr2+f////8BgaCT+///////AQ== 
 ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `i` 
SET('lowValue'='1','highValue'='3','numNulls'='0','numDVs'='3' );
 -- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `i` BUT THEY ARE NOT 
SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAMDwfO+SMG7rGLC0vSOAw== 
 ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `j` 
SET('lowValue'='1','highValue'='3','numNulls'='1','numDVs'='2' );
 -- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `j` BUT THEY ARE NOT 
SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAICwfO+SMG7rGI= 
+ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `d_year` 
SET('avgColLen'='4.0','maxColLen'='4','numNulls'='0','numDVs'='3' );
+-- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `d_year` BUT THEY ARE 
NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAMDwrjb8gb/vrr2+f////8BgaCT+///////AQ== 
 
 
 
@@ -2774,12 +2774,12 @@ TBLPROPERTIES (
 #### A masked pattern was here ####
 ALTER TABLE `default`.`tconst` UPDATE STATISTICS 
SET('numRows'='3','rawDataSize'='25' );
 ALTER TABLE `default`.`tconst` CHANGE COLUMN `i` `i` int CONSTRAINT `#### A 
masked pattern was here ####` NOT NULL DISABLE NOVALIDATE RELY;
-ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `d_year` 
SET('avgColLen'='4.0','maxColLen'='4','numNulls'='0','numDVs'='3' );
--- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `d_year` BUT THEY ARE 
NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAMDwrjb8gb/vrr2+f////8BgaCT+///////AQ== 
 ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `i` 
SET('lowValue'='1','highValue'='3','numNulls'='0','numDVs'='3' );
 -- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `i` BUT THEY ARE NOT 
SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAMDwfO+SMG7rGLC0vSOAw== 
 ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `j` 
SET('lowValue'='1','highValue'='3','numNulls'='1','numDVs'='2' );
 -- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `j` BUT THEY ARE NOT 
SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAICwfO+SMG7rGI= 
+ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `d_year` 
SET('avgColLen'='4.0','maxColLen'='4','numNulls'='0','numDVs'='3' );
+-- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `d_year` BUT THEY ARE 
NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAMDwrjb8gb/vrr2+f////8BgaCT+///////AQ== 
 
 
 
@@ -2903,12 +2903,12 @@ TBLPROPERTIES (
 #### A masked pattern was here ####
 ALTER TABLE `default`.`tconst` UPDATE STATISTICS 
SET('numRows'='3','rawDataSize'='25' );
 ALTER TABLE `default`.`tconst` CHANGE COLUMN `i` `i` int CONSTRAINT `#### A 
masked pattern was here ####` NOT NULL DISABLE NOVALIDATE RELY;
-ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `d_year` 
SET('avgColLen'='4.0','maxColLen'='4','numNulls'='0','numDVs'='3' );
--- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `d_year` BUT THEY ARE 
NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAMDwrjb8gb/vrr2+f////8BgaCT+///////AQ== 
 ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `i` 
SET('lowValue'='1','highValue'='3','numNulls'='0','numDVs'='3' );
 -- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `i` BUT THEY ARE NOT 
SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAMDwfO+SMG7rGLC0vSOAw== 
 ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `j` 
SET('lowValue'='1','highValue'='3','numNulls'='1','numDVs'='2' );
 -- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `j` BUT THEY ARE NOT 
SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAICwfO+SMG7rGI= 
+ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `d_year` 
SET('avgColLen'='4.0','maxColLen'='4','numNulls'='0','numDVs'='3' );
+-- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `d_year` BUT THEY ARE 
NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAMDwrjb8gb/vrr2+f////8BgaCT+///////AQ== 
 
 
 
@@ -3092,12 +3092,12 @@ TBLPROPERTIES (
 #### A masked pattern was here ####
 ALTER TABLE `default`.`tconst` UPDATE STATISTICS 
SET('numRows'='3','rawDataSize'='25' );
 ALTER TABLE `default`.`tconst` CHANGE COLUMN `i` `i` int CONSTRAINT `#### A 
masked pattern was here ####` NOT NULL DISABLE NOVALIDATE RELY;
-ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `d_year` 
SET('avgColLen'='4.0','maxColLen'='4','numNulls'='0','numDVs'='3' );
--- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `d_year` BUT THEY ARE 
NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAMDwrjb8gb/vrr2+f////8BgaCT+///////AQ== 
 ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `i` 
SET('lowValue'='1','highValue'='3','numNulls'='0','numDVs'='3' );
 -- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `i` BUT THEY ARE NOT 
SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAMDwfO+SMG7rGLC0vSOAw== 
 ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `j` 
SET('lowValue'='1','highValue'='3','numNulls'='1','numDVs'='2' );
 -- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `j` BUT THEY ARE NOT 
SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS SExMoAICwfO+SMG7rGI= 
+ALTER TABLE `default`.`tconst` UPDATE STATISTICS FOR COLUMN `d_year` 
SET('avgColLen'='4.0','maxColLen'='4','numNulls'='0','numDVs'='3' );
+-- BIT VECTORS PRESENT FOR `default`.`tconst` FOR COLUMN `d_year` BUT THEY ARE 
NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAMDwrjb8gb/vrr2+f////8BgaCT+///////AQ== 
 
 
 
@@ -7035,10 +7035,10 @@ TBLPROPERTIES (
 #### A masked pattern was here ####
 ALTER TABLE `default`.`web_sales` ADD CONSTRAINT `pk1` PRIMARY KEY 
(`ws_order_number`,`ws_item_sk`) DISABLE NOVALIDATE RELY;
 ALTER TABLE `default`.`web_sales` UPDATE STATISTICS 
SET('numRows'='2','rawDataSize'='14' );
-ALTER TABLE `default`.`web_sales` UPDATE STATISTICS FOR COLUMN `ws_item_sk` 
SET('lowValue'='1','highValue'='1','numNulls'='0','numDVs'='1' );
--- BIT VECTORS PRESENT FOR `default`.`web_sales` FOR COLUMN `ws_item_sk` BUT 
THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAEBwfO+SA== 
 ALTER TABLE `default`.`web_sales` UPDATE STATISTICS FOR COLUMN 
`ws_order_number` 
SET('lowValue'='1','highValue'='1','numNulls'='0','numDVs'='1' );
 -- BIT VECTORS PRESENT FOR `default`.`web_sales` FOR COLUMN `ws_order_number` 
BUT THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAEBwfO+SA== 
+ALTER TABLE `default`.`web_sales` UPDATE STATISTICS FOR COLUMN `ws_item_sk` 
SET('lowValue'='1','highValue'='1','numNulls'='0','numDVs'='1' );
+-- BIT VECTORS PRESENT FOR `default`.`web_sales` FOR COLUMN `ws_item_sk` BUT 
THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAEBwfO+SA== 
 ALTER TABLE `default`.`web_sales` UPDATE STATISTICS FOR COLUMN `ws_price` 
SET('numNulls'='0','numDVs'='1','highValue'='1.2000000476837158','lowValue'='1.2000000476837158'
 );
 -- BIT VECTORS PRESENT FOR `default`.`web_sales` FOR COLUMN `ws_price` BUT 
THEY ARE NOT SUPPORTED YET. THE BASE64 VALUE FOR THE BITVECTOR IS 
SExMoAEBwbaGtAM= 
 
diff --git a/ql/src/test/results/clientpositive/llap/sysdb.q.out b/ql/src/test/results/clientpositive/llap/sysdb.q.out
index 368544c35bf..7fe712061b4 100644
--- a/ql/src/test/results/clientpositive/llap/sysdb.q.out
+++ b/ql/src/test/results/clientpositive/llap/sysdb.q.out
@@ -735,7 +735,6 @@ part_col_stats      big_decimal_low_value
 part_col_stats column_name
 part_col_stats column_type
 part_col_stats cs_id
-part_col_stats db_name
 part_col_stats double_high_value
 part_col_stats double_low_value
 part_col_stats engine
@@ -748,8 +747,6 @@ part_col_stats      num_falses
 part_col_stats num_nulls
 part_col_stats num_trues
 part_col_stats part_id
-part_col_stats partition_name
-part_col_stats table_name
 part_privs     authorizer
 part_privs     create_time
 part_privs     grant_option
@@ -907,7 +904,6 @@ tab_col_stats       big_decimal_low_value
 tab_col_stats  column_name
 tab_col_stats  column_type
 tab_col_stats  cs_id
-tab_col_stats  db_name
 tab_col_stats  double_high_value
 tab_col_stats  double_low_value
 tab_col_stats  engine
@@ -919,7 +915,6 @@ tab_col_stats       num_distincts
 tab_col_stats  num_falses
 tab_col_stats  num_nulls
 tab_col_stats  num_trues
-tab_col_stats  table_name
 tab_col_stats  tbl_id
 table_params   param_key
 table_params   param_value
@@ -1446,13 +1441,17 @@ hive_test_user  hive_test_user
 hive_test_user hive_test_user
 hive_test_user hive_test_user
 hive_test_user hive_test_user
-PREHOOK: query: select table_name, column_name, num_nulls, num_distincts from tab_col_stats order by table_name, column_name limit 10
+PREHOOK: query: select tbls.tbl_name, column_name, num_nulls, num_distincts from tab_col_stats inner join tbls on tab_col_stats.tbl_id=tbls.tbl_id join dbs on tbls.db_id=dbs.db_id order by tbls.tbl_name, column_name limit 10
 PREHOOK: type: QUERY
+PREHOOK: Input: sys@dbs
 PREHOOK: Input: sys@tab_col_stats
+PREHOOK: Input: sys@tbls
 #### A masked pattern was here ####
-POSTHOOK: query: select table_name, column_name, num_nulls, num_distincts from tab_col_stats order by table_name, column_name limit 10
+POSTHOOK: query: select tbls.tbl_name, column_name, num_nulls, num_distincts from tab_col_stats inner join tbls on tab_col_stats.tbl_id=tbls.tbl_id join dbs on tbls.db_id=dbs.db_id order by tbls.tbl_name, column_name limit 10
 POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@dbs
 POSTHOOK: Input: sys@tab_col_stats
+POSTHOOK: Input: sys@tbls
 #### A masked pattern was here ####
 part   p_brand 0       16
 part   p_comment       0       25
@@ -1464,13 +1463,17 @@ part    p_retailprice   0       25
 part   p_size  0       21
 part   p_type  0       24
 src    key     0       316
-PREHOOK: query: select table_name, partition_name, column_name, num_nulls, num_distincts from part_col_stats order by table_name, partition_name, column_name limit 10
+PREHOOK: query: select tbls.tbl_name, partitions.part_name, column_name, num_nulls, num_distincts from part_col_stats inner join partitions on part_col_stats.part_id=partitions.part_id inner join tbls on partitions.tbl_id=tbls.tbl_id order by tbls.tbl_name, partitions.part_name, column_name limit 10
 PREHOOK: type: QUERY
 PREHOOK: Input: sys@part_col_stats
+PREHOOK: Input: sys@partitions
+PREHOOK: Input: sys@tbls
 #### A masked pattern was here ####
-POSTHOOK: query: select table_name, partition_name, column_name, num_nulls, num_distincts from part_col_stats order by table_name, partition_name, column_name limit 10
+POSTHOOK: query: select tbls.tbl_name, partitions.part_name, column_name, num_nulls, num_distincts from part_col_stats inner join partitions on part_col_stats.part_id=partitions.part_id inner join tbls on partitions.tbl_id=tbls.tbl_id order by tbls.tbl_name, partitions.part_name, column_name limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@part_col_stats
+POSTHOOK: Input: sys@partitions
+POSTHOOK: Input: sys@tbls
 #### A masked pattern was here ####
 PREHOOK: query: select schema_version from version order by schema_version limit 5
 PREHOOK: type: QUERY
@@ -1536,8 +1539,6 @@ POSTHOOK: query: describe sys.tab_col_stats
 POSTHOOK: type: DESCTABLE
 POSTHOOK: Input: sys@tab_col_stats
 cs_id                  bigint                  from deserializer   
-db_name                string                  from deserializer   
-table_name             string                  from deserializer   
 column_name            string                  from deserializer   
 column_type            string                  from deserializer   
 tbl_id                 bigint                  from deserializer   
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DirectSqlUpdatePart.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DirectSqlUpdatePart.java
index 441ce26ac6d..2e908ebc1f1 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DirectSqlUpdatePart.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DirectSqlUpdatePart.java
@@ -222,12 +222,11 @@ class DirectSqlUpdatePart {
                                           long maxCsId,
                                          Connection dbConn) throws SQLException, MetaException, NoSuchObjectException {
     int numRows = 0;
-    String insert = "INSERT INTO \"PART_COL_STATS\" (\"CS_ID\", \"CAT_NAME\", 
\"DB_NAME\","
-            + "\"TABLE_NAME\", \"PARTITION_NAME\", \"COLUMN_NAME\", 
\"COLUMN_TYPE\", \"PART_ID\","
+    String insert = "INSERT INTO \"PART_COL_STATS\" (\"CS_ID\", 
\"COLUMN_NAME\", \"COLUMN_TYPE\", \"PART_ID\","
             + " \"LONG_LOW_VALUE\", \"LONG_HIGH_VALUE\", 
\"DOUBLE_HIGH_VALUE\", \"DOUBLE_LOW_VALUE\","
             + " \"BIG_DECIMAL_LOW_VALUE\", \"BIG_DECIMAL_HIGH_VALUE\", 
\"NUM_NULLS\", \"NUM_DISTINCTS\", \"BIT_VECTOR\" ,"
             + " \"HISTOGRAM\", \"AVG_COL_LEN\", \"MAX_COL_LEN\", 
\"NUM_TRUES\", \"NUM_FALSES\", \"LAST_ANALYZED\", \"ENGINE\") values "
-            + "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 
?, ?, ?)";
+            + "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
 
     try (PreparedStatement preparedStatement = dbConn.prepareStatement(insert)) {
       for (Map.Entry entry : insertMap.entrySet()) {
@@ -236,29 +235,25 @@ class DirectSqlUpdatePart {
         MPartitionColumnStatistics mPartitionColumnStatistics = (MPartitionColumnStatistics) entry.getValue();
 
         preparedStatement.setLong(1, maxCsId);
-        preparedStatement.setString(2, mPartitionColumnStatistics.getCatName());
-        preparedStatement.setString(3, mPartitionColumnStatistics.getDbName());
-        preparedStatement.setString(4, mPartitionColumnStatistics.getTableName());
-        preparedStatement.setString(5, mPartitionColumnStatistics.getPartitionName());
-        preparedStatement.setString(6, mPartitionColumnStatistics.getColName());
-        preparedStatement.setString(7, mPartitionColumnStatistics.getColType());
-        preparedStatement.setLong(8, partId);
-        preparedStatement.setObject(9, mPartitionColumnStatistics.getLongLowValue());
-        preparedStatement.setObject(10, mPartitionColumnStatistics.getLongHighValue());
-        preparedStatement.setObject(11, mPartitionColumnStatistics.getDoubleHighValue());
-        preparedStatement.setObject(12, mPartitionColumnStatistics.getDoubleLowValue());
-        preparedStatement.setString(13, mPartitionColumnStatistics.getDecimalLowValue());
-        preparedStatement.setString(14, mPartitionColumnStatistics.getDecimalHighValue());
-        preparedStatement.setObject(15, mPartitionColumnStatistics.getNumNulls());
-        preparedStatement.setObject(16, mPartitionColumnStatistics.getNumDVs());
-        preparedStatement.setObject(17, mPartitionColumnStatistics.getBitVector());
-        preparedStatement.setBytes(18, mPartitionColumnStatistics.getHistogram());
-        preparedStatement.setObject(19, mPartitionColumnStatistics.getAvgColLen());
-        preparedStatement.setObject(20, mPartitionColumnStatistics.getMaxColLen());
-        preparedStatement.setObject(21, mPartitionColumnStatistics.getNumTrues());
-        preparedStatement.setObject(22, mPartitionColumnStatistics.getNumFalses());
-        preparedStatement.setLong(23, mPartitionColumnStatistics.getLastAnalyzed());
-        preparedStatement.setString(24, mPartitionColumnStatistics.getEngine());
+        preparedStatement.setString(2, mPartitionColumnStatistics.getColName());
+        preparedStatement.setString(3, mPartitionColumnStatistics.getColType());
+        preparedStatement.setLong(4, partId);
+        preparedStatement.setObject(5, mPartitionColumnStatistics.getLongLowValue());
+        preparedStatement.setObject(6, mPartitionColumnStatistics.getLongHighValue());
+        preparedStatement.setObject(7, mPartitionColumnStatistics.getDoubleHighValue());
+        preparedStatement.setObject(8, mPartitionColumnStatistics.getDoubleLowValue());
+        preparedStatement.setString(9, mPartitionColumnStatistics.getDecimalLowValue());
+        preparedStatement.setString(10, mPartitionColumnStatistics.getDecimalHighValue());
+        preparedStatement.setObject(11, mPartitionColumnStatistics.getNumNulls());
+        preparedStatement.setObject(12, mPartitionColumnStatistics.getNumDVs());
+        preparedStatement.setObject(13, mPartitionColumnStatistics.getBitVector());
+        preparedStatement.setBytes(14, mPartitionColumnStatistics.getHistogram());
+        preparedStatement.setObject(15, mPartitionColumnStatistics.getAvgColLen());
+        preparedStatement.setObject(16, mPartitionColumnStatistics.getMaxColLen());
+        preparedStatement.setObject(17, mPartitionColumnStatistics.getNumTrues());
+        preparedStatement.setObject(18, mPartitionColumnStatistics.getNumFalses());
+        preparedStatement.setLong(19, mPartitionColumnStatistics.getLastAnalyzed());
+        preparedStatement.setString(20, mPartitionColumnStatistics.getEngine());
 
         maxCsId++;
         numRows++;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
index f143ed6ee45..a4c476e48fc 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
@@ -6750,28 +6750,6 @@ public class HMSHandler extends FacebookBase implements IHMSHandler {
     return Warehouse.makeSpecFromName(part_name);
   }
 
-  public static String lowerCaseConvertPartName(String partName) throws MetaException {
-    if (partName == null) {
-      return partName;
-    }
-    boolean isFirst = true;
-    Map<String, String> partSpec = Warehouse.makeEscSpecFromName(partName);
-    String convertedPartName = new String();
-
-    for (Map.Entry<String, String> entry : partSpec.entrySet()) {
-      String partColName = entry.getKey();
-      String partColVal = entry.getValue();
-
-      if (!isFirst) {
-        convertedPartName += "/";
-      } else {
-        isFirst = false;
-      }
-      convertedPartName += partColName.toLowerCase() + "=" + partColVal;
-    }
-    return convertedPartName;
-  }
-
   @Override
   @Deprecated
   public ColumnStatistics get_table_column_statistics(String dbName, String tableName,
@@ -6839,16 +6817,15 @@ public class HMSHandler extends FacebookBase implements IHMSHandler {
     String[] parsedDbName = parseDbName(dbName, conf);
     tableName = tableName.toLowerCase();
     colName = colName.toLowerCase();
-    String convertedPartName = lowerCaseConvertPartName(partName);
     startFunction("get_column_statistics_by_partition", ": table=" +
         TableName.getQualified(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
-            tableName) + " partition=" + convertedPartName + " column=" + colName);
+            tableName) + " partition=" + partName + " column=" + colName);
     ColumnStatistics statsObj = null;
 
     try {
       List<ColumnStatistics> list = getMS().getPartitionColumnStatistics(
           parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName,
-          Lists.newArrayList(convertedPartName), Lists.newArrayList(colName),
+          Lists.newArrayList(partName), Lists.newArrayList(colName),
           "hive");
       if (list.isEmpty()) {
         return null;
@@ -6876,13 +6853,9 @@ public class HMSHandler extends FacebookBase implements 
IHMSHandler {
     for (String colName : request.getColNames()) {
       lowerCaseColNames.add(colName.toLowerCase());
     }
-    List<String> lowerCasePartNames = new 
ArrayList<>(request.getPartNames().size());
-    for (String partName : request.getPartNames()) {
-      lowerCasePartNames.add(lowerCaseConvertPartName(partName));
-    }
     try {
       List<ColumnStatistics> stats = getMS().getPartitionColumnStatistics(
-          catName, dbName, tblName, lowerCasePartNames, lowerCaseColNames,
+          catName, dbName, tblName, request.getPartNames(), lowerCaseColNames,
           request.getEngine(), request.isSetValidWriteIdList() ? 
request.getValidWriteIdList() : null);
       Map<String, List<ColumnStatisticsObj>> map = new HashMap<>();
       if (stats != null) {
@@ -6977,7 +6950,7 @@ public class HMSHandler extends FacebookBase implements 
IHMSHandler {
     statsDesc.setCatName(statsDesc.isSetCatName() ? 
statsDesc.getCatName().toLowerCase() : getDefaultCatalog(conf));
     statsDesc.setDbName(statsDesc.getDbName().toLowerCase());
     statsDesc.setTableName(statsDesc.getTableName().toLowerCase());
-    statsDesc.setPartName(lowerCaseConvertPartName(statsDesc.getPartName()));
+    statsDesc.setPartName(statsDesc.getPartName());
     long time = System.currentTimeMillis() / 1000;
     statsDesc.setLastAnalyzed(time);
 
@@ -7136,15 +7109,14 @@ public class HMSHandler extends FacebookBase implements 
IHMSHandler {
     if (colName != null) {
       colName = colName.toLowerCase();
     }
-    String convertedPartName = lowerCaseConvertPartName(partName);
     startFunction("delete_column_statistics_by_partition",": table=" +
         TableName.getQualified(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], 
tableName) +
-        " partition=" + convertedPartName + " column=" + colName);
+        " partition=" + partName + " column=" + colName);
     boolean ret = false, committed = false;
 
     getMS().openTransaction();
     try {
-      List<String> partVals = getPartValsFromName(getMS(), 
parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, convertedPartName);
+      List<String> partVals = getPartValsFromName(getMS(), 
parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, partName);
       Table table = getMS().getTable(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tableName);
       // This API looks unused; if it were used we'd need to update stats 
state and write ID.
       // We cannot just randomly nuke some txn stats.
@@ -7153,19 +7125,19 @@ public class HMSHandler extends FacebookBase implements 
IHMSHandler {
       }
 
       ret = getMS().deletePartitionColumnStatistics(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tableName,
-          convertedPartName, partVals, colName, engine);
+          partName, partVals, colName, engine);
       if (ret) {
         if (transactionalListeners != null && 
!transactionalListeners.isEmpty()) {
           MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
               EventType.DELETE_PARTITION_COLUMN_STAT,
               new DeletePartitionColumnStatEvent(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tableName,
-                  convertedPartName, partVals, colName, engine, this));
+                  partName, partVals, colName, engine, this));
         }
         if (!listeners.isEmpty()) {
           MetaStoreListenerNotifier.notifyEvent(listeners,
               EventType.DELETE_PARTITION_COLUMN_STAT,
               new DeletePartitionColumnStatEvent(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tableName,
-                  convertedPartName, partVals, colName, engine, this));
+                  partName, partVals, colName, engine, this));
         }
       }
       committed = getMS().commitTransaction();
@@ -9141,15 +9113,11 @@ public class HMSHandler extends FacebookBase implements 
IHMSHandler {
     for (String colName : request.getColNames()) {
       lowerCaseColNames.add(colName.toLowerCase());
     }
-    List<String> lowerCasePartNames = new 
ArrayList<>(request.getPartNames().size());
-    for (String partName : request.getPartNames()) {
-      lowerCasePartNames.add(lowerCaseConvertPartName(partName));
-    }
     AggrStats aggrStats = null;
 
     try {
       aggrStats = getMS().get_aggr_stats_for(catName, dbName, tblName,
-          lowerCasePartNames, lowerCaseColNames, request.getEngine(), 
request.getValidWriteIdList());
+          request.getPartNames(), lowerCaseColNames, request.getEngine(), 
request.getValidWriteIdList());
       return aggrStats;
     } finally {
       endFunction("get_aggr_stats_for", aggrStats == null, null, 
request.getTblName());
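
Note on the HMSHandler hunks above: lowerCaseConvertPartName is removed and partition names are handed to the RawStore unchanged; only column, db and table names are still lower-cased, presumably because partition names must now match PARTITIONS.PART_NAME exactly as stored. For a caller that previously relied on the server-side normalization, a standalone approximation of the removed helper could look like the sketch below. The class and method names are purely illustrative, and it skips the unescaping that Warehouse.makeEscSpecFromName performed, so it is a sketch of the old behavior rather than a drop-in replacement.

// Illustrative, standalone approximation of the removed helper: lower-cases the
// key of each "key=value" component of a partition name.  Unlike the old
// HMSHandler code it does not unescape values via Warehouse.makeEscSpecFromName.
public final class PartNameNormalizer {
  private PartNameNormalizer() {}

  public static String lowerCaseKeys(String partName) {
    if (partName == null) {
      return null;
    }
    StringBuilder out = new StringBuilder();
    for (String component : partName.split("/")) {
      if (out.length() > 0) {
        out.append('/');
      }
      int eq = component.indexOf('=');
      if (eq < 0) {
        out.append(component);                       // no key=value structure
      } else {
        out.append(component.substring(0, eq).toLowerCase())
           .append(component.substring(eq));         // keep "=value" as-is
      }
    }
    return out.toString();
  }
}
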
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index 4a1f2ca8db2..25174e0f0a3 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -300,8 +300,8 @@ class MetaStoreDirectSql {
     try {
       // Force the underlying db to initialize.
       initQueries.add(pm.newQuery(MDatabase.class, "name == ''"));
-      initQueries.add(pm.newQuery(MTableColumnStatistics.class, "dbName == 
''"));
-      initQueries.add(pm.newQuery(MPartitionColumnStatistics.class, "dbName == 
''"));
+      initQueries.add(pm.newQuery(MTableColumnStatistics.class, "colName == 
''"));
+      initQueries.add(pm.newQuery(MPartitionColumnStatistics.class, "colName 
== ''"));
       initQueries.add(pm.newQuery(MConstraint.class, "childIntegerIndex < 0"));
       initQueries.add(pm.newQuery(MNotificationLog.class, "dbName == ''"));
       initQueries.add(pm.newQuery(MNotificationNextId.class, "nextEventId < 
-1"));
@@ -1601,7 +1601,9 @@ class MetaStoreDirectSql {
     }
     final boolean doTrace = LOG.isDebugEnabled();
     final String queryText0 = "select " + getStatsList(enableBitVector, 
enableKll) + " from " + TAB_COL_STATS
-          + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = 
? "
+          + " inner join " + TBLS + " on " + TAB_COL_STATS + ".\"TBL_ID\" = " 
+ TBLS + ".\"TBL_ID\" "
+          + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + 
".\"DB_ID\" "
+          + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + ".\"NAME\" = ? 
and " + TBLS + ".\"TBL_NAME\" = ?"
           + " and \"ENGINE\" = ? and \"COLUMN_NAME\" in (";
     Batchable<String, Object[]> b = new Batchable<String, Object[]>() {
       @Override
@@ -1800,11 +1802,15 @@ class MetaStoreDirectSql {
       final List<String> partNames, List<String> colNames, String engine) 
throws MetaException {
     assert !colNames.isEmpty() && !partNames.isEmpty();
     final boolean doTrace = LOG.isDebugEnabled();
-    final String queryText0  = "select count(\"COLUMN_NAME\") from " + 
PART_COL_STATS + ""
-        + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "
-        + " and \"COLUMN_NAME\" in (%1$s) and \"PARTITION_NAME\" in (%2$s)"
-        + " and \"ENGINE\" = ? "
-        + " group by \"PARTITION_NAME\"";
+    final String queryText0 = "select count(\"COLUMN_NAME\") from " + 
PART_COL_STATS + ""
+        + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
+        + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + 
TBLS + ".\"TBL_ID\""
+        + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + 
".\"DB_ID\""
+        + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + ".\"NAME\" = ?"
+        + " and " + TBLS + ".\"TBL_NAME\" = ?"
+        + " and " + PART_COL_STATS + ".\"COLUMN_NAME\" in (%1$s) and " + 
PARTITIONS + ".\"PART_NAME\" in (%2$s)"
+        + " and " + PART_COL_STATS + ".\"ENGINE\" = ?"
+        + " group by " + PART_COL_STATS + ".\"PART_ID\"";
     List<Long> allCounts = Batchable.runBatched(batchSize, colNames, new 
Batchable<String, Long>() {
       @Override
       public List<Long> run(final List<String> inputColName) throws 
MetaException {
@@ -1862,8 +1868,13 @@ class MetaStoreDirectSql {
 
   public List<ColStatsObjWithSourceInfo> 
getColStatsForAllTablePartitions(String catName, String dbName,
       boolean enableBitVector, boolean enableKll) throws MetaException {
-    String queryText = "select \"TABLE_NAME\", \"PARTITION_NAME\", " + 
getStatsList(enableBitVector, enableKll)
-        + " from " + " " + PART_COL_STATS + " where \"DB_NAME\" = ? and 
\"CAT_NAME\" = ?";
+    String queryText = "select \"TBLS\".\"TBL_NAME\", 
\"PARTITIONS\".\"PART_NAME\", "
+        + getStatsList(enableBitVector, enableKll)
+        + " from " + PART_COL_STATS
+        + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
+        + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + 
TBLS + ".\"TBL_ID\""
+        + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + 
".\"DB_ID\""
+        + " where " + DBS + ".\"NAME\" = ? and " + DBS + ".\"CTLG_NAME\" = ?";
     long start = 0;
     long end = 0;
     boolean doTrace = LOG.isDebugEnabled();
@@ -1943,7 +1954,10 @@ class MetaStoreDirectSql {
         + 
"avg((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
         + "avg((cast(\"BIG_DECIMAL_HIGH_VALUE\" as 
decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\"),"
         + "sum(\"NUM_DISTINCTS\")" + " from " + PART_COL_STATS + ""
-        + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? 
";
+        + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
+        + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + 
TBLS + ".\"TBL_ID\""
+        + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + 
".\"DB_ID\""
+        + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + ".\"NAME\" = ? 
and " + TBLS + ".\"TBL_NAME\" = ? ";
     String queryText = null;
     long start = 0;
     long end = 0;
@@ -1982,13 +1996,16 @@ class MetaStoreDirectSql {
       // In this case, at least a column status for a partition is missing.
       // We need to extrapolate this partition based on the other partitions
       List<ColumnStatisticsObj> colStats = new 
ArrayList<ColumnStatisticsObj>(colNames.size());
-      queryText = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", 
count(\"PARTITION_NAME\") "
+      queryText = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", 
count(\"PART_COL_STATS\".\"PART_ID\") "
           + " from " + PART_COL_STATS
-          + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = 
? "
-          + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")"
-          + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
-          + " and \"ENGINE\" = ? "
-          + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
+          + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
+          + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + 
TBLS + ".\"TBL_ID\""
+          + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + 
".\"DB_ID\""
+          + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + ".\"NAME\" = ? 
and " + TBLS + ".\"TBL_NAME\" = ? "
+          + " and " + PART_COL_STATS + ".\"COLUMN_NAME\" in (" + 
makeParams(colNames.size()) + ")"
+          + " and " + PARTITIONS + ".\"PART_NAME\" in (" + 
makeParams(partNames.size()) + ")"
+          + " and " + PART_COL_STATS + ".\"ENGINE\" = ? "
+          + " group by " + PART_COL_STATS + ".\"COLUMN_NAME\", " + 
PART_COL_STATS + ".\"COLUMN_TYPE\"";
       start = doTrace ? System.nanoTime() : 0;
       List<String> noExtraColumnNames = new ArrayList<String>();
       Map<String, String[]> extraColumnNameTypeParts = new HashMap<String, 
String[]>();
@@ -2055,11 +2072,15 @@ class MetaStoreDirectSql {
         // get sum for all columns to reduce the number of queries
         Map<String, Map<Integer, Object>> sumMap = new HashMap<String, 
Map<Integer, Object>>();
         queryText = "select \"COLUMN_NAME\", sum(\"NUM_NULLS\"), 
sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\"), sum(\"NUM_DISTINCTS\")"
-            + " from " + PART_COL_STATS + " where \"CAT_NAME\" = ? and 
\"DB_NAME\" = ? and \"TABLE_NAME\" = ? "
-            + " and \"COLUMN_NAME\" in (" + 
makeParams(extraColumnNameTypeParts.size()) + ")"
-            + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + 
")"
-            + " and \"ENGINE\" = ? "
-            + " group by \"COLUMN_NAME\"";
+            + " from " + PART_COL_STATS
+            + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
+            + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + 
TBLS + ".\"TBL_ID\""
+            + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + 
".\"DB_ID\""
+            + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + ".\"NAME\" = 
? and " + TBLS + ".\"TBL_NAME\" = ? "
+            + " and " + PART_COL_STATS + ".\"COLUMN_NAME\" in (" + 
makeParams(extraColumnNameTypeParts.size()) + ")"
+            + " and " + PARTITIONS + ".\"PART_NAME\" in (" + 
makeParams(partNames.size()) + ")"
+            + " and " + PART_COL_STATS + ".\"ENGINE\" = ? "
+            + " group by " + PART_COL_STATS + ".\"COLUMN_NAME\"";
         start = doTrace ? System.nanoTime() : 0;
         try (QueryWrapper query = new 
QueryWrapper(pm.newQuery("javax.jdo.query.SQL", queryText))) {
           List<String> extraColumnNames = new ArrayList<String>();
@@ -2127,18 +2148,24 @@ class MetaStoreDirectSql {
               // if the aggregation type is min/max, we extrapolate from the
               // left/right borders
               if (!decimal) {
-                queryText = "select \"" + colStatName
-                    + "\",\"PARTITION_NAME\" from " + PART_COL_STATS
-                    + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and 
\"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?"
-                    + " and \"PARTITION_NAME\" in (" + 
makeParams(partNames.size()) + ")"
-                    + " and \"ENGINE\" = ? "
+                queryText = "select \"" + colStatName + "\",\"PART_NAME\" from 
" + PART_COL_STATS
+                    + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
+                    + " inner join " + TBLS + " on " + PARTITIONS + 
".\"TBL_ID\" = " + TBLS + ".\"TBL_ID\""
+                    + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + 
DBS + ".\"DB_ID\""
+                    + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + 
".\"NAME\" = ? and " + TBLS + ".\"TBL_NAME\" = ? "
+                    + " and " + PART_COL_STATS + ".\"COLUMN_NAME\" = ? "
+                    + " and " + PARTITIONS + ".\"PART_NAME\" in (" + 
makeParams(partNames.size()) + ")"
+                    + " and " + PART_COL_STATS + ".\"ENGINE\" = ? "
                     + " order by \"" + colStatName + "\"";
               } else {
-                queryText = "select \"" + colStatName
-                    + "\",\"PARTITION_NAME\" from " + PART_COL_STATS
-                    + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and 
\"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?"
-                    + " and \"PARTITION_NAME\" in (" + 
makeParams(partNames.size()) + ")"
-                    + " and \"ENGINE\" = ? "
+                queryText = "select \"" + colStatName + "\",\"PART_NAME\" from 
" + PART_COL_STATS
+                    + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
+                    + " inner join " + TBLS + " on " + PARTITIONS + 
".\"TBL_ID\" = " + TBLS + ".\"TBL_ID\""
+                    + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + 
DBS + ".\"DB_ID\""
+                    + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + 
".\"NAME\" = ? and " + TBLS + ".\"TBL_NAME\" = ? "
+                    + " and " + PART_COL_STATS + ".\"COLUMN_NAME\" = ? "
+                    + " and " + PARTITIONS + ".\"PART_NAME\" in (" + 
makeParams(partNames.size()) + ")"
+                    + " and " + PART_COL_STATS + ".\"ENGINE\" = ? "
                     + " order by cast(\"" + colStatName + "\" as decimal)";
               }
               start = doTrace ? System.nanoTime() : 0;
@@ -2166,10 +2193,14 @@ class MetaStoreDirectSql {
                   + 
"avg((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" as 
decimal)),"
                   + 
"avg((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
                   + "avg((cast(\"BIG_DECIMAL_HIGH_VALUE\" as 
decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\")"
-                  + " from " + PART_COL_STATS + "" + " where \"CAT_NAME\" = ? 
and \"DB_NAME\" = ? and \"TABLE_NAME\" = ?"
-                  + " and \"COLUMN_NAME\" = ?" + " and \"PARTITION_NAME\" in ("
-                  + makeParams(partNames.size()) + ")"
-                  + " and \"ENGINE\" = ? "
+                  + " from " + PART_COL_STATS + ""
+                  + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
+                  + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" 
= " + TBLS + ".\"TBL_ID\""
+                  + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + 
DBS + ".\"DB_ID\""
+                  + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + 
".\"NAME\" = ? and " + TBLS + ".\"TBL_NAME\" = ? "
+                  + " and " + PART_COL_STATS + ".\"COLUMN_NAME\" = ? "
+                  + " and " + PARTITIONS + ".\"PART_NAME\" in (" + 
makeParams(partNames.size()) + ")"
+                  + " and " + PART_COL_STATS + ".\"ENGINE\" = ? "
                   + " group by \"COLUMN_NAME\"";
               start = doTrace ? System.nanoTime() : 0;
               try(QueryWrapper query = new 
QueryWrapper(pm.newQuery("javax.jdo.query.SQL", queryText))) {
@@ -2246,12 +2277,16 @@ class MetaStoreDirectSql {
       return Collections.emptyList();
     }
     final boolean doTrace = LOG.isDebugEnabled();
-    final String queryText0 = "select \"PARTITION_NAME\", " + 
getStatsList(enableBitVector, enableKll) + " from "
-        + " " + PART_COL_STATS + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? 
and \"TABLE_NAME\" = ? and " +
-        "\"COLUMN_NAME\""
-        + "  in (%1$s) AND \"PARTITION_NAME\" in (%2$s) "
-        + " and \"ENGINE\" = ? "
-        + " order by \"PARTITION_NAME\"";
+    final String queryText0 = "select \"PARTITIONS\".\"PART_NAME\", " + 
getStatsList(enableBitVector, enableKll)
+        + " from " + PART_COL_STATS
+        + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
+        + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + 
TBLS + ".\"TBL_ID\""
+        + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + 
".\"DB_ID\""
+        + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + ".\"NAME\" = ? 
and " + TBLS + ".\"TBL_NAME\" = ? "
+        + " and " + PART_COL_STATS + ".\"COLUMN_NAME\" in (%1$s)"
+        + " and " + PARTITIONS + ".\"PART_NAME\" in (%2$s)"
+        + " and " + PART_COL_STATS + ".\"ENGINE\" = ? "
+        + " order by " + PARTITIONS +  ".\"PART_NAME\"";
     Batchable<String, Object[]> b = new Batchable<String, Object[]>() {
       @Override
       public List<Object[]> run(final List<String> inputColNames) throws 
MetaException {
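
With CAT_NAME, DB_NAME, TABLE_NAME and PARTITION_NAME dropped from the stats tables, every direct-SQL path above resolves those values by joining PART_COL_STATS (or TAB_COL_STATS) to PARTITIONS, TBLS and DBS. As an illustration only, the per-partition stats query built from queryText0 expands to roughly the constant below once the table-name constants are substituted; the two-element IN lists are placeholders that the real code generates via makeParams() and fills through Batchable.runBatched().

final class PartStatsQuerySketch {
  private PartStatsQuerySketch() {}

  // Hypothetical constant for illustration; the production code assembles this
  // text from the PART_COL_STATS/PARTITIONS/TBLS/DBS name constants.
  static final String EXPANDED_PART_STATS_QUERY =
      "select \"PARTITIONS\".\"PART_NAME\", <columns from getStatsList()>"
    + " from \"PART_COL_STATS\""
    + " inner join \"PARTITIONS\" on \"PART_COL_STATS\".\"PART_ID\" = \"PARTITIONS\".\"PART_ID\""
    + " inner join \"TBLS\" on \"PARTITIONS\".\"TBL_ID\" = \"TBLS\".\"TBL_ID\""
    + " inner join \"DBS\" on \"TBLS\".\"DB_ID\" = \"DBS\".\"DB_ID\""
    + " where \"DBS\".\"CTLG_NAME\" = ? and \"DBS\".\"NAME\" = ? and \"TBLS\".\"TBL_NAME\" = ?"
    + " and \"PART_COL_STATS\".\"COLUMN_NAME\" in (?, ?)"
    + " and \"PARTITIONS\".\"PART_NAME\" in (?, ?)"
    + " and \"PART_COL_STATS\".\"ENGINE\" = ?"
    + " order by \"PARTITIONS\".\"PART_NAME\"";
}
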
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index a810c9cc695..8594f3a2aba 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -10160,10 +10160,10 @@ public class ObjectStore implements RawStore, 
Configurable {
 
   private void writeMPartitionColumnStatistics(Table table, Partition 
partition,
       MPartitionColumnStatistics mStatsObj, MPartitionColumnStatistics 
oldStats) {
-    String catName = mStatsObj.getCatName();
-    String dbName = mStatsObj.getDbName();
-    String tableName = mStatsObj.getTableName();
-    String partName = mStatsObj.getPartitionName();
+    String catName = 
mStatsObj.getPartition().getTable().getDatabase().getCatalogName();
+    String dbName = 
mStatsObj.getPartition().getTable().getDatabase().getName();
+    String tableName = mStatsObj.getPartition().getTable().getTableName();
+    String partName = mStatsObj.getPartition().getPartitionName();
     String colName = mStatsObj.getColName();
 
     Preconditions.checkState(this.currentTransaction.isActive());
@@ -10415,7 +10415,7 @@ public class ObjectStore implements RawStore, 
Configurable {
             public List<MTableColumnStatistics> run(List<String> input)
                 throws MetaException {
               StringBuilder filter =
-                  new StringBuilder("tableName == t1 && dbName == t2 && 
catName == t3 && engine == t4 && (");
+                  new StringBuilder("table.tableName == t1 && 
table.database.name == t2 && table.database.catalogName == t3 && engine == t4 
&& (");
               StringBuilder paramStr = new StringBuilder(
                   "java.lang.String t1, java.lang.String t2, java.lang.String 
t3, java.lang.String t4");
               Object[] params = new Object[input.size() + 4];
@@ -10490,7 +10490,7 @@ public class ObjectStore implements RawStore, 
Configurable {
     try {
       openTransaction();
       query = pm.newQuery(MTableColumnStatistics.class);
-      query.setFilter("tableName == t1 && dbName == t2 && catName == t3");
+      query.setFilter("table.tableName == t1 && table.database.name == t2 && 
table.database.catalogName == t3");
       query.declareParameters("java.lang.String t1, java.lang.String t2, 
java.lang.String t3");
       query.setResult("DISTINCT engine");
       Collection names = (Collection) query.execute(tableName, dbName, 
catName);
@@ -10602,7 +10602,7 @@ public class ObjectStore implements RawStore, 
Configurable {
     try {
       openTransaction();
       query = pm.newQuery(MPartitionColumnStatistics.class);
-      query.setFilter("tableName == t1 && dbName == t2 && catName == t3");
+      query.setFilter("partition.table.tableName == t1 && 
partition.table.database.name == t2 && partition.table.database.catalogName == 
t3");
       query.declareParameters("java.lang.String t1, java.lang.String t2, 
java.lang.String t3");
       query.setResult("DISTINCT engine");
       Collection names = (Collection) query.execute(tableName, dbName, 
catName);
@@ -10701,7 +10701,7 @@ public class ObjectStore implements RawStore, 
Configurable {
         for (int i = 0; i <= mStats.size(); ++i) {
           boolean isLast = i == mStats.size();
           MPartitionColumnStatistics mStatsObj = isLast ? null : mStats.get(i);
-          String partName = isLast ? null : mStatsObj.getPartitionName();
+          String partName = isLast ? null : 
mStatsObj.getPartition().getPartitionName();
           if (isLast || !partName.equals(lastPartName)) {
             if (i != 0) {
               ColumnStatistics colStat = new ColumnStatistics(csd, curList);
@@ -10845,7 +10845,7 @@ public class ObjectStore implements RawStore, 
Configurable {
       List<MPartitionColumnStatistics> result = Collections.emptyList();
       try (Query query = pm.newQuery(MPartitionColumnStatistics.class)) {
         String paramStr = "java.lang.String t1, java.lang.String t2, 
java.lang.String t3, java.lang.String t4";
-        String filter = "tableName == t1 && dbName == t2 && catName == t3 && 
engine == t4 && (";
+        String filter = "partition.table.tableName == t1 && 
partition.table.database.name == t2 && partition.table.database.catalogName == 
t3 && engine == t4 && (";
         Object[] params = new Object[colNames.size() + partNames.size() + 4];
         int i = 0;
         params[i++] = table.getTableName();
@@ -10854,7 +10854,7 @@ public class ObjectStore implements RawStore, 
Configurable {
         params[i++] = engine;
         int firstI = i;
         for (String s : partNames) {
-          filter += ((i == firstI) ? "" : " || ") + "partitionName == p" + i;
+          filter += ((i == firstI) ? "" : " || ") + "partition.partitionName 
== p" + i;
           paramStr += ", java.lang.String p" + i;
           params[i++] = s;
         }
@@ -10868,7 +10868,7 @@ public class ObjectStore implements RawStore, 
Configurable {
         filter += ")";
         query.setFilter(filter);
         query.declareParameters(paramStr);
-        query.setOrdering("partitionName ascending");
+        query.setOrdering("partition.partitionName ascending");
         result = (List<MPartitionColumnStatistics>) 
query.executeWithArray(params);
         pm.retrieveAll(result);
         result = new ArrayList<>(result);
@@ -10890,7 +10890,7 @@ public class ObjectStore implements RawStore, 
Configurable {
       String catName, String dbName, String tableName, List<String> partNames) 
{
     Pair<Query, Object[]> queryWithParams = makeQueryByPartitionNames(
         catName, dbName, tableName, partNames, 
MPartitionColumnStatistics.class,
-        "tableName", "dbName", "partition.partitionName", "catName");
+        "partition.table.tableName", "partition.table.database.name", 
"partition.partitionName", "partition.table.database.catalogName");
     try (QueryWrapper wrapper = new QueryWrapper(queryWithParams.getLeft())) {
       wrapper.deletePersistentAll(queryWithParams.getRight());
     }
@@ -10916,7 +10916,7 @@ public class ObjectStore implements RawStore, 
Configurable {
 
       query = pm.newQuery(MPartitionColumnStatistics.class);
 
-      String filter = "dbName == t2 && tableName == t3 && catName == t4";
+      String filter = "partition.table.database.name == t2 && 
partition.table.tableName == t3 && partition.table.database.catalogName == t4";
       String parameters = "java.lang.String t2, java.lang.String t3, 
java.lang.String t4";
 
       query.setFilter(filter);
@@ -11002,13 +11002,13 @@ public class ObjectStore implements RawStore, 
Configurable {
       String parameters;
       if (colName != null) {
         filter =
-            "partition.partitionName == t1 && dbName == t2 && tableName == t3 
&& "
-                + "colName == t4 && catName == t5" + (engine != null ? " && 
engine == t6" : "");
+            "partition.partitionName == t1 && partition.table.database.name == 
t2 && partition.table.tableName == t3 && "
+                + "colName == t4 && partition.table.database.catalogName == 
t5" + (engine != null ? " && engine == t6" : "");
         parameters =
             "java.lang.String t1, java.lang.String t2, "
                 + "java.lang.String t3, java.lang.String t4, java.lang.String 
t5" + (engine != null ? ", java.lang.String t6" : "");
       } else {
-        filter = "partition.partitionName == t1 && dbName == t2 && tableName 
== t3 && catName == t4" + (engine != null ? " && engine == t5" : "");
+        filter = "partition.partitionName == t1 && 
partition.table.database.name == t2 && partition.table.tableName == t3 && 
partition.table.database.catalogName == t4" + (engine != null ? " && engine == 
t5" : "");
         parameters = "java.lang.String t1, java.lang.String t2, 
java.lang.String t3, java.lang.String t4" + (engine != null ? ", 
java.lang.String t5" : "");
       }
       query.setFilter(filter);
@@ -11096,10 +11096,10 @@ public class ObjectStore implements RawStore, 
Configurable {
       String filter;
       String parameters;
       if (colName != null) {
-        filter = "table.tableName == t1 && dbName == t2 && catName == t3 && 
colName == t4" + (engine != null ? " && engine == t5" : "");
+        filter = "table.tableName == t1 && table.database.name == t2 && 
table.database.catalogName == t3 && colName == t4" + (engine != null ? " && 
engine == t5" : "");
         parameters = "java.lang.String t1, java.lang.String t2, 
java.lang.String t3, java.lang.String t4" + (engine != null ? ", 
java.lang.String t5" : "");
       } else {
-        filter = "table.tableName == t1 && dbName == t2 && catName == t3" + 
(engine != null ? " && engine == t4" : "");
+        filter = "table.tableName == t1 && table.database.name == t2 && 
table.database.catalogName == t3" + (engine != null ? " && engine == t4" : "");
         parameters = "java.lang.String t1, java.lang.String t2, 
java.lang.String t3" + (engine != null ? ", java.lang.String t4" : "");
       }
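
The ObjectStore changes above replace filters on the dropped dbName/tableName/catName/partitionName fields with JDOQL navigation through the object model (partition.table.database.name and friends). A minimal sketch of that filter style is shown below, assuming only the JDO API and model classes already used by ObjectStore; the helper class and method names are invented for illustration.

import java.util.List;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;
import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics;

// Sketch: fetch all partition column statistics of one table by navigating the
// MPartitionColumnStatistics -> MPartition -> MTable -> MDatabase chain instead
// of the removed denormalized name columns.
final class PartitionStatsLookupSketch {
  @SuppressWarnings("unchecked")
  static List<MPartitionColumnStatistics> forTable(
      PersistenceManager pm, String catName, String dbName, String tableName) {
    Query query = pm.newQuery(MPartitionColumnStatistics.class);
    query.setFilter("partition.table.tableName == t1"
        + " && partition.table.database.name == t2"
        + " && partition.table.database.catalogName == t3");
    query.declareParameters(
        "java.lang.String t1, java.lang.String t2, java.lang.String t3");
    query.setOrdering("partition.partitionName ascending");
    return (List<MPartitionColumnStatistics>) query.execute(tableName, dbName, catName);
  }
}
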
 
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
index 163c855833e..3c5c1d7f7dd 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
@@ -69,9 +69,6 @@ public class StatObjectConverter {
 
      MTableColumnStatistics mColStats = new MTableColumnStatistics();
      mColStats.setTable(table);
-     mColStats.setDbName(statsDesc.getDbName());
-     mColStats.setCatName(table.getDatabase().getCatalogName());
-     mColStats.setTableName(statsDesc.getTableName());
      mColStats.setLastAnalyzed(statsDesc.getLastAnalyzed());
      mColStats.setColName(statsObj.getColName());
      mColStats.setColType(statsObj.getColType());
@@ -288,9 +285,7 @@ public class StatObjectConverter {
     if (mStatsObj.getNumNulls() != null) {
       setStmt.append("\"NUM_NULLS\" = ? ,");
     }
-    setStmt.append("\"ENGINE\" = ? ,");
-    setStmt.append("\"DB_NAME\" = ? ,");
-    setStmt.append("\"TABLE_NAME\" = ? ");
+    setStmt.append("\"ENGINE\" = ? ");
     return setStmt.toString();
   }
 
@@ -341,8 +336,6 @@ public class StatObjectConverter {
       pst.setObject(colIdx++, mStatsObj.getNumNulls());
     }
     pst.setString(colIdx++, mStatsObj.getEngine());
-    pst.setString(colIdx++, mStatsObj.getDbName());
-    pst.setString(colIdx++, mStatsObj.getTableName());
     return colIdx;
   }
 
@@ -460,9 +453,9 @@ public class StatObjectConverter {
       MTableColumnStatistics mStatsObj) {
     ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc();
     statsDesc.setIsTblLevel(true);
-    statsDesc.setCatName(mStatsObj.getCatName());
-    statsDesc.setDbName(mStatsObj.getDbName());
-    statsDesc.setTableName(mStatsObj.getTableName());
+    statsDesc.setCatName(mStatsObj.getTable().getDatabase().getCatalogName());
+    statsDesc.setDbName(mStatsObj.getTable().getDatabase().getName());
+    statsDesc.setTableName(mStatsObj.getTable().getTableName());
     statsDesc.setLastAnalyzed(mStatsObj.getLastAnalyzed());
     return statsDesc;
   }
@@ -475,16 +468,7 @@ public class StatObjectConverter {
     }
 
     MPartitionColumnStatistics mColStats = new MPartitionColumnStatistics();
-    if (partition != null) {
-      
mColStats.setCatName(partition.getTable().getDatabase().getCatalogName());
-      mColStats.setPartition(partition);
-    } else {
-      // Assume that the statsDesc has already set catalogName when partition 
is null
-      mColStats.setCatName(statsDesc.getCatName());
-    }
-    mColStats.setDbName(statsDesc.getDbName());
-    mColStats.setTableName(statsDesc.getTableName());
-    mColStats.setPartitionName(statsDesc.getPartName());
+    mColStats.setPartition(partition);
     mColStats.setLastAnalyzed(statsDesc.getLastAnalyzed());
     mColStats.setColName(statsObj.getColName());
     mColStats.setColType(statsObj.getColType());
@@ -668,10 +652,10 @@ public class StatObjectConverter {
     MPartitionColumnStatistics mStatsObj) {
     ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc();
     statsDesc.setIsTblLevel(false);
-    statsDesc.setCatName(mStatsObj.getCatName());
-    statsDesc.setDbName(mStatsObj.getDbName());
-    statsDesc.setTableName(mStatsObj.getTableName());
-    statsDesc.setPartName(mStatsObj.getPartitionName());
+    
statsDesc.setCatName(mStatsObj.getPartition().getTable().getDatabase().getCatalogName());
+    
statsDesc.setDbName(mStatsObj.getPartition().getTable().getDatabase().getName());
+    statsDesc.setTableName(mStatsObj.getPartition().getTable().getTableName());
+    statsDesc.setPartName(mStatsObj.getPartition().getPartitionName());
     statsDesc.setLastAnalyzed(mStatsObj.getLastAnalyzed());
     return statsDesc;
   }
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java
index 7e51b92e49e..f4ad37d9b53 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java
@@ -34,10 +34,6 @@ public class MPartitionColumnStatistics {
 
   private MPartition partition;
 
-  private String catName;
-  private String dbName;
-  private String tableName;
-  private String partitionName;
   private String colName;
   private String colType;
   private String engine;
@@ -60,14 +56,6 @@ public class MPartitionColumnStatistics {
 
   public MPartitionColumnStatistics() {}
 
-  public String getTableName() {
-    return tableName;
-  }
-
-  public void setTableName(String tableName) {
-    this.tableName = tableName;
-  }
-
   public String getColName() {
     return colName;
   }
@@ -132,22 +120,6 @@ public class MPartitionColumnStatistics {
     this.lastAnalyzed = lastAnalyzed;
   }
 
-  public String getDbName() {
-    return dbName;
-  }
-
-  public void setDbName(String dbName) {
-    this.dbName = dbName;
-  }
-
-  public String getCatName() {
-    return catName;
-  }
-
-  public void setCatName(String catName) {
-    this.catName = catName;
-  }
-
   public MPartition getPartition() {
     return partition;
   }
@@ -156,14 +128,6 @@ public class MPartitionColumnStatistics {
     this.partition = partition;
   }
 
-  public String getPartitionName() {
-    return partitionName;
-  }
-
-  public void setPartitionName(String partitionName) {
-    this.partitionName = partitionName;
-  }
-
   public String getColType() {
     return colType;
   }
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java
index 30ba9bf32b7..9feea1958b0 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java
@@ -33,9 +33,6 @@ package org.apache.hadoop.hive.metastore.model;
 public class MTableColumnStatistics {
 
   private MTable table;
-  private String catName;
-  private String dbName;
-  private String tableName;
   private String colName;
   private String colType;
   private String engine;
@@ -66,14 +63,6 @@ public class MTableColumnStatistics {
     this.table = table;
   }
 
-  public String getTableName() {
-    return tableName;
-  }
-
-  public void setTableName(String tableName) {
-    this.tableName = tableName;
-  }
-
   public String getColName() {
     return colName;
   }
@@ -146,22 +135,6 @@ public class MTableColumnStatistics {
     this.lastAnalyzed = lastAnalyzed;
   }
 
-  public String getDbName() {
-    return dbName;
-  }
-
-  public void setDbName(String dbName) {
-    this.dbName = dbName;
-  }
-
-  public String getCatName() {
-    return catName;
-  }
-
-  public void setCatName(String catName) {
-    this.catName = catName;
-  }
-
   public void setBooleanStats(Long numTrues, Long numFalses, Long numNulls) {
     this.numTrues = numTrues;
     this.numFalses = numFalses;
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskMoveDatabase.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskMoveDatabase.java
index 7b06f3f7185..08cb4037206 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskMoveDatabase.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskMoveDatabase.java
@@ -54,8 +54,6 @@ class SchemaToolTaskMoveDatabase extends SchemaToolTask {
       conn.setAutoCommit(false);
       try (Statement stmt = conn.createStatement()) {
         updateCatalogNameInTable(stmt, "DBS", "CTLG_NAME", "NAME", 
fromCatName, toCatName, dbName, false);
-        updateCatalogNameInTable(stmt, "TAB_COL_STATS", "CAT_NAME", "DB_NAME", 
fromCatName, toCatName, dbName, true);
-        updateCatalogNameInTable(stmt, "PART_COL_STATS", "CAT_NAME", 
"DB_NAME", fromCatName, toCatName, dbName, true);
         updateCatalogNameInTable(stmt, "PARTITION_EVENTS", "CAT_NAME", 
"DB_NAME", fromCatName, toCatName, dbName, true);
         updateCatalogNameInTable(stmt, "NOTIFICATION_LOG", "CAT_NAME", 
"DB_NAME", fromCatName, toCatName, dbName, true);
         conn.commit();
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskMoveTable.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskMoveTable.java
index 70831b00dfc..f7587ba7dac 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskMoveTable.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/schematool/SchemaToolTaskMoveTable.java
@@ -57,8 +57,6 @@ class SchemaToolTaskMoveTable extends SchemaToolTask {
       conn.setAutoCommit(false);
       try (Statement stmt = conn.createStatement()) {
         updateTableId(stmt);
-        updateDbNameForTable(stmt, "TAB_COL_STATS", "TABLE_NAME", fromCat, 
toCat, fromDb, toDb, tableName);
-        updateDbNameForTable(stmt, "PART_COL_STATS", "TABLE_NAME", fromCat, 
toCat, fromDb, toDb, tableName);
         updateDbNameForTable(stmt, "PARTITION_EVENTS", "TBL_NAME", fromCat, 
toCat, fromDb, toDb, tableName);
         updateDbNameForTable(stmt, "NOTIFICATION_LOG", "TBL_NAME", fromCat, 
toCat, fromDb, toDb, tableName);
         conn.commit();
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/FindColumnsWithStatsHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/FindColumnsWithStatsHandler.java
index e20d429e25f..ddea652bcf8 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/FindColumnsWithStatsHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/FindColumnsWithStatsHandler.java
@@ -37,11 +37,16 @@ public class FindColumnsWithStatsHandler implements 
QueryHandler<List<String>> {
 
   //language=SQL
   private static final String TABLE_SELECT = "SELECT \"COLUMN_NAME\" FROM 
\"TAB_COL_STATS\" " +
-      "WHERE \"DB_NAME\" = :dbName AND \"TABLE_NAME\" = :tableName";
+      "INNER JOIN \"TBLS\" ON \"TAB_COL_STATS\".\"TBL_ID\" = 
\"TBLS\".\"TBL_ID\" " +
+      "INNER JOIN \"DBS\" ON \"TBLS\".\"DB_ID\" = \"DBS\".\"DB_ID\" " +
+      "WHERE \"DBS\".\"NAME\" = :dbName AND \"TBLS\".\"TBL_NAME\" = 
:tableName";
   //language=SQL
   private static final String PARTITION_SELECT = "SELECT \"COLUMN_NAME\" FROM 
\"PART_COL_STATS\" " +
-      "WHERE \"DB_NAME\" = :dbName AND \"TABLE_NAME\" = :tableName AND 
\"PARTITION_NAME\" = :partName";
-  
+      "INNER JOIN \"PARTITIONS\" ON \"PART_COL_STATS\".\"PART_ID\" = 
\"PARTITIONS\".\"PART_ID\" " +
+      "INNER JOIN \"TBLS\" ON \"PARTITIONS\".\"TBL_ID\" = \"TBLS\".\"TBL_ID\" 
" +
+      "INNER JOIN \"DBS\" ON \"TBLS\".\"DB_ID\" = \"DBS\".\"DB_ID\" " +
+      "WHERE \"DBS\".\"NAME\" = :dbName AND \"TBLS\".\"TBL_NAME\" = :tableName 
AND \"PARTITIONS\".\"PART_NAME\" = :partName";
+
   private final CompactionInfo ci;
 
   public FindColumnsWithStatsHandler(CompactionInfo ci) {
diff --git 
a/standalone-metastore/metastore-server/src/main/resources/package.jdo 
b/standalone-metastore/metastore-server/src/main/resources/package.jdo
index 982703b2783..5e2f686bff8 100644
--- a/standalone-metastore/metastore-server/src/main/resources/package.jdo
+++ b/standalone-metastore/metastore-server/src/main/resources/package.jdo
@@ -939,15 +939,6 @@
         <column name="CS_ID"/>
       </datastore-identity>
 
-      <field name ="catName">
-        <column name="CAT_NAME" length="256" jdbc-type="VARCHAR" 
allows-null="false"/>
-      </field>
-      <field name ="dbName">
-        <column name="DB_NAME" length="128" jdbc-type="VARCHAR" 
allows-null="false"/>
-      </field>
-      <field name="tableName">
-        <column name="TABLE_NAME" length="256" jdbc-type="VARCHAR" 
allows-null="false"/>
-      </field>
       <field name="table">
         <column name="TBL_ID"/>
       </field>
@@ -1013,18 +1004,6 @@
         <column name="CS_ID"/>
       </datastore-identity>
 
-      <field name ="catName">
-        <column name="CAT_NAME" length="256" jdbc-type="VARCHAR" 
allows-null="false"/>
-      </field>
-      <field name ="dbName">
-        <column name="DB_NAME" length="128" jdbc-type="VARCHAR" 
allows-null="false"/>
-      </field>
-      <field name="tableName">
-        <column name="TABLE_NAME" length="256" jdbc-type="VARCHAR" 
allows-null="false"/>
-      </field>
-      <field name="partitionName">
-        <column name="PARTITION_NAME" length="767" jdbc-type="VARCHAR" 
allows-null="false"/>
-      </field>
       <field name="partition">
         <column name="PART_ID"/>
       </field>
diff --git 
a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.1.0.derby.sql
 
b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.1.0.derby.sql
index 6216a47e9ac..ea1f90aff89 100644
--- 
a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.1.0.derby.sql
+++ 
b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.1.0.derby.sql
@@ -89,9 +89,6 @@ CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, 
"INPUT_FORMAT" VARCHAR(4000),
 CREATE TABLE "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME" VARCHAR(256) NOT NULL, 
"NEXT_VAL" BIGINT NOT NULL);
 
 CREATE TABLE "APP"."TAB_COL_STATS"(
-    "CAT_NAME" VARCHAR(256) NOT NULL,
-    "DB_NAME" VARCHAR(128) NOT NULL,
-    "TABLE_NAME" VARCHAR(256) NOT NULL,
     "COLUMN_NAME" VARCHAR(767) NOT NULL,
     "COLUMN_TYPE" VARCHAR(128) NOT NULL,
     "LONG_LOW_VALUE" BIGINT,
@@ -139,10 +136,6 @@ CREATE TABLE "APP"."MASTER_KEYS" ("KEY_ID" INTEGER NOT 
NULL generated always as
 CREATE TABLE "APP"."DELEGATION_TOKENS" ( "TOKEN_IDENT" VARCHAR(767) NOT NULL, 
"TOKEN" VARCHAR(767));
 
 CREATE TABLE "APP"."PART_COL_STATS"(
-    "CAT_NAME" VARCHAR(256) NOT NULL,
-    "DB_NAME" VARCHAR(128) NOT NULL,
-    "TABLE_NAME" VARCHAR(256) NOT NULL,
-    "PARTITION_NAME" VARCHAR(767) NOT NULL,
     "COLUMN_NAME" VARCHAR(767) NOT NULL,
     "COLUMN_TYPE" VARCHAR(128) NOT NULL,
     "LONG_LOW_VALUE" BIGINT,
@@ -266,9 +259,9 @@ CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON 
"APP"."DB_PRIVS" ("AUTHORIZER",
 
 CREATE UNIQUE INDEX "APP"."DCPRIVILEGEINDEX" ON "APP"."DC_PRIVS" 
("AUTHORIZER", "NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DC_PRIV", 
"GRANTOR", "GRANTOR_TYPE");
 
-CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" 
("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME","CAT_NAME");
+CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" 
("PART_ID","COLUMN_NAME");
 
-CREATE INDEX "APP"."TAB_COL_STATS_IDX" ON "APP"."TAB_COL_STATS" ("DB_NAME", 
"TABLE_NAME", "COLUMN_NAME", "CAT_NAME");
+CREATE INDEX "APP"."TAB_COL_STATS_IDX" ON "APP"."TAB_COL_STATS" ("TBL_ID", 
"COLUMN_NAME");
 
 CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("AUTHORIZER", 
"PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", 
"GRANTOR_TYPE");
 
diff --git 
a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-to-4.1.0.derby.sql
 
b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-to-4.1.0.derby.sql
index 12028a9b460..fd56b427ae7 100644
--- 
a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-to-4.1.0.derby.sql
+++ 
b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-to-4.1.0.derby.sql
@@ -1,2 +1,16 @@
+-- HIVE-27725
+DROP INDEX "APP"."TAB_COL_STATS_IDX";
+CREATE INDEX "APP"."TAB_COL_STATS_IDX" ON "APP"."TAB_COL_STATS" ("TBL_ID", 
"COLUMN_NAME");
+ALTER TABLE "APP"."TAB_COL_STATS" DROP COLUMN "CAT_NAME";
+ALTER TABLE "APP"."TAB_COL_STATS" DROP COLUMN "DB_NAME";
+ALTER TABLE "APP"."TAB_COL_STATS" DROP COLUMN "TABLE_NAME";
+
+DROP INDEX "APP"."PCS_STATS_IDX";
+CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" 
("PART_ID","COLUMN_NAME");
+ALTER TABLE "APP"."PART_COL_STATS" DROP COLUMN "CAT_NAME";
+ALTER TABLE "APP"."PART_COL_STATS" DROP COLUMN "DB_NAME";
+ALTER TABLE "APP"."PART_COL_STATS" DROP COLUMN "TABLE_NAME";
+ALTER TABLE "APP"."PART_COL_STATS" DROP COLUMN "PARTITION_NAME";
+
 -- This needs to be the last thing done.  Insert any changes above this line.
 UPDATE "APP".VERSION SET SCHEMA_VERSION='4.1.0', VERSION_COMMENT='Hive release 
version 4.1.0' where VER_ID=1;
\ No newline at end of file
diff --git 
a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.1.0.mssql.sql
 
b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.1.0.mssql.sql
index c32de734c6c..c0cfeafc370 100644
--- 
a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.1.0.mssql.sql
+++ 
b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.1.0.mssql.sql
@@ -61,7 +61,6 @@ CREATE TABLE PART_COL_STATS
     AVG_COL_LEN float NULL,
     "COLUMN_NAME" nvarchar(767) NOT NULL,
     COLUMN_TYPE nvarchar(128) NOT NULL,
-    DB_NAME nvarchar(128) NOT NULL,
     BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
     BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
     DOUBLE_HIGH_VALUE float NULL,
@@ -76,16 +75,13 @@ CREATE TABLE PART_COL_STATS
     NUM_NULLS bigint NOT NULL,
     NUM_TRUES bigint NULL,
     PART_ID bigint NULL,
-    PARTITION_NAME nvarchar(767) NOT NULL,
-    "TABLE_NAME" nvarchar(256) NOT NULL,
-    "CAT_NAME" nvarchar(256) NOT NULL,
     "ENGINE" nvarchar(128) NOT NULL,
     HISTOGRAM varbinary(max) NULL
 );
 
 ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY 
(CS_ID);
 
-CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS 
(DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME, CAT_NAME);
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (PART_ID,COLUMN_NAME);
 
 -- Table PART_PRIVS for classes 
[org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
 CREATE TABLE PART_PRIVS
@@ -230,7 +226,6 @@ CREATE TABLE TAB_COL_STATS
     AVG_COL_LEN float NULL,
     "COLUMN_NAME" nvarchar(767) NOT NULL,
     COLUMN_TYPE nvarchar(128) NOT NULL,
-    DB_NAME nvarchar(128) NOT NULL,
     BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
     BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
     DOUBLE_HIGH_VALUE float NULL,
@@ -245,14 +240,12 @@ CREATE TABLE TAB_COL_STATS
     NUM_NULLS bigint NOT NULL,
     NUM_TRUES bigint NULL,
     TBL_ID bigint NULL,
-    "TABLE_NAME" nvarchar(256) NOT NULL,
-    "CAT_NAME" nvarchar(256) NOT NULL,
     "ENGINE" nvarchar(128) NOT NULL,
     HISTOGRAM varbinary(max) NULL
 );
 
 ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
-CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (DB_NAME, TABLE_NAME, 
COLUMN_NAME, CAT_NAME);
+CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (TBL_ID, COLUMN_NAME);
 
 -- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
 CREATE TABLE TYPES
@@ -723,9 +716,6 @@ CREATE UNIQUE INDEX UNIQUE_CTLG ON CTLGS ("NAME");
 -- Constraints for table PART_COL_STATS for class(es) 
[org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
 ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK1 FOREIGN KEY 
(PART_ID) REFERENCES PARTITIONS (PART_ID) ;
 
-CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
-
-
 -- Constraints for table PART_PRIVS for class(es) 
[org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
 ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) 
REFERENCES PARTITIONS (PART_ID) ;
 
@@ -777,9 +767,6 @@ CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
 -- Constraints for table TAB_COL_STATS for class(es) 
[org.apache.hadoop.hive.metastore.model.MTableColumnStatistics]
 ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK1 FOREIGN KEY 
(TBL_ID) REFERENCES TBLS (TBL_ID) ;
 
-CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS (TBL_ID);
-
-
 -- Constraints for table TYPES for class(es) 
[org.apache.hadoop.hive.metastore.model.MType]
 CREATE UNIQUE INDEX UNIQUETYPE ON TYPES (TYPE_NAME);
 
diff --git 
a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-to-4.1.0.mssql.sql
 
b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-to-4.1.0.mssql.sql
index 723f5642dd2..9fbf56fa5d7 100644
--- 
a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-to-4.1.0.mssql.sql
+++ 
b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-to-4.1.0.mssql.sql
@@ -1,5 +1,16 @@
 SELECT 'Upgrading MetaStore schema from  4.0.0 to 4.1.0' AS MESSAGE;
 
+-- HIVE-27725
+DROP INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS;
+CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (TBL_ID, COLUMN_NAME);
+DROP INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS;
+ALTER TABLE TAB_COL_STATS DROP COLUMN CAT_NAME, DB_NAME, TABLE_NAME;
+
+DROP INDEX PCS_STATS_IDX ON PART_COL_STATS;
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (PART_ID,COLUMN_NAME);
+DROP INDEX PART_COL_STATS_N49 on PART_COL_STATS;
+ALTER TABLE PART_COL_STATS DROP COLUMN CAT_NAME, DB_NAME, TABLE_NAME, 
PARTITION_NAME;
+
 -- These lines need to be last.  Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='4.1.0', VERSION_COMMENT='Hive release 
version 4.1.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 4.0.0 to 4.1.0' AS MESSAGE;
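
Once one of the upgrade scripts above has run, the dropped columns are only reachable through the foreign keys, so any post-upgrade sanity check has to use the same joins as the server code. A hedged JDBC sketch follows; the class name is invented, and identifier quoting and case may need adjusting per backend (the Postgres schema, for example, uses quoted upper-case names).

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

// Sketch: verify that partition column stats for a table are still resolvable
// after DB_NAME/TABLE_NAME/PARTITION_NAME were dropped from PART_COL_STATS.
final class UpgradeSanityCheck {
  static long countPartStats(Connection conn, String catalog, String db, String table)
      throws SQLException {
    String sql = "SELECT COUNT(*) FROM PART_COL_STATS pcs"
        + " JOIN PARTITIONS p ON pcs.PART_ID = p.PART_ID"
        + " JOIN TBLS t ON p.TBL_ID = t.TBL_ID"
        + " JOIN DBS d ON t.DB_ID = d.DB_ID"
        + " WHERE d.CTLG_NAME = ? AND d.NAME = ? AND t.TBL_NAME = ?";
    try (PreparedStatement ps = conn.prepareStatement(sql)) {
      ps.setString(1, catalog);
      ps.setString(2, db);
      ps.setString(3, table);
      try (ResultSet rs = ps.executeQuery()) {
        rs.next();
        return rs.getLong(1);
      }
    }
  }
}
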
diff --git 
a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.1.0.mysql.sql
 
b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.1.0.mysql.sql
index 9f3b8835332..078f81a097a 100644
--- 
a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.1.0.mysql.sql
+++ 
b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.1.0.mysql.sql
@@ -677,9 +677,6 @@ CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
 --
 CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
  `CS_ID` bigint(20) NOT NULL,
- `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
  `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
  `TBL_ID` bigint(20) NOT NULL,
@@ -703,16 +700,12 @@ CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
   CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` 
(`TBL_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 
-CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (DB_NAME, TABLE_NAME, 
COLUMN_NAME, CAT_NAME) USING BTREE;
+CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (TBL_ID, COLUMN_NAME) USING 
BTREE;
 --
 -- Table structure for table `PART_COL_STATS`
 --
 CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
  `CS_ID` bigint(20) NOT NULL,
- `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT 
NULL,
  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
  `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
  `PART_ID` bigint(20) NOT NULL,
@@ -736,7 +729,7 @@ CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
   CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES 
`PARTITIONS` (`PART_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 
-CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS 
(DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME,CAT_NAME) USING BTREE;
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (PART_ID,COLUMN_NAME) USING BTREE;
 
 --
 -- Table structure for table `TYPES`
diff --git 
a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-to-4.1.0.mysql.sql
 
b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-to-4.1.0.mysql.sql
index ffa92601d79..5d535289dff 100644
--- 
a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-to-4.1.0.mysql.sql
+++ 
b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-to-4.1.0.mysql.sql
@@ -1,5 +1,14 @@
 SELECT 'Upgrading MetaStore schema from 4.0.0 to 4.1.0' AS MESSAGE;
 
+-- HIVE-27725
+DROP INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS;
+CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (TBL_ID, COLUMN_NAME) USING 
BTREE;
+ALTER TABLE TAB_COL_STATS DROP COLUMN CAT_NAME, DROP COLUMN DB_NAME, DROP 
COLUMN TABLE_NAME;
+
+DROP INDEX PCS_STATS_IDX ON PART_COL_STATS;
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (PART_ID, COLUMN_NAME) USING 
BTREE;
+ALTER TABLE PART_COL_STATS DROP COLUMN CAT_NAME, DROP COLUMN DB_NAME, DROP 
COLUMN TABLE_NAME, DROP COLUMN PARTITION_NAME;
+
 -- These lines need to be last.  Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='4.1.0', VERSION_COMMENT='Hive release 
version 4.1.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 4.0.0 to 4.1.0' AS MESSAGE;
diff --git 
a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.1.0.oracle.sql
 
b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.1.0.oracle.sql
index 5623e23092d..b878606fe6e 100644
--- 
a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.1.0.oracle.sql
+++ 
b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.1.0.oracle.sql
@@ -527,9 +527,6 @@ ALTER TABLE DBS ADD CONSTRAINT CTLGS_FK FOREIGN KEY 
(CTLG_NAME) REFERENCES CTLGS
 
 CREATE TABLE TAB_COL_STATS (
  CS_ID NUMBER NOT NULL,
- CAT_NAME VARCHAR2(256) NOT NULL,
- DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(256) NOT NULL,
  COLUMN_NAME VARCHAR2(767) NOT NULL,
  COLUMN_TYPE VARCHAR2(128) NOT NULL,
  TBL_ID NUMBER NOT NULL,
@@ -555,9 +552,7 @@ ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY 
PRIMARY KEY (CS_ID);
 
 ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) 
REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
 
-CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
-
-CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (DB_NAME, TABLE_NAME, 
COLUMN_NAME, CAT_NAME);
+CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (TBL_ID, COLUMN_NAME);
 
 CREATE TABLE VERSION (
   VER_ID NUMBER NOT NULL,
@@ -568,10 +563,6 @@ ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY 
(VER_ID);
 
 CREATE TABLE PART_COL_STATS (
  CS_ID NUMBER NOT NULL,
- CAT_NAME VARCHAR2(256) NOT NULL,
- DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(256) NOT NULL,
- PARTITION_NAME VARCHAR2(767) NOT NULL,
  COLUMN_NAME VARCHAR2(767) NOT NULL,
  COLUMN_TYPE VARCHAR2(128) NOT NULL,
  PART_ID NUMBER NOT NULL,
@@ -597,9 +588,7 @@ ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID
 
 ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
 
-CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
-
-CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME,CAT_NAME);
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (PART_ID,COLUMN_NAME);
 
 CREATE TABLE FUNCS (
   FUNC_ID NUMBER NOT NULL,
diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-to-4.1.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-to-4.1.0.oracle.sql
index e4c9f9a9343..fbc3de39e28 100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-to-4.1.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-to-4.1.0.oracle.sql
@@ -1,5 +1,16 @@
 SELECT 'Upgrading MetaStore schema from 4.0.0 to 4.1.0' AS Status from dual;
 
+-- HIVE-27725
+DROP INDEX TAB_COL_STATS_IDX;
+CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (TBL_ID, COLUMN_NAME);
+DROP INDEX TAB_COL_STATS_N49;
+ALTER TABLE TAB_COL_STATS DROP (CAT_NAME, DB_NAME, TABLE_NAME);
+
+DROP INDEX PCS_STATS_IDX;
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (PART_ID, COLUMN_NAME);
+DROP INDEX PART_COL_STATS_N49;
+ALTER TABLE PART_COL_STATS DROP (CAT_NAME, DB_NAME, TABLE_NAME, PARTITION_NAME);
+
 -- These lines need to be last.  Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='4.1.0', VERSION_COMMENT='Hive release version 4.1.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 4.0.0 to 4.1.0' AS Status from dual;
diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.1.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.1.0.postgres.sql
index 7abb5e78777..2b3a4cd17db 100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.1.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.1.0.postgres.sql
@@ -531,9 +531,6 @@ CREATE TABLE  "DELEGATION_TOKENS"
 
 CREATE TABLE "TAB_COL_STATS" (
  "CS_ID" bigint NOT NULL,
- "CAT_NAME" character varying(256) DEFAULT NULL::character varying,
- "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
  "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
  "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
  "TBL_ID" bigint NOT NULL,
@@ -570,10 +567,6 @@ CREATE TABLE "VERSION" (
 
 CREATE TABLE "PART_COL_STATS" (
  "CS_ID" bigint NOT NULL,
- "CAT_NAME" character varying(256) DEFAULT NULL::character varying,
- "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
- "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
  "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
  "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
  "PART_ID" bigint NOT NULL,
@@ -1188,7 +1181,7 @@ CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
 -- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
 --
 
-CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME","CAT_NAME");
+CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("PART_ID","COLUMN_NAME");
 
 
 --
@@ -1276,7 +1269,7 @@ CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
 -- Name: TAB_COL_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
 --
 
-CREATE INDEX "TAB_COL_STATS_IDX" ON "TAB_COL_STATS" USING btree ("DB_NAME","TABLE_NAME","COLUMN_NAME","CAT_NAME");
+CREATE INDEX "TAB_COL_STATS_IDX" ON "TAB_COL_STATS" USING btree ("TBL_ID","COLUMN_NAME");
 
 --
 -- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
@@ -1291,18 +1284,6 @@ CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
 
 CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
 
---
--- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
-
---
--- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
-
 --
 -- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
 --
diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-to-4.1.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-to-4.1.0.postgres.sql
index 13d7dd5b107..cae50ce4100 100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-to-4.1.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-to-4.1.0.postgres.sql
@@ -1,5 +1,16 @@
 SELECT 'Upgrading MetaStore schema from 4.0.0 to 4.1.0';
 
+-- HIVE-27725
+DROP INDEX "TAB_COL_STATS_IDX";
+CREATE INDEX "TAB_COL_STATS_IDX" ON "TAB_COL_STATS" USING btree 
("TBL_ID","COLUMN_NAME");
+DROP INDEX "TAB_COL_STATS_N49";
+ALTER TABLE "TAB_COL_STATS" DROP COLUMN "CAT_NAME", DROP COLUMN "DB_NAME", 
DROP COLUMN "TABLE_NAME";
+
+DROP INDEX "PCS_STATS_IDX";
+CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree 
("PART_ID","COLUMN_NAME");
+DROP INDEX "PART_COL_STATS_N49";
+ALTER TABLE "PART_COL_STATS" DROP COLUMN "CAT_NAME", DROP COLUMN "DB_NAME", 
DROP COLUMN "TABLE_NAME", DROP COLUMN "PARTITION_NAME";
+
 -- These lines need to be last. Insert any changes above.
 UPDATE "VERSION" SET "SCHEMA_VERSION"='4.1.0', "VERSION_COMMENT"='Hive release 
version 4.1.0' where "VER_ID"=1;
 SELECT 'Finished upgrading MetaStore schema from 4.0.0 to 4.1.0';
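
The PostgreSQL script can be sanity-checked the same way; here the more interesting assertion is that the redundant columns really are gone from "PART_COL_STATS". A minimal sketch, not part of this commit, assuming the PostgreSQL JDBC driver is on the classpath and using placeholder connection settings:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.util.ArrayList;
    import java.util.List;

    // Sketch only: confirm the columns dropped by HIVE-27725 are absent after the upgrade.
    // The JDBC URL, user and password are placeholders, not part of this commit.
    public class CheckPartColStatsColumns {
      public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://localhost:5432/metastore";
        try (Connection conn = DriverManager.getConnection(url, "hive", "hivepassword");
             PreparedStatement ps = conn.prepareStatement(
                 "SELECT column_name FROM information_schema.columns WHERE table_name = ?")) {
          ps.setString(1, "PART_COL_STATS");
          List<String> columns = new ArrayList<>();
          try (ResultSet rs = ps.executeQuery()) {
            while (rs.next()) {
              columns.add(rs.getString(1));
            }
          }
          for (String dropped : new String[] {"CAT_NAME", "DB_NAME", "TABLE_NAME", "PARTITION_NAME"}) {
            System.out.println(dropped + " still present: " + columns.contains(dropped));
          }
        }
      }
    }
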
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
index 1da814dd092..40674b9691f 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.AddPackageRequest;
 import org.apache.hadoop.hive.metastore.api.DropPackageRequest;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -646,14 +647,14 @@ public class TestObjectStore {
     createPartitionedTable(false, false);
     // query the partitions with JDO
     List<Partition> partitions;
-    try(AutoCloseable c =deadline()) {
+    try(AutoCloseable c = deadline()) {
       partitions = objectStore.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
           false, true, new GetPartitionsArgs.GetPartitionsArgsBuilder().max(10).build());
     }
     Assert.assertEquals(3, partitions.size());
 
     // drop partitions with directSql
-    try(AutoCloseable c =deadline()) {
+    try(AutoCloseable c = deadline()) {
       objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
           Arrays.asList("test_part_col=a0", "test_part_col=a1"), true, false);
     }
@@ -780,6 +781,49 @@ public class TestObjectStore {
     checkBackendTableSize("COLUMNS_V2", 5);
   }
 
+  @Test
+  public void testTableStatisticsOps() throws Exception {
+    createPartitionedTable(true, true);
+
+    List<ColumnStatistics> tabColStats;
+    try (AutoCloseable c = deadline()) {
+      tabColStats = objectStore.getTableColumnStatistics(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+          Arrays.asList("test_col1", "test_col2"));
+    }
+    Assert.assertEquals(0, tabColStats.size());
+
+    ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, DB1, TABLE1);
+    ColumnStatisticsObj statsObj1 = new ColumnStatisticsObj("test_col1", "int",
+        new ColumnStatisticsData(ColumnStatisticsData._Fields.DECIMAL_STATS, new DecimalColumnStatsData(100, 1000)));
+    ColumnStatisticsObj statsObj2 = new ColumnStatisticsObj("test_col2", "int",
+        new ColumnStatisticsData(ColumnStatisticsData._Fields.DECIMAL_STATS, new DecimalColumnStatsData(200, 2000)));
+    ColumnStatistics colStats = new ColumnStatistics(statsDesc, Arrays.asList(statsObj1, statsObj2));
+    colStats.setEngine(ENGINE);
+    objectStore.updateTableColumnStatistics(colStats, null, 0);
+
+    try (AutoCloseable c = deadline()) {
+      tabColStats = objectStore.getTableColumnStatistics(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+          Arrays.asList("test_col1", "test_col2"));
+    }
+    Assert.assertEquals(1, tabColStats.size());
+    Assert.assertEquals(2, tabColStats.get(0).getStatsObjSize());
+
+    objectStore.deleteTableColumnStatistics(DEFAULT_CATALOG_NAME, DB1, TABLE1, "test_col1", ENGINE);
+    try (AutoCloseable c = deadline()) {
+      tabColStats = objectStore.getTableColumnStatistics(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+          Arrays.asList("test_col1", "test_col2"));
+    }
+    Assert.assertEquals(1, tabColStats.size());
+    Assert.assertEquals(1, tabColStats.get(0).getStatsObjSize());
+
+    objectStore.deleteTableColumnStatistics(DEFAULT_CATALOG_NAME, DB1, TABLE1, "test_col2", ENGINE);
+    try (AutoCloseable c = deadline()) {
+      tabColStats = objectStore.getTableColumnStatistics(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+          Arrays.asList("test_col1", "test_col2"));
+    }
+    Assert.assertEquals(0, tabColStats.size());
+  }
+
   @Test
   public void testGetPartitionStatistics() throws Exception {
     createPartitionedTable(true, true);
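
The new testTableStatisticsOps test drives the Thrift ColumnStatistics model directly: ColumnStatisticsData is a union, and the test picks its DECIMAL_STATS branch through the generated _Fields enum. The following standalone sketch (not part of this commit; the database, table and column names are invented, and the LONG_STATS branch is used instead of DECIMAL_STATS) shows the same construction pattern:

    import java.util.Arrays;

    import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
    import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;

    // Sketch only: assemble a table-level ColumnStatistics payload the same way the test does.
    public class ColumnStatsUnionExample {
      public static void main(String[] args) {
        // "true" marks table-level statistics; partition-level stats pass false and set a partition name.
        ColumnStatisticsDesc desc = new ColumnStatisticsDesc(true, "some_db", "some_table");

        // LongColumnStatsData(numNulls, numDVs) wrapped into the ColumnStatisticsData union.
        ColumnStatisticsData data = new ColumnStatisticsData(
            ColumnStatisticsData._Fields.LONG_STATS, new LongColumnStatsData(0, 42));
        ColumnStatisticsObj obj = new ColumnStatisticsObj("some_col", "bigint", data);

        ColumnStatistics stats = new ColumnStatistics(desc, Arrays.asList(obj));
        stats.setEngine("hive");
        System.out.println(stats);
      }
    }

Because the descriptor still carries the database and table names, dropping DB_NAME and TABLE_NAME from the backing tables changes only how the ObjectStore persists these objects, not the Thrift-facing shape shown here.
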
diff --git a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/BenchmarkTool.java b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/BenchmarkTool.java
index 93556e7f0fa..5e3cb133953 100644
--- a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/BenchmarkTool.java
+++ b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/BenchmarkTool.java
@@ -59,6 +59,7 @@ import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkGetP
 import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkGetPartitions;
 import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkGetPartitionsByFilter;
 import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkGetPartitionsByName;
+import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkGetPartitionsStat;
 import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkGetTable;
 import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkListAllTables;
 import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkListDatabases;
@@ -69,6 +70,7 @@ import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkOpen
 import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkPartitionManagement;
 import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkRenameTable;
 import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkTableCreate;
+import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkUpdatePartitionsStat;
 import static org.apache.hadoop.hive.metastore.tools.Util.getServerUri;
 import static picocli.CommandLine.Command;
 import static picocli.CommandLine.Option;
@@ -287,6 +289,10 @@ public class BenchmarkTool implements Runnable {
             () -> benchmarkGetPartitionsByName(bench, bData, 1))
         .add("getPartitionsByFilter",
             () -> benchmarkGetPartitionsByFilter(bench, bData, 1))
+        .add("getPartitionsStat",
+            () -> benchmarkGetPartitionsStat(bench, bData, 1))
+        .add("updatePartitionsStat",
+            () -> benchmarkUpdatePartitionsStat(bench, bData, 1))
         .add("renameTable",
             () -> benchmarkRenameTable(bench, bData, 1))
         .add("dropDatabase",
@@ -313,6 +319,10 @@ public class BenchmarkTool implements Runnable {
               () -> benchmarkGetPartitionsByName(bench, bData, howMany))
           .add("getPartitionsByFilter" + '.' + howMany,
               () -> benchmarkGetPartitionsByFilter(bench, bData, howMany))
+          .add("getPartitionsStat" + '.' + howMany,
+              () -> benchmarkGetPartitionsStat(bench, bData, howMany))
+          .add("updatePartitionsStat" + '.' + howMany,
+              () -> benchmarkUpdatePartitionsStat(bench, bData, howMany))
           .add("addPartitions" + '.' + howMany,
               () -> benchmarkCreatePartitions(bench, bData, howMany))
           .add("dropPartitions" + '.' + howMany,
diff --git a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java
index fdab0717835..f3f4c74662e 100644
--- a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java
+++ b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hive.metastore.PartitionManagementTask;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.thrift.TException;
 import org.jetbrains.annotations.NotNull;
@@ -48,6 +49,7 @@ import static org.apache.hadoop.hive.metastore.tools.Util.addManyPartitionsNoExc
 import static org.apache.hadoop.hive.metastore.tools.Util.createManyPartitions;
 import static org.apache.hadoop.hive.metastore.tools.Util.createSchema;
 import static org.apache.hadoop.hive.metastore.tools.Util.throwingSupplierWrapper;
+import static org.apache.hadoop.hive.metastore.tools.Util.updateManyPartitionsStatsNoException;
 
 /**
  * Actual benchmark code.
@@ -431,6 +433,52 @@ final class HMSBenchmarks {
     }
   }
 
+  static DescriptiveStatistics benchmarkGetPartitionsStat(@NotNull MicroBenchmark bench,
+                                                          @NotNull BenchData data,
+                                                          int count) {
+    final HMSClient client = data.getClient();
+    String dbName = data.dbName;
+    String tableName = data.tableName;
+
+    BenchmarkUtils.createPartitionedTable(client, dbName, tableName);
+    try {
+      addManyPartitionsNoException(client, dbName, tableName, null,
+              Collections.singletonList("d"), count);
+      List<String> partNames = throwingSupplierWrapper(() ->
+              client.getPartitionNames(dbName, tableName));
+      updateManyPartitionsStatsNoException(client, dbName, tableName, partNames);
+      PartitionsStatsRequest request = new PartitionsStatsRequest(
+              dbName, tableName, Arrays.asList("name"), partNames);
+      return bench.measure(
+          () ->
+              throwingSupplierWrapper(() -> client.getPartitionsStats(request))
+      );
+    } finally {
+      throwingSupplierWrapper(() -> client.dropTable(dbName, tableName));
+    }
+  }
+
+  static DescriptiveStatistics benchmarkUpdatePartitionsStat(@NotNull MicroBenchmark bench,
+                                                             @NotNull BenchData data,
+                                                             int count) {
+    final HMSClient client = data.getClient();
+    String dbName = data.dbName;
+    String tableName = data.tableName;
+
+    BenchmarkUtils.createPartitionedTable(client, dbName, tableName);
+    try {
+      addManyPartitionsNoException(client, dbName, tableName, null,
+              Collections.singletonList("d"), count);
+      List<String> partNames = throwingSupplierWrapper(() ->
+              client.getPartitionNames(dbName, tableName));
+      return bench.measure(
+              () -> updateManyPartitionsStatsNoException(client, dbName, tableName, partNames)
+      );
+    } finally {
+      throwingSupplierWrapper(() -> client.dropTable(dbName, tableName));
+    }
+  }
+
   static DescriptiveStatistics benchmarkRenameTable(@NotNull MicroBenchmark bench,
                                                     @NotNull BenchData data,
                                                     int count) {
diff --git a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java
index 61580aa7b22..8b94c423ce2 100644
--- a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java
+++ b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest;
 import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest;
@@ -35,6 +36,8 @@ import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
 import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;
+import org.apache.hadoop.hive.metastore.api.PartitionsStatsResult;
 import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsRequest;
@@ -276,6 +279,10 @@ final class HMSClient implements AutoCloseable {
     client.add_partitions(partitions);
   }
 
+  void updatePartitionColumnStats(ColumnStatistics colStats) throws TException {
+    client.update_partition_column_statistics(colStats);
+  }
+
 
   List<Partition> listPartitions(@NotNull String dbName,
                                  @NotNull String tableName) throws TException {
@@ -334,6 +341,10 @@ final class HMSClient implements AutoCloseable {
     return client.get_partitions_by_filter(dbName, tableName, filter, (short) -1);
   }
 
+  PartitionsStatsResult getPartitionsStats(PartitionsStatsRequest request) throws TException {
+    return client.get_partitions_statistics_req(request);
+  }
+
   boolean alterTable(@NotNull String dbName, @NotNull String tableName, @NotNull Table newTable)
       throws TException {
     client.alter_table(dbName, tableName, newTable);
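
Together, the two wrappers added here give the tools module a full statistics round trip: write per-partition column statistics with updatePartitionColumnStats, then read them back with getPartitionsStats. A rough sketch, not part of this commit; it would need to live in the same org.apache.hadoop.hive.metastore.tools package because HMSClient is package-private, it assumes an already-connected client and an existing partitioned table, and the column name and values are invented:

    package org.apache.hadoop.hive.metastore.tools;

    import java.util.Collections;
    import java.util.List;

    import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
    import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;
    import org.apache.hadoop.hive.metastore.api.PartitionsStatsResult;
    import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
    import org.apache.thrift.TException;

    // Sketch only: exercise the two new HMSClient wrappers end to end.
    final class PartitionStatsRoundTrip {

      static int writeAndReadBack(HMSClient client, String dbName, String tableName) throws TException {
        List<String> partNames = client.getPartitionNames(dbName, tableName);

        // One synthetic string-column statistic, reused for every partition.
        ColumnStatisticsObj obj = new ColumnStatisticsObj("id", "string",
            new ColumnStatisticsData(ColumnStatisticsData._Fields.STRING_STATS,
                new StringColumnStatsData(100, 10.1, 20, 30)));

        for (String partName : partNames) {
          ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName);
          desc.setPartName(partName);
          ColumnStatistics colStats = new ColumnStatistics(desc, Collections.singletonList(obj));
          colStats.setEngine("hive");
          client.updatePartitionColumnStats(colStats);
        }

        PartitionsStatsRequest request = new PartitionsStatsRequest(
            dbName, tableName, Collections.singletonList("id"), partNames);
        PartitionsStatsResult result = client.getPartitionsStats(request);
        return result.getPartStats().size();
      }
    }

This mirrors what benchmarkUpdatePartitionsStat and benchmarkGetPartitionsStat in HMSBenchmarks measure separately.
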
diff --git a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/Util.java b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/Util.java
index a3e4a41cb5c..264bfae16b1 100644
--- a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/Util.java
+++ b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/Util.java
@@ -21,6 +21,10 @@ package org.apache.hadoop.hive.metastore.tools;
 import com.google.common.base.Joiner;
 import com.google.common.net.HostAndPort;
 import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.DataOperationType;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -31,6 +35,7 @@ import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.thrift.TException;
 import org.jetbrains.annotations.NotNull;
@@ -611,6 +616,24 @@ public final class Util {
     return null;
   }
 
+  static Object updateManyPartitionsStats(@NotNull HMSClient client,
+                                         @NotNull String dbName,
+                                         @NotNull String tableName,
+                                         @NotNull List<String> partNames) throws TException {
+    List<ColumnStatisticsObj> statsObj = new ArrayList<>();
+    ColumnStatisticsData statsData = new ColumnStatisticsData(
+        ColumnStatisticsData._Fields.STRING_STATS, new StringColumnStatsData(100, 10.1, 20, 30));
+    statsObj.add(new ColumnStatisticsObj("id", "int", statsData));
+    ColumnStatisticsDesc partDesc = new ColumnStatisticsDesc(false, dbName, tableName);
+    for (String partName : partNames) {
+      partDesc.setPartName(partName);
+      ColumnStatistics partColStat = new ColumnStatistics(partDesc, statsObj);
+      partColStat.setEngine("hive");
+      client.updatePartitionColumnStats(partColStat);
+    }
+    return null;
+  }
+
   static List<String> generatePartitionNames(@NotNull String prefix, int npartitions) {
     return IntStream.range(0, npartitions).mapToObj(i -> prefix + i).collect(Collectors.toList());
   }
@@ -625,6 +648,14 @@ public final class Util {
             addManyPartitions(client, dbName, tableName, parameters, arguments, npartitions));
   }
 
+  static void updateManyPartitionsStatsNoException(@NotNull HMSClient client,
+                                                   @NotNull String dbName,
+                                                   @NotNull String tableName,
+                                                   @NotNull List<String> partNames) {
+    throwingSupplierWrapper(() ->
+            updateManyPartitionsStats(client, dbName, tableName, partNames));
+  }
+
   /**
    * Filter candidates - find all that match positive matches and do not match
    * any negative matches.
