This is an automated email from the ASF dual-hosted git repository.
dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 05201478f80 HIVE-28219: Support drop partitions by names in IMetaStoreClient (#5223)
05201478f80 is described below
commit 05201478f804904ccf098583b0d5159a3264eeb3
Author: Wechar Yu <[email protected]>
AuthorDate: Thu Aug 28 16:17:44 2025 +0800
HIVE-28219: Support drop partitions by names in IMetaStoreClient (#5223)
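Usage sketch (illustrative, not part of the committed change): dropping partitions by name through the new IMetaStoreClient entry point. The table coordinates below are made up, and the client is assumed to be already connected.

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hive.common.TableName;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.PartitionDropOptions;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
    import org.apache.thrift.TException;

    class DropByNamesSketch {
      static List<Partition> dropByNames(IMetaStoreClient client) throws TException {
        TableName tableName = new TableName("hive", "default", "sales"); // illustrative coordinates
        // RequestPartsSpec is a Thrift union: set exactly one of names/exprs.
        RequestPartsSpec partsSpec = new RequestPartsSpec();
        partsSpec.setNames(Arrays.asList("ds=20231129/hr=10", "ds=20231129/hr=11"));
        // A null EnvironmentContext is allowed; the client creates one if needed.
        return client.dropPartitions(tableName, partsSpec, PartitionDropOptions.instance(), null);
      }
    }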
---
.../iceberg/mr/hive/HiveIcebergMetaHook.java | 19 ++++++++
.../org/apache/hadoop/hive/ql/metadata/Hive.java | 23 ++++++++--
.../ql/metadata/SessionHiveMetaStoreClient.java | 52 ++++++++++++++--------
.../apache/hadoop/hive/ql/metadata/TestHive.java | 42 +++++++++++++++++
.../hadoop/hive/metastore/IMetaStoreClient.java | 33 ++++++++++++++
.../hive/metastore/client/BaseMetaStoreClient.java | 20 +++++++++
.../client/HookEnabledMetaStoreClient.java | 12 ++---
.../metastore/client/MetaStoreClientWrapper.java | 8 ++--
.../client/ThriftHiveMetaStoreClient.java | 31 ++++---------
.../apache/hadoop/hive/metastore/HiveMetaHook.java | 16 +++++++
10 files changed, 203 insertions(+), 53 deletions(-)
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
index 50224923400..a7389f7f20f 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
@@ -45,9 +45,11 @@
import org.apache.hadoop.hive.metastore.PartitionDropOptions;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.CreateTableRequest;
+import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
@@ -1217,6 +1219,23 @@ public void preDropPartitions(org.apache.hadoop.hive.metastore.api.Table hmsTabl
} catch (IOException e) {
throw new MetaException(String.format("Error while fetching the partitions due to: %s", e));
}
}
+ }
+
+ @Override
+ public void preDropPartitions(org.apache.hadoop.hive.metastore.api.Table hmsTable,
+ EnvironmentContext context,
+ RequestPartsSpec partsSpec) throws MetaException {
+ if (partsSpec.isSetExprs()) {
+ List<DropPartitionsExpr> exprs = partsSpec.getExprs();
+ List<org.apache.commons.lang3.tuple.Pair<Integer, byte[]>> partExprs = Lists.newArrayList();
+ for (DropPartitionsExpr expr : exprs) {
+ partExprs.add(
+ org.apache.commons.lang3.tuple.Pair.of(expr.getPartArchiveLevel(), expr.getExpr()));
+ }
+ preDropPartitions(hmsTable, context, partExprs);
+ } else if (partsSpec.isSetNames()) {
+ preTruncateTable(hmsTable, context, partsSpec.getNames());
+ }
context.putToProperties(ThriftHiveMetaStoreClient.SKIP_DROP_PARTITION, "true");
}
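The hook above dispatches on whichever arm of the RequestPartsSpec union is set. A minimal sketch of the two shapes (the expression bytes below are a placeholder; real callers pass a serialized partition filter):

    import java.util.Arrays;
    import com.google.common.collect.Lists;
    import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr;
    import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;

    class PartsSpecShapes {
      static RequestPartsSpec byNames() {
        RequestPartsSpec spec = new RequestPartsSpec();
        spec.setNames(Arrays.asList("ds=20231129/hr=10")); // exact partition names
        return spec;
      }

      static RequestPartsSpec byExprs() {
        DropPartitionsExpr expr = new DropPartitionsExpr();
        expr.setExpr(new byte[0]);   // placeholder; callers pass a serialized filter expression
        expr.setPartArchiveLevel(0); // archive level, 0 when the partition is not archived
        RequestPartsSpec spec = new RequestPartsSpec();
        spec.setExprs(Lists.newArrayList(expr));
        return spec;
      }
    }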
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index eec284a9327..82964ddb3c1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -87,6 +87,7 @@
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest;
import org.apache.hadoop.hive.metastore.api.DropDatabaseRequest;
+import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.FireEventRequest;
@@ -126,6 +127,7 @@
import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
import org.apache.hadoop.hive.metastore.api.Role;
import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
import org.apache.hadoop.hive.metastore.api.SQLAllTableConstraints;
@@ -4065,8 +4067,23 @@ public boolean dropPartition(String dbName, String tableName, List<String> parti
public List<Partition> dropPartitions(String dbName, String tableName,
List<Pair<Integer, byte[]>> partitionExpressions,
PartitionDropOptions dropOptions) throws HiveException {
+ RequestPartsSpec rps = new RequestPartsSpec();
+ List<DropPartitionsExpr> exprs = new ArrayList<>(partitionExpressions.size());
+
+ for (Pair<Integer, byte[]> partExpr : partitionExpressions) {
+ DropPartitionsExpr dpe = new DropPartitionsExpr();
+ dpe.setExpr(partExpr.getRight());
+ dpe.setPartArchiveLevel(partExpr.getLeft());
+ exprs.add(dpe);
+ }
+ rps.setExprs(exprs);
+ return dropPartitions(new TableName(getDefaultCatalog(conf), dbName, tableName), rps, dropOptions);
+ }
+
+ public List<Partition> dropPartitions(TableName tableName,
+ RequestPartsSpec partsSpec, PartitionDropOptions dropOptions) throws HiveException {
try {
- Table table = getTable(dbName, tableName);
+ Table table = getTable(tableName.getDb(), tableName.getTable());
if (!dropOptions.deleteData) {
AcidUtils.TableSnapshot snapshot = AcidUtils.getTableSnapshot(conf, table, true);
if (snapshot != null) {
@@ -4076,8 +4093,8 @@ public List<Partition> dropPartitions(String dbName, String tableName,
.map(ss -> ss.getTxnMgr().getCurrentTxnId()).orElse(0L);
dropOptions.setTxnId(txnId);
}
- List<org.apache.hadoop.hive.metastore.api.Partition> partitions = getMSC().dropPartitions(dbName, tableName,
- partitionExpressions, dropOptions);
+ List<org.apache.hadoop.hive.metastore.api.Partition> partitions = getMSC().dropPartitions(
+ tableName, partsSpec, dropOptions, null);
return convertFromMetastore(table, partitions);
} catch (NoSuchObjectException e) {
throw new HiveException("Partition or table doesn't exist.", e);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index fee1897cca7..5a897cfd24b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -45,6 +45,7 @@
import org.apache.hadoop.hive.metastore.api.CreateTableRequest;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.DeleteColumnStatisticsRequest;
+import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
@@ -82,6 +83,7 @@
import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
import org.apache.hadoop.hive.metastore.api.PrimaryKeysResponse;
import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
@@ -92,6 +94,7 @@
import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
import org.apache.hadoop.hive.metastore.api.UniqueConstraintsResponse;
+import org.apache.hadoop.hive.metastore.cache.CachedStore;
import org.apache.hadoop.hive.metastore.client.MetaStoreClientWrapper;
import org.apache.hadoop.hive.metastore.client.ThriftHiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
@@ -1559,28 +1562,39 @@ public boolean dropPartition(String catName, String dbName, String tableName, St
}
@Override
- public List<Partition> dropPartitions(String catName, String dbName, String tblName,
- List<Pair<Integer, byte[]>> partExprs, PartitionDropOptions options, EnvironmentContext context)
+ public List<Partition> dropPartitions(TableName tableName,
+ RequestPartsSpec partsSpec, PartitionDropOptions options, EnvironmentContext context)
throws TException {
- if (isDefaultCatalog(catName)) {
- Table table = getTempTable(dbName, tblName);
+ if (isDefaultCatalog(tableName.getCat())) {
+ Table table = getTempTable(tableName.getDb(), tableName.getTable());
if (table != null) {
TempTable tt = getPartitionedTempTable(table);
+ List<List<String>> partValues = new ArrayList<>();
+ if (partsSpec.isSetExprs()) {
+ List<DropPartitionsExpr> exprs = partsSpec.getExprs();
+ for (DropPartitionsExpr expr : exprs) {
+ String filter = generateJDOFilter(table, expr.getExpr(), conf.get(HiveConf.ConfVars.DEFAULT_PARTITION_NAME.varname));
+ List<Partition> partitions = tt.listPartitionsByFilter(filter);
+ for (Partition p : partitions) {
+ partValues.add(p.getValues());
+ }
+ }
+ } else if (partsSpec.isSetNames()) {
+ List<String> partNames = partsSpec.getNames();
+ for (String partName : partNames) {
+ partValues.add(CachedStore.partNameToVals(partName));
+ }
+ }
+ boolean purgeData = options != null ? options.purgeData : true;
+ boolean deleteData = options != null ? options.deleteData : true;
List<Partition> result = new ArrayList<>();
- for (Pair<Integer, byte[]> pair : partExprs) {
- byte[] expr = pair.getRight();
- String filter = generateJDOFilter(table, expr,
- conf.get(HiveConf.ConfVars.DEFAULT_PARTITION_NAME.varname));
- List<Partition> partitions = tt.listPartitionsByFilter(filter);
- for (Partition p : partitions) {
- Partition droppedPartition = tt.dropPartition(p.getValues());
- if (droppedPartition != null) {
- result.add(droppedPartition);
- boolean purgeData = options != null ? options.purgeData : true;
- boolean deleteData = options != null ? options.deleteData : true;
- if (deleteData && !tt.isExternal()) {
- deletePartitionLocation(droppedPartition, purgeData);
- }
+ for (List<String> partValue : partValues) {
+ Partition droppedPartition = tt.dropPartition(partValue);
+ if (droppedPartition != null) {
+ result.add(droppedPartition);
+ if (deleteData && !tt.isExternal()) {
+ deletePartitionLocation(droppedPartition, purgeData);
}
}
}
@@ -1588,7 +1602,7 @@ public List<Partition> dropPartitions(String catName, String dbName, String tblN
}
}
- return delegate.dropPartitions(catName, dbName, tblName, partExprs, options, context);
+ return delegate.dropPartitions(tableName, partsSpec, options, context);
}
@Override
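For the temp-table path above, the name-to-values conversion is assumed to split a canonical partition name into its value list in key order; a tiny sketch of the expected behavior:

    import java.util.List;
    import org.apache.hadoop.hive.metastore.cache.CachedStore;

    class PartNameDemo {
      static List<String> demo() {
        // "ds=20231129/hr=10" is expected to yield ["20231129", "10"]
        return CachedStore.partNameToVals("ds=20231129/hr=10");
      }
    }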
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index f722520d590..0fb4b06611e 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
@@ -48,6 +49,7 @@
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
@@ -745,6 +747,46 @@ public void testDropPartitionsWithPurge() throws Exception {
}
}
+ @Test
+ public void testDropPartitionsByNames() throws Throwable {
+ String catName = Warehouse.DEFAULT_CATALOG_NAME;
+ String dbName = Warehouse.DEFAULT_DATABASE_NAME;
+ String tblName = "table_for_testDropPartitionsByNames";
+ TableName tableName = new TableName(catName, dbName, tblName);
+
+ Table table = createPartitionedTable(dbName, tblName);
+ for (int i = 10; i <= 12; i++) {
+ Map<String, String> partitionSpec = new ImmutableMap.Builder<String, String>()
+ .put("ds", "20231129")
+ .put("hr", String.valueOf(i))
+ .build();
+ hm.createPartition(table, partitionSpec);
+ }
+
+ List<Partition> partitions = hm.getPartitions(table);
+ assertEquals(3, partitions.size());
+
+ RequestPartsSpec partsSpec = new RequestPartsSpec();
+ partsSpec.setNames(Arrays.asList("ds=20231129/hr=10"));
+ hm.dropPartitions(tableName, partsSpec, PartitionDropOptions.instance());
+ assertEquals(2, hm.getPartitions(table).size());
+
+ try {
+ // drop missing partition name
+ partsSpec.setNames(Arrays.asList("ds=20231129/hr=10",
"ds=20231129/hr=11"));
+ hm.dropPartitions(tableName, partsSpec, PartitionDropOptions.instance());
+ fail("Expected exception");
+ } catch (HiveException e) {
+ // expected
+ assertEquals("Some partitions to drop are missing",
e.getCause().getMessage());
+ assertEquals(2, hm.getPartitions(table).size());
+ }
+
+ partsSpec.setNames(Arrays.asList("ds=20231129/hr=12",
"ds=20231129/hr=11"));
+ hm.dropPartitions(tableName, partsSpec, PartitionDropOptions.instance());
+ assertEquals(0, hm.getPartitions(table).size());
+ }
+
@Test
public void testDropMissingPartitionsByFilter() throws Throwable {
String dbName = Warehouse.DEFAULT_DATABASE_NAME;
diff --git a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 0b668c155a6..793cfffceda 100644
--- a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -1980,6 +1980,8 @@ boolean dropPartition(String catName, String db_name, String tbl_name, List<Stri
/**
* Drop partitions based on an expression.
+ * @deprecated since 4.1.0, will be removed in 5.0.0
+ * use {@link #dropPartitions(TableName, RequestPartsSpec, PartitionDropOptions, EnvironmentContext)} instead.
* @param dbName database name.
* @param tblName table name.
* @param partExprs I don't understand this fully, so can't completely explain it. The second
@@ -1994,12 +1996,15 @@ boolean dropPartition(String catName, String db_name, String tbl_name, List<Stri
* @throws MetaException error access the RDBMS or storage.
* @throws TException Thrift transport error.
*/
+ @Deprecated
List<Partition> dropPartitions(String dbName, String tblName,
List<Pair<Integer, byte[]>> partExprs,
boolean deleteData,
boolean ifExists) throws NoSuchObjectException, MetaException, TException;
/**
* Drop partitions based on an expression.
+ * @deprecated since 4.1.0, will be removed in 5.0.0
+ * use {@link #dropPartitions(TableName, RequestPartsSpec, PartitionDropOptions, EnvironmentContext)} instead.
* @param catName catalog name.
* @param dbName database name.
* @param tblName table name.
@@ -2015,6 +2020,7 @@ List<Partition> dropPartitions(String dbName, String tblName,
* @throws MetaException error access the RDBMS or storage.
* @throws TException Thrift transport error.
*/
+ @Deprecated
default List<Partition> dropPartitions(String catName, String dbName, String tblName,
List<Pair<Integer, byte[]>> partExprs,
boolean deleteData, boolean ifExists)
@@ -2027,6 +2033,8 @@ default List<Partition> dropPartitions(String catName, String dbName, String tbl
/**
* Drop partitions based on an expression.
+ * @deprecated since 4.1.0, will be removed in 5.0.0
+ * use {@link #dropPartitions(TableName, RequestPartsSpec, PartitionDropOptions, EnvironmentContext)} instead.
* @param catName catalog name.
* @param dbName database name.
* @param tblName table name.
@@ -2044,6 +2052,7 @@ default List<Partition> dropPartitions(String catName, String dbName, String tbl
* @throws MetaException error access the RDBMS or storage.
* @throws TException Thrift transport error.
*/
+ @Deprecated
default List<Partition> dropPartitions(String catName, String dbName, String tblName,
List<Pair<Integer, byte[]>> partExprs, boolean deleteData,
boolean ifExists, boolean needResults)
@@ -2057,6 +2066,8 @@ default List<Partition> dropPartitions(String catName, String dbName, String tbl
/**
* Generalization of dropPartitions(),
+ * @deprecated since 4.1.0, will be removed in 5.0.0
+ * use {@link #dropPartitions(TableName, RequestPartsSpec, PartitionDropOptions, EnvironmentContext)} instead.
* @param dbName Name of the database
* @param tblName Name of the table
* @param partExprs Partition-specification
@@ -2066,6 +2077,7 @@ default List<Partition> dropPartitions(String catName, String dbName, String tbl
* @throws MetaException error access the RDBMS or storage.
* @throws TException On failure
*/
+ @Deprecated
List<Partition> dropPartitions(String dbName, String tblName,
List<Pair<Integer, byte[]>> partExprs,
PartitionDropOptions options)
@@ -2073,6 +2085,8 @@ List<Partition> dropPartitions(String dbName, String tblName,
/**
* Generalization of dropPartitions(),
+ * @deprecated since 4.1.0, will be removed in 5.0.0
+ * use {@link #dropPartitions(TableName, RequestPartsSpec, PartitionDropOptions, EnvironmentContext)} instead.
* @param catName catalog name
* @param dbName Name of the database
* @param tblName Name of the table
@@ -2083,15 +2097,34 @@ List<Partition> dropPartitions(String dbName, String tblName,
* @throws MetaException error access the RDBMS or storage.
* @throws TException On failure
*/
+ @Deprecated
List<Partition> dropPartitions(String catName, String dbName, String tblName,
List<Pair<Integer, byte[]>> partExprs,
PartitionDropOptions options)
throws NoSuchObjectException, MetaException, TException;
+ /**
+ * @deprecated since 4.1.0, will be removed in 5.0.0
+ * use {@link #dropPartitions(TableName, RequestPartsSpec, PartitionDropOptions, EnvironmentContext)} instead.
+ */
+ @Deprecated
List<Partition> dropPartitions(String catName, String dbName, String tblName,
List<Pair<Integer, byte[]>> partExprs, PartitionDropOptions options, EnvironmentContext context)
throws NoSuchObjectException, MetaException, TException;
+ /**
+ * Drop partitions based on the request partitions specification.
+ * @param tableName Name of the table.
+ * @param partsSpec Specification of the partitions to drop.
+ * @param options Options for dropping partitions.
+ * @param context Environment context for the operation.
+ * @return List of Partitions dropped.
+ * @throws TException thrift transport error.
+ */
+ List<Partition> dropPartitions(TableName tableName,
+ RequestPartsSpec partsSpec, PartitionDropOptions options, EnvironmentContext context)
+ throws TException;
+
/**
* Drop a partition.
* @param db_name database name.
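A sketch of threading drop options through the new signature; the fluent setters correspond to the public fields read by the Thrift client below, and the values are illustrative:

    import java.util.List;
    import org.apache.hadoop.hive.common.TableName;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.PartitionDropOptions;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
    import org.apache.thrift.TException;

    class DropOptionsSketch {
      static List<Partition> drop(IMetaStoreClient client, TableName tableName,
          RequestPartsSpec partsSpec) throws TException {
        PartitionDropOptions options = PartitionDropOptions.instance()
            .deleteData(true)      // remove the partition data, not just the metadata
            .ifExists(true)        // do not fail when nothing matches
            .purgeData(false)      // keep trash semantics instead of purging directly
            .returnResults(true);  // ask the server to return the dropped partitions
        return client.dropPartitions(tableName, partsSpec, options, null);
      }
    }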
diff --git a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/BaseMetaStoreClient.java b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/BaseMetaStoreClient.java
index 5dd1703b0e8..8a624735a14 100644
--- a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/BaseMetaStoreClient.java
+++ b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/BaseMetaStoreClient.java
@@ -33,7 +33,10 @@
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.thrift.TException;
+import com.google.common.collect.Lists;
+
import java.nio.ByteBuffer;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
@@ -557,6 +560,23 @@ public final List<Partition> dropPartitions(String catName, String dbName, Strin
return dropPartitions(catName, dbName, tblName, partExprs, options, null);
}
+ @Override
+ public List<Partition> dropPartitions(String catName, String dbName, String tblName,
+ List<Pair<Integer, byte[]>> partExprs, PartitionDropOptions options, EnvironmentContext context)
+ throws NoSuchObjectException, MetaException, TException {
+ RequestPartsSpec rps = new RequestPartsSpec();
+ List<DropPartitionsExpr> exprs = new ArrayList<>(partExprs.size());
+
+ for (Pair<Integer, byte[]> partExpr : partExprs) {
+ DropPartitionsExpr dpe = new DropPartitionsExpr();
+ dpe.setExpr(partExpr.getRight());
+ dpe.setPartArchiveLevel(partExpr.getLeft());
+ exprs.add(dpe);
+ }
+ rps.setExprs(exprs);
+ return dropPartitions(new TableName(catName, dbName, tblName), rps, options, context);
+ }
+
@Override
public final boolean dropPartition(String db_name, String tbl_name, String name, boolean deleteData)
throws NoSuchObjectException, MetaException, TException {
diff --git a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/HookEnabledMetaStoreClient.java b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/HookEnabledMetaStoreClient.java
index d8f4174249e..0c4dc3061ca 100644
--- a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/HookEnabledMetaStoreClient.java
+++ b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/HookEnabledMetaStoreClient.java
@@ -20,6 +20,7 @@
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook;
import org.apache.hadoop.hive.metastore.HiveMetaHook;
import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
@@ -35,6 +36,7 @@
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
@@ -311,18 +313,18 @@ private void dropDatabaseCascadePerDb(DropDatabaseRequest req, List<String> tabl
}
@Override
- public List<Partition> dropPartitions(String catName, String dbName, String tblName,
- List<Pair<Integer, byte[]>> partExprs, PartitionDropOptions options, EnvironmentContext context)
+ public List<Partition> dropPartitions(TableName tableName,
+ RequestPartsSpec partsSpec, PartitionDropOptions options, EnvironmentContext context)
throws TException {
- Table table = delegate.getTable(catName, dbName, tblName);
+ Table table = delegate.getTable(tableName.getCat(), tableName.getDb(), tableName.getTable());
HiveMetaHook hook = getHook(table);
if (hook != null) {
if (context == null) {
context = new EnvironmentContext();
}
- hook.preDropPartitions(table, context, partExprs);
+ hook.preDropPartitions(table, context, partsSpec);
}
- return delegate.dropPartitions(catName, dbName, tblName, partExprs, options, context);
+ return delegate.dropPartitions(tableName, partsSpec, options, context);
}
@Override
diff --git a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/MetaStoreClientWrapper.java b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/MetaStoreClientWrapper.java
index a200ee4a14b..4d63e4f8812 100644
--- a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/MetaStoreClientWrapper.java
+++ b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/MetaStoreClientWrapper.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.hive.metastore.client;
-import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
@@ -504,10 +504,10 @@ public boolean dropPartition(String catName, String db_name, String tbl_name, Li
}
@Override
- public List<Partition> dropPartitions(String catName, String dbName, String tblName,
- List<Pair<Integer, byte[]>> partExprs, PartitionDropOptions options, EnvironmentContext context)
+ public List<Partition> dropPartitions(TableName tableName,
+ RequestPartsSpec partsSpec, PartitionDropOptions options, EnvironmentContext context)
throws NoSuchObjectException, MetaException, TException {
- return delegate.dropPartitions(catName, dbName, tblName, partExprs, options, context);
+ return delegate.dropPartitions(tableName, partsSpec, options, context);
}
@Override
diff --git a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java
index a2aafd0f3da..24a50145364 100644
--- a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java
@@ -25,6 +25,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl;
@@ -1583,44 +1584,30 @@ public boolean dropPartition(String catName, String db_name, String tbl_name,
}
@Override
- public List<Partition> dropPartitions(String catName, String dbName, String tblName,
- List<Pair<Integer, byte[]>> partExprs, PartitionDropOptions options, EnvironmentContext context)
+ public List<Partition> dropPartitions(TableName tableName,
+ RequestPartsSpec partsSpec, PartitionDropOptions options, EnvironmentContext context)
throws NoSuchObjectException, MetaException, TException {
+ DropPartitionsRequest req = new DropPartitionsRequest(tableName.getDb(), tableName.getTable(), partsSpec);
+ req.setCatName(tableName.getCat());
+ req.setDeleteData(options.deleteData);
+ req.setNeedResult(options.returnResults);
+ req.setIfExists(options.ifExists);
+
if (context == null) {
context = new EnvironmentContext();
}
-
if (context.getProperties() != null &&
Boolean.parseBoolean(context.getProperties().get(SKIP_DROP_PARTITION))) {
return Lists.newArrayList();
}
-
- RequestPartsSpec rps = new RequestPartsSpec();
- List<DropPartitionsExpr> exprs = new ArrayList<>(partExprs.size());
-
- for (Pair<Integer, byte[]> partExpr : partExprs) {
- DropPartitionsExpr dpe = new DropPartitionsExpr();
- dpe.setExpr(partExpr.getRight());
- dpe.setPartArchiveLevel(partExpr.getLeft());
- exprs.add(dpe);
- }
- rps.setExprs(exprs);
- DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps);
- req.setCatName(catName);
- req.setDeleteData(options.deleteData);
- req.setNeedResult(options.returnResults);
- req.setIfExists(options.ifExists);
-
if (options.purgeData) {
LOG.info("Dropped partitions will be purged!");
context.putToProperties("ifPurge", "true");
}
if (options.writeId != null) {
- context = Optional.ofNullable(context).orElse(new EnvironmentContext());
context.putToProperties(hive_metastoreConstants.WRITE_ID, options.writeId.toString());
}
if (options.txnId != null) {
- context = Optional.ofNullable(context).orElse(new EnvironmentContext());
context.putToProperties(hive_metastoreConstants.TXN_ID, options.txnId.toString());
}
req.setEnvironmentContext(context);
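As the reordered logic above shows, a hook can short-circuit the server-side drop entirely, which is what HiveIcebergMetaHook does. A minimal sketch of that contract:

    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
    import org.apache.hadoop.hive.metastore.client.ThriftHiveMetaStoreClient;

    class SkipDropDemo {
      static EnvironmentContext skipContext() {
        EnvironmentContext context = new EnvironmentContext();
        // With this property set, dropPartitions() returns an empty list
        // without sending a DropPartitionsRequest to the metastore.
        context.putToProperties(ThriftHiveMetaStoreClient.SKIP_DROP_PARTITION, "true");
        return context;
      }
    }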
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
index 11dfa1120c8..2569335061e 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.hive.metastore.api.CreateTableRequest;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
import org.apache.hadoop.hive.metastore.api.Table;
import com.google.common.collect.ImmutableList;
@@ -217,13 +218,28 @@ default void postGetTable(Table table) {
/**
* Called before dropping the partitions from the table in the metastore during ALTER TABLE DROP PARTITION.
+ * @deprecated since 4.1.0, will be removed in 5.0.0
+ * use {@link #preDropPartitions(Table, EnvironmentContext, RequestPartsSpec)} instead.
* @param table table whose partition needs to be dropped
* @param context context of the operation
* @param partExprs List of partition expressions
* @throws MetaException
*/
+ @Deprecated
default void preDropPartitions(Table table,
EnvironmentContext context, List<Pair<Integer, byte[]>> partExprs) throws MetaException {
// Do nothing
}
+
+ /**
+ * Called before dropping the partitions from the table in the metastore during ALTER TABLE DROP PARTITION.
+ * @param table table whose partition needs to be dropped
+ * @param context context of the operation
+ * @param partsSpec request partition specification
+ * @throws MetaException
+ */
+ default void preDropPartitions(Table table,
+ EnvironmentContext context, RequestPartsSpec partsSpec) throws MetaException {
+ // Do nothing
+ }
}
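A sketch of a custom hook overriding the new callback; the no-op bodies below are assumed to cover the interface's remaining abstract lifecycle callbacks, and the class itself is illustrative.

    import org.apache.hadoop.hive.metastore.HiveMetaHook;
    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
    import org.apache.hadoop.hive.metastore.api.Table;

    public class AuditingMetaHook implements HiveMetaHook {
      @Override
      public void preDropPartitions(Table table, EnvironmentContext context,
          RequestPartsSpec partsSpec) throws MetaException {
        if (partsSpec.isSetNames()) {
          // exact names were requested, e.g. "ds=20231129/hr=10": audit or veto here
        } else if (partsSpec.isSetExprs()) {
          // expression-based drop; see the union dispatch in HiveIcebergMetaHook above
        }
      }

      // Remaining lifecycle callbacks are no-ops for this sketch.
      @Override public void preCreateTable(Table table) throws MetaException {}
      @Override public void rollbackCreateTable(Table table) throws MetaException {}
      @Override public void commitCreateTable(Table table) throws MetaException {}
      @Override public void preDropTable(Table table) throws MetaException {}
      @Override public void rollbackDropTable(Table table) throws MetaException {}
      @Override public void commitDropTable(Table table, boolean deleteData) throws MetaException {}
    }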