SourabhBadhya commented on code in PR #4785:
URL: https://github.com/apache/hive/pull/4785#discussion_r1379862563


##########
iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java:
##########
@@ -997,6 +1011,62 @@ public void postGetTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) {
     }
   }
 
+  @Override
+  public void preDropPartition(org.apache.hadoop.hive.metastore.api.Table hmsTable,
+      EnvironmentContext context,
+      List<org.apache.commons.lang3.tuple.Pair<Integer, byte[]>> partExprs)
+      throws MetaException {
+    Table icebergTbl = IcebergTableUtil.getTable(conf, hmsTable);
+    DeleteFiles deleteFiles = icebergTbl.newDelete();
+    List<Expression> expressions = partExprs.stream().map(partExpr -> {
+      ExprNodeDesc exprNodeDesc = SerializationUtilities
+          .deserializeObjectWithTypeInformation(partExpr.getRight(), true);
+      SearchArgument sarg = ConvertAstToSearchArg.create(conf, (ExprNodeGenericFuncDesc) exprNodeDesc);
+      return HiveIcebergFilterFactory.generateFilterExpression(sarg);
+    }).collect(Collectors.toList());
+    PartitionsTable partitionsTable = (PartitionsTable) MetadataTableUtils
+        .createMetadataTableInstance(icebergTbl, MetadataTableType.PARTITIONS);
+    List<PartitionData> partitionList = Lists.newArrayList();
+    try (CloseableIterable<FileScanTask> fileScanTasks = partitionsTable.newScan().planFiles()) {
+      fileScanTasks.forEach(task -> {
+        partitionList.addAll(Sets.newHashSet(CloseableIterable.transform(task.asDataTask().rows(), row -> {
+          StructProjection data = row.get(0, StructProjection.class);
+          PartitionSpec pSpec = icebergTbl.spec();
+          PartitionData partitionData = new PartitionData(pSpec.partitionType());
+          for (int index = 0; index < pSpec.fields().size(); index++) {
+            partitionData.set(index, data.get(index, Object.class));
+          }
+          return partitionData;
+        })));
+      });
+
+      Set<PartitionData> refinedPartList = Sets.newHashSet();
+      for (Expression expr : expressions) {
+        refinedPartList.addAll(partitionList.stream().filter(partitionData -> {
+          ResidualEvaluator resEval = ResidualEvaluator.of(icebergTbl.spec(), expr, false);
+          return resEval.residualFor(partitionData).isEquivalentTo(Expressions.alwaysTrue());
+        }).collect(Collectors.toList()));
+      }
+
+      Expression partitionSetFilter = Expressions.alwaysFalse();
+      for (PartitionData partitionData : refinedPartList) {
+        Expression partFilter = Expressions.alwaysTrue();
+        for (int i = 0; i < icebergTbl.spec().fields().size(); i += 1) {
+          PartitionField field = icebergTbl.spec().fields().get(i);
+          partFilter = Expressions.and(
+              partFilter, Expressions.equal(field.name(), partitionData.get(i, Object.class)));
+        }
+        partitionSetFilter = Expressions.or(partitionSetFilter, partFilter);
+      }
+
+      deleteFiles.deleteFromRowFilter(partitionSetFilter);
+      deleteFiles.commit();
+    } catch (IOException e) {
+      throw new MetaException(String.format("Error while fetching the partitions due to: %s", e));
+    }
+    context.putToProperties("dropPartitionSkip", "true");
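
A note on the residual check above: a partition qualifies for the drop only when the deserialized drop expression, bound to that partition's values, leaves a residual equivalent to alwaysTrue(), i.e. every row in the partition is guaranteed to match. Below is a minimal standalone sketch of that check using the same Iceberg APIs as the patch; the schema, spec, and partition values are hypothetical, not taken from the PR:

import org.apache.iceberg.PartitionData;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.expressions.ResidualEvaluator;
import org.apache.iceberg.types.Types;

public class ResidualCheckSketch {
  public static void main(String[] args) {
    // Hypothetical table partitioned by identity(dept).
    Schema schema = new Schema(
        Types.NestedField.required(1, "id", Types.LongType.get()),
        Types.NestedField.required(2, "dept", Types.StringType.get()));
    PartitionSpec spec = PartitionSpec.builderFor(schema).identity("dept").build();

    // Drop expression as would result from something like
    // ALTER TABLE ... DROP PARTITION (dept = 'hr').
    ResidualEvaluator resEval =
        ResidualEvaluator.of(spec, Expressions.equal("dept", "hr"), false);

    // Partition tuple dept=hr: residual simplifies to alwaysTrue() -> dropped.
    PartitionData hr = new PartitionData(spec.partitionType());
    hr.set(0, "hr");
    System.out.println(
        resEval.residualFor(hr).isEquivalentTo(Expressions.alwaysTrue()));  // true

    // Partition tuple dept=it: residual is alwaysFalse() -> kept.
    PartitionData it = new PartitionData(spec.partitionType());
    it.set(0, "it");
    System.out.println(
        resEval.residualFor(it).isEquivalentTo(Expressions.alwaysTrue()));  // false
  }
}

The matching partitions are then turned into an equality filter per partition field and OR-ed together for DeleteFiles.deleteFromRowFilter, as in the loops above.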

Review Comment:
   Introduced in HiveMetastoreClient.SKIP_DROP_PARTITION. Done.
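
A minimal sketch of the resolved line, assuming SKIP_DROP_PARTITION is a String property key on HiveMetastoreClient as the comment above indicates (the constant's exact name and location are taken from the comment, not verified against the patch):

// Hypothetical: the "dropPartitionSkip" magic string replaced by the named constant.
context.putToProperties(HiveMetastoreClient.SKIP_DROP_PARTITION, "true");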



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: gitbox-unsubscr...@hive.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

