SourabhBadhya commented on code in PR #4785: URL: https://github.com/apache/hive/pull/4785#discussion_r1379856048
########## iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java: ########## @@ -997,6 +1011,62 @@ public void postGetTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) { } } + @Override + public void preDropPartition(org.apache.hadoop.hive.metastore.api.Table hmsTable, + EnvironmentContext context, + List<org.apache.commons.lang3.tuple.Pair<Integer, byte[]>> partExprs) + throws MetaException { + Table icebergTbl = IcebergTableUtil.getTable(conf, hmsTable); + DeleteFiles deleteFiles = icebergTbl.newDelete(); + List<Expression> expressions = partExprs.stream().map(partExpr -> { + ExprNodeDesc exprNodeDesc = SerializationUtilities + .deserializeObjectWithTypeInformation(partExpr.getRight(), true); + SearchArgument sarg = ConvertAstToSearchArg.create(conf, (ExprNodeGenericFuncDesc) exprNodeDesc); + return HiveIcebergFilterFactory.generateFilterExpression(sarg); + }).collect(Collectors.toList()); + PartitionsTable partitionsTable = (PartitionsTable) MetadataTableUtils + .createMetadataTableInstance(icebergTbl, MetadataTableType.PARTITIONS); + List<PartitionData> partitionList = Lists.newArrayList(); + try (CloseableIterable<FileScanTask> fileScanTasks = partitionsTable.newScan().planFiles()) { Review Comment: This is a fileScanTask on the PARTITIONS metadata table. It's not going to read all the data files of the partitions.
########## iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java: ########## @@ -997,6 +1011,62 @@ public void postGetTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) { } } + @Override + public void preDropPartition(org.apache.hadoop.hive.metastore.api.Table hmsTable, + EnvironmentContext context, + List<org.apache.commons.lang3.tuple.Pair<Integer, byte[]>> partExprs) + throws MetaException { + Table icebergTbl = IcebergTableUtil.getTable(conf, hmsTable); + DeleteFiles deleteFiles = icebergTbl.newDelete(); + List<Expression> expressions = partExprs.stream().map(partExpr -> { + ExprNodeDesc exprNodeDesc = SerializationUtilities + .deserializeObjectWithTypeInformation(partExpr.getRight(), true); + SearchArgument sarg = ConvertAstToSearchArg.create(conf, (ExprNodeGenericFuncDesc) exprNodeDesc); + return HiveIcebergFilterFactory.generateFilterExpression(sarg); + }).collect(Collectors.toList()); + PartitionsTable partitionsTable = (PartitionsTable) MetadataTableUtils + .createMetadataTableInstance(icebergTbl, MetadataTableType.PARTITIONS); + List<PartitionData> partitionList = Lists.newArrayList(); + try (CloseableIterable<FileScanTask> fileScanTasks = partitionsTable.newScan().planFiles()) { + fileScanTasks.forEach(task -> { + partitionList.addAll(Sets.newHashSet(CloseableIterable.transform(task.asDataTask().rows(), row -> { + StructProjection data = row.get(0, StructProjection.class); + PartitionSpec pSpec = icebergTbl.spec(); + PartitionData partitionData = new PartitionData(pSpec.partitionType()); + for (int index = 0; index < pSpec.fields().size(); index++) { + partitionData.set(index, data.get(index, Object.class)); + } + return partitionData; + }))); + }); + + Set<PartitionData> refinedPartList = Sets.newHashSet(); + for (Expression expr : expressions) { Review Comment: Done. -- This is an automated message from the Apache Git Service. 
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: gitbox-unsubscribe@hive.apache.org For queries about this service, please contact Infrastructure at: users@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: gitbox-unsubscribe@hive.apache.org For additional commands, e-mail: gitbox-help@hive.apache.org