TheR1sing3un commented on code in PR #12537:
URL: https://github.com/apache/hudi/pull/12537#discussion_r1900523204
##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/run/strategy/MultipleSparkJobExecutionStrategy.java:
##########
@@ -387,70 +332,20 @@ private HoodieData<HoodieRecord<T>> readRecordsForGroupWithLogs(JavaSparkContext
    */
   private HoodieData<HoodieRecord<T>> readRecordsForGroupBaseFiles(JavaSparkContext jsc,
                                                                    List<ClusteringOperation> clusteringOps) {
-    StorageConfiguration<?> storageConf = getHoodieTable().getStorageConf();
-    HoodieWriteConfig writeConfig = getWriteConfig();
-
-    // NOTE: It's crucial to make sure that we don't capture whole "this" object into the
-    //       closure, as this might lead to issues attempting to serialize its nested fields
-    HoodieTableConfig tableConfig = getHoodieTable().getMetaClient().getTableConfig();
-    String bootstrapBasePath = tableConfig.getBootstrapBasePath().orElse(null);
-    Option<String[]> partitionFields = tableConfig.getPartitionFields();
-
     int readParallelism = Math.min(writeConfig.getClusteringGroupReadParallelism(), clusteringOps.size());
     return HoodieJavaRDD.of(jsc.parallelize(clusteringOps, readParallelism)
         .mapPartitions(clusteringOpsPartition -> {
           List<Supplier<ClosableIterator<HoodieRecord<T>>>> iteratorGettersForPartition = new ArrayList<>();
           clusteringOpsPartition.forEachRemaining(clusteringOp -> {
-            Supplier<ClosableIterator<HoodieRecord<T>>> recordIteratorGetter = () -> {
-              try {
-                Schema readerSchema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(writeConfig.getSchema()));
-                HoodieFileReader baseFileReader = getBaseOrBootstrapFileReader(storageConf, bootstrapBasePath, partitionFields, clusteringOp);
-
-                Option<BaseKeyGenerator> keyGeneratorOp = HoodieSparkKeyGeneratorFactory.createBaseKeyGenerator(writeConfig);
-                // NOTE: Record have to be cloned here to make sure if it holds low-level engine-specific
-                //       payload pointing into a shared, mutable (underlying) buffer we get a clean copy of
-                //       it since these records will be shuffled later.
-                return new CloseableMappingIterator(
-                    (ClosableIterator<HoodieRecord>) baseFileReader.getRecordIterator(readerSchema),
-                    rec -> ((HoodieRecord) rec).copy().wrapIntoHoodieRecordPayloadWithKeyGen(readerSchema, writeConfig.getProps(), keyGeneratorOp));
-              } catch (IOException e) {
-                throw new HoodieClusteringException("Error reading input data for " + clusteringOp.getDataFilePath()
-                    + " and " + clusteringOp.getDeltaFilePaths(), e);
-              }
-            };
+            Supplier<ClosableIterator<HoodieRecord<T>>> recordIteratorGetter = () -> getRecordIteratorWithBaseFileOnly(clusteringOp);
             iteratorGettersForPartition.add(recordIteratorGetter);
           });
           return new LazyConcatenatingIterator<>(iteratorGettersForPartition);
         }));
   }
-  private HoodieFileReader getBaseOrBootstrapFileReader(StorageConfiguration<?> storageConf, String bootstrapBasePath, Option<String[]> partitionFields, ClusteringOperation clusteringOp)
Review Comment:
The deleted code is simply moved to SparkJobExecutionStrategy so it can serve as a common reading method.
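
For reference, a minimal sketch of what the shared helper might look like after the move. Only the method name getRecordIteratorWithBaseFileOnly comes from the diff; the body below is assumed from the deleted lines above, and its exact signature, visibility, and placement in SparkJobExecutionStrategy are assumptions, not the actual PR code.

  // Rough sketch (assumption): shared helper on SparkJobExecutionStrategy rebuilt from the
  // deleted block; it relies on the same inherited accessors (getHoodieTable, getWriteConfig,
  // getBaseOrBootstrapFileReader) that MultipleSparkJobExecutionStrategy already uses.
  protected ClosableIterator<HoodieRecord<T>> getRecordIteratorWithBaseFileOnly(ClusteringOperation clusteringOp) {
    StorageConfiguration<?> storageConf = getHoodieTable().getStorageConf();
    HoodieWriteConfig writeConfig = getWriteConfig();
    HoodieTableConfig tableConfig = getHoodieTable().getMetaClient().getTableConfig();
    String bootstrapBasePath = tableConfig.getBootstrapBasePath().orElse(null);
    Option<String[]> partitionFields = tableConfig.getPartitionFields();
    try {
      Schema readerSchema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(writeConfig.getSchema()));
      HoodieFileReader baseFileReader = getBaseOrBootstrapFileReader(storageConf, bootstrapBasePath, partitionFields, clusteringOp);
      Option<BaseKeyGenerator> keyGeneratorOp = HoodieSparkKeyGeneratorFactory.createBaseKeyGenerator(writeConfig);
      // Clone each record so payloads backed by shared mutable buffers stay valid when shuffled later.
      return new CloseableMappingIterator(
          (ClosableIterator<HoodieRecord>) baseFileReader.getRecordIterator(readerSchema),
          rec -> ((HoodieRecord) rec).copy().wrapIntoHoodieRecordPayloadWithKeyGen(readerSchema, writeConfig.getProps(), keyGeneratorOp));
    } catch (IOException e) {
      throw new HoodieClusteringException("Error reading input data for " + clusteringOp.getDataFilePath()
          + " and " + clusteringOp.getDeltaFilePaths(), e);
    }
  }

With such a helper in place, the call site in the diff stays as shown: the per-partition lambda simply invokes getRecordIteratorWithBaseFileOnly(clusteringOp) for each clustering operation.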