vinothchandar commented on code in PR #10422:
URL: https://github.com/apache/hudi/pull/10422#discussion_r1445060501


##########
hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/realtime/TestHoodieRealtimeRecordReader.java:
##########
@@ -116,6 +117,7 @@ public void setUp() {
     hadoopConf.set("fs.file.impl", 
org.apache.hadoop.fs.LocalFileSystem.class.getName());
     baseJobConf = new JobConf(hadoopConf);
     baseJobConf.set(HoodieMemoryConfig.MAX_DFS_STREAM_BUFFER_SIZE.key(), 
String.valueOf(1024 * 1024));
+    baseJobConf.set(HoodieReaderConfig.FILE_GROUP_READER_ENABLED.key(), 
"false");

Review Comment:
   why "false"



##########
packaging/bundle-validation/validate.sh:
##########
@@ -93,7 +93,7 @@ test_spark_hadoop_mr_bundles () {
     # save HiveQL query results
     hiveqlresultsdir=/tmp/hadoop-mr-bundle/hiveql/trips/results
     mkdir -p $hiveqlresultsdir
-    $HIVE_HOME/bin/beeline --hiveconf hive.input.format=org.apache.hudi.hadoop.HoodieParquetInputFormat \
+    $HIVE_HOME/bin/beeline --verbose --hiveconf hive.input.format=org.apache.hudi.hadoop.HoodieParquetInputFormat \

Review Comment:
   does this need to be checked in?



##########
hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieParquetInputFormat.java:
##########
@@ -91,9 +94,42 @@ private void initAvroInputFormat() {
     }
   }
 
+  private static boolean checkTableIsHudi(final InputSplit split, final JobConf job) {

Review Comment:
   rename: checkIfHudiTable
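
   i.e., so call sites read as a predicate, keeping the proposed body as-is:
   ```java
   private static boolean checkIfHudiTable(final InputSplit split, final JobConf job) {
   ```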



##########
hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/ObjectInspectorCache.java:
##########
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hudi.hadoop.utils;
+
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import org.apache.avro.Schema;
+import org.apache.hadoop.hive.ql.io.parquet.serde.ArrayWritableObjectInspector;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.io.ArrayWritable;
+import org.apache.hadoop.mapred.JobConf;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+public class ObjectInspectorCache {

Review Comment:
   java docs
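
   e.g., something along these lines — a sketch inferred from the class's name and imports, to be adjusted to what it actually does:
   ```java
   /**
    * Caches Hive {@link ArrayWritableObjectInspector}s (and related type info) per table schema,
    * so that record readers do not rebuild object inspectors for every split of the same table.
    */
   public class ObjectInspectorCache {
   ```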



##########
hudi-common/src/main/java/org/apache/hudi/common/table/read/HoodieFileGroupReader.java:
##########
@@ -227,23 +231,31 @@ private ClosableIterator<T> makeBootstrapBaseFileIterator(HoodieBaseFile baseFil
     BaseFile dataFile = baseFile.getBootstrapBaseFile().get();
     Pair<List<Schema.Field>,List<Schema.Field>> requiredFields = getDataAndMetaCols(requiredSchema);
     Pair<List<Schema.Field>,List<Schema.Field>> allFields = getDataAndMetaCols(dataSchema);
-
-    Option<ClosableIterator<T>> dataFileIterator = requiredFields.getRight().isEmpty() ? Option.empty() :
-        Option.of(readerContext.getFileRecordIterator(dataFile.getHadoopPath(), 0, dataFile.getFileLen(),
-            createSchemaFromFields(allFields.getRight()), createSchemaFromFields(requiredFields.getRight()), hadoopConf));
-
-    Option<ClosableIterator<T>> skeletonFileIterator = requiredFields.getLeft().isEmpty() ? Option.empty() :
-        Option.of(readerContext.getFileRecordIterator(baseFile.getHadoopPath(), 0, baseFile.getFileLen(),
-            createSchemaFromFields(allFields.getLeft()), createSchemaFromFields(requiredFields.getLeft()), hadoopConf));
+    Option<Pair<ClosableIterator<T>,Schema>> dataFileIterator =

Review Comment:
   it's cool that we are able to add a new engine without many changes to this class.
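
   The only engine-specific hook this method leans on is the reader context's file record iterator, so a new engine plugs in roughly like this — a sketch only: the class name and the iterator it returns are hypothetical, and the real `HoodieReaderContext` has more abstract methods than shown:
   ```java
   import org.apache.avro.Schema;
   import org.apache.hadoop.conf.Configuration;
   import org.apache.hadoop.fs.Path;
   import org.apache.hadoop.io.ArrayWritable;
   import org.apache.hudi.common.engine.HoodieReaderContext;
   import org.apache.hudi.common.util.collection.ClosableIterator;

   import java.io.IOException;

   // Hypothetical engine integration; only the hook used above is sketched.
   public class MyEngineReaderContext extends HoodieReaderContext<ArrayWritable> {
     @Override
     public ClosableIterator<ArrayWritable> getFileRecordIterator(
         Path filePath, long start, long length,
         Schema dataSchema, Schema requiredSchema, Configuration conf) throws IOException {
       // Open an engine-native reader over [start, start + length),
       // projecting dataSchema down to requiredSchema.
       return new MyEngineFileIterator(filePath, start, length, dataSchema, requiredSchema, conf); // hypothetical helper
     }
   }
   ```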



##########
hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/AbstractRealtimeRecordReader.java:
##########
@@ -101,6 +102,9 @@ public AbstractRealtimeRecordReader(RealtimeSplit split, JobConf job) {
       throw new HoodieException("Could not create HoodieRealtimeRecordReader on path " + this.split.getPath(), e);
     }
     prepareHiveAvroSerializer();
+    if (HoodieFileGroupReaderRecordReader.useFilegroupReader(jobConf)) {

Review Comment:
   would an assert or preconditions check be better here?
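
   e.g., if the new branch only guards against this legacy reader being constructed with the file group reader on, Hudi's own `ValidationUtils` (already used in this PR's combine reader) would make the intent explicit — a sketch, assuming that is what the branch is for:
   ```java
   import org.apache.hudi.common.util.ValidationUtils;

   // Sketch: fail fast instead of branching, if the branch exists purely as a guard.
   ValidationUtils.checkArgument(!HoodieFileGroupReaderRecordReader.useFilegroupReader(jobConf),
       "AbstractRealtimeRecordReader should not be used when the file group reader is enabled");
   ```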



##########
hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieParquetInputFormat.java:
##########
@@ -91,9 +94,42 @@ private void initAvroInputFormat() {
     }
   }
 
+  private static boolean checkTableIsHudi(final InputSplit split, final JobConf job) {
+    try {
+      Option<Path> tablePathOpt = TablePathUtils.getTablePath(((FileSplit) split).getPath(), job);
+      if (!tablePathOpt.isPresent()) {
+        return false;
+      }
+      return tablePathOpt.get().getFileSystem(job).exists(new Path(tablePathOpt.get(), HoodieTableMetaClient.METAFOLDER_NAME));
+    } catch (IOException e) {
+      return false;
+    }
+  }
+
   @Override
   public RecordReader<NullWritable, ArrayWritable> getRecordReader(final InputSplit split, final JobConf job,
                                                                    final Reporter reporter) throws IOException {
+
+    if (HoodieFileGroupReaderRecordReader.useFilegroupReader(job)) {
+      try {
+        if (!(split instanceof FileSplit) || !checkTableIsHudi(split, job)) {
+          return super.getRecordReader(split, job, reporter);
+        }
+        if (supportAvroRead && HoodieColumnProjectionUtils.supportTimestamp(job)) {

Review Comment:
   note to self: dig into these.



##########
hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieArrayWritableAvroUtils.java:
##########
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hudi.hadoop.utils;
+
+import org.apache.hudi.common.util.collection.Pair;
+
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import org.apache.avro.Schema;
+import org.apache.hadoop.hive.ql.io.parquet.serde.ArrayWritableObjectInspector;
+import org.apache.hadoop.io.ArrayWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapred.JobConf;
+
+import java.util.List;
+import java.util.function.UnaryOperator;
+
+public class HoodieArrayWritableAvroUtils {

Review Comment:
   UT this class?
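
   e.g., a skeleton along these lines — the `projectRecord` name is assumed from this class's `UnaryOperator` import and should be adjusted to the real API:
   ```java
   import org.apache.avro.Schema;
   import org.apache.hadoop.io.ArrayWritable;
   import org.apache.hadoop.io.Text;
   import org.apache.hadoop.io.Writable;
   import org.junit.jupiter.api.Test;

   import java.util.function.UnaryOperator;

   import static org.junit.jupiter.api.Assertions.assertEquals;

   public class TestHoodieArrayWritableAvroUtils {

     @Test
     public void testProjectRecord() {
       // Two-field record schema, projected down to its second field.
       Schema from = new Schema.Parser().parse(
           "{\"type\":\"record\",\"name\":\"rec\",\"fields\":["
               + "{\"name\":\"a\",\"type\":\"string\"},{\"name\":\"b\",\"type\":\"string\"}]}");
       Schema to = new Schema.Parser().parse(
           "{\"type\":\"record\",\"name\":\"rec\",\"fields\":[{\"name\":\"b\",\"type\":\"string\"}]}");

       ArrayWritable record = new ArrayWritable(Writable.class, new Writable[] {new Text("x"), new Text("y")});

       // Method name assumed; the projection should keep only field "b".
       UnaryOperator<ArrayWritable> projection = HoodieArrayWritableAvroUtils.projectRecord(from, to);
       assertEquals("y", projection.apply(record).get()[0].toString());
     }
   }
   ```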



##########
hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieCombineRealtimeRecordReader.java:
##########
@@ -42,64 +43,123 @@ public class HoodieCombineRealtimeRecordReader implements RecordReader<NullWrita
 
   private static final transient Logger LOG = LoggerFactory.getLogger(HoodieCombineRealtimeRecordReader.class);
   // RecordReaders for each split
-  List<HoodieRealtimeRecordReader> recordReaders = new LinkedList<>();
+  private List<HoodieRealtimeRecordReader> recordReaders = new LinkedList<>();
   // Points to the currently iterating record reader
-  HoodieRealtimeRecordReader currentRecordReader;
+  private HoodieRealtimeRecordReader currentRecordReader;
+
+  private final boolean useFileGroupReader;
+
+  // RecordReaders for each split
+  private List<HoodieFileGroupReaderRecordReader> recordReadersFG = new LinkedList<>();
+  // Points to the currently iterating record reader
+  private HoodieFileGroupReaderRecordReader currentRecordReaderFG;
 
   public HoodieCombineRealtimeRecordReader(JobConf jobConf, CombineFileSplit split,
       List<RecordReader> readers) {
-    try {
-      ValidationUtils.checkArgument(((HoodieCombineRealtimeFileSplit) split).getRealtimeFileSplits().size() == readers
-          .size(), "Num Splits does not match number of unique RecordReaders!");
-      for (InputSplit rtSplit : ((HoodieCombineRealtimeFileSplit) split).getRealtimeFileSplits()) {
-        LOG.info("Creating new RealtimeRecordReader for split");
-        recordReaders.add(
-            new HoodieRealtimeRecordReader((HoodieRealtimeFileSplit) rtSplit, jobConf, readers.remove(0)));
+    useFileGroupReader = HoodieFileGroupReaderRecordReader.useFilegroupReader(jobConf);
+    if (useFileGroupReader) {

Review Comment:
   IIUC this just replicates the existing logic for the new FG reader.
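
   If `HoodieFileGroupReaderRecordReader` also implements `RecordReader<NullWritable, ArrayWritable>` (an assumption, though its use here suggests it), the parallel fields could collapse into one list, e.g.:
   ```java
   // Sketch: a single list serving both reader types, instead of
   // maintaining recordReaders and recordReadersFG side by side.
   private final List<RecordReader<NullWritable, ArrayWritable>> recordReaders = new LinkedList<>();
   ```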


