vinishjail97 commented on code in PR #650:
URL: https://github.com/apache/incubator-xtable/pull/650#discussion_r1967124578
##########
xtable-api/src/main/java/org/apache/xtable/conversion/ExternalTable.java:
##########
@@ -34,12 +34,16 @@
class ExternalTable {
/** The name of the table. */
protected final @NonNull String name;
+
Review Comment:
Are these changes coming from `mvn spotless:apply`? Wondering why the latest main branch doesn't reflect these.
##########
xtable-core/src/main/java/org/apache/xtable/parquet/ParquetTableExtractor.java:
##########
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.xtable.delta;
+
+import java.time.Instant;
+import java.util.List;
+
+import lombok.Builder;
+
+import org.apache.parquet.hadoop.ParquetFileReader;
+
+import org.apache.xtable.model.InternalTable;
+import org.apache.xtable.model.schema.InternalPartitionField;
+import org.apache.xtable.model.schema.InternalSchema;
+import org.apache.xtable.model.storage.DataLayoutStrategy;
+import org.apache.xtable.model.storage.TableFormat;
+
+/**
+ * Extracts {@link InternalTable} canonical representation of a table at a point in time for Delta.
+ */
+@Builder
+public class ParquetTableExtractor {
+ @Builder.Default
+ private static final ParquetSchemaExtractor schemaExtractor = ParquetTableExtractor.getInstance();
+ @Builder.Default
+ private static final ParquetPartitionExtractor partitionExtractor = ParquetPartitionExtractor.getInstance();
+ @Builder.Default
+ private static final ParquetMetadataExtractor parquetMetadataExtractor =
+ ParquetMetadataExtractor.getInstance();
+ private Map<String, List<String>> initPartitionInfo() {
Review Comment:
nit: add a blank line between lines 45 and 46.
##########
xtable-core/src/main/java/org/apache/xtable/parquet/ParquetTableExtractor.java:
##########
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.xtable.delta;
+
+import java.time.Instant;
+import java.util.List;
+
+import lombok.Builder;
+
+import org.apache.parquet.hadoop.ParquetFileReader;
+
+import org.apache.xtable.model.InternalTable;
+import org.apache.xtable.model.schema.InternalPartitionField;
+import org.apache.xtable.model.schema.InternalSchema;
+import org.apache.xtable.model.storage.DataLayoutStrategy;
+import org.apache.xtable.model.storage.TableFormat;
+
+/**
+ * Extracts {@link InternalTable} canonical representation of a table at a point in time for Delta.
Review Comment:
"for Delta" may be confusing with the table format; this should be enough, I guess?
`Extracts {@link InternalTable} canonical representation of a table at a point in time`
##########
xtable-core/src/main/java/org/apache/xtable/parquet/ParquetSchemaExtractror.java:
##########
@@ -0,0 +1,486 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.xtable.parquet;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import lombok.AccessLevel;
+import lombok.NoArgsConstructor;
+
+import org.apache.parquet.LogicalType;
+import org.apache.parquet.LogicalTypes;
+import org.apache.parquet.Schema;
+
+import org.apache.xtable.collectors.CustomCollectors;
+import org.apache.xtable.exception.SchemaExtractorException;
+import org.apache.xtable.exception.UnsupportedSchemaTypeException;
+import org.apache.xtable.hudi.idtracking.IdTracker;
+import org.apache.xtable.hudi.idtracking.models.IdMapping;
+import org.apache.xtable.model.schema.InternalField;
+import org.apache.xtable.model.schema.InternalSchema;
+import org.apache.xtable.model.schema.InternalType;
+import org.apache.xtable.schema.SchemaUtils;
+
+/**
+ * Class that converts parquet Schema {@link Schema} to Canonical Schema {@link InternalSchema} and
+ * vice-versa. This conversion is fully reversible and there is a strict 1 to 1 mapping between
+ * parquet data types and canonical data types.
+ */
+@NoArgsConstructor(access = AccessLevel.PRIVATE)
+public class ParquetSchemaConverter {
+ // parquet only supports string keys in maps
+ private static final InternalField MAP_KEY_FIELD =
+ InternalField.builder()
+ .name(InternalField.Constants.MAP_KEY_FIELD_NAME)
+ .schema(
+ InternalSchema.builder()
+ .name("map_key")
+ .dataType(InternalType.STRING)
+ .isNullable(false)
+ .build())
+ .defaultValue("")
+ .build();
+ private static final ParquetSchemaConverter INSTANCE = new ParquetSchemaConverter();
+ private static final String ELEMENT = "element";
+ private static final String KEY = "key";
+ private static final String VALUE = "value";
+
+ public static parquetSchemaConverter getInstance() {
+ return INSTANCE;
+ }
+
+ public InternalSchema toInternalSchema(Schema schema) {
+ Map<String, IdMapping> fieldNameToIdMapping =
+ IdTracker.getInstance()
+ .getIdTracking(schema)
+ .map(
+ idTracking ->
+ idTracking.getIdMappings().stream()
+ .collect(Collectors.toMap(IdMapping::getName, Function.identity())))
+ .orElse(Collections.emptyMap());
+ return toInternalSchema(schema, null, fieldNameToIdMapping);
+ }
+
+ /**
+ * Converts the parquet {@link Schema} to {@link InternalSchema}.
+ *
+ * @param schema The schema being converted
+ * @param parentPath If this schema is nested within another, this will be a dot separated string
+ *     representing the path from the top most field to the current schema.
+ * @param fieldNameToIdMapping map of fieldName to IdMapping to track field IDs provided by the
+ *     source schema. If source schema does not contain IdMappings, map will be empty.
+ * @return a converted schema
+ */
+ private InternalSchema toInternalSchema(
+ Schema schema, String parentPath, Map<String, IdMapping> fieldNameToIdMapping) {
+ // TODO - Does not handle recursion in parquet schema
+ InternalType newDataType;
+ Map<InternalSchema.MetadataKey, Object> metadata = new HashMap<>();
+ switch (schema.getType()) {
+ case INT:
+ LogicalType logicalType = schema.getLogicalType();
+ if (logicalType instanceof LogicalTypes.Date) {
+ newDataType = InternalType.DATE;
+ } else {
+ newDataType = InternalType.INT;
+ }
+ break;
+ case STRING:
+ newDataType = InternalType.STRING;
+ break;
+ case BOOLEAN:
+ newDataType = InternalType.BOOLEAN;
+ break;
+ case BYTE_ARRAY:
+ logicalType = schema.getLogicalType();
+ // TODO: any metadata to add ?
+ if (logicalType== LogicalTypes.JSON) {
+ newDataType = InternalType.JSON;
+ }
+ else if (logicalType instanceof LogicalTypes.BSON) {
+ newDataType = InternalType.BSON;
+ }
+ else if (logicalType instanceof LogicalTypes.VARIANT) {
+ newDataType = InternalType.VARIANT;
+ }
+ else if (logicalType instanceof LogicalTypes.GEOMETRY) {
+ newDataType = InternalType.GEOMETRY;
+ }
+ else if (logicalType instanceof LogicalTypes.GEOGRAPHY) {
+ newDataType = InternalType.GEOGRAPHY;
Review Comment:
Would it be simpler to map it to `BYTES`?
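For illustration, a minimal sketch of that simplification, reusing the names from the diff above (so `LogicalTypes.JSON`, `InternalType.BYTES`, etc. are assumed rather than verified): keep JSON if the canonical model needs it and fold the other binary-backed logical types into `BYTES`.

```java
// Sketch only, mirroring the checks in the diff above.
case BYTE_ARRAY:
  logicalType = schema.getLogicalType();
  if (logicalType == LogicalTypes.JSON) {
    newDataType = InternalType.JSON;
  } else {
    // BSON, VARIANT, GEOMETRY, GEOGRAPHY and plain binary all collapse to BYTES,
    // which is what the Iceberg/Delta/Hudi targets can actually store.
    newDataType = InternalType.BYTES;
  }
  break;
```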
##########
xtable-core/src/main/java/org/apache/xtable/parquet/ParquetTableExtractor.java:
##########
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.xtable.delta;
+
+import java.time.Instant;
+import java.util.List;
+
+import lombok.Builder;
+
+import org.apache.parquet.hadoop.ParquetFileReader;
+
+import org.apache.xtable.model.InternalTable;
+import org.apache.xtable.model.schema.InternalPartitionField;
+import org.apache.xtable.model.schema.InternalSchema;
+import org.apache.xtable.model.storage.DataLayoutStrategy;
+import org.apache.xtable.model.storage.TableFormat;
+
+/**
+ * Extracts {@link InternalTable} canonical representation of a table at a point in time for Delta.
+ */
+@Builder
+public class ParquetTableExtractor {
+ @Builder.Default
+ private static final ParquetSchemaExtractor schemaExtractor = ParquetTableExtractor.getInstance();
+ @Builder.Default
+ private static final ParquetPartitionExtractor partitionExtractor = ParquetPartitionExtractor.getInstance();
+ @Builder.Default
+ private static final ParquetMetadataExtractor parquetMetadataExtractor =
+ ParquetMetadataExtractor.getInstance();
+ private Map<String, List<String>> initPartitionInfo() {
+ return getPartitionFromDirectoryStructure(hadoopConf, basePath, Collections.emptyMap());
+ }
+
+ public InternalTable table(String tableName, Long version) {
+ ParquetMetadata footer =
+ parquetMetadataExtractor.readParquetMetadata(conf, path);
+ MessageType schema = parquetMetadataExtractor.getSchema(footer);
+ InternalSchema schema = schemaExtractor.toInternalSchema(schema);
+ Set<String> partitionKeys = initPartitionInfo().keySet();
+ List<InternalPartitionField> partitionFields =
+ partitionExtractor
+ .getInternalPartitionField(partitionKeys,schema);
Review Comment:
There seems to be some confusion here.
`initPartitionInfo().keySet()` returns all the unique partition paths for all parquet files combined.
`InternalPartitionField` is the partition field name, not the values for the table.
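To illustrate the distinction with a small, hypothetical example (the path and helpers are made up for illustration): a hive-style partition path carries `field=value` pairs, and `InternalPartitionField` should be built from the field-name side resolved against the schema, not from the values.

```java
// Hypothetical example; keySet()-style lookups return the value-bearing path segments.
String partitionPath = "year=2024/month=01";
// The field names ("year", "month") are what a schema lookup can actually resolve.
List<String> partitionFieldNames =
    Arrays.stream(partitionPath.split("/"))
        .map(segment -> segment.split("=")[0])
        .collect(Collectors.toList());
// Each name would then become an InternalPartitionField via a schema lookup
// (e.g. SchemaFieldFinder), typically with PartitionTransformType.VALUE.
```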
##########
xtable-core/src/main/java/org/apache/xtable/parquet/ParquetSchemaExtractror.java:
##########
@@ -0,0 +1,486 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.xtable.parquet;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import lombok.AccessLevel;
+import lombok.NoArgsConstructor;
+
+import org.apache.parquet.LogicalType;
+import org.apache.parquet.LogicalTypes;
+import org.apache.parquet.Schema;
+
+import org.apache.xtable.collectors.CustomCollectors;
+import org.apache.xtable.exception.SchemaExtractorException;
+import org.apache.xtable.exception.UnsupportedSchemaTypeException;
+import org.apache.xtable.hudi.idtracking.IdTracker;
+import org.apache.xtable.hudi.idtracking.models.IdMapping;
+import org.apache.xtable.model.schema.InternalField;
+import org.apache.xtable.model.schema.InternalSchema;
+import org.apache.xtable.model.schema.InternalType;
+import org.apache.xtable.schema.SchemaUtils;
+
+/**
+ * Class that converts parquet Schema {@link Schema} to Canonical Schema {@link InternalSchema} and
+ * vice-versa. This conversion is fully reversible and there is a strict 1 to 1 mapping between
+ * parquet data types and canonical data types.
+ */
+@NoArgsConstructor(access = AccessLevel.PRIVATE)
+public class ParquetSchemaConverter {
Review Comment:
This class looks okay, thanks for putting this up. Can you add unit tests for this as well?
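For example, a test could round-trip a simple schema and assert the canonical types. This is only a shape sketch, where `buildFlatSchema` and `typeOf` are hypothetical test helpers and the type names follow the diff above.

```java
// Shape sketch only; the helpers below are hypothetical stand-ins.
@Test
void primitiveTypesMapToCanonicalTypes() {
  Schema source = buildFlatSchema("id:INT", "name:STRING", "active:BOOLEAN"); // hypothetical helper
  InternalSchema converted = ParquetSchemaConverter.getInstance().toInternalSchema(source);
  assertEquals(InternalType.INT, typeOf(converted, "id"));       // hypothetical lookup helper
  assertEquals(InternalType.STRING, typeOf(converted, "name"));
  assertEquals(InternalType.BOOLEAN, typeOf(converted, "active"));
}
```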
##########
xtable-core/src/main/java/org/apache/xtable/parquet/ParquetStatsExtractor.java:
##########
@@ -0,0 +1,115 @@
+
+import org.apache.parquet.column.ColumnDescriptor;
+import org.apache.parquet.column.Encoding;
+import org.apache.parquet.column.statistics.Statistics;
+import org.apache.parquet.hadoop.metadata.BlockMetaData;
+import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
+import org.apache.parquet.hadoop.metadata.ParquetMetadata;
+import org.apache.parquet.hadoop.util.HiddenFileFilter;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.schema.MessageType;
+import org.apache.parquet.column.ColumnDescriptor;
+import org.apache.parquet.column.Encoding;
+import org.apache.parquet.column.statistics.Statistics;
+import java.util.stream.Collectors;
+import org.apache.hadoop.fs.*;
+
+public class ParquetStatsExtractor {
+ @Builder.Default
+ private static final ParquetMetadataExtractor parquetMetadataExtractor =
+ ParquetMetadataExtractor.getInstance();
+ @Builder.Default
+ private static final ParquetPartitionExtractor partitionExtractor = ParquetPartitionExtractor.getInstance();
+
+ private static Map<ColumnDescriptor, ColStats> stats = new LinkedHashMap<ColumnDescriptor, ColStats>();
+ private static long recordCount = 0;
+ private Map<String, List<String>> initPartitionInfo() {
+ return getPartitionFromDirectoryStructure(hadoopConf, basePath, Collections.emptyMap());
+ }
+ private InternalDataFile toInternalDataFile(Configuration hadoopConf,
+ String parentPath, Map<ColumnDescriptor, ColStats> stats) {
+ FileSystem fs = FileSystem.get(hadoopConf);
+ FileStatus file = fs.getFileStatus(new Path(parentPath));
+ Map<String, List<String>> partitionInfo = initPartitionInfo();
+
+ ParquetMetadata footer =
+ parquetMetadataExtractor.readParquetMetadata(hadoopConf, parentPath);
+ MessageType schema = parquetMetadataExtractor.getSchema(footer);
+ InternalSchema schema = schemaExtractor.toInternalSchema(schema);
+ List<PartitionValue> partitionValues =
+ partitionExtractor
+ .getPartitionValue(parentPath,file.getPath().toString(),schema,partitionInfo);
Review Comment:
The partitionValues here are not the actual partition values; they are a list of the partitioning fields in the schema with their range.
https://github.com/apache/incubator-xtable/blob/main/xtable-api/src/main/java/org/apache/xtable/model/storage/InternalDataFile.java#L44
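To illustrate what that model expects (builder and method names assumed from the xtable-api classes imported in this PR), each `PartitionValue` pairs the partition field definition with the `Range` the file covers, rather than a raw directory value:

```java
// Sketch only; yearPartitionField is a hypothetical InternalPartitionField for a "year" column.
PartitionValue partitionValue =
    PartitionValue.builder()
        .partitionField(yearPartitionField)
        .range(Range.scalar("2024")) // the single partition value this file belongs to
        .build();
```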
##########
xtable-core/src/main/java/org/apache/xtable/parquet/ParquetPartitionExtractor.java:
##########
@@ -0,0 +1,69 @@
+package org.apache.xtable.parquet;
+
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.xtable.model.schema.InternalPartitionField;
+import org.apache.xtable.model.schema.InternalSchema;
+import org.apache.xtable.model.schema.PartitionTransformType;
+import org.apache.xtable.model.stat.PartitionValue;
+import org.apache.xtable.model.stat.Range;
+import org.apache.xtable.schema.SchemaFieldFinder;
+
+public class ParquetPartitionExtractor {
+ private static final ParquetPartitionExtractor INSTANCE = new ParquetPartitionExtractor();
+
+ public static ParquetPartitionExtractor getInstance() {
+ return INSTANCE;
+ }
+
+ public List<InternalPartitionField> getInternalPartitionField(
+ Set<String> partitionList, InternalSchema schema) {
Review Comment:
I don't think this would work because partitionList is a list of partition field values, and you won't find those in the schema.
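For illustration, the call shape that would work takes partition field names (hypothetical values shown), not the `field=value` entries produced by the directory listing:

```java
// Hypothetical field names; schema is the InternalSchema already in scope.
Set<String> partitionFieldNames = new HashSet<>(Arrays.asList("year", "month"));
List<InternalPartitionField> partitionFields =
    ParquetPartitionExtractor.getInstance().getInternalPartitionField(partitionFieldNames, schema);
```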
##########
xtable-core/src/main/java/org/apache/xtable/parquet/ParquetSchemaExtractror.java:
##########
@@ -0,0 +1,486 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.xtable.parquet;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import lombok.AccessLevel;
+import lombok.NoArgsConstructor;
+
+import org.apache.parquet.LogicalType;
+import org.apache.parquet.LogicalTypes;
+import org.apache.parquet.Schema;
+
+import org.apache.xtable.collectors.CustomCollectors;
+import org.apache.xtable.exception.SchemaExtractorException;
+import org.apache.xtable.exception.UnsupportedSchemaTypeException;
+import org.apache.xtable.hudi.idtracking.IdTracker;
+import org.apache.xtable.hudi.idtracking.models.IdMapping;
+import org.apache.xtable.model.schema.InternalField;
+import org.apache.xtable.model.schema.InternalSchema;
+import org.apache.xtable.model.schema.InternalType;
+import org.apache.xtable.schema.SchemaUtils;
+
+/**
+ * Class that converts parquet Schema {@link Schema} to Canonical Schema {@link InternalSchema} and
+ * vice-versa. This conversion is fully reversible and there is a strict 1 to 1 mapping between
+ * parquet data types and canonical data types.
+ */
+@NoArgsConstructor(access = AccessLevel.PRIVATE)
+public class ParquetSchemaConverter {
+ // parquet only supports string keys in maps
+ private static final InternalField MAP_KEY_FIELD =
+ InternalField.builder()
+ .name(InternalField.Constants.MAP_KEY_FIELD_NAME)
+ .schema(
+ InternalSchema.builder()
+ .name("map_key")
+ .dataType(InternalType.STRING)
+ .isNullable(false)
+ .build())
+ .defaultValue("")
+ .build();
+ private static final ParquetSchemaConverter INSTANCE = new ParquetSchemaConverter();
+ private static final String ELEMENT = "element";
+ private static final String KEY = "key";
+ private static final String VALUE = "value";
+
+ public static parquetSchemaConverter getInstance() {
+ return INSTANCE;
+ }
+
+ public InternalSchema toInternalSchema(Schema schema) {
+ Map<String, IdMapping> fieldNameToIdMapping =
+ IdTracker.getInstance()
+ .getIdTracking(schema)
+ .map(
+ idTracking ->
+ idTracking.getIdMappings().stream()
+ .collect(Collectors.toMap(IdMapping::getName, Function.identity())))
+ .orElse(Collections.emptyMap());
+ return toInternalSchema(schema, null, fieldNameToIdMapping);
+ }
+
+ /**
+ * Converts the parquet {@link Schema} to {@link InternalSchema}.
+ *
+ * @param schema The schema being converted
+ * @param parentPath If this schema is nested within another, this will be a dot separated string
+ *     representing the path from the top most field to the current schema.
+ * @param fieldNameToIdMapping map of fieldName to IdMapping to track field IDs provided by the
+ *     source schema. If source schema does not contain IdMappings, map will be empty.
+ * @return a converted schema
+ */
+ private InternalSchema toInternalSchema(
+ Schema schema, String parentPath, Map<String, IdMapping> fieldNameToIdMapping) {
+ // TODO - Does not handle recursion in parquet schema
+ InternalType newDataType;
+ Map<InternalSchema.MetadataKey, Object> metadata = new HashMap<>();
+ switch (schema.getType()) {
+ case INT:
+ LogicalType logicalType = schema.getLogicalType();
+ if (logicalType instanceof LogicalTypes.Date) {
+ newDataType = InternalType.DATE;
+ } else {
+ newDataType = InternalType.INT;
+ }
+ break;
+ case STRING:
+ newDataType = InternalType.STRING;
+ break;
+ case BOOLEAN:
+ newDataType = InternalType.BOOLEAN;
+ break;
+ case BYTE_ARRAY:
+ logicalType = schema.getLogicalType();
+ // TODO: any metadata to add ?
+ if (logicalType== LogicalTypes.JSON) {
+ newDataType = InternalType.JSON;
+ }
+ else if (logicalType instanceof LogicalTypes.BSON) {
+ newDataType = InternalType.BSON;
+ }
+ else if (logicalType instanceof LogicalTypes.VARIANT) {
+ newDataType = InternalType.VARIANT;
+ }
+ else if (logicalType instanceof LogicalTypes.GEOMETRY) {
+ newDataType = InternalType.GEOMETRY;
+ }
+ else if (logicalType instanceof LogicalTypes.GEOGRAPHY) {
+ newDataType = InternalType.GEOGRAPHY;
Review Comment:
Let me know your thoughts as well. I was asking because the targets (Iceberg, Delta, and Hudi) don't seem to support these types and just map them to a byte array or binary.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]