vaibhavk1992 commented on code in PR #729:
URL: https://github.com/apache/incubator-xtable/pull/729#discussion_r2565841550


##########
pom.xml:
##########
@@ -615,6 +616,20 @@
                 <artifactId>jettison</artifactId>
                 <version>1.5.4</version>
             </dependency>
+            <dependency>
+                <groupId>io.delta</groupId>
+                <artifactId>delta-kernel-api</artifactId>
+                <version>${delta.kernel.version}</version>
+                <scope>provided</scope>

Review Comment:
   @the-other-tim-brown Regarding the kernel scope in the POM: if I keep it at
`compile` scope, I get the same "package does not exist" errors. Maven resolves
the transitive hadoop-common coming from Delta Kernel at compile scope, which
overrides the existing `provided` declaration, so I kept it `provided`.
   Let me know if there is another solution that might work here, or whether we
can keep the scope as-is for this PR.
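
   One alternative that might be worth trying (an untested sketch): keep the
dependency at `compile` scope and exclude the transitive hadoop-common, so the
project's existing `provided` declaration wins dependency mediation. The
exclusion would go on whichever Delta Kernel artifact actually pulls
hadoop-common in; it is shown on delta-kernel-api here only for illustration.

   ```xml
   <!-- Hypothetical sketch: exclude the transitive hadoop-common pulled in by
        Delta Kernel so Maven falls back to the provided-scope declaration
        already present elsewhere in this POM. -->
   <dependency>
       <groupId>io.delta</groupId>
       <artifactId>delta-kernel-api</artifactId>
       <version>${delta.kernel.version}</version>
       <exclusions>
           <exclusion>
               <groupId>org.apache.hadoop</groupId>
               <artifactId>hadoop-common</artifactId>
           </exclusion>
       </exclusions>
   </dependency>
   ```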



##########
xtable-core/src/main/java/org/apache/xtable/kernel/DeltaKernelConversionSource.java:
##########
@@ -0,0 +1,238 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+package org.apache.xtable.kernel;
+
+import java.io.IOException;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+
+import lombok.Builder;
+import lombok.extern.slf4j.Slf4j;
+
+import io.delta.kernel.Snapshot;
+import io.delta.kernel.Table;
+import io.delta.kernel.engine.Engine;
+import io.delta.kernel.internal.SnapshotImpl;
+import io.delta.kernel.internal.actions.AddFile;
+import io.delta.kernel.internal.actions.RemoveFile;
+import io.delta.kernel.internal.actions.RowBackedAction;
+import io.delta.kernel.internal.util.VectorUtils;
+
+import org.apache.xtable.exception.ReadException;
+import org.apache.xtable.model.CommitsBacklog;
+import org.apache.xtable.model.InstantsForIncrementalSync;
+import org.apache.xtable.model.InternalSnapshot;
+import org.apache.xtable.model.InternalTable;
+import org.apache.xtable.model.TableChange;
+import org.apache.xtable.model.schema.InternalSchema;
+import org.apache.xtable.model.storage.FileFormat;
+import org.apache.xtable.model.storage.InternalDataFile;
+import org.apache.xtable.model.storage.InternalFilesDiff;
+import org.apache.xtable.model.storage.PartitionFileGroup;
+import org.apache.xtable.spi.extractor.ConversionSource;
+import org.apache.xtable.spi.extractor.DataFileIterator;
+
+@Slf4j
+@Builder
+public class DeltaKernelConversionSource implements ConversionSource<Long> {
+
+  @Builder.Default
+  private final DeltaKernelDataFileExtractor dataFileExtractor =
+      DeltaKernelDataFileExtractor.builder().build();
+
+  @Builder.Default
+  private final DeltaKernelActionsConverter actionsConverter =
+      DeltaKernelActionsConverter.getInstance();
+
+  private final String basePath;
+  private final String tableName;
+  private final Engine engine;
+
+  @Builder.Default
+  private final DeltaKernelTableExtractor tableExtractor =
+      DeltaKernelTableExtractor.builder().build();
+
+  private Optional<DeltaKernelIncrementalChangesState> deltaKernelIncrementalChangesState =
+      Optional.empty();
+
+  @Override
+  public InternalTable getTable(Long version) {
+    try {
+      Table table = Table.forPath(engine, basePath);
+      Snapshot snapshot = table.getSnapshotAsOfVersion(engine, version);
+      return tableExtractor.table(table, snapshot, engine, tableName, basePath);
+    } catch (Exception e) {
+      throw new ReadException("Failed to get table at version " + version, e);
+    }
+  }
+
+  @Override
+  public InternalTable getCurrentTable() {
+    Table table = Table.forPath(engine, basePath);
+    Snapshot snapshot = table.getLatestSnapshot(engine);
+    return getTable(snapshot.getVersion());
+  }
+
+  @Override
+  public InternalSnapshot getCurrentSnapshot() {
+    Table deltaTable = Table.forPath(engine, basePath);
+    Snapshot snapshot = deltaTable.getLatestSnapshot(engine);
+    InternalTable table = getTable(snapshot.getVersion());
+    return InternalSnapshot.builder()
+        .table(table)
+        .partitionedDataFiles(
+            getInternalDataFiles(snapshot, deltaTable, engine, table.getReadSchema()))
+        .sourceIdentifier(getCommitIdentifier(snapshot.getVersion()))
+        .build();
+  }
+
+  @Override
+  public TableChange getTableChangeForCommit(Long versionNumber) {
+    Table table = Table.forPath(engine, basePath);
+    Snapshot snapshot = table.getSnapshotAsOfVersion(engine, versionNumber);
+    InternalTable tableAtVersion =
+        tableExtractor.table(table, snapshot, engine, tableName, basePath);
+    Map<String, InternalDataFile> addedFiles = new HashMap<>();
+    Map<String, InternalDataFile> removedFiles = new HashMap<>();
+    String provider = ((SnapshotImpl) snapshot).getMetadata().getFormat().getProvider();
+    FileFormat fileFormat = actionsConverter.convertToFileFormat(provider);
+
+    List<RowBackedAction> actionsForVersion =
+        getChangesState().getActionsForVersion(versionNumber);
+
+    for (RowBackedAction action : actionsForVersion) {
+      if (action instanceof AddFile) {
+        AddFile addFile = (AddFile) action;
+        Map<String, String> partitionValues =
+            VectorUtils.toJavaMap(addFile.getPartitionValues());
+        InternalDataFile dataFile =
+            actionsConverter.convertAddActionToInternalDataFile(
+                addFile,
+                table,
+                fileFormat,
+                tableAtVersion.getPartitioningFields(),
+                tableAtVersion.getReadSchema().getFields(),
+                true,
+                DeltaKernelPartitionExtractor.getInstance(),
+                DeltaKernelStatsExtractor.getInstance(),
+                partitionValues);
+        addedFiles.put(dataFile.getPhysicalPath(), dataFile);
+      } else if (action instanceof RemoveFile) {
+        RemoveFile removeFile = (RemoveFile) action;
+        Map<String, String> partitionValues =
+            removeFile
+                .getPartitionValues()
+                .map(VectorUtils::<String, String>toJavaMap)
+                .orElse(Collections.emptyMap());
+        InternalDataFile dataFile =
+            actionsConverter.convertRemoveActionToInternalDataFile(
+                removeFile,
+                table,
+                fileFormat,
+                tableAtVersion.getPartitioningFields(),
+                DeltaKernelPartitionExtractor.getInstance(),
+                partitionValues);
+        removedFiles.put(dataFile.getPhysicalPath(), dataFile);
+      }
+    }
+
+    InternalFilesDiff internalFilesDiff =
+        InternalFilesDiff.builder()
+            .filesAdded(addedFiles.values())
+            .filesRemoved(removedFiles.values())
+            .build();
+    return TableChange.builder()
+        .tableAsOfChange(tableAtVersion)
+        .filesDiff(internalFilesDiff)
+        .sourceIdentifier(getCommitIdentifier(versionNumber))
+        .build();
+  }
+
+  @Override
+  public CommitsBacklog<Long> getCommitsBacklog(
+      InstantsForIncrementalSync instantsForIncrementalSync) {
+    Table table = Table.forPath(engine, basePath);
+    Snapshot snapshot =
+        table.getSnapshotAsOfTimestamp(
+            engine, instantsForIncrementalSync.getLastSyncInstant().toEpochMilli());
+
+    long versionNumberAtLastSyncInstant = snapshot.getVersion();
+    resetState(versionNumberAtLastSyncInstant + 1, engine, table);
+    return CommitsBacklog.<Long>builder()
+        .commitsToProcess(getChangesState().getVersionsInSortedOrder())
+        .build();
+  }
+
+  @Override
+  public boolean isIncrementalSyncSafeFrom(Instant instant) {
+    try {
+      Table table = Table.forPath(engine, basePath);
+      Snapshot snapshot = table.getSnapshotAsOfTimestamp(engine, instant.toEpochMilli());
+
+      // There is a chance the earliest commit of the table is returned if the instant is
+      // before the earliest commit of the table, hence the additional check.
+      Instant deltaCommitInstant = Instant.ofEpochMilli(snapshot.getTimestamp(engine));
+      return deltaCommitInstant.equals(instant) || deltaCommitInstant.isBefore(instant);
+    } catch (Exception e) {
+      log.error("Error checking if incremental sync is safe from {}", instant, e);

Review Comment:
   Done
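
   Also, for reviewers' context, here is a minimal usage sketch of this
conversion source as defined above. It is an untested illustration: it assumes
the Hadoop-backed `DefaultEngine` from the delta-kernel-defaults module, and
the path and table name are placeholders.

   ```java
   import org.apache.hadoop.conf.Configuration;

   import io.delta.kernel.defaults.engine.DefaultEngine;
   import io.delta.kernel.engine.Engine;

   import org.apache.xtable.kernel.DeltaKernelConversionSource;
   import org.apache.xtable.model.InternalSnapshot;

   public class DeltaKernelConversionSourceExample {
     public static void main(String[] args) {
       // Engine backed by the local Hadoop configuration (delta-kernel-defaults).
       Engine engine = DefaultEngine.create(new Configuration());

       // The builder fields below match the class introduced in this PR.
       DeltaKernelConversionSource source =
           DeltaKernelConversionSource.builder()
               .engine(engine)
               .basePath("/tmp/delta/my_table") // placeholder path
               .tableName("my_table") // placeholder name
               .build();

       // Read the latest snapshot in XTable's canonical representation.
       InternalSnapshot snapshot = source.getCurrentSnapshot();
       System.out.println(snapshot.getTable().getLatestCommitTime());
     }
   }
   ```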



##########
xtable-core/src/main/java/org/apache/xtable/kernel/DeltaKernelTableExtractor.java:
##########
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+package org.apache.xtable.kernel;
+
+import java.time.Instant;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import lombok.Builder;
+
+import io.delta.kernel.Snapshot;
+import io.delta.kernel.Table;
+import io.delta.kernel.engine.Engine;
+import io.delta.kernel.types.StructField;
+import io.delta.kernel.types.StructType;
+
+import org.apache.xtable.exception.SchemaExtractorException;
+import org.apache.xtable.model.InternalTable;
+import org.apache.xtable.model.schema.InternalPartitionField;
+import org.apache.xtable.model.schema.InternalSchema;
+import org.apache.xtable.model.storage.DataLayoutStrategy;
+import org.apache.xtable.model.storage.TableFormat;
+
+/**
+ * Extracts the canonical {@link InternalTable} representation of a Delta table at a point in time.
+ */
+@Builder
+public class DeltaKernelTableExtractor {
+  private static final DeltaKernelSchemaExtractor schemaExtractor =
+      DeltaKernelSchemaExtractor.getInstance();
+
+  private final String basePath;
+
+  public InternalTable table(
+      Table deltaKernelTable, Snapshot snapshot, Engine engine, String tableName, String basePath) {
+    try {
+      // Convert the full table schema from Delta Kernel's snapshot
+      StructType schema = snapshot.getSchema();
+      InternalSchema internalSchema = schemaExtractor.toInternalSchema(schema);
+      // Build a StructType containing only the partition columns
+      List<String> partitionColumns = snapshot.getPartitionColumnNames();
+      List<StructField> partitionFieldSchemas =
+          schema.fields().stream()
+              .filter(field -> partitionColumns.contains(field.getName()))
+              .collect(Collectors.toList());
+      StructType partitionSchema = new StructType(partitionFieldSchemas);
+
+      List<InternalPartitionField> partitionFields =
+          DeltaKernelPartitionExtractor.getInstance()
+              .convertFromDeltaPartitionFormat(internalSchema, partitionSchema);
+
+      DataLayoutStrategy dataLayoutStrategy =
+          !partitionFields.isEmpty()
+              ? DataLayoutStrategy.HIVE_STYLE_PARTITION
+              : DataLayoutStrategy.FLAT;
+
+      // Get the timestamp
+      long timestamp = snapshot.getTimestamp(engine);
+      return InternalTable.builder()
+          .tableFormat(TableFormat.DELTA)
+          .basePath(basePath)
+          .name(tableName)
+          .layoutStrategy(dataLayoutStrategy)
+          .partitioningFields(partitionFields)
+          .readSchema(internalSchema)
+          .latestCommitTime(Instant.ofEpochMilli(timestamp))
+          .latestMetadataPath(basePath + "/_delta_log")
+          .build();
+    } catch (Exception e) {

Review Comment:
   Done, removed it. 



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
