Copilot commented on code in PR #9160: URL: https://github.com/apache/gravitino/pull/9160#discussion_r2535809360
########## catalogs/catalog-lakehouse-generic/src/main/java/org/apache/gravitino/catalog/lakehouse/generic/GenericTablePropertiesMetadata.java: ########## @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.catalog.lakehouse.generic; + +import static org.apache.gravitino.connector.PropertyEntry.stringOptionalPropertyEntry; +import static org.apache.gravitino.connector.PropertyEntry.stringRequiredPropertyEntry; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Maps; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import org.apache.gravitino.connector.BasePropertiesMetadata; +import org.apache.gravitino.connector.PropertyEntry; +import org.apache.gravitino.rel.Table; + +public class GenericTablePropertiesMetadata extends BasePropertiesMetadata { + + private static final Map<String, PropertyEntry<?>> PROPERTIES_METADATA; + + static { + List<PropertyEntry<?>> propertyEntries = + ImmutableList.of( + stringOptionalPropertyEntry( + Table.PROPERTY_LOCATION, + "The root directory of the generic table.", + false /* immutable */, + null, /* defaultValue */ + 
false /* hidden */), + stringRequiredPropertyEntry( + Table.PROPERTY_TABLE_FORMAT, + "The format of the table", + true /* immutable */, + false /* hidden */)); + + PROPERTIES_METADATA = Maps.uniqueIndex(propertyEntries, PropertyEntry::getName); + } + + @Override + protected Map<String, PropertyEntry<?>> specificPropertyEntries() { + Map<String, PropertyEntry<?>> tableSpecificPropertyEntries = + LakehouseTableDelegatorFactory.tableDelegators().entrySet().stream() + .flatMap(kv -> kv.getValue().tablePropertyEntries().stream()) + .collect(Collectors.toMap(PropertyEntry::getName, entry -> entry)); Review Comment: In the `specificPropertyEntries()` method, if two different table delegators define property entries with the same name, the two-argument `Collectors.toMap()` overload will throw an `IllegalStateException` with only a generic "Duplicate key" message (it does not silently keep either entry). This makes the conflict hard to diagnose, because the failing property name and the delegators involved are not clearly identified. Consider: 1. Using `Collectors.toMap()` with an explicit merge function that throws an exception naming the duplicate property, so conflicts are reported with a clear message 2. Or using a different collection strategy that preserves all entries Example: ```java .collect(Collectors.toMap( PropertyEntry::getName, entry -> entry, (e1, e2) -> { throw new IllegalStateException("Duplicate property entry: " + e1.getName()); })) ``` ```suggestion .collect(Collectors.toMap( PropertyEntry::getName, entry -> entry, (e1, e2) -> { throw new IllegalStateException("Duplicate property entry: " + e1.getName()); } )); ``` ########## catalogs/catalog-lakehouse-generic/src/main/java/org/apache/gravitino/catalog/lakehouse/generic/GenericCatalogOperations.java: ########## @@ -0,0 +1,331 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.catalog.lakehouse.generic; + +import static org.apache.gravitino.Entity.EntityType.TABLE; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.collect.Maps; +import java.io.IOException; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import org.apache.commons.lang3.StringUtils; +import org.apache.gravitino.Catalog; +import org.apache.gravitino.EntityStore; +import org.apache.gravitino.GravitinoEnv; +import org.apache.gravitino.NameIdentifier; +import org.apache.gravitino.Namespace; +import org.apache.gravitino.Schema; +import org.apache.gravitino.SchemaChange; +import org.apache.gravitino.catalog.ManagedSchemaOperations; +import org.apache.gravitino.catalog.ManagedTableOperations; +import org.apache.gravitino.connector.CatalogInfo; +import org.apache.gravitino.connector.CatalogOperations; +import org.apache.gravitino.connector.HasPropertyMetadata; +import org.apache.gravitino.connector.SupportsSchemas; +import org.apache.gravitino.exceptions.NoSuchCatalogException; +import org.apache.gravitino.exceptions.NoSuchEntityException; +import org.apache.gravitino.exceptions.NoSuchSchemaException; +import 
org.apache.gravitino.exceptions.NoSuchTableException; +import org.apache.gravitino.exceptions.NonEmptySchemaException; +import org.apache.gravitino.exceptions.SchemaAlreadyExistsException; +import org.apache.gravitino.exceptions.TableAlreadyExistsException; +import org.apache.gravitino.meta.TableEntity; +import org.apache.gravitino.rel.Column; +import org.apache.gravitino.rel.Table; +import org.apache.gravitino.rel.TableCatalog; +import org.apache.gravitino.rel.TableChange; +import org.apache.gravitino.rel.expressions.distributions.Distribution; +import org.apache.gravitino.rel.expressions.sorts.SortOrder; +import org.apache.gravitino.rel.expressions.transforms.Transform; +import org.apache.gravitino.rel.indexes.Index; +import org.apache.gravitino.storage.IdGenerator; + +/** Operations for interacting with a generic lakehouse catalog in Apache Gravitino. */ +public class GenericCatalogOperations implements CatalogOperations, SupportsSchemas, TableCatalog { + + private static final String SLASH = "/"; + + private final ManagedSchemaOperations schemaOps; + + private final Map<String, ManagedTableOperations> tableOpsCache; + + private Optional<String> catalogLocation; + + private HasPropertyMetadata propertiesMetadata; + + private final Cache<NameIdentifier, String> tableFormatCache; + + private final EntityStore store; + + public GenericCatalogOperations() { + this(GravitinoEnv.getInstance().entityStore(), GravitinoEnv.getInstance().idGenerator()); + } + + @VisibleForTesting + GenericCatalogOperations(EntityStore store, IdGenerator idGenerator) { + this.store = store; + + this.schemaOps = + new ManagedSchemaOperations() { + @Override + protected EntityStore store() { + return store; + } + }; + + this.tableFormatCache = CacheBuilder.newBuilder().maximumSize(1000).build(); + + Map<String, LakehouseTableDelegator> tableDelegators = + LakehouseTableDelegatorFactory.tableDelegators(); + tableOpsCache = + tableDelegators.entrySet().stream() + .collect( + Collectors.toMap( 
+ Map.Entry::getKey, + e -> e.getValue().createTableOps(store, schemaOps, idGenerator))); + } + + @Override + public void initialize( + Map<String, String> conf, CatalogInfo info, HasPropertyMetadata propertiesMetadata) + throws RuntimeException { + String location = + (String) + propertiesMetadata + .catalogPropertiesMetadata() + .getOrDefault(conf, Catalog.PROPERTY_LOCATION); + this.catalogLocation = + StringUtils.isNotBlank(location) + ? Optional.of(location).map(this::ensureTrailingSlash) + : Optional.empty(); + this.propertiesMetadata = propertiesMetadata; + } + + @Override + public void close() { + tableFormatCache.cleanUp(); + } + + @Override + public void testConnection( + NameIdentifier catalogIdent, + Catalog.Type type, + String provider, + String comment, + Map<String, String> properties) { + // No-op for generic lakehouse catalog. + } + + @Override + public NameIdentifier[] listSchemas(Namespace namespace) throws NoSuchCatalogException { + return schemaOps.listSchemas(namespace); + } + + @Override + public Schema createSchema(NameIdentifier ident, String comment, Map<String, String> properties) + throws NoSuchCatalogException, SchemaAlreadyExistsException { + return schemaOps.createSchema(ident, comment, properties); + } + + @Override + public Schema loadSchema(NameIdentifier ident) throws NoSuchSchemaException { + return schemaOps.loadSchema(ident); + } + + @Override + public Schema alterSchema(NameIdentifier ident, SchemaChange... changes) + throws NoSuchSchemaException { + return schemaOps.alterSchema(ident, changes); + } + + @Override + public boolean dropSchema(NameIdentifier ident, boolean cascade) throws NonEmptySchemaException { + Namespace tableNs = + Namespace.of(ident.namespace().level(0), ident.namespace().level(1), ident.name()); + NameIdentifier[] tableIdents; + try { + tableIdents = listTables(tableNs); + } catch (NoSuchSchemaException e) { + // If schema does not exist, return false. 
+ return false; + } + + if (!cascade && tableIdents.length > 0) { + throw new NonEmptySchemaException( + "Schema %s is not empty, cannot drop it without cascade", ident); + } + + // Drop all tables under the schema first if cascade is true. + for (NameIdentifier tableIdent : tableIdents) { + tableOps(tableIdent).dropTable(tableIdent); + } + + return schemaOps.dropSchema(ident, cascade); + } + + @Override + public NameIdentifier[] listTables(Namespace namespace) throws NoSuchSchemaException { + // We get the table operations from any cached table ops, since listing tables is not + // format-specific. + ManagedTableOperations tableOps = tableOpsCache.values().iterator().next(); + return tableOps.listTables(namespace); + } + + @Override + public Table loadTable(NameIdentifier ident) throws NoSuchTableException { + return tableOps(ident).loadTable(ident); + } + + @Override + public Table createTable( + NameIdentifier ident, + Column[] columns, + String comment, + Map<String, String> properties, + Transform[] partitions, + Distribution distribution, + SortOrder[] sortOrders, + Index[] indexes) + throws NoSuchSchemaException, TableAlreadyExistsException { + Schema schema = loadSchema(NameIdentifier.of(ident.namespace().levels())); + String tableLocation = calculateTableLocation(schema, ident, properties); + String format = properties.getOrDefault(Table.PROPERTY_TABLE_FORMAT, null); + Preconditions.checkArgument( + format != null, "Table format must be specified in table properties"); + + Map<String, String> newProperties = Maps.newHashMap(properties); + newProperties.put(Table.PROPERTY_LOCATION, tableLocation); + tableFormatCache.put(ident, format.toLowerCase(Locale.ROOT)); + + try { + return tableOps(ident) + .createTable( + ident, + columns, + comment, + newProperties, + partitions, + distribution, + sortOrders, + indexes); + } finally { + tableFormatCache.invalidate(ident); + } + } + + @Override + public Table alterTable(NameIdentifier ident, TableChange... 
changes) + throws NoSuchTableException, IllegalArgumentException { + return tableOps(ident).alterTable(ident, changes); + } + + @Override + public boolean purgeTable(NameIdentifier ident) { + boolean purged = tableOps(ident).purgeTable(ident); + tableFormatCache.invalidate(ident); + return purged; + } + + @Override + public boolean dropTable(NameIdentifier ident) throws UnsupportedOperationException { + boolean dropped = tableOps(ident).dropTable(ident); + tableFormatCache.invalidate(ident); + return dropped; + } + + private String calculateTableLocation( + Schema schema, NameIdentifier tableIdent, Map<String, String> tableProperties) { + String tableLocation = + (String) + propertiesMetadata + .tablePropertiesMetadata() + .getOrDefault(tableProperties, Table.PROPERTY_LOCATION); + if (StringUtils.isNotBlank(tableLocation)) { + return ensureTrailingSlash(tableLocation); + } + + String schemaLocation = + schema.properties() == null ? null : schema.properties().get(Schema.PROPERTY_LOCATION); + + // If we do not set location in table properties, and schema location is set, use schema + // location as the base path. + if (StringUtils.isNotBlank(schemaLocation)) { + return ensureTrailingSlash(schemaLocation) + tableIdent.name() + SLASH; + } + + // If the schema location is not set, use catalog lakehouse dir as the base path. Or else, throw + // an exception. + if (catalogLocation.isEmpty()) { + throw new IllegalArgumentException( + "'location' property is neither set in table properties " + + "nor in schema properties, and no location is set in catalog properties either. " + + "Please set the 'location' in either of them to create the table " + + tableIdent); + } + + return ensureTrailingSlash(catalogLocation.get()) + + tableIdent.namespace().level(2) + + SLASH + + tableIdent.name() + + SLASH; + } + + private String ensureTrailingSlash(String path) { + return path.endsWith(SLASH) ? 
path : path + SLASH; + } + + private ManagedTableOperations tableOps(NameIdentifier tableIdent) { + try { + String tableFormat = + tableFormatCache.get( + tableIdent, + () -> { + try { + TableEntity table = store.get(tableIdent, TABLE, TableEntity.class); + String format = + table.properties().getOrDefault(Table.PROPERTY_TABLE_FORMAT, null); + Preconditions.checkArgument( + format != null, + "Table format for %s is null, this is unexpected", + tableIdent); + + return format.toLowerCase(Locale.ROOT); + } catch (IOException e) { + throw new RuntimeException("Failed to get table format for " + tableIdent, e); + } catch (NoSuchEntityException e) { + throw new NoSuchTableException(e, "Table %s does not exist", tableIdent); + } + }); + + ManagedTableOperations ops = tableOpsCache.get(tableFormat); + Preconditions.checkArgument( + ops != null, "No table operations found for table format %s", tableFormat); + return ops; + + } catch (Exception e) { Review Comment: The method `tableOps()` catches a generic `Exception` on line 327 and wraps it in a `RuntimeException`. This loses exception context and makes debugging harder. Consider handling specific exceptions separately: - `NoSuchEntityException` is already handled - `IOException` from store operations should be handled explicitly - `ExecutionException` from the cache should be unwrapped to provide the actual cause This would provide clearer error messages and better exception handling for different failure scenarios. 
```suggestion } catch (ExecutionException e) { Throwable cause = e.getCause(); if (cause instanceof NoSuchTableException) { throw (NoSuchTableException) cause; } else if (cause instanceof IOException) { throw new RuntimeException("Failed to get table operations for " + tableIdent, cause); } else if (cause instanceof RuntimeException) { throw (RuntimeException) cause; } else { throw new RuntimeException("Failed to get table operations for " + tableIdent, cause); } } catch (IOException e) { ``` ########## catalogs/catalog-lakehouse-generic/src/main/java/org/apache/gravitino/catalog/lakehouse/lance/LanceTableOperations.java: ########## @@ -0,0 +1,253 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.gravitino.catalog.lakehouse.lance; + +import com.google.common.base.Preconditions; +import com.lancedb.lance.Dataset; +import com.lancedb.lance.WriteParams; +import com.lancedb.lance.index.DistanceType; +import com.lancedb.lance.index.IndexParams; +import com.lancedb.lance.index.IndexType; +import com.lancedb.lance.index.vector.VectorIndexParams; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import org.apache.arrow.memory.RootAllocator; +import org.apache.arrow.vector.types.pojo.Field; +import org.apache.commons.lang3.StringUtils; +import org.apache.gravitino.EntityStore; +import org.apache.gravitino.NameIdentifier; +import org.apache.gravitino.catalog.ManagedSchemaOperations; +import org.apache.gravitino.catalog.ManagedTableOperations; +import org.apache.gravitino.connector.SupportsSchemas; +import org.apache.gravitino.exceptions.NoSuchSchemaException; +import org.apache.gravitino.exceptions.NoSuchTableException; +import org.apache.gravitino.exceptions.TableAlreadyExistsException; +import org.apache.gravitino.lance.common.ops.gravitino.LanceDataTypeConverter; +import org.apache.gravitino.lance.common.utils.LancePropertiesUtils; +import org.apache.gravitino.rel.Column; +import org.apache.gravitino.rel.Table; +import org.apache.gravitino.rel.TableChange; +import org.apache.gravitino.rel.expressions.distributions.Distribution; +import org.apache.gravitino.rel.expressions.sorts.SortOrder; +import org.apache.gravitino.rel.expressions.transforms.Transform; +import org.apache.gravitino.rel.indexes.Index; +import org.apache.gravitino.rel.indexes.Indexes; +import org.apache.gravitino.storage.IdGenerator; + +public class LanceTableOperations extends ManagedTableOperations { + + private final EntityStore store; + + private final ManagedSchemaOperations schemaOps; + + private final IdGenerator idGenerator; + + public LanceTableOperations( + EntityStore store, 
ManagedSchemaOperations schemaOps, IdGenerator idGenerator) { + this.store = store; + this.schemaOps = schemaOps; + this.idGenerator = idGenerator; + } + + @Override + protected EntityStore store() { + return store; + } + + @Override + protected SupportsSchemas schemas() { + return schemaOps; + } + + @Override + protected IdGenerator idGenerator() { + return idGenerator; + } + + @Override + public Table createTable( + NameIdentifier ident, + Column[] columns, + String comment, + Map<String, String> properties, + Transform[] partitions, + Distribution distribution, + SortOrder[] sortOrders, + Index[] indexes) + throws NoSuchSchemaException, TableAlreadyExistsException { + String location = properties.get(Table.PROPERTY_LOCATION); + Preconditions.checkArgument( + StringUtils.isNotBlank(location), "Table location must be specified"); + Map<String, String> storageProps = LancePropertiesUtils.getLanceStorageOptions(properties); + + boolean register = + Optional.ofNullable(properties.get(LanceTableDelegator.PROPERTY_LANCE_TABLE_REGISTER)) + .map(Boolean::parseBoolean) + .orElse(false); + if (register) { + // If this is a registration operation, just create the table metadata without creating a new + // dataset + return super.createTable( + ident, columns, comment, properties, partitions, distribution, sortOrders, indexes); + } + + try (Dataset ignored = + Dataset.create( + new RootAllocator(), + location, + convertColumnsToArrowSchema(columns), + new WriteParams.Builder().withStorageOptions(storageProps).build())) { + // Only the lance table is created will we create the table metadata in Gravitino. Review Comment: The comment on line 119 states "Only the lance table is created will we create the table metadata in Gravitino." This has grammatical issues. Consider rephrasing to: ```java // Only create the table metadata in Gravitino after the Lance dataset is successfully created. 
``` ```suggestion // Only create the table metadata in Gravitino after the Lance dataset is successfully created. ``` ########## catalogs/catalog-lakehouse-generic/src/main/java/org/apache/gravitino/catalog/lakehouse/generic/GenericCatalogOperations.java: ########## @@ -0,0 +1,331 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.gravitino.catalog.lakehouse.generic; + +import static org.apache.gravitino.Entity.EntityType.TABLE; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.collect.Maps; +import java.io.IOException; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import org.apache.commons.lang3.StringUtils; +import org.apache.gravitino.Catalog; +import org.apache.gravitino.EntityStore; +import org.apache.gravitino.GravitinoEnv; +import org.apache.gravitino.NameIdentifier; +import org.apache.gravitino.Namespace; +import org.apache.gravitino.Schema; +import org.apache.gravitino.SchemaChange; +import org.apache.gravitino.catalog.ManagedSchemaOperations; +import org.apache.gravitino.catalog.ManagedTableOperations; +import org.apache.gravitino.connector.CatalogInfo; +import org.apache.gravitino.connector.CatalogOperations; +import org.apache.gravitino.connector.HasPropertyMetadata; +import org.apache.gravitino.connector.SupportsSchemas; +import org.apache.gravitino.exceptions.NoSuchCatalogException; +import org.apache.gravitino.exceptions.NoSuchEntityException; +import org.apache.gravitino.exceptions.NoSuchSchemaException; +import org.apache.gravitino.exceptions.NoSuchTableException; +import org.apache.gravitino.exceptions.NonEmptySchemaException; +import org.apache.gravitino.exceptions.SchemaAlreadyExistsException; +import org.apache.gravitino.exceptions.TableAlreadyExistsException; +import org.apache.gravitino.meta.TableEntity; +import org.apache.gravitino.rel.Column; +import org.apache.gravitino.rel.Table; +import org.apache.gravitino.rel.TableCatalog; +import org.apache.gravitino.rel.TableChange; +import org.apache.gravitino.rel.expressions.distributions.Distribution; +import org.apache.gravitino.rel.expressions.sorts.SortOrder; 
+import org.apache.gravitino.rel.expressions.transforms.Transform; +import org.apache.gravitino.rel.indexes.Index; +import org.apache.gravitino.storage.IdGenerator; + +/** Operations for interacting with a generic lakehouse catalog in Apache Gravitino. */ +public class GenericCatalogOperations implements CatalogOperations, SupportsSchemas, TableCatalog { + + private static final String SLASH = "/"; + + private final ManagedSchemaOperations schemaOps; + + private final Map<String, ManagedTableOperations> tableOpsCache; + + private Optional<String> catalogLocation; + + private HasPropertyMetadata propertiesMetadata; + + private final Cache<NameIdentifier, String> tableFormatCache; + + private final EntityStore store; + + public GenericCatalogOperations() { + this(GravitinoEnv.getInstance().entityStore(), GravitinoEnv.getInstance().idGenerator()); + } + + @VisibleForTesting + GenericCatalogOperations(EntityStore store, IdGenerator idGenerator) { + this.store = store; + + this.schemaOps = + new ManagedSchemaOperations() { + @Override + protected EntityStore store() { + return store; + } + }; + + this.tableFormatCache = CacheBuilder.newBuilder().maximumSize(1000).build(); + + Map<String, LakehouseTableDelegator> tableDelegators = + LakehouseTableDelegatorFactory.tableDelegators(); + tableOpsCache = + tableDelegators.entrySet().stream() + .collect( + Collectors.toMap( + Map.Entry::getKey, + e -> e.getValue().createTableOps(store, schemaOps, idGenerator))); Review Comment: The `tableOpsCache` is initialized as a plain `Map`, but concurrent access could occur if multiple threads create tables with different formats simultaneously. 
Consider using `ConcurrentHashMap` or making this field final with an immutable map since it's populated once during initialization: ```java private final Map<String, ManagedTableOperations> tableOpsCache; ``` And in the constructor: ```java tableOpsCache = Collections.unmodifiableMap( tableDelegators.entrySet().stream() .collect(Collectors.toMap( Map.Entry::getKey, e -> e.getValue().createTableOps(store, schemaOps, idGenerator)))); ``` ```suggestion java.util.Collections.unmodifiableMap( tableDelegators.entrySet().stream() .collect( Collectors.toMap( Map.Entry::getKey, e -> e.getValue().createTableOps(store, schemaOps, idGenerator)))); ``` ########## catalogs/catalog-lakehouse-generic/src/main/java/org/apache/gravitino/catalog/lakehouse/lance/LanceTableOperations.java: ########## @@ -0,0 +1,253 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.gravitino.catalog.lakehouse.lance; + +import com.google.common.base.Preconditions; +import com.lancedb.lance.Dataset; +import com.lancedb.lance.WriteParams; +import com.lancedb.lance.index.DistanceType; +import com.lancedb.lance.index.IndexParams; +import com.lancedb.lance.index.IndexType; +import com.lancedb.lance.index.vector.VectorIndexParams; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import org.apache.arrow.memory.RootAllocator; +import org.apache.arrow.vector.types.pojo.Field; +import org.apache.commons.lang3.StringUtils; +import org.apache.gravitino.EntityStore; +import org.apache.gravitino.NameIdentifier; +import org.apache.gravitino.catalog.ManagedSchemaOperations; +import org.apache.gravitino.catalog.ManagedTableOperations; +import org.apache.gravitino.connector.SupportsSchemas; +import org.apache.gravitino.exceptions.NoSuchSchemaException; +import org.apache.gravitino.exceptions.NoSuchTableException; +import org.apache.gravitino.exceptions.TableAlreadyExistsException; +import org.apache.gravitino.lance.common.ops.gravitino.LanceDataTypeConverter; +import org.apache.gravitino.lance.common.utils.LancePropertiesUtils; +import org.apache.gravitino.rel.Column; +import org.apache.gravitino.rel.Table; +import org.apache.gravitino.rel.TableChange; +import org.apache.gravitino.rel.expressions.distributions.Distribution; +import org.apache.gravitino.rel.expressions.sorts.SortOrder; +import org.apache.gravitino.rel.expressions.transforms.Transform; +import org.apache.gravitino.rel.indexes.Index; +import org.apache.gravitino.rel.indexes.Indexes; +import org.apache.gravitino.storage.IdGenerator; + +public class LanceTableOperations extends ManagedTableOperations { + + private final EntityStore store; + + private final ManagedSchemaOperations schemaOps; + + private final IdGenerator idGenerator; + + public LanceTableOperations( + EntityStore store, 
ManagedSchemaOperations schemaOps, IdGenerator idGenerator) { + this.store = store; + this.schemaOps = schemaOps; + this.idGenerator = idGenerator; + } + + @Override + protected EntityStore store() { + return store; + } + + @Override + protected SupportsSchemas schemas() { + return schemaOps; + } + + @Override + protected IdGenerator idGenerator() { + return idGenerator; + } + + @Override + public Table createTable( + NameIdentifier ident, + Column[] columns, + String comment, + Map<String, String> properties, + Transform[] partitions, + Distribution distribution, + SortOrder[] sortOrders, + Index[] indexes) + throws NoSuchSchemaException, TableAlreadyExistsException { + String location = properties.get(Table.PROPERTY_LOCATION); + Preconditions.checkArgument( + StringUtils.isNotBlank(location), "Table location must be specified"); + Map<String, String> storageProps = LancePropertiesUtils.getLanceStorageOptions(properties); + + boolean register = + Optional.ofNullable(properties.get(LanceTableDelegator.PROPERTY_LANCE_TABLE_REGISTER)) + .map(Boolean::parseBoolean) + .orElse(false); + if (register) { + // If this is a registration operation, just create the table metadata without creating a new + // dataset + return super.createTable( + ident, columns, comment, properties, partitions, distribution, sortOrders, indexes); + } + + try (Dataset ignored = + Dataset.create( + new RootAllocator(), + location, + convertColumnsToArrowSchema(columns), + new WriteParams.Builder().withStorageOptions(storageProps).build())) { + // Only the lance table is created will we create the table metadata in Gravitino. + return super.createTable( + ident, columns, comment, properties, partitions, distribution, sortOrders, indexes); + } catch (Exception e) { + throw new RuntimeException("Failed to create Lance dataset at location " + location, e); + } + } + + @Override + public Table alterTable(NameIdentifier ident, TableChange... 
changes) + throws NoSuchSchemaException, TableAlreadyExistsException { + // Lance only supports adding indexes for now. + boolean onlyAddIndex = + Arrays.stream(changes).allMatch(change -> change instanceof TableChange.AddIndex); + Preconditions.checkArgument(onlyAddIndex, "Only adding indexes is supported for Lance tables"); + + List<Index> addedIndexes = + Arrays.stream(changes) + .filter(change -> change instanceof TableChange.AddIndex) + .map( + change -> { + TableChange.AddIndex addIndexChange = (TableChange.AddIndex) change; + return Indexes.IndexImpl.builder() + .withIndexType(addIndexChange.getType()) + .withName(addIndexChange.getName()) + .withFieldNames(addIndexChange.getFieldNames()) + .build(); + }) + .collect(Collectors.toList()); + + Table loadedTable = super.loadTable(ident); + addLanceIndex(loadedTable, addedIndexes); + // After adding the index to the Lance dataset, we need to update the table metadata in + // Gravitino. If there's any failure during this process, the code will throw an exception + // and the update won't be applied in Gravitino. 
+ return super.alterTable(ident, changes); + } + + @Override + public boolean purgeTable(NameIdentifier ident) { + try { + Table table = loadTable(ident); + String location = table.properties().get(Table.PROPERTY_LOCATION); + + // Delete the Lance dataset at the location + Dataset.drop(location, LancePropertiesUtils.getLanceStorageOptions(table.properties())); + // After deleting the dataset, we can delete the table metadata in Gravitino + return super.purgeTable(ident); + + } catch (NoSuchTableException e) { + return false; + } catch (Exception e) { + throw new RuntimeException("Failed to purge Lance dataset for table " + ident, e); + } + } + + @Override + public boolean dropTable(NameIdentifier ident) { + try { + Table table = loadTable(ident); + boolean external = + Optional.ofNullable(table.properties().get(Table.PROPERTY_EXTERNAL)) + .map(Boolean::parseBoolean) + .orElse(false); + + if (external) { + // If the table is external, we only drop the table metadata in Gravitino + return super.dropTable(ident); + } + + String location = table.properties().get(Table.PROPERTY_LOCATION); + + // Delete the Lance dataset at the location + Dataset.drop(location, LancePropertiesUtils.getLanceStorageOptions(table.properties())); + // After deleting the dataset, we can delete the table metadata in Gravitino + return super.dropTable(ident); + + } catch (NoSuchTableException e) { + return false; + } catch (Exception e) { + throw new RuntimeException("Failed to drop Lance dataset for table " + ident, e); + } + } + + private org.apache.arrow.vector.types.pojo.Schema convertColumnsToArrowSchema(Column[] columns) { + List<Field> fields = + Arrays.stream(columns) + .map( + col -> + LanceDataTypeConverter.CONVERTER.toArrowField( + col.name(), col.dataType(), col.nullable())) + .collect(Collectors.toList()); + return new org.apache.arrow.vector.types.pojo.Schema(fields); + } + + private void addLanceIndex(Table table, List<Index> addedIndexes) { + String location = 
table.properties().get(Table.PROPERTY_LOCATION); + try (Dataset dataset = Dataset.open(location, new RootAllocator())) { + // For Lance, we only support adding indexes, so in fact, we can't handle drop index here. + for (Index index : addedIndexes) { + IndexType indexType = IndexType.valueOf(index.type().name()); + IndexParams indexParams = getIndexParamsByIndexType(indexType); + + dataset.createIndex( + Arrays.stream(index.fieldNames()) + .map(fieldPath -> String.join(".", fieldPath)) + .collect(Collectors.toList()), + indexType, + Optional.of(index.name()), + indexParams, + true); + } + } catch (Exception e) { + throw new RuntimeException( + "Failed to add indexes to Lance dataset at location " + location, e); + } + } + + private IndexParams getIndexParamsByIndexType(IndexType indexType) { + switch (indexType) { + case SCALAR: + return new IndexParams.Builder().build(); + case VECTOR: + // TODO make these parameters configurable + int numberOfDimensions = 3; // this value should be determined dynamically based on the data + // Add properties to Index to set this value. + return new IndexParams.Builder() + .setVectorIndexParams( + VectorIndexParams.ivfPq(2, 8, numberOfDimensions, DistanceType.L2, 2)) + .build(); Review Comment: The hardcoded `numberOfDimensions = 3` on line 243 with a TODO comment indicates incomplete implementation. This is problematic because: 1. Vector dimensions should match the actual data dimensions 2. Using an incorrect dimension count will cause runtime failures when creating indexes 3. The comment suggests this should be configurable via Index properties, but there's no validation Consider either: - Throwing an UnsupportedOperationException with a clear message until this is properly implemented - Or requiring the dimension to be specified in the index properties and validating it -- This is an automated message from the Apache Git Service. 
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected]
