[GitHub] [flink-table-store] JingsongLi commented on a diff in pull request #145: [FLINK-27875] Introduce TableScan and TableRead as an abstraction layer above FileStore for reading RowData

2022-06-02 Thread GitBox


JingsongLi commented on code in PR #145:
URL: https://github.com/apache/flink-table-store/pull/145#discussion_r887643249


##
flink-table-store-core/src/main/java/org/apache/flink/table/store/table/source/TableScan.java:
##
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.table.store.table.source;
+
+import org.apache.flink.core.fs.Path;
+import org.apache.flink.table.data.binary.BinaryRowData;
+import org.apache.flink.table.store.file.data.DataFileMeta;
+import org.apache.flink.table.store.file.operation.FileStoreScan;
+import org.apache.flink.table.store.file.predicate.And;
+import org.apache.flink.table.store.file.predicate.CompoundPredicate;
+import org.apache.flink.table.store.file.predicate.LeafPredicate;
+import org.apache.flink.table.store.file.predicate.Predicate;
+import org.apache.flink.table.store.file.predicate.PredicateBuilder;
+import org.apache.flink.table.store.file.schema.Schema;
+import org.apache.flink.table.store.file.utils.FileStorePathFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+
+/** An abstraction layer above {@link FileStoreScan} to provide input split 
generation. */
+public abstract class TableScan {
+
+protected final FileStoreScan scan;
+private final int[] fieldIdxToPartitionIdx;

Review Comment:
   The `fieldIdxToPartitionIdx` can be a local variable. It looks a bit strange 
as a class member — we don't need to keep it around for reuse.
   We can just store the schema instead.



##
flink-table-store-core/src/main/java/org/apache/flink/table/store/table/source/TableScan.java:
##
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.table.store.table.source;
+
+import org.apache.flink.core.fs.Path;
+import org.apache.flink.table.data.binary.BinaryRowData;
+import org.apache.flink.table.store.file.data.DataFileMeta;
+import org.apache.flink.table.store.file.operation.FileStoreScan;
+import org.apache.flink.table.store.file.predicate.And;
+import org.apache.flink.table.store.file.predicate.CompoundPredicate;
+import org.apache.flink.table.store.file.predicate.LeafPredicate;
+import org.apache.flink.table.store.file.predicate.Predicate;
+import org.apache.flink.table.store.file.predicate.PredicateBuilder;
+import org.apache.flink.table.store.file.schema.Schema;
+import org.apache.flink.table.store.file.utils.FileStorePathFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+
+/** An abstraction layer above {@link FileStoreScan} to provide input split 
generation. */
+public abstract class TableScan {
+
+protected final FileStoreScan scan;
+private final int[] fieldIdxToPartitionIdx;
+private final FileStorePathFactory pathFactory;
+
+protected TableScan(FileStoreScan scan, Schema schema, 
FileStorePathFactory pathFactory) {
+this.scan = scan;
+List<String> partitionKeys = schema.partitionKeys();
+this.fieldIdxToPartitionIdx =
+schema.fields().stream().mapToInt(f -> 
partitionKeys.indexOf(f.name())).toArray();
+this.pathFactory = pathFactory;
+}
+
+public TableScan withSnapshot(long snapshotId) {
+scan.withSnapshot(snapshotId);
+return this;
+}
+
+public TableScan withFilter(Predicate predicate) {
+List<Predicate> partitionFilters = new ArrayList<>();
+

[GitHub] [flink-table-store] JingsongLi commented on a diff in pull request #145: [FLINK-27875] Introduce TableScan and TableRead as an abstraction layer above FileStore for reading RowData

2022-06-02 Thread GitBox


JingsongLi commented on code in PR #145:
URL: https://github.com/apache/flink-table-store/pull/145#discussion_r887754365


##
flink-table-store-core/src/main/java/org/apache/flink/table/store/table/AppendOnlyFileStoreTable.java:
##
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.table.store.table;
+
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.store.file.FileStore;
+import org.apache.flink.table.store.file.FileStoreImpl;
+import org.apache.flink.table.store.file.FileStoreOptions;
+import org.apache.flink.table.store.file.KeyValue;
+import org.apache.flink.table.store.file.WriteMode;
+import org.apache.flink.table.store.file.operation.FileStoreRead;
+import org.apache.flink.table.store.file.operation.FileStoreScan;
+import org.apache.flink.table.store.file.predicate.Predicate;
+import org.apache.flink.table.store.file.schema.Schema;
+import org.apache.flink.table.store.table.source.TableRead;
+import org.apache.flink.table.store.table.source.TableScan;
+import org.apache.flink.table.store.table.source.ValueContentRowDataIterator;
+import org.apache.flink.table.types.logical.RowType;
+
+import java.util.Iterator;
+
+/** {@link FileStoreTable} for {@link WriteMode#APPEND_ONLY} write mode. */
+public class AppendOnlyFileStoreTable implements FileStoreTable {
+
+private final Schema schema;
+private final FileStoreImpl store;
+
+AppendOnlyFileStoreTable(Schema schema, Configuration conf, String user) {
+this.schema = schema;
+this.store =
+new FileStoreImpl(
+schema.id(),
+new FileStoreOptions(conf),
+WriteMode.APPEND_ONLY,
+user,
+schema.logicalPartitionType(),
+RowType.of(),
+schema.logicalRowType(),
+null);
+}
+
+@Override
+public TableScan newScan(boolean isStreaming) {
+FileStoreScan scan = store.newScan();
+if (isStreaming) {
+scan.withIncremental(true);
+}
+
+return new TableScan(scan, schema, store.pathFactory()) {
+@Override
+protected void withNonPartitionFilter(Predicate predicate) {
+scan.withValueFilter(predicate);
+}
+};
+}
+
+@Override
+public TableRead newRead(boolean isStreaming) {
+FileStoreRead read = store.newRead();
+if (isStreaming) {
+read.withDropDelete(false);

Review Comment:
   Yes, but there is a check in `FileStoreReadImpl`.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscribe@flink.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org