Copilot commented on code in PR #2086:
URL: https://github.com/apache/auron/pull/2086#discussion_r2924029898


##########
auron-flink-extension/auron-flink-runtime/src/main/java/org/apache/auron/flink/arrow/writers/TimeWriter.java:
##########
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.auron.flink.arrow.writers;
+
+import org.apache.arrow.vector.BaseFixedWidthVector;
+import org.apache.arrow.vector.TimeMicroVector;
+import org.apache.arrow.vector.TimeMilliVector;
+import org.apache.arrow.vector.TimeNanoVector;
+import org.apache.arrow.vector.TimeSecVector;
+import org.apache.arrow.vector.ValueVector;
+import org.apache.flink.table.data.ArrayData;
+import org.apache.flink.table.data.RowData;
+
+/**
+ * {@link ArrowFieldWriter} for time values stored in Arrow time vectors.
+ *
+ * <p>Supports all four Arrow time precisions: {@link TimeSecVector}, {@link 
TimeMilliVector}, {@link
+ * TimeMicroVector}, and {@link TimeNanoVector}. Flink internally stores TIME 
values as milliseconds
+ * in an {@code int}; this writer converts to the target precision on write.
+ *
+ * <p>Use {@link #forRow(ValueVector)} when writing from {@link RowData} and 
{@link
+ * #forArray(ValueVector)} when writing from {@link ArrayData}.
+ *
+ * @param <T> the input data type
+ */
+public abstract class TimeWriter<T> extends ArrowFieldWriter<T> {
+
+    /** Creates a TimeWriter that reads from {@link RowData}. */
+    public static TimeWriter<RowData> forRow(ValueVector valueVector) {
+        return new TimeWriterForRow(valueVector);
+    }
+
+    /** Creates a TimeWriter that reads from {@link ArrayData}. */
+    public static TimeWriter<ArrayData> forArray(ValueVector valueVector) {
+        return new TimeWriterForArray(valueVector);
+    }
+
+    private TimeWriter(ValueVector valueVector) {
+        super(valueVector);
+    }
+
+    abstract boolean isNullAt(T in, int ordinal);
+
+    abstract int readTime(T in, int ordinal);
+
+    @Override
+    public void doWrite(T in, int ordinal) {
+        ValueVector vector = getValueVector();
+        if (isNullAt(in, ordinal)) {
+            ((BaseFixedWidthVector) vector).setNull(getCount());
+        } else {
+            int millis = readTime(in, ordinal);

Review Comment:
   TimeWriter currently assumes the provided ValueVector is a time vector: it 
unconditionally casts to BaseFixedWidthVector when writing nulls, and if the 
vector is not one of the four supported Time*Vector types, the non-null branch 
silently writes nothing. It would be safer to validate the vector type up front 
(similar to the Preconditions check in TimestampWriter) and/or throw an 
exception in the final else branch, so that misuse results in a clear error 
rather than silent data loss or a ClassCastException.



##########
auron-flink-extension/auron-flink-runtime/src/main/java/org/apache/auron/flink/arrow/writers/MapWriter.java:
##########
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.auron.flink.arrow.writers;
+
+import java.util.Objects;
+import org.apache.arrow.vector.complex.MapVector;
+import org.apache.arrow.vector.complex.StructVector;
+import org.apache.flink.table.data.ArrayData;
+import org.apache.flink.table.data.MapData;
+import org.apache.flink.table.data.RowData;
+
+/**
+ * {@link ArrowFieldWriter} for maps ({@link MapVector}).
+ *
+ * <p>Arrow represents maps as {@code List<Struct{key, value}>}. This writer 
holds separate key and
+ * value writers that operate on {@link ArrayData} (from {@link 
MapData#keyArray()} and {@link
+ * MapData#valueArray()}).
+ *
+ * @param <T> the input data type
+ */
+public abstract class MapWriter<T> extends ArrowFieldWriter<T> {
+
+    /** Creates a MapWriter that reads from {@link RowData}. */
+    public static MapWriter<RowData> forRow(
+            MapVector mapVector, ArrowFieldWriter<ArrayData> keyWriter, 
ArrowFieldWriter<ArrayData> valueWriter) {
+        return new MapWriterForRow(mapVector, keyWriter, valueWriter);
+    }
+
+    /** Creates a MapWriter that reads from {@link ArrayData}. */
+    public static MapWriter<ArrayData> forArray(
+            MapVector mapVector, ArrowFieldWriter<ArrayData> keyWriter, 
ArrowFieldWriter<ArrayData> valueWriter) {
+        return new MapWriterForArray(mapVector, keyWriter, valueWriter);
+    }
+
+    // 
------------------------------------------------------------------------------------------
+
+    private final ArrowFieldWriter<ArrayData> keyWriter;
+    private final ArrowFieldWriter<ArrayData> valueWriter;
+
+    private MapWriter(
+            MapVector mapVector, ArrowFieldWriter<ArrayData> keyWriter, 
ArrowFieldWriter<ArrayData> valueWriter) {
+        super(mapVector);
+        this.keyWriter = Objects.requireNonNull(keyWriter);
+        this.valueWriter = Objects.requireNonNull(valueWriter);
+    }
+
+    abstract boolean isNullAt(T in, int ordinal);
+
+    abstract MapData readMap(T in, int ordinal);
+
+    @Override
+    public void doWrite(T in, int ordinal) {
+        if (!isNullAt(in, ordinal)) {
+            ((MapVector) getValueVector()).startNewValue(getCount());
+
+            StructVector structVector = (StructVector) ((MapVector) 
getValueVector()).getDataVector();
+            MapData map = readMap(in, ordinal);
+            ArrayData keys = map.keyArray();
+            ArrayData values = map.valueArray();
+            for (int i = 0; i < map.size(); i++) {
+                structVector.setIndexDefined(keyWriter.getCount());
+                keyWriter.write(keys, i);
+                valueWriter.write(values, i);
+            }
+
+            ((MapVector) getValueVector()).endValue(getCount(), map.size());
+        }

Review Comment:
   MapWriter.doWrite() skips writing entirely when the input map is null. Under 
MapVector/ListVector semantics this can leave the current position undefined, 
with stale validity/offset state left over from earlier writes (especially 
across reset()), causing incorrect non-null values or invalid offsets. Add an 
explicit null branch that marks the current index as null (and keeps the 
offsets consistent) before returning.
   ```suggestion
           MapVector mapVector = (MapVector) getValueVector();
   
           // Explicitly mark null entries to avoid leaving stale 
validity/offset state.
           if (isNullAt(in, ordinal)) {
               mapVector.setNull(getCount());
               return;
           }
   
           mapVector.startNewValue(getCount());
   
           StructVector structVector = (StructVector) mapVector.getDataVector();
           MapData map = readMap(in, ordinal);
           ArrayData keys = map.keyArray();
           ArrayData values = map.valueArray();
           for (int i = 0; i < map.size(); i++) {
               structVector.setIndexDefined(keyWriter.getCount());
               keyWriter.write(keys, i);
               valueWriter.write(values, i);
           }
   
           mapVector.endValue(getCount(), map.size());
   ```



##########
auron-flink-extension/auron-flink-runtime/src/main/java/org/apache/auron/flink/arrow/writers/ArrayWriter.java:
##########
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.auron.flink.arrow.writers;
+
+import java.util.Objects;
+import org.apache.arrow.vector.complex.ListVector;
+import org.apache.flink.table.data.ArrayData;
+import org.apache.flink.table.data.RowData;
+
+/**
+ * {@link ArrowFieldWriter} for arrays ({@link ListVector}).
+ *
+ * <p>Holds an {@code elementWriter} that writes each array element. The 
element writer operates on
+ * {@link ArrayData} since array elements are accessed via {@link ArrayData} 
interface.
+ *
+ * @param <T> the input data type
+ */
+public abstract class ArrayWriter<T> extends ArrowFieldWriter<T> {
+
+    /** Creates an ArrayWriter that reads from {@link RowData}. */
+    public static ArrayWriter<RowData> forRow(ListVector listVector, 
ArrowFieldWriter<ArrayData> elementWriter) {
+        return new ArrayWriterForRow(listVector, elementWriter);
+    }
+
+    /** Creates an ArrayWriter that reads from {@link ArrayData}. */
+    public static ArrayWriter<ArrayData> forArray(ListVector listVector, 
ArrowFieldWriter<ArrayData> elementWriter) {
+        return new ArrayWriterForArray(listVector, elementWriter);
+    }
+
+    // 
------------------------------------------------------------------------------------------
+
+    private final ArrowFieldWriter<ArrayData> elementWriter;
+
+    private ArrayWriter(ListVector listVector, ArrowFieldWriter<ArrayData> 
elementWriter) {
+        super(listVector);
+        this.elementWriter = Objects.requireNonNull(elementWriter);
+    }
+
+    abstract boolean isNullAt(T in, int ordinal);
+
+    abstract ArrayData readArray(T in, int ordinal);
+
+    @Override
+    public void doWrite(T in, int ordinal) {
+        if (!isNullAt(in, ordinal)) {
+            ((ListVector) getValueVector()).startNewValue(getCount());
+            ArrayData array = readArray(in, ordinal);
+            for (int i = 0; i < array.size(); i++) {

Review Comment:
   ArrayWriter.doWrite() does nothing when the input value is null. For 
ListVector this is unsafe: you still need to explicitly mark the current index 
as null (and ensure the offsets for index+1 are updated); otherwise, stale 
validity/offset buffer contents from a previous batch can leak through after 
reset(), producing incorrect non-null reads or invalid offsets. Add an explicit 
null branch (e.g., listVector.setNull(getCount()) or setSafeNull-style 
handling) so that every written position is defined.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to