This is an automated email from the ASF dual-hosted git repository.

kerwin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/paimon.git


The following commit(s) were added to refs/heads/master by this push:
     new 88df0f0480 [csv] Csv Parser should not depends on jackson (#6508)
88df0f0480 is described below

commit 88df0f0480d94483bdfb378de18b41686d0bf061
Author: Jingsong Lee <[email protected]>
AuthorDate: Mon Nov 3 11:27:18 2025 +0800

    [csv] Csv Parser should not depends on jackson (#6508)
---
 .../apache/paimon/format/csv/CsvFileReader.java    | 174 +--------------
 .../org/apache/paimon/format/csv/CsvParser.java    | 236 +++++++++++++++++++++
 .../apache/paimon/format/json/JsonFileReader.java  |  10 -
 .../paimon/format/text/BaseTextFileReader.java     |  46 ++--
 .../paimon/format/csv/CsvFileFormatTest.java       |  35 ++-
 5 files changed, 305 insertions(+), 196 deletions(-)

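For readers skimming the patch: the change replaces the shaded Jackson CsvMapper with a small hand-written, character-level parser (the new CsvParser class in this diff). The following standalone sketch illustrates the same quote-aware state-machine idea in simplified form; it is only an illustration, not the Paimon class, and it ignores escape characters, null literals, projection, and the malformed-row modes that the real parser handles:

    import java.util.ArrayList;
    import java.util.List;

    /** Simplified sketch of quote-aware CSV splitting; not the Paimon CsvParser. */
    public class SimpleCsvSplitSketch {

        /** Splits one line on the separator, honoring quoted fields and doubled quotes. */
        static List<String> split(String line, char separator, char quote) {
            List<String> fields = new ArrayList<>();
            StringBuilder buffer = new StringBuilder();
            boolean inQuotes = false;
            for (int i = 0; i < line.length(); i++) {
                char c = line.charAt(i);
                if (c == quote) {
                    // a doubled quote inside a quoted field is an escaped quote
                    if (inQuotes && i + 1 < line.length() && line.charAt(i + 1) == quote) {
                        buffer.append(quote);
                        i++;
                    } else {
                        inQuotes = !inQuotes;
                    }
                } else if (c == separator && !inQuotes) {
                    fields.add(buffer.toString()); // end of a value
                    buffer.setLength(0);
                } else {
                    buffer.append(c);
                }
            }
            fields.add(buffer.toString()); // last field
            return fields;
        }

        public static void main(String[] args) {
            // prints [1, Alice, 100.23]
            System.out.println(split("1,\"Alice\",100.23", ',', '"'));
        }
    }

The committed CsvParser below additionally handles escape characters, embedded quotes, the configured null literal, projection mapping, and the PERMISSIVE/DROPMALFORMED/FAILFAST modes.
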
diff --git a/paimon-format/src/main/java/org/apache/paimon/format/csv/CsvFileReader.java b/paimon-format/src/main/java/org/apache/paimon/format/csv/CsvFileReader.java
index daa9dc5659..d8c9eef35a 100644
--- a/paimon-format/src/main/java/org/apache/paimon/format/csv/CsvFileReader.java
+++ b/paimon-format/src/main/java/org/apache/paimon/format/csv/CsvFileReader.java
@@ -18,43 +18,20 @@
 
 package org.apache.paimon.format.csv;
 
-import org.apache.paimon.casting.CastExecutor;
-import org.apache.paimon.casting.CastExecutors;
-import org.apache.paimon.data.BinaryString;
-import org.apache.paimon.data.GenericRow;
 import org.apache.paimon.data.InternalRow;
 import org.apache.paimon.format.text.BaseTextFileReader;
 import org.apache.paimon.fs.FileIO;
 import org.apache.paimon.fs.Path;
-import org.apache.paimon.types.DataType;
-import org.apache.paimon.types.DataTypeRoot;
-import org.apache.paimon.types.DataTypes;
 import org.apache.paimon.types.RowType;
 
-import org.apache.paimon.shade.jackson2.com.fasterxml.jackson.dataformat.csv.CsvMapper;
-import org.apache.paimon.shade.jackson2.com.fasterxml.jackson.dataformat.csv.CsvSchema;
-
 import java.io.IOException;
-import java.util.Base64;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
 
 /** CSV file reader implementation. */
 public class CsvFileReader extends BaseTextFileReader {
 
-    private static final Base64.Decoder BASE64_DECODER = Base64.getDecoder();
-    private static final CsvMapper CSV_MAPPER = new CsvMapper();
-    private static final InternalRow DROP_ROW = new GenericRow(1);
-
-    // Performance optimization: Cache frequently used cast executors
-    private static final Map<String, CastExecutor<?, ?>> CAST_EXECUTOR_CACHE =
-            new ConcurrentHashMap<>(32);
+    private final boolean includeHeader;
+    private final CsvParser csvParser;
 
-    private final CsvOptions formatOptions;
-    private final CsvSchema schema;
-    private final RowType dataSchemaRowType;
-    private final RowType projectedRowType;
-    private final int[] projectionMapping;
     private boolean headerSkipped = false;
 
     public CsvFileReader(
@@ -65,69 +42,28 @@ public class CsvFileReader extends BaseTextFileReader {
             CsvOptions options)
             throws IOException {
         super(fileIO, filePath, projectedRowType);
-        this.dataSchemaRowType = rowReadType;
-        this.projectedRowType = projectedRowType;
-        this.formatOptions = options;
-        this.projectionMapping = createProjectionMapping(rowReadType, projectedRowType);
-        this.schema =
-                CsvSchema.emptySchema()
-                        .withQuoteChar(formatOptions.quoteCharacter().charAt(0))
-                        .withColumnSeparator(formatOptions.fieldDelimiter().charAt(0))
-                        .withEscapeChar(formatOptions.escapeCharacter().charAt(0));
-        if (!formatOptions.includeHeader()) {
-            this.schema.withoutHeader();
-        }
-    }
-
-    @Override
-    protected BaseTextRecordIterator createRecordIterator() {
-        return new CsvRecordIterator();
+        this.includeHeader = options.includeHeader();
+        this.csvParser =
+                new CsvParser(
+                        rowReadType,
+                        createProjectionMapping(rowReadType, projectedRowType),
+                        options);
     }
 
     @Override
-    protected InternalRow parseLine(String line) throws IOException {
-        return parseCsvLine(line, schema);
+    protected InternalRow parseLine(String line) {
+        return csvParser.parse(line);
     }
 
     @Override
     protected void setupReading() throws IOException {
         // Skip header if needed
-        if (formatOptions.includeHeader() && !headerSkipped) {
+        if (includeHeader && !headerSkipped) {
             bufferedReader.readLine();
             headerSkipped = true;
         }
     }
 
-    private class CsvRecordIterator extends BaseTextRecordIterator {
-        @Override
-        public InternalRow next() throws IOException {
-            while (true) {
-                if (readerClosed) {
-                    return null;
-                }
-                String nextLine = bufferedReader.readLine();
-                if (nextLine == null) {
-                    end = true;
-                    return null;
-                }
-
-                currentPosition++;
-                InternalRow row = parseLine(nextLine);
-                if (row != DROP_ROW) {
-                    return row;
-                }
-            }
-        }
-    }
-
-    protected static String[] parseCsvLineToArray(String line, CsvSchema schema)
-            throws IOException {
-        if (line == null || line.isEmpty()) {
-            return new String[] {};
-        }
-        return CSV_MAPPER.readerFor(String[].class).with(schema).readValue(line);
-    }
-
     /**
     * Creates a mapping array from read schema to projected schema. Returns indices of projected
      * columns in the read schema.
@@ -147,92 +83,4 @@ public class CsvFileReader extends BaseTextFileReader {
         }
         return mapping;
     }
-
-    private InternalRow parseCsvLine(String line, CsvSchema schema) throws IOException {
-        String[] fields = parseCsvLineToArray(line, schema);
-        int fieldCount = fields.length;
-
-        // Directly parse only projected fields to avoid unnecessary parsing
-        Object[] projectedValues = new Object[projectedRowType.getFieldCount()];
-        for (int i = 0; i < projectedRowType.getFieldCount(); i++) {
-            int readIndex = projectionMapping[i];
-            // Check if the field exists in the CSV line
-            if (readIndex < fieldCount) {
-                String field = fields[readIndex];
-                // Fast path for null values - check if field is null or empty first
-                if (field == null || field.isEmpty() || field.equals(formatOptions.nullLiteral())) {
-                    projectedValues[i] = null;
-                    continue;
-                }
-
-                // Optimized field parsing with cached cast executors
-                try {
-                    projectedValues[i] =
-                            parseFieldOptimized(
-                                    field.trim(), dataSchemaRowType.getTypeAt(readIndex));
-                } catch (Exception e) {
-                    switch (formatOptions.mode()) {
-                        case PERMISSIVE:
-                            projectedValues[i] = null;
-                            break;
-                        case DROPMALFORMED:
-                            return DROP_ROW;
-                        case FAILFAST:
-                            throw e;
-                    }
-                }
-            } else {
-                projectedValues[i] = null; // Field not present in the CSV line
-            }
-        }
-
-        return GenericRow.of(projectedValues);
-    }
-
-    /** Optimized field parsing with caching and fast paths for common types. */
-    private Object parseFieldOptimized(String field, DataType dataType) {
-        if (field == null || field.equals(formatOptions.nullLiteral())) {
-            return null;
-        }
-
-        DataTypeRoot typeRoot = dataType.getTypeRoot();
-        switch (typeRoot) {
-            case TINYINT:
-                return Byte.parseByte(field);
-            case SMALLINT:
-                return Short.parseShort(field);
-            case INTEGER:
-                return Integer.parseInt(field);
-            case BIGINT:
-                return Long.parseLong(field);
-            case FLOAT:
-                return Float.parseFloat(field);
-            case DOUBLE:
-                return Double.parseDouble(field);
-            case BOOLEAN:
-                return Boolean.parseBoolean(field);
-            case CHAR:
-            case VARCHAR:
-                return BinaryString.fromString(field);
-            case BINARY:
-            case VARBINARY:
-                return BASE64_DECODER.decode(field);
-            default:
-                return useCachedCastExecutor(field, dataType);
-        }
-    }
-
-    private Object useCachedCastExecutor(String field, DataType dataType) {
-        String cacheKey = dataType.toString();
-        @SuppressWarnings("unchecked")
-        CastExecutor<BinaryString, Object> cast =
-                (CastExecutor<BinaryString, Object>)
-                        CAST_EXECUTOR_CACHE.computeIfAbsent(
-                                cacheKey, k -> CastExecutors.resolve(DataTypes.STRING(), dataType));
-
-        if (cast != null) {
-            return cast.cast(BinaryString.fromString(field));
-        }
-        return BinaryString.fromString(field);
-    }
 }
diff --git a/paimon-format/src/main/java/org/apache/paimon/format/csv/CsvParser.java b/paimon-format/src/main/java/org/apache/paimon/format/csv/CsvParser.java
new file mode 100644
index 0000000000..a74908f73c
--- /dev/null
+++ b/paimon-format/src/main/java/org/apache/paimon/format/csv/CsvParser.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.paimon.format.csv;
+
+import org.apache.paimon.casting.CastExecutor;
+import org.apache.paimon.casting.CastExecutors;
+import org.apache.paimon.data.BinaryString;
+import org.apache.paimon.data.GenericRow;
+import org.apache.paimon.format.csv.CsvOptions.Mode;
+import org.apache.paimon.types.DataType;
+import org.apache.paimon.types.DataTypeRoot;
+import org.apache.paimon.types.DataTypes;
+import org.apache.paimon.types.RowType;
+
+import javax.annotation.Nullable;
+
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import static org.apache.paimon.utils.Preconditions.checkArgument;
+import static org.apache.paimon.utils.StringUtils.isNullOrWhitespaceOnly;
+
+/** Csv parser for CSV format. */
+public class CsvParser {
+
+    private static final Base64.Decoder BASE64_DECODER = Base64.getDecoder();
+    private static final Map<String, CastExecutor<?, ?>> CAST_EXECUTOR_CACHE =
+            new ConcurrentHashMap<>();
+
+    private final RowType dataSchemaRowType;
+    private final int[] projectMapping;
+    private final GenericRow emptyRow;
+    private final char separatorChar;
+    private final char quoteChar;
+    private final char escapeChar;
+    private final String nullLiteral;
+    private final Mode mode;
+    private final StringBuilder buffer;
+    private final String[] rowValues;
+
+    public CsvParser(RowType dataSchemaRowType, int[] projectMapping, CsvOptions options) {
+        this.dataSchemaRowType = dataSchemaRowType;
+        this.projectMapping = projectMapping;
+        this.emptyRow = new GenericRow(projectMapping.length);
+        this.nullLiteral = options.nullLiteral();
+        this.mode = options.mode();
+        this.buffer = new StringBuilder(1024);
+        int columnCount = Arrays.stream(projectMapping).max().orElse(-1) + 1;
+        this.rowValues = new String[columnCount];
+
+        this.separatorChar = options.fieldDelimiter().charAt(0);
+        this.quoteChar = options.quoteCharacter().charAt(0);
+        this.escapeChar = options.escapeCharacter().charAt(0);
+
+        checkArgument(separatorChar != '\0', "Separator cannot be the null character (ASCII 0)");
+        checkArgument(
+                separatorChar != quoteChar, "Separator and quote character cannot be the same");
+        checkArgument(
+                separatorChar != escapeChar, "Separator and escape character cannot be the same");
+
+        // Quote and escape character can be the same when both are the null character (quoting and
+        // escaping are disabled)
+        if (quoteChar != '\0' || escapeChar != '\0') {
+            checkArgument(quoteChar != escapeChar, "Quote and escape character cannot be the same");
+        }
+    }
+
+    @Nullable
+    public GenericRow parse(String line) {
+        Arrays.fill(rowValues, null);
+        buffer.setLength(0);
+
+        // empty line results in all null values
+        if (isNullOrWhitespaceOnly(line) || projectMapping.length == 0) {
+            return emptyRow;
+        }
+
+        int columnIndex = 0;
+        boolean inQuotes = false;
+        boolean inField = false;
+
+        int position = 0;
+        while (position < line.length() && columnIndex < rowValues.length) {
+            char c = line.charAt(position);
+            if (c == escapeChar) {
+                // if the next character is special, process it here as to not trigger the special
+                // handling
+                if (inQuotes || inField) {
+                    int nextCharacter = peekNextCharacter(line, position);
+                    if (nextCharacter == quoteChar || nextCharacter == escapeChar) {
+                        buffer.append(line.charAt(position + 1));
+                        position++;
+                    }
+                }
+            } else if (c == quoteChar) {
+                // a quote character can be escaped with another quote character
+                if ((inQuotes || inField) && peekNextCharacter(line, position) == quoteChar) {
+                    buffer.append(line.charAt(position + 1));
+                    position++;
+                } else {
+                    // the tricky case of an embedded quote in the middle: a,bc"d"ef,g
+                    // Embedded quote is not for first 3 characters of the line, and is not allowed
+                    // immediately before a separator
+                    if (position > 2
+                            && line.charAt(position - 1) != separatorChar
+                            && line.length() > (position + 1)
+                            && line.charAt(position + 1) != separatorChar) {
+                        // if the field starts with whitespace, skip the whitespace and quote
+                        if (buffer.length() != 0 && isAllWhitespace(buffer)) {
+                            buffer.setLength(0);
+                        } else {
+                            // otherwise write the quote as a literal value
+                            buffer.append(c);
+                        }
+                    }
+                    inQuotes = !inQuotes;
+                }
+                inField = !inField;
+            } else if (c == separatorChar && !inQuotes) {
+                // end of a value
+                rowValues[columnIndex] = buffer.toString();
+                columnIndex++;
+                buffer.setLength(0);
+                inField = false;
+            } else {
+                buffer.append(c);
+                inField = true;
+            }
+            position++;
+        }
+
+        // if last field is an unterminated field, ignore the value
+        if (columnIndex < rowValues.length && !inQuotes) {
+            rowValues[columnIndex] = buffer.toString();
+        }
+        buffer.setLength(0);
+
+        GenericRow row = new GenericRow(projectMapping.length);
+        for (int i = 0; i < projectMapping.length; i++) {
+            int ordinal = projectMapping[i];
+            DataType type = dataSchemaRowType.getTypeAt(ordinal);
+            Object field = null;
+            try {
+                field = parseField(rowValues[ordinal], type);
+            } catch (Exception e) {
+                switch (mode) {
+                    case PERMISSIVE:
+                        break;
+                    case DROPMALFORMED:
+                        return null;
+                    case FAILFAST:
+                        throw e;
+                }
+            }
+            row.setField(i, field);
+        }
+        return row;
+    }
+
+    private static int peekNextCharacter(String line, int position) {
+        return line.length() > position + 1 ? line.charAt(position + 1) : -1;
+    }
+
+    private static boolean isAllWhitespace(CharSequence sequence) {
+        for (int i = 0; i < sequence.length(); i++) {
+            if (!Character.isWhitespace(sequence.charAt(i))) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    private Object parseField(String field, DataType dataType) {
+        if (field == null || field.equals(nullLiteral)) {
+            return null;
+        }
+
+        DataTypeRoot typeRoot = dataType.getTypeRoot();
+        switch (typeRoot) {
+            case TINYINT:
+                return Byte.parseByte(field);
+            case SMALLINT:
+                return Short.parseShort(field);
+            case INTEGER:
+                return Integer.parseInt(field);
+            case BIGINT:
+                return Long.parseLong(field);
+            case FLOAT:
+                return Float.parseFloat(field);
+            case DOUBLE:
+                return Double.parseDouble(field);
+            case BOOLEAN:
+                return Boolean.parseBoolean(field);
+            case CHAR:
+            case VARCHAR:
+                return BinaryString.fromString(field);
+            case BINARY:
+            case VARBINARY:
+                return BASE64_DECODER.decode(field);
+            default:
+                return parseByCastExecutor(field, dataType);
+        }
+    }
+
+    private Object parseByCastExecutor(String field, DataType dataType) {
+        String cacheKey = dataType.toString();
+        @SuppressWarnings("unchecked")
+        CastExecutor<BinaryString, Object> cast =
+                (CastExecutor<BinaryString, Object>)
+                        CAST_EXECUTOR_CACHE.computeIfAbsent(
+                                cacheKey, k -> CastExecutors.resolve(DataTypes.STRING(), dataType));
+
+        if (cast != null) {
+            return cast.cast(BinaryString.fromString(field));
+        }
+        return BinaryString.fromString(field);
+    }
+}
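A side note on binary columns: parseField above decodes BINARY/VARBINARY cells with java.util.Base64, so binary values are expected to appear as Base64 text in the CSV file. A tiny round-trip illustration (the sample value is made up):

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    public class Base64CellSketch {
        public static void main(String[] args) {
            // what a writer would place into a VARBINARY cell ...
            String cell = Base64.getEncoder().encodeToString("hello".getBytes(StandardCharsets.UTF_8));
            // ... and what BASE64_DECODER.decode(field) recovers on the read path
            byte[] decoded = Base64.getDecoder().decode(cell);
            System.out.println(cell + " -> " + new String(decoded, StandardCharsets.UTF_8)); // aGVsbG8= -> hello
        }
    }
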
diff --git a/paimon-format/src/main/java/org/apache/paimon/format/json/JsonFileReader.java b/paimon-format/src/main/java/org/apache/paimon/format/json/JsonFileReader.java
index 0a5fe8ba0d..497d5eca43 100644
--- a/paimon-format/src/main/java/org/apache/paimon/format/json/JsonFileReader.java
+++ b/paimon-format/src/main/java/org/apache/paimon/format/json/JsonFileReader.java
@@ -59,11 +59,6 @@ public class JsonFileReader extends BaseTextFileReader {
         this.options = options;
     }
 
-    @Override
-    protected BaseTextRecordIterator createRecordIterator() {
-        return new JsonRecordIterator();
-    }
-
     @Override
     protected InternalRow parseLine(String line) throws IOException {
         try {
@@ -84,11 +79,6 @@ public class JsonFileReader extends BaseTextFileReader {
         }
     }
 
-    private class JsonRecordIterator extends BaseTextRecordIterator {
-        // Inherits all functionality from BaseTextRecordIterator
-        // No additional JSON-specific iterator logic needed
-    }
-
     private Object convertJsonValue(JsonNode node, DataType dataType, JsonOptions options) {
         if (node == null || node.isNull()) {
             return null;
diff --git a/paimon-format/src/main/java/org/apache/paimon/format/text/BaseTextFileReader.java b/paimon-format/src/main/java/org/apache/paimon/format/text/BaseTextFileReader.java
index 4205390a64..87504379a1 100644
--- a/paimon-format/src/main/java/org/apache/paimon/format/text/BaseTextFileReader.java
+++ b/paimon-format/src/main/java/org/apache/paimon/format/text/BaseTextFileReader.java
@@ -36,12 +36,14 @@ import java.nio.charset.StandardCharsets;
 /** Base class for text-based file readers that provides common functionality. */
 public abstract class BaseTextFileReader implements FileRecordReader<InternalRow> {
 
-    protected final Path filePath;
+    private final Path filePath;
+    private final InputStream decompressedStream;
+    private final TextRecordIterator reader;
+
     protected final RowType rowType;
-    protected final InputStream decompressedStream;
     protected final BufferedReader bufferedReader;
+
     protected boolean readerClosed = false;
-    protected BaseTextRecordIterator reader;
 
     protected BaseTextFileReader(FileIO fileIO, Path filePath, RowType rowType) throws IOException {
         this.filePath = filePath;
@@ -52,19 +54,14 @@ public abstract class BaseTextFileReader implements FileRecordReader<InternalRow
         this.bufferedReader =
                 new BufferedReader(
                         new InputStreamReader(this.decompressedStream, StandardCharsets.UTF_8));
-        this.reader = createRecordIterator();
+        this.reader = new TextRecordIterator();
     }
 
-    /**
-     * Creates the specific record iterator for this file reader type. Subclasses should implement
-     * this method to return their specific iterator.
-     */
-    protected abstract BaseTextRecordIterator createRecordIterator();
-
     /**
      * Parses a single line of text into an InternalRow. Subclasses must implement this method to
      * handle their specific format.
      */
+    @Nullable
     protected abstract InternalRow parseLine(String line) throws IOException;
 
     /**
@@ -106,25 +103,30 @@ public abstract class BaseTextFileReader implements FileRecordReader<InternalRow
         }
     }
 
-    /** Base record iterator for text-based file readers. */
-    protected abstract class BaseTextRecordIterator implements FileRecordIterator<InternalRow> {
+    /** Record iterator for text-based file readers. */
+    private class TextRecordIterator implements FileRecordIterator<InternalRow> {
 
         protected long currentPosition = 0;
         protected boolean end = false;
 
         @Override
         public InternalRow next() throws IOException {
-            if (readerClosed) {
-                return null;
+            while (true) {
+                if (readerClosed) {
+                    return null;
+                }
+                String nextLine = bufferedReader.readLine();
+                if (nextLine == null) {
+                    end = true;
+                    return null;
+                }
+
+                currentPosition++;
+                InternalRow row = parseLine(nextLine);
+                if (row != null) {
+                    return row;
+                }
             }
-            String nextLine = bufferedReader.readLine();
-            if (nextLine == null) {
-                end = true;
-                return null;
-            }
-
-            currentPosition++;
-            return parseLine(nextLine);
         }
 
         @Override
diff --git a/paimon-format/src/test/java/org/apache/paimon/format/csv/CsvFileFormatTest.java b/paimon-format/src/test/java/org/apache/paimon/format/csv/CsvFileFormatTest.java
index a64f30fcb5..a9a0c845ef 100644
--- a/paimon-format/src/test/java/org/apache/paimon/format/csv/CsvFileFormatTest.java
+++ b/paimon-format/src/test/java/org/apache/paimon/format/csv/CsvFileFormatTest.java
@@ -39,6 +39,7 @@ import org.apache.paimon.reader.RecordReader;
 import org.apache.paimon.types.DataTypes;
 import org.apache.paimon.types.RowType;
 
+import org.apache.paimon.shade.jackson2.com.fasterxml.jackson.dataformat.csv.CsvMapper;
 import org.apache.paimon.shade.jackson2.com.fasterxml.jackson.dataformat.csv.CsvSchema;
 
 import org.junit.jupiter.api.Test;
@@ -494,6 +495,38 @@ public class CsvFileFormatTest extends FormatReadWriteTest {
                 .isInstanceOf(IllegalArgumentException.class);
     }
 
+    @Test
+    public void testSpecialCases() throws IOException {
+        RowType rowType =
+                DataTypes.ROW(DataTypes.INT().notNull(), DataTypes.STRING(), DataTypes.DOUBLE());
+
+        FileFormat format =
+                new CsvFileFormatFactory().create(new FormatContext(new Options(), 1024, 1024));
+        Path testFile = new Path(parent, "test_mode_" + UUID.randomUUID() + ".csv");
+
+        fileIO.writeFile(
+                testFile,
+                "1,Alice,aaaa,100.23\n"
+                        + "2,\"Bob\",200.75\n"
+                        + "3,\"Json\"v,300.64\n"
+                        + "4,Jack\"o\"n,400.81",
+                false);
+        List<InternalRow> permissiveResult = read(format, rowType, rowType, testFile);
+        assertThat(permissiveResult).hasSize(4);
+        assertThat(permissiveResult.get(0).getInt(0)).isEqualTo(1);
+        assertThat(permissiveResult.get(0).getString(1).toString()).isEqualTo("Alice");
+        assertThat(permissiveResult.get(0).isNullAt(2)).isTrue();
+        assertThat(permissiveResult.get(1).getInt(0)).isEqualTo(2);
+        assertThat(permissiveResult.get(1).getString(1).toString()).isEqualTo("Bob");
+        assertThat(permissiveResult.get(1).getDouble(2)).isEqualTo(200.75);
+        assertThat(permissiveResult.get(2).getInt(0)).isEqualTo(3);
+        assertThat(permissiveResult.get(2).getString(1).toString()).isEqualTo("Json\"v");
+        assertThat(permissiveResult.get(2).getDouble(2)).isEqualTo(300.64);
+        assertThat(permissiveResult.get(3).getInt(0)).isEqualTo(4);
+        assertThat(permissiveResult.get(3).getString(1).toString()).isEqualTo("Jack\"o\"n");
+        assertThat(permissiveResult.get(3).getDouble(2)).isEqualTo(400.81);
+    }
+
     private List<InternalRow> read(
             FileFormat format, RowType fullRowType, RowType readRowType, Path testFile)
             throws IOException {
@@ -622,7 +655,7 @@ public class CsvFileFormatTest extends FormatReadWriteTest {
                         .withColumnSeparator(',')
                         .withoutHeader()
                         .withNullValue("null");
-        return CsvFileReader.parseCsvLineToArray(csvLine, schema);
+        return new CsvMapper().readerFor(String[].class).with(schema).readValue(csvLine);
     }
 
     /**
