Github user sv71294 commented on a diff in the pull request:

    https://github.com/apache/carbondata/pull/2412#discussion_r199066588

    --- Diff: integration/presto/src/main/java/org/apache/carbondata/presto/CarbonVectorBatch.java ---
    @@ -20,50 +20,81 @@
     import java.util.HashSet;
     import java.util.Set;

    +import org.apache.carbondata.core.cache.dictionary.Dictionary;
    +import org.apache.carbondata.core.metadata.datatype.DataType;
    +import org.apache.carbondata.core.metadata.datatype.DataTypes;
    +import org.apache.carbondata.core.metadata.datatype.DecimalType;
     import org.apache.carbondata.core.metadata.datatype.StructField;
     import org.apache.carbondata.core.scan.result.vector.impl.CarbonColumnVectorImpl;
    +import org.apache.carbondata.presto.readers.BooleanStreamReader;
    +import org.apache.carbondata.presto.readers.DecimalSliceStreamReader;
    +import org.apache.carbondata.presto.readers.DoubleStreamReader;
    +import org.apache.carbondata.presto.readers.IntegerStreamReader;
    +import org.apache.carbondata.presto.readers.LongStreamReader;
    +import org.apache.carbondata.presto.readers.ObjectStreamReader;
    +import org.apache.carbondata.presto.readers.ShortStreamReader;
    +import org.apache.carbondata.presto.readers.SliceStreamReader;
    +import org.apache.carbondata.presto.readers.TimestampStreamReader;
    +
    +import com.facebook.presto.spi.block.SliceArrayBlock;

     public class CarbonVectorBatch {

    -  private static final int DEFAULT_BATCH_SIZE = 4 * 1024;
    +  private static final int DEFAULT_BATCH_SIZE = 4 * 1024;

    -  private final StructField[] schema;
       private final int capacity;
    -  private int numRows;
       private final CarbonColumnVectorImpl[] columns;
    -  // True if the row is filtered.
       private final boolean[] filteredRows;
    -  // Column indices that cannot have null values.
       private final Set<Integer> nullFilteredColumns;
    -
    +  private int numRows;
       // Total number of rows that have been filtered.
       private int numRowsFiltered = 0;

    -  private CarbonVectorBatch(StructField[] schema, int maxRows) {
    -    this.schema = schema;
    +  private CarbonVectorBatch(StructField[] schema, CarbonDictionaryDecodeReadSupport readSupport,
    +      int maxRows) {
         this.capacity = maxRows;
         this.columns = new CarbonColumnVectorImpl[schema.length];
         this.nullFilteredColumns = new HashSet<>();
         this.filteredRows = new boolean[maxRows];
    +    Dictionary[] dictionaries = readSupport.getDictionaries();
    +    DataType[] dataTypes = readSupport.getDataTypes();
         for (int i = 0; i < schema.length; ++i) {
    -      StructField field = schema[i];
    -      columns[i] = new CarbonColumnVectorImpl(maxRows, field.getDataType());
    +      columns[i] = createDirectStreamReader(maxRows, dataTypes[i], schema[i], dictionaries[i],
    +          readSupport.getSliceArrayBlock(i));
         }
    -  }
    -
    -  public static CarbonVectorBatch allocate(StructField[] schema) {
    -    return new CarbonVectorBatch(schema, DEFAULT_BATCH_SIZE);
    +  public static CarbonVectorBatch allocate(StructField[] schema,
    --- End diff --

    Added the dictionary read support for the stream readers.
---