[ https://issues.apache.org/jira/browse/DRILL-5356?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15947295#comment-15947295 ]

ASF GitHub Bot commented on DRILL-5356:
---------------------------------------

Github user ppadma commented on a diff in the pull request:

    https://github.com/apache/drill/pull/789#discussion_r108559596
  
    --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetSchema.java ---
    @@ -0,0 +1,207 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.drill.exec.store.parquet.columnreaders;
    +
    +import java.util.ArrayList;
    +import java.util.Arrays;
    +import java.util.Collection;
    +import java.util.HashMap;
    +import java.util.List;
    +import java.util.Map;
    +
    +import org.apache.drill.common.expression.SchemaPath;
    +import org.apache.drill.common.types.TypeProtos;
    +import org.apache.drill.common.types.TypeProtos.DataMode;
    +import org.apache.drill.common.types.Types;
    +import org.apache.drill.exec.exception.SchemaChangeException;
    +import org.apache.drill.exec.expr.TypeHelper;
    +import org.apache.drill.exec.physical.impl.OutputMutator;
    +import org.apache.drill.exec.record.MaterializedField;
    +import org.apache.drill.exec.server.options.OptionManager;
    +import org.apache.drill.exec.store.parquet.ParquetReaderUtility;
    +import org.apache.drill.exec.vector.NullableIntVector;
    +import org.apache.parquet.column.ColumnDescriptor;
    +import org.apache.parquet.format.SchemaElement;
    +import org.apache.parquet.hadoop.metadata.BlockMetaData;
    +import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
    +import org.apache.parquet.hadoop.metadata.ParquetMetadata;
    +
    +import com.google.common.collect.Lists;
    +
    +/**
    + * Mapping from the schema of the Parquet file to the schema that Drill
    + * and the Parquet record reader use.
    + */
    +public class ParquetSchema {
    +  private Collection<SchemaPath> selectedCols;
    +  // This is a list parallel to the columns list above; it is used to determine
    +  // the subset of the projection pushdown columns that do not appear in this file
    +  private boolean[] columnsFound;
    +  private ParquetMetadata footer;
    +  private Map<String, SchemaElement> schemaElements;
    +  private int columnsToScan;
    +  private List<ColumnDescriptor> columns;
    +  private List<ParquetColumnMetadata> columnMd = new ArrayList<>();
    +  private int bitWidthAllFixedFields;
    +  private boolean allFieldsFixedLength = true;
    +  private long groupRecordCount;
    +  private int recordsPerBatch;
    +  private int rowGroupIndex;
    +  private final OptionManager options;
    +
    +  public ParquetSchema(OptionManager options, int rowGroupIndex) {
    +    selectedCols = null;
    +    this.rowGroupIndex = rowGroupIndex;
    +    this.options = options;
    +  }
    +
    +  public ParquetSchema(OptionManager options, Collection<SchemaPath> selectedCols) {
    +    this.options = options;
    +    this.selectedCols = selectedCols;
    +    columnsFound = new boolean[selectedCols.size()];
    +  }
    +
    +  public void buildSchema(ParquetMetadata footer, long batchSize) throws Exception {
    +    this.footer = footer;
    +    columns = footer.getFileMetaData().getSchema().getColumns();
    +    groupRecordCount = footer.getBlocks().get(rowGroupIndex).getRowCount();
    +    loadParquetSchema();
    +    computeFixedPart();
    +//    rowGroupOffset = footer.getBlocks().get(rowGroupIndex).getColumns().get(0).getFirstDataPageOffset();
    +
    +    if (columnsToScan != 0 && allFieldsFixedLength) {
    +      recordsPerBatch = (int) Math.min(Math.min(batchSize / bitWidthAllFixedFields,
    +          footer.getBlocks().get(0).getColumns().get(0).getValueCount()),
    +          ParquetRecordReader.DEFAULT_RECORDS_TO_READ_IF_FIXED_WIDTH);
    +    } else {
    +      recordsPerBatch = ParquetRecordReader.DEFAULT_RECORDS_TO_READ_IF_VARIABLE_WIDTH;
    +    }
    +  }
    +
    +  private void loadParquetSchema() {
    +    // TODO - figure out how to deal with this better once we add nested reading;
    +    // note also where this map is used below
    +    // store a map from column name to converted types if they are non-null
    +    schemaElements = ParquetReaderUtility.getColNameToSchemaElementMapping(footer);
    +
    +    // loop to add up the length of the fixed width columns and build the schema
    +    for (ColumnDescriptor column : columns) {
    +      ParquetColumnMetadata colMd = new ParquetColumnMetadata(column);
    +      colMd.resolveDrillType(schemaElements, options);
    +      if (! fieldSelected(colMd.field)) {
    +        continue;
    +      }
    +      columnMd.add(colMd);
    --- End diff --
    
    I suggest renaming columnMd to columnsMetadata and colMd to columnMetadata. As
    written, it is hard to infer that columnMd holds metadata for all columns
    rather than for a single one.
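    
    For example, the field declaration and loop above might then read (a minimal
    sketch applying only the suggested renames; everything else is unchanged from
    the diff):
    
        private List<ParquetColumnMetadata> columnsMetadata = new ArrayList<>();
    
        for (ColumnDescriptor column : columns) {
          // metadata for a single selected column
          ParquetColumnMetadata columnMetadata = new ParquetColumnMetadata(column);
          columnMetadata.resolveDrillType(schemaElements, options);
          if (! fieldSelected(columnMetadata.field)) {
            continue;
          }
          // accumulate metadata for all selected columns
          columnsMetadata.add(columnMetadata);
        }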


> Refactor Parquet Record Reader
> ------------------------------
>
>                 Key: DRILL-5356
>                 URL: https://issues.apache.org/jira/browse/DRILL-5356
>             Project: Apache Drill
>          Issue Type: Improvement
>    Affects Versions: 1.10.0, 1.11.0
>            Reporter: Paul Rogers
>            Assignee: Paul Rogers
>            Priority: Minor
>             Fix For: 1.11.0
>
>
> The Parquet record reader class is a key part of Drill that has evolved over 
> time to become somewhat hard to follow.
> A number of us are working on Parquet-related tasks and find we have to spend 
> an uncomfortable amount of time trying to understand the code. In particular, 
> this writer needs to figure out how to convince the reader to provide 
> higher-density record batches.
> Rather than continue to decipher the complex code multiple times, this ticket 
> requests refactoring the code to keep it functionally identical but make it 
> structurally cleaner. The result will be faster time to value when working 
> with this code.
> This is a lower-priority change and will be coordinated with others working 
> on this code base. This ticket is only for the record reader class itself; it 
> does not include the various readers and writers that Parquet uses since 
> another project is actively modifying those classes.


