[ 
https://issues.apache.org/jira/browse/DRILL-7177?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16959176#comment-16959176
 ] 

ASF GitHub Bot commented on DRILL-7177:
---------------------------------------

cgivre commented on pull request #1749: DRILL-7177: Format Plugin for Excel 
Files
URL: https://github.com/apache/drill/pull/1749#discussion_r338761283
 
 

 ##########
 File path: 
contrib/format-excel/src/main/java/org/apache/drill/exec/store/excel/ExcelBatchReader.java
 ##########
 @@ -0,0 +1,444 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.excel;
+
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework;
+import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
+import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
+import org.apache.drill.exec.physical.resultSet.RowSetLoader;
+import org.apache.drill.exec.record.metadata.ColumnMetadata;
+import org.apache.drill.exec.record.metadata.MetadataUtils;
+import org.apache.drill.exec.record.metadata.SchemaBuilder;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
+import org.apache.drill.exec.vector.accessor.TupleWriter;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.mapred.FileSplit;
+import org.apache.poi.ss.usermodel.Cell;
+import org.apache.poi.ss.usermodel.CellValue;
+import org.apache.poi.ss.usermodel.DateUtil;
+import org.apache.poi.ss.usermodel.FormulaEvaluator;
+import org.apache.poi.ss.usermodel.Row;
+import org.apache.poi.xssf.usermodel.XSSFSheet;
+import org.apache.poi.xssf.usermodel.XSSFWorkbook;
+import 
org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileSchemaNegotiator;
+import org.joda.time.Instant;
+import java.util.Iterator;
+import java.io.IOException;
+import java.util.ArrayList;
+
+public class ExcelBatchReader implements ManagedReader<FileSchemaNegotiator> {
  // Reader options captured from the format plugin configuration.
  private ExcelReaderConfig readerConfig;

  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ExcelBatchReader.class);

  // Replacement token for ".*" in header names (Drill treats '.' as a path separator).
  private static final String SAFE_WILDCARD = "_$";

  // Replacement for '.' characters in header names.
  private static final String SAFE_SEPARATOR = "_";

  // Literal ".*" sequence that must be sanitized out of header names.
  private static final String PARSER_WILDCARD = ".*";

  // Newlines inside header cells are flattened to this token.
  private static final String HEADER_NEW_LINE_REPLACEMENT = "__";

  // Prefix for generated column names when the sheet has no header row.
  private static final String MISSING_FIELD_NAME_HEADER = "field_";

  // The worksheet currently being read.
  private XSSFSheet sheet;

  // The open workbook backing the worksheet.
  private XSSFWorkbook workbook;

  // Raw HDFS input stream for the .xlsx file; wrapped by the workbook.
  private FSDataInputStream fsStream;

  // Evaluates formula cells to concrete values while reading.
  private FormulaEvaluator evaluator;

  // Column names discovered from the header row (or generated field_n names).
  private ArrayList<String> excelFieldNames;

  // One scalar writer per projected column, parallel to excelFieldNames.
  private ArrayList<ScalarWriter> columnWriters;

  // Iterator over the worksheet's rows; positioned past the header after schema discovery.
  private Iterator<Row> rowIterator;

  // Row-level writer for the current batch.
  private RowSetLoader rowWriter;

  // Number of columns in the widest row seen (from getLastCellNum()).
  private int totalColumnCount;

  // Count of worksheet rows consumed so far (including skipped header rows).
  private int lineCount;

  // True until the first data row has been written; used to lazily add columns.
  private boolean firstLine;

  // The file split (path) this reader is assigned.
  private FileSplit split;

  // Result-set loader obtained from the schema negotiator.
  private ResultSetLoader loader;

  // Rows written in the current batch; reset at the start of each next() call.
  private int recordCount;
+
  /**
   * Snapshot of the Excel format-plugin configuration options consumed by the
   * reader: header-row index, first/last row and column bounds, the
   * read-all-as-varchar flag, and the target sheet name.
   */
  public static class ExcelReaderConfig {
    // Back-reference to the owning plugin.
    protected final ExcelFormatPlugin plugin;

    // Zero-based index of the header row; -1 means "no header row".
    protected final int headerRow;

    // Last row to read; semantics of 0 defined by the plugin config.
    protected final int lastRow;

    // First column to read (1-based per usage in nextLine); 0 means "from the start".
    protected final int firstColumn;

    // Last column to read (1-based per usage in nextLine); 0 means "to the end".
    protected final int lastColumn;

    // When true, every column is projected as VARCHAR regardless of cell type.
    protected final boolean readAllFieldsAsVarChar;

    // Name of the worksheet to read; empty means "first sheet".
    protected String sheetName;

    public ExcelReaderConfig(ExcelFormatPlugin plugin) {
      this.plugin = plugin;
      headerRow = plugin.getConfig().getHeaderRow();
      lastRow = plugin.getConfig().getLastRow();
      firstColumn = plugin.getConfig().getFirstColumn();
      lastColumn = plugin.getConfig().getLastColumn();
      readAllFieldsAsVarChar = plugin.getConfig().getReadAllFieldsAsVarChar();
      sheetName = plugin.getConfig().getSheetName();
    }
  }
+
+  public ExcelBatchReader(ExcelReaderConfig readerConfig) {
+    this.readerConfig = readerConfig;
+    firstLine = true;
+  }
+
  @Override
  public boolean open(FileSchemaNegotiator negotiator) {
    // Fail fast on invalid plugin options before touching the file.
    verifyConfigOptions();
    split = negotiator.split();
    loader = negotiator.build();
    rowWriter = loader.writer();
    // Open the workbook and position on the configured sheet.
    openFile(negotiator);
    // NOTE(review): the discovered schema is discarded here; presumably it
    // should be handed to the negotiator before build() — confirm against the
    // EVF contract.
    defineSchema();
    return true;
  }
+
+  private void openFile(FileScanFramework.FileSchemaNegotiator negotiator) {
+    try {
+      fsStream = negotiator.fileSystem().open(split.getPath());
+      workbook = new XSSFWorkbook(fsStream.getWrappedStream());
+    } catch (Exception e) {
+      throw UserException
+        .dataReadError(e)
+        .message("Failed to open open input file: %s", 
split.getPath().toString())
+        .message(e.getMessage())
+        .build(logger);
+    }
+
+    // Evaluate formulae
+    evaluator = workbook.getCreationHelper().createFormulaEvaluator();
+
+    workbook.setMissingCellPolicy(Row.MissingCellPolicy.CREATE_NULL_AS_BLANK);
+    sheet = getSheet();
+  }
+
+  /**
+   * This function defines the schema from the header row.
+   * @return TupleMedata of the discovered schema
+   */
+  private TupleMetadata defineSchema() {
+    SchemaBuilder builder = new SchemaBuilder();
+    return getColumnHeaders(builder);
+  }
+
+  protected TupleMetadata getColumnHeaders(SchemaBuilder builder) {
+    //Get the field names
+    int columnCount = 0;
+    if (readerConfig.headerRow >= 0) {
+      columnCount = 
sheet.getRow(readerConfig.headerRow).getPhysicalNumberOfCells();
+    } else {
+      columnCount = sheet.getRow(0).getPhysicalNumberOfCells();
+    }
+    excelFieldNames = new ArrayList<>(columnCount);
+    rowIterator = sheet.iterator();
+
+    //If there are no headers, create columns names of field_n
+    if (readerConfig.headerRow == -1) {
+      String missingFieldName;
+      for (int i = 0; i < columnCount; i++) {
+        missingFieldName = MISSING_FIELD_NAME_HEADER + (i + 1);
+        makeColumn(builder, missingFieldName, TypeProtos.MinorType.VARCHAR);
+        excelFieldNames.add(i, missingFieldName);
+      }
+      columnWriters = new ArrayList<ScalarWriter>(excelFieldNames.size());
+      return builder.buildSchema();
+    } else if (rowIterator.hasNext()) {
+      //Find the header row
+      while (this.lineCount < readerConfig.headerRow) {
+        Row row = rowIterator.next();
+        this.lineCount++;
+      }
+      //Get the header row and column count
+      Row row = rowIterator.next();
+      this.totalColumnCount = row.getLastCellNum();
+
+      //Read the header row
+      Iterator<Cell> cellIterator = row.cellIterator();
+      int colPosition = 0;
+      String tempColumnName = "";
+
+      while (cellIterator.hasNext()) {
+        Cell cell = cellIterator.next();
+
+        CellValue cellValue = evaluator.evaluate(cell);
+        switch (cellValue.getCellTypeEnum()) {
+          case STRING:
+            tempColumnName = cell.getStringCellValue()
+              .replace(PARSER_WILDCARD, SAFE_WILDCARD)
+              .replaceAll("\\.", SAFE_SEPARATOR)
+              .replaceAll("\\n", HEADER_NEW_LINE_REPLACEMENT);
+            makeColumn(builder, tempColumnName, TypeProtos.MinorType.VARCHAR);
+            excelFieldNames.add(colPosition, tempColumnName);
+            break;
+          case NUMERIC:
+            tempColumnName = String.valueOf(cell.getNumericCellValue());
+            makeColumn(builder, tempColumnName, TypeProtos.MinorType.FLOAT8);
+            excelFieldNames.add(colPosition, tempColumnName);
+            break;
+        }
+        colPosition++;
+      }
+    }
+    columnWriters = new ArrayList<ScalarWriter>(excelFieldNames.size());
+    return builder.buildSchema();
+  }
+
+  /**
+   * Helper function to get the selected sheet from the configuration
+   *
+   * @return XSSFSheet The selected sheet
+   */
+  private XSSFSheet getSheet() {
+    int sheetIndex = 0;
+    if (!readerConfig.sheetName.isEmpty()) {
+      sheetIndex = workbook.getSheetIndex(readerConfig.sheetName);
+    }
+
+    //If the sheet name is not valid, throw user exception
+    if (sheetIndex == -1) {
+      throw UserException
+        .validationError()
+        .message("Could not open sheet " + readerConfig.sheetName)
+        .build(logger);
+    } else {
+      return workbook.getSheetAt(sheetIndex);
+    }
+  }
+
+  @Override
+  public boolean next() {
+    recordCount = 0;
+    while (!rowWriter.isFull()) {
+      if (!nextLine(rowWriter)) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  public boolean nextLine(RowSetLoader rowWriter) {
+    if (!rowIterator.hasNext()) {
+      return false;
+    } else if (recordCount >= readerConfig.lastRow) {
+      return false;
+    }
+
+    int lastRow = readerConfig.lastRow;
+    while (recordCount < lastRow && rowIterator.hasNext()) {
+
+      lineCount++;
+
+      Row row = rowIterator.next();
+      // If the user specified that there are no headers, get the column count
+      if (readerConfig.headerRow == -1 && recordCount == 0) {
+        this.totalColumnCount = row.getLastCellNum();
+      }
+
+      String fieldName;
+      if (row.getLastCellNum() < totalColumnCount) {
+        throw UserException.dataReadError().message("Wrong number of columns 
in row: %d", row.getLastCellNum()).build(logger);
+      }
+      int colPosition = 0;
+      if (readerConfig.firstColumn != 0) {
+        colPosition = readerConfig.firstColumn - 1;
+      }
+
+      int finalColumn = totalColumnCount;
+      if (readerConfig.lastColumn != 0) {
+        finalColumn = readerConfig.lastColumn - 1;
+      }
+      rowWriter.start();
+      for (int colWriterIndex = 0; colPosition < finalColumn; colPosition++) {
+        Cell cell = row.getCell(colPosition);
+
+        /*if (readerConfig.firstColumn != 0) {
+          colWriterIndex += readerConfig.firstColumn - 1;
+        }*/
+
+        CellValue cellValue = evaluator.evaluate(cell);
+        if (cellValue == null) {
+          String fieldValue = "";
+          if (firstLine) {
+            addColumnToArray(rowWriter, excelFieldNames.get(colPosition), 
MinorType.VARCHAR);
+          }
+          columnWriters.get(colWriterIndex).setString(fieldValue);
+
+        } else {
+          switch (cellValue.getCellTypeEnum()) {
 
 Review comment:
   Hi @paul-rogers 
   I see where you're going with this, however, there are a few issues and I'm 
not sure that it's going to be possible to remove the switch statement.  The 
main issue is that the column names are created by reading the header line.  
This is done here:
   
https://github.com/apache/drill/blob/a23ca66bc5da2e89b3957ad0b3cabb8c4ebdfcc4/contrib/format-excel/src/main/java/org/apache/drill/exec/store/excel/ExcelBatchReader.java#L166-L237.
   
   However, the actual data types aren't assigned until the reader actually 
reads the first line of data which occurs here:
   
https://github.com/apache/drill/blob/a23ca66bc5da2e89b3957ad0b3cabb8c4ebdfcc4/contrib/format-excel/src/main/java/org/apache/drill/exec/store/excel/ExcelBatchReader.java#L320-L330
   
   There has to be additional logic in the second part so that the reader can 
add the columns to the schema (see below) and that requires the `switch` 
statement because the ways of accessing the different columns from the Excel 
sheet are different.
   
   
https://github.com/apache/drill/blob/a23ca66bc5da2e89b3957ad0b3cabb8c4ebdfcc4/contrib/format-excel/src/main/java/org/apache/drill/exec/store/excel/ExcelBatchReader.java#L332-L356
   
   If the columns were already defined, what you are describing makes sense, 
but I'm not seeing how to do it while getting rid of the `switch` statement.
   
 
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


> Format Plugin for Excel Files
> -----------------------------
>
>                 Key: DRILL-7177
>                 URL: https://issues.apache.org/jira/browse/DRILL-7177
>             Project: Apache Drill
>          Issue Type: Improvement
>    Affects Versions: 1.17.0
>            Reporter: Charles Givre
>            Assignee: Charles Givre
>            Priority: Major
>              Labels: doc-impacting
>             Fix For: 1.17.0
>
>
> This pull request adds the functionality which enables Drill to query 
> Microsoft Excel files. 



--
This message was sent by Atlassian Jira
(v8.3.4#803005)

Reply via email to