Author: szehon
Date: Tue Mar 31 17:28:24 2015
New Revision: 1670402

URL: http://svn.apache.org/r1670402
Log:
HIVE-10053 : Override new init API from ReadSupport instead of the deprecated one (Ferdinand Xu via Szehon)
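
For reference, a minimal sketch (not part of this commit) of the two init entry points on parquet.hadoop.api.ReadSupport: the deprecated per-argument form and the newer InitContext-based form that this change switches to. The class name ExampleReadSupport is made up for illustration.

    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;

    import parquet.hadoop.api.InitContext;
    import parquet.hadoop.api.ReadSupport;
    import parquet.io.api.RecordMaterializer;
    import parquet.schema.MessageType;

    // Illustrative only; a real ReadSupport would build a requested schema here.
    public class ExampleReadSupport extends ReadSupport<Void> {

      // Deprecated entry point: Parquet hands over the pieces individually.
      @Override
      public ReadContext init(final Configuration configuration,
          final Map<String, String> keyValueMetaData, final MessageType fileSchema) {
        return new ReadContext(fileSchema);
      }

      // Newer entry point: the same information arrives bundled in an InitContext.
      @Override
      public ReadContext init(final InitContext context) {
        final Configuration configuration = context.getConfiguration();
        final MessageType fileSchema = context.getFileSchema();
        return new ReadContext(fileSchema);
      }

      @Override
      public RecordMaterializer<Void> prepareForRead(final Configuration configuration,
          final Map<String, String> keyValueMetaData, final MessageType fileSchema,
          final ReadContext readContext) {
        throw new UnsupportedOperationException("sketch only");
      }
    }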

Modified:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java?rev=1670402&r1=1670401&r2=1670402&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java Tue Mar 31 17:28:24 2015
@@ -32,6 +32,7 @@ import org.apache.hadoop.hive.serde2.typ
 import org.apache.hadoop.io.ArrayWritable;
 import org.apache.hadoop.util.StringUtils;
 
+import parquet.hadoop.api.InitContext;
 import parquet.hadoop.api.ReadSupport;
 import parquet.io.api.RecordMaterializer;
 import parquet.schema.GroupType;
@@ -190,13 +191,13 @@ public class DataWritableReadSupport ext
   /**
    * It creates the readContext for Parquet side with the requested schema during the init phase.
    *
-   * @param configuration    needed to get the wanted columns
-   * @param keyValueMetaData // unused
-   * @param fileSchema       parquet file schema
+   * @param context
    * @return the parquet ReadContext
    */
   @Override
-  public ReadContext init(final Configuration configuration, final Map<String, String> keyValueMetaData, final MessageType fileSchema) {
+  public parquet.hadoop.api.ReadSupport.ReadContext init(InitContext context) {
+    Configuration configuration = context.getConfiguration();
+    MessageType fileSchema = context.getFileSchema();
     String columnNames = configuration.get(IOConstants.COLUMNS);
     Map<String, String> contextMetadata = new HashMap<String, String>();
     boolean indexAccess = configuration.getBoolean(PARQUET_COLUMN_INDEX_ACCESS, false);
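
If the key/value metadata that used to arrive as a parameter were still needed here, InitContext also exposes it. A small sketch (outside this patch, with a made-up class name) of the relevant accessors:

    import java.util.Map;
    import java.util.Set;

    import parquet.hadoop.api.InitContext;

    // Illustrative helper only, not part of the commit.
    public final class InitContextMetadataExample {
      static void inspect(final InitContext context) {
        // One value per key, merged across the files in the split.
        final Map<String, String> merged = context.getMergedKeyValueMetaData();
        // All values seen per key, when files disagree.
        final Map<String, Set<String>> perKey = context.getKeyValueMetadata();
        System.out.println("merged=" + merged.size() + ", raw=" + perKey.size());
      }
    }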

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java?rev=1670402&r1=1670401&r2=1670402&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java Tue Mar 31 17:28:24 2015
@@ -15,7 +15,12 @@ package org.apache.hadoop.hive.ql.io.par
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -42,6 +47,7 @@ import parquet.filter2.predicate.FilterP
 import parquet.hadoop.ParquetFileReader;
 import parquet.hadoop.ParquetInputFormat;
 import parquet.hadoop.ParquetInputSplit;
+import parquet.hadoop.api.InitContext;
 import parquet.hadoop.api.ReadSupport.ReadContext;
 import parquet.hadoop.metadata.BlockMetaData;
 import parquet.hadoop.metadata.FileMetaData;
@@ -243,8 +249,8 @@ public class ParquetRecordReaderWrapper
       final List<BlockMetaData> blocks = parquetMetadata.getBlocks();
       final FileMetaData fileMetaData = parquetMetadata.getFileMetaData();
 
-      final ReadContext readContext = new DataWritableReadSupport()
-          .init(jobConf, fileMetaData.getKeyValueMetaData(), fileMetaData.getSchema());
+      final ReadContext readContext = new DataWritableReadSupport().init(new InitContext(jobConf,
+          null, fileMetaData.getSchema()));
       schemaSize = MessageTypeParser.parseMessageType(readContext.getReadSupportMetadata()
           .get(DataWritableReadSupport.HIVE_TABLE_AS_PARQUET_SCHEMA)).getFieldCount();
       final List<BlockMetaData> splitGroup = new ArrayList<BlockMetaData>();
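
The wrapper above builds the InitContext with null key/value metadata. Purely as an illustration (not what the patch does), the footer's Map<String, String> metadata could instead be wrapped into the Map<String, Set<String>> shape that the InitContext constructor expects, which may be what the newly added java.util imports anticipate; the helper class name below is invented.

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;

    import org.apache.hadoop.mapred.JobConf;

    import parquet.hadoop.api.InitContext;
    import parquet.hadoop.metadata.FileMetaData;

    // Illustrative helper only, not part of the commit.
    public final class InitContextFromFooterExample {
      static InitContext newInitContext(final JobConf jobConf, final FileMetaData fileMetaData) {
        final Map<String, Set<String>> metadata = new HashMap<String, Set<String>>();
        for (final Map.Entry<String, String> entry
            : fileMetaData.getKeyValueMetaData().entrySet()) {
          metadata.put(entry.getKey(), Collections.singleton(entry.getValue()));
        }
        return new InitContext(jobConf, metadata, fileMetaData.getSchema());
      }
    }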

