DRILL-957: Fix schema failures when querying a partitioned Avro table in Hive.

When initializing SerDe in HiveRecordReader, we are not passing the properties
from Table to SerDe. As schema is in table properties, Avro SerDe fails to find
the schema and throws errors. For non-partitioned tables this is ok, as the
MetaStoreUtils.getTableMetadata already adds Table properties, but not for
partitioned Avro tables. Fix is to pass table properties to SerDe if the
table is partitioned.


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/aaaca6a7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/aaaca6a7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/aaaca6a7

Branch: refs/heads/master
Commit: aaaca6a7829733a120018e1388c68672d035820b
Parents: 3bcead7
Author: vkorukanti <[email protected]>
Authored: Sat Jun 14 18:32:18 2014 -0700
Committer: Jacques Nadeau <[email protected]>
Committed: Mon Jun 16 07:53:13 2014 -0700

----------------------------------------------------------------------
 .../apache/drill/exec/store/hive/HiveRecordReader.java    | 10 ++++++++++
 1 file changed, 10 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaaca6a7/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
index ac0f036..c062f8c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.sql.Timestamp;
 import java.sql.Date;
 import java.util.List;
+import java.util.Map;
 import java.util.Properties;
 
 import org.apache.drill.common.exceptions.DrillRuntimeException;
@@ -119,6 +120,15 @@ public class HiveRecordReader implements RecordReader {
     JobConf job = new JobConf();
     if (partition != null) {
       properties = MetaStoreUtils.getPartitionMetadata(partition, table);
+
+      // SerDe expects properties from Table, but above call doesn't add Table properties.
+      // Include Table properties in final list in order not to break SerDes that depend on
+      // Table properties. For example AvroSerDe gets the schema from properties (passed as second argument)
+      for (Map.Entry<String, String> entry : table.getParameters().entrySet()) {
+        if (entry.getKey() != null && entry.getValue() != null) {
+          properties.put(entry.getKey(), entry.getValue());
+        }
+      }
     } else {
       properties = MetaStoreUtils.getTableMetadata(table);
     }

Reply via email to