Repository: hive
Updated Branches:
  refs/heads/master 353781ccf -> 6af30bf2b


http://git-wip-us.apache.org/repos/asf/hive/blob/6af30bf2/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index 31921f1..ed1a328 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.ql.io.BucketCodec;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -87,19 +88,27 @@ public class TestTxnCommands2 {
   protected Driver d;
   protected static enum Table {
     ACIDTBL("acidTbl"),
-    ACIDTBLPART("acidTblPart"),
+    ACIDTBLPART("acidTblPart", "p"),
     NONACIDORCTBL("nonAcidOrcTbl"),
-    NONACIDPART("nonAcidPart"),
-    NONACIDPART2("nonAcidPart2"),
-    ACIDNESTEDPART("acidNestedPart");
+    NONACIDPART("nonAcidPart", "p"),
+    NONACIDPART2("nonAcidPart2", "p2"),
+    ACIDNESTEDPART("acidNestedPart", "p,q");
 
     private final String name;
+    private final String partitionColumns;
     @Override
     public String toString() {
       return name;
     }
+    String getPartitionColumns() {
+      return partitionColumns;
+    }
     Table(String name) {
+      this(name, null);
+    }
+    Table(String name, String partitionColumns) {
       this.name = name;
+      this.partitionColumns = partitionColumns;
     }
   }
 
@@ -353,14 +362,14 @@ public class TestTxnCommands2 {
      */
    String[][] expected = {
      {"{\"transactionid\":0,\"bucketid\":0,\"rowid\":0}\t0\t13",  "bucket_00000"},
-      {"{\"transactionid\":18,\"bucketid\":0,\"rowid\":0}\t0\t15", "bucket_00000"},
-      {"{\"transactionid\":20,\"bucketid\":0,\"rowid\":0}\t0\t17", "bucket_00000"},
+      {"{\"transactionid\":18,\"bucketid\":536870912,\"rowid\":0}\t0\t15", "bucket_00000"},
+      {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":0}\t0\t17", "bucket_00000"},
      {"{\"transactionid\":0,\"bucketid\":0,\"rowid\":1}\t0\t120", "bucket_00000"},
      {"{\"transactionid\":0,\"bucketid\":1,\"rowid\":1}\t1\t2",   "bucket_00001"},
      {"{\"transactionid\":0,\"bucketid\":1,\"rowid\":3}\t1\t4",   "bucket_00001"},
      {"{\"transactionid\":0,\"bucketid\":1,\"rowid\":2}\t1\t5",   "bucket_00001"},
      {"{\"transactionid\":0,\"bucketid\":1,\"rowid\":4}\t1\t6",   "bucket_00001"},
-      {"{\"transactionid\":18,\"bucketid\":1,\"rowid\":0}\t1\t16", "bucket_00001"}
+      {"{\"transactionid\":18,\"bucketid\":536936448,\"rowid\":0}\t1\t16", "bucket_00001"}
    };
    Assert.assertEquals("Unexpected row count before compaction", expected.length, rs.size());
     for(int i = 0; i < expected.length; i++) {
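
The 536870912 and 536936448 values in the expected rows above are encoded
bucket properties, not plain bucket numbers. A minimal sketch of the
arithmetic, assuming the BucketCodec.V1 bit layout (top 3 bits version = 1,
1 reserved bit, 12 bits writerId, 4 reserved bits, 12 bits statementId):

    // Illustration only; BucketCodec.V1.encode()/decodeWriterId() in
    // org.apache.hadoop.hive.ql.io.BucketCodec is the real implementation.
    static int encodeV1(int writerId, int statementId) {
      return (1 << 29) | (writerId << 16) | statementId;
    }
    // encodeV1(0, 0) == 536870912  -> new rows in bucket_00000
    // encodeV1(1, 0) == 536936448  -> new rows in bucket_00001

Note the transactionid=0 rows in this expected output still carry raw bucket
numbers (0 and 1); only the rows written through the updated writer carry the
encoded form.
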
@@ -539,7 +548,7 @@ public class TestTxnCommands2 {
    List<String> rs = runStatementOnDriver("select a,b from " + Table.NONACIDORCTBL);
     int [][] resultData = new int[][] {{1, 2}};
     Assert.assertEquals(stringifyValues(resultData), rs);
-    rs = runStatementOnDriver("select count(*) from " + Table.NONACIDORCTBL);
+    rs = runStatementOnDriver("select count(*) from " + Table.NONACIDORCTBL);//todo: what is the point of this if we just did select *?
     int resultCount = 1;
     Assert.assertEquals(resultCount, Integer.parseInt(rs.get(0)));
 
@@ -555,7 +564,7 @@ public class TestTxnCommands2 {
     rs = runStatementOnDriver("select a,b from " + Table.NONACIDORCTBL);
     resultData = new int[][] {{1, 2}};
     Assert.assertEquals(stringifyValues(resultData), rs);
-    rs = runStatementOnDriver("select count(*) from " + Table.NONACIDORCTBL);
+    rs = runStatementOnDriver("select count(*) from " + Table.NONACIDORCTBL);//todo: what is the point of this if we just did select *?
     resultCount = 1;
     Assert.assertEquals(resultCount, Integer.parseInt(rs.get(0)));
 
@@ -748,7 +757,7 @@ public class TestTxnCommands2 {
     rs = runStatementOnDriver("select a,b from " + Table.NONACIDORCTBL);
     resultData = new int[][] {{1, 3}, {3, 4}};
     Assert.assertEquals(stringifyValues(resultData), rs);
-    rs = runStatementOnDriver("select count(*) from " + Table.NONACIDORCTBL);
+    rs = runStatementOnDriver("select count(*) from " + Table.NONACIDORCTBL);//todo: what is the point of this if we just did select *?
     resultCount = 2;
     Assert.assertEquals(resultCount, Integer.parseInt(rs.get(0)));
 
@@ -784,7 +793,7 @@ public class TestTxnCommands2 {
     rs = runStatementOnDriver("select a,b from " + Table.NONACIDORCTBL);
     resultData = new int[][] {{1, 3}, {3, 4}};
     Assert.assertEquals(stringifyValues(resultData), rs);
-    rs = runStatementOnDriver("select count(*) from " + Table.NONACIDORCTBL);
+    rs = runStatementOnDriver("select count(*) from " + Table.NONACIDORCTBL);//todo: what is the point of this if we just did select *?
     resultCount = 2;
     Assert.assertEquals(resultCount, Integer.parseInt(rs.get(0)));
 
@@ -1500,12 +1509,13 @@ public class TestTxnCommands2 {
     String query = "merge into " + Table.ACIDTBL +
       " t using " + Table.NONACIDPART2 + " s ON t.a = s.a2 " +
       "WHEN MATCHED AND t.b between 1 and 3 THEN UPDATE set b = s.b2 " +
-      "WHEN NOT MATCHED and s.b2 >= 11 THEN INSERT VALUES(s.a2, s.b2)";
+      "WHEN NOT MATCHED and s.b2 >= 8 THEN INSERT VALUES(s.a2, s.b2)";
     runStatementOnDriver(query);
 
    r = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b");
-    int[][] rExpected = {{2,2},{4,3},{5,6},{7,8},{11,11}};
+    int[][] rExpected = {{2,2},{4,3},{5,6},{7,8},{8,8},{11,11}};
     Assert.assertEquals(stringifyValues(rExpected), r);
+    assertUniqueID(Table.ACIDTBL);
   }
 
   /**
@@ -1533,6 +1543,7 @@ public class TestTxnCommands2 {
    r = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b");
     int[][] rExpected = {{2,2},{4,44},{5,5},{7,8},{11,11}};
     Assert.assertEquals(stringifyValues(rExpected), r);
+    assertUniqueID(Table.ACIDTBL);
   }
 
   /**
@@ -1559,27 +1570,34 @@ public class TestTxnCommands2 {
     int[][] rExpected = {{7,8},{11,11}};
     Assert.assertEquals(stringifyValues(rExpected), r);
   }
-  /**
-   * https://hortonworks.jira.com/browse/BUG-66580
-   * @throws Exception
-   */
-  @Ignore
   @Test
   public void testMultiInsert() throws Exception {
-    runStatementOnDriver("create table if not exists  srcpart (a int, b int, c 
int) " +
-      "partitioned by (z int) clustered by (a) into 2 buckets " +
-      "stored as orc tblproperties('transactional'='true')");
     runStatementOnDriver("create temporary table if not exists data1 (x int)");
-//    runStatementOnDriver("create temporary table if not exists data2 (x int)");
-
-    runStatementOnDriver("insert into data1 values (1),(2),(3)");
-//    runStatementOnDriver("insert into data2 values (4),(5),(6)");
+    runStatementOnDriver("insert into data1 values (1),(2),(1)");
     d.destroy();
     hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
     d = new Driver(hiveConf);
-    List<String> r = runStatementOnDriver(" from data1 " +
-      "insert into srcpart partition(z) select 0,0,1,x  " +
-      "insert into srcpart partition(z=1) select 0,0,1");
+
+    runStatementOnDriver(" from data1 " +
+      "insert into " + Table.ACIDTBLPART + " partition(p) select 0, 0, 'p' || 
x  "
+      +
+      "insert into " + Table.ACIDTBLPART + " partition(p='p1') select 0, 1");
+    /**
+     * Using {@link BucketCodec.V0} the output
+     * is missing 1 of the (p1,0,1) rows because they have the same ROW__ID 
and only differ by
+     * StatementId so {@link 
org.apache.hadoop.hive.ql.io.orc.OrcRawRecordMerger} skips one.
+     * With split update (and V0), the data is read correctly (insert deltas 
are now the base) but we still
+     * should get duplicate ROW__IDs.
+     */
+    List<String> r = runStatementOnDriver("select p,a,b from " + 
Table.ACIDTBLPART + " order by p, a, b");
+    Assert.assertEquals("[p1\t0\t0, p1\t0\t0, p1\t0\t1, p1\t0\t1, p1\t0\t1, 
p2\t0\t0]", r.toString());
+    assertUniqueID(Table.ACIDTBLPART);
+    /**
+     * this delete + select covers VectorizedOrcAcidRowBatchReader
+     */
+    runStatementOnDriver("delete from " + Table.ACIDTBLPART);
+    r = runStatementOnDriver("select p,a,b from " + Table.ACIDTBLPART + " order by p, a, b");
+    Assert.assertEquals("[]", r.toString());
   }
   /**
    * Investigating DP and WriteEntity, etc
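
The duplicate ROW__IDs the testMultiInsert change exercises come from the two
insert branches of the multi-insert running in the same transaction. Under
BucketCodec.V0 there is no statementId field, so both branches can emit
identical (transactionid, bucketid, rowid) triples and OrcRawRecordMerger
drops one of them; under V1 the statementId occupies the low 12 bits of the
bucket property, keeping the branches distinct. A sketch, under the same
assumed V1 layout as above:

    // the two insert branches of the multi-insert, both writerId 0:
    int branch0 = (1 << 29) | (0 << 16) | 0;  // 536870912
    int branch1 = (1 << 29) | (0 << 16) | 1;  // 536870913
    // distinct bucket properties -> distinct ROW__IDs for otherwise equal rows
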
@@ -1645,6 +1663,8 @@ public class TestTxnCommands2 {
    r1 = runStatementOnDriver("select p,a,b from " + Table.ACIDTBLPART + " order by p, a, b");
    String result= r1.toString();
    Assert.assertEquals("[new part\t5\t5, new part\t11\t11, p1\t1\t1, p1\t2\t15, p1\t3\t3, p2\t4\t44]", result);
+    //note: inserts go into 'new part'... so this won't fail
+    assertUniqueID(Table.ACIDTBLPART);
   }
   /**
    * Using nested partitions and thus DummyPartition
@@ -1667,6 +1687,8 @@ public class TestTxnCommands2 {
       "when not matched then insert values(s.a, s.b, 3,4)");
    r1 = runStatementOnDriver("select p,q,a,b from " + Table.ACIDNESTEDPART + " order by p,q, a, b");
    Assert.assertEquals(stringifyValues(new int[][] {{1,1,1,1},{1,1,3,3},{1,2,2,15},{1,2,4,44},{3,4,5,5},{3,4,11,11}}), r1);
+    //insert of merge lands in part (3,4) - no updates land there
+    assertUniqueID(Table.ACIDNESTEDPART);
   }
   @Ignore("Covered elsewhere")
   @Test
@@ -1703,6 +1725,41 @@ public class TestTxnCommands2 {
     Assert.assertEquals(stringifyValues(rExpected), r);
   }
 
+  @Test
+  public void testBucketCodec() throws Exception {
+    d.destroy();
+    //insert data in "legacy" format
+    hiveConf.setIntVar(HiveConf.ConfVars.TESTMODE_BUCKET_CODEC_VERSION, 0);
+    d = new Driver(hiveConf);
+
+    int[][] targetVals = {{2,1},{4,3},{5,6},{7,8}};
+    runStatementOnDriver("insert into " + Table.ACIDTBL + " " + 
makeValuesClause(targetVals));
+
+    d.destroy();
+    hiveConf.setIntVar(HiveConf.ConfVars.TESTMODE_BUCKET_CODEC_VERSION, 1);
+    d = new Driver(hiveConf);
+    //do some operations with new format
+    runStatementOnDriver("update " + Table.ACIDTBL + " set b=11 where a in 
(5,7)");
+    runStatementOnDriver("insert into " + Table.ACIDTBL + " values(11,11)");
+    runStatementOnDriver("delete from " + Table.ACIDTBL + " where a = 7");
+
+    //make sure we get the right data back before/after compactions
+    List<String> r = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b");
+    int[][] rExpected = {{2,1},{4,3},{5,11},{11,11}};
+    Assert.assertEquals(stringifyValues(rExpected), r);
+
+    runStatementOnDriver("ALTER TABLE " + Table.ACIDTBL + " COMPACT 'MINOR'");
+    runWorker(hiveConf);
+
+    r = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b");
+    Assert.assertEquals(stringifyValues(rExpected), r);
+
+    runStatementOnDriver("ALTER TABLE " + Table.ACIDTBL + " COMPACT 'MAJOR'");
+    runWorker(hiveConf);
+
+    r = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b");
+    Assert.assertEquals(stringifyValues(rExpected), r);
+  }
   /**
    * takes raw data and turns it into a string as if from Driver.getResults()
    * sorts rows in dictionary order
@@ -1723,7 +1780,7 @@ public class TestTxnCommands2 {
     }
     return rs;
   }
-  private static final class RowComp implements Comparator<int[]> {
+  static class RowComp implements Comparator<int[]> {
     @Override
     public int compare(int[] row1, int[] row2) {
       assert row1 != null && row2 != null && row1.length == row2.length;
@@ -1736,7 +1793,7 @@ public class TestTxnCommands2 {
       return 0;
     }
   }
-  String makeValuesClause(int[][] rows) {
+  static String makeValuesClause(int[][] rows) {
     assert rows.length > 0;
     StringBuilder sb = new StringBuilder("values");
     for(int[] row : rows) {
@@ -1767,4 +1824,19 @@ public class TestTxnCommands2 {
     d.getResults(rs);
     return rs;
   }
+  final void assertUniqueID(Table table) throws Exception {
+    String partCols = table.getPartitionColumns();
+    //check to make sure there are no duplicate ROW__IDs - HIVE-16832
+    StringBuilder sb = new StringBuilder("select ");
+    if(partCols != null && partCols.length() > 0) {
+      sb.append(partCols).append(",");
+    }
+    sb.append(" ROW__ID, count(*) from ").append(table).append(" group by ");
+    if(partCols != null && partCols.length() > 0) {
+      sb.append(partCols).append(",");
+    }
+    sb.append("ROW__ID having count(*) > 1");
+    List<String> r = runStatementOnDriver(sb.toString());
+    Assert.assertTrue("Duplicate ROW__ID: " + r.toString(),r.size() == 0);
+  }
 }
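
For reference, the assertUniqueID() helper added above builds a grouped
duplicate check. For a partitioned table such as ACIDTBLPART (partition
column "p") the generated statement is

    select p, ROW__ID, count(*) from acidTblPart group by p,ROW__ID having count(*) > 1

and the assertion expects an empty result set, i.e. no two rows share a
ROW__ID (HIVE-16832).
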

http://git-wip-us.apache.org/repos/asf/hive/blob/6af30bf2/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2WithSplitUpdate.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2WithSplitUpdate.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2WithSplitUpdate.java
index ea5ecbc..520e958 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2WithSplitUpdate.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2WithSplitUpdate.java
@@ -542,45 +542,4 @@ public class TestTxnCommands2WithSplitUpdate extends TestTxnCommands2 {
     resultCount = 2;
     Assert.assertEquals(resultCount, Integer.parseInt(rs.get(0)));
   }
-  @Ignore("HIVE-14947")
-  @Test
-  @Override
-  public void testDynamicPartitionsMerge() throws Exception {}
-  @Ignore("HIVE-14947")
-  @Test
-  @Override
-  public void testDynamicPartitionsMerge2() throws Exception {}
-  @Ignore("HIVE-14947")
-  @Test
-  @Override
-  public void testMerge() throws Exception {}
-
-  /**
-   * todo: remove this test once HIVE-14947 is done (parent class has a better version)
-   */
-  @Test
-  @Override
-  public void testMerge2() throws Exception {
-    int[][] baseValsOdd = {{5,5},{11,11}};
-    int[][] baseValsEven = {{2,2},{4,44}};
-    runStatementOnDriver("insert into " + Table.NONACIDPART2 + " 
PARTITION(p2='odd') " + makeValuesClause(baseValsOdd));
-    runStatementOnDriver("insert into " + Table.NONACIDPART2 + " 
PARTITION(p2='even') " + makeValuesClause(baseValsEven));
-    int[][] vals = {{2,1},{4,3},{5,6},{7,8}};
-    runStatementOnDriver("insert into " + Table.ACIDTBL + " " + 
makeValuesClause(vals));
-    List<String> r = runStatementOnDriver("select a,b from " + Table.ACIDTBL + 
" order by a,b");
-    Assert.assertEquals(stringifyValues(vals), r);
-    String query = "merge into " + Table.ACIDTBL +
-      " using " + Table.NONACIDPART2 + " source ON " + Table.ACIDTBL + ".a = 
source.a2 " +
-      "WHEN MATCHED THEN UPDATE set b = source.b2 ";
-    r = runStatementOnDriver(query);
-
-    r = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b");
-    int[][] rExpected = {{2,2},{4,44},{5,5},{7,8}};
-    Assert.assertEquals(stringifyValues(rExpected), r);
-
-  }
-  @Ignore("HIVE-14947")
-  @Test
-  @Override
-  public void testMergeWithPredicate() throws Exception {}
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/6af30bf2/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
index c928732..44ff65c 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
@@ -114,25 +114,25 @@ public class TestAcidUtils {
     assertEquals(true, opts.isWritingBase());
     assertEquals(567, opts.getMaximumTransactionId());
     assertEquals(0, opts.getMinimumTransactionId());
-    assertEquals(123, opts.getBucket());
+    assertEquals(123, opts.getBucketId());
    opts = AcidUtils.parseBaseOrDeltaBucketFilename(new Path(dir, "delta_000005_000006/bucket_00001"),
         conf);
     assertEquals(false, opts.getOldStyle());
     assertEquals(false, opts.isWritingBase());
     assertEquals(6, opts.getMaximumTransactionId());
     assertEquals(5, opts.getMinimumTransactionId());
-    assertEquals(1, opts.getBucket());
+    assertEquals(1, opts.getBucketId());
    opts = AcidUtils.parseBaseOrDeltaBucketFilename(new Path(dir, "delete_delta_000005_000006/bucket_00001"),
         conf);
     assertEquals(false, opts.getOldStyle());
     assertEquals(false, opts.isWritingBase());
     assertEquals(6, opts.getMaximumTransactionId());
     assertEquals(5, opts.getMinimumTransactionId());
-    assertEquals(1, opts.getBucket());
+    assertEquals(1, opts.getBucketId());
    opts = AcidUtils.parseBaseOrDeltaBucketFilename(new Path(dir, "000123_0"), conf);
     assertEquals(true, opts.getOldStyle());
     assertEquals(true, opts.isWritingBase());
-    assertEquals(123, opts.getBucket());
+    assertEquals(123, opts.getBucketId());
     assertEquals(0, opts.getMinimumTransactionId());
     assertEquals(0, opts.getMaximumTransactionId());
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6af30bf2/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
index 43ed238..b004cf5 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
@@ -81,7 +81,6 @@ import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.io.IOConstants;
 import org.apache.hadoop.hive.ql.io.InputFormatChecker;
 import org.apache.hadoop.hive.ql.io.RecordIdentifier;
-import org.apache.hadoop.hive.ql.io.RecordUpdater;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat.Context;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat.SplitStrategy;
 import org.apache.hadoop.hive.ql.io.sarg.ConvertAstToSearchArg;
@@ -2445,14 +2444,14 @@ public class TestInputOutputFormat {
     assertEquals("mock:/combinationAcid/p=0/base_0000010/bucket_00000",
         split.getPath().toString());
     assertEquals(0, split.getStart());
-    assertEquals(607, split.getLength());
+    assertEquals(648, split.getLength());
     split = (HiveInputFormat.HiveInputSplit) splits[1];
     assertEquals("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat",
         split.inputFormatClassName());
     assertEquals("mock:/combinationAcid/p=0/base_0000010/bucket_00001",
         split.getPath().toString());
     assertEquals(0, split.getStart());
-    assertEquals(629, split.getLength());
+    assertEquals(674, split.getLength());
     CombineHiveInputFormat.CombineHiveInputSplit combineSplit =
         (CombineHiveInputFormat.CombineHiveInputSplit) splits[2];
     assertEquals(BUCKETS, combineSplit.getNumPaths());
@@ -3858,7 +3857,7 @@ public class TestInputOutputFormat {
     OrcStruct struct = reader.createValue();
     while (reader.next(id, struct)) {
       assertEquals("id " + record, record, id.getRowId());
-      assertEquals("bucket " + record, 0, id.getBucketId());
+      assertEquals("bucket " + record, 0, id.getBucketProperty());
       assertEquals("trans " + record, 1, id.getTransactionId());
       assertEquals("a " + record,
           42 * record, ((IntWritable) struct.getFieldValue(0)).get());
@@ -3885,7 +3884,7 @@ public class TestInputOutputFormat {
     struct = reader.createValue();
     while (reader.next(id, struct)) {
       assertEquals("id " + record, record, id.getRowId());
-      assertEquals("bucket " + record, 0, id.getBucketId());
+      assertEquals("bucket " + record, 0, id.getBucketProperty());
       assertEquals("trans " + record, 1, id.getTransactionId());
       assertEquals("a " + record,
           42 * record, ((IntWritable) struct.getFieldValue(0)).get());

http://git-wip-us.apache.org/repos/asf/hive/blob/6af30bf2/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
index 584bd3b..2406af5 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.io.orc;
 
+import org.apache.hadoop.hive.ql.io.BucketCodec;
 import org.apache.orc.CompressionKind;
 import org.apache.orc.MemoryManager;
 import org.apache.orc.StripeInformation;
@@ -38,10 +39,8 @@ import org.apache.hadoop.hive.ql.io.RecordUpdater;
 import org.apache.hadoop.hive.ql.io.orc.OrcRawRecordMerger.OriginalReaderPair;
 import org.apache.hadoop.hive.ql.io.orc.OrcRawRecordMerger.ReaderKey;
 import org.apache.hadoop.hive.ql.io.orc.OrcRawRecordMerger.ReaderPair;
-import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.io.IntWritable;
@@ -192,14 +191,14 @@ public class TestOrcRawRecordMerger {
     pair.advnaceToMinKey();
     RecordReader recordReader = pair.recordReader;
     assertEquals(10, key.getTransactionId());
-    assertEquals(20, key.getBucketId());
+    assertEquals(20, key.getBucketProperty());
     assertEquals(40, key.getRowId());
     assertEquals(120, key.getCurrentTransactionId());
     assertEquals("third", value(pair.nextRecord));
 
     pair.next(pair.nextRecord);
     assertEquals(40, key.getTransactionId());
-    assertEquals(50, key.getBucketId());
+    assertEquals(50, key.getBucketProperty());
     assertEquals(60, key.getRowId());
     assertEquals(130, key.getCurrentTransactionId());
     assertEquals("fourth", value(pair.nextRecord));
@@ -219,35 +218,35 @@ public class TestOrcRawRecordMerger {
     pair.advnaceToMinKey();
     RecordReader recordReader = pair.recordReader;
     assertEquals(10, key.getTransactionId());
-    assertEquals(20, key.getBucketId());
+    assertEquals(20, key.getBucketProperty());
     assertEquals(20, key.getRowId());
     assertEquals(100, key.getCurrentTransactionId());
     assertEquals("first", value(pair.nextRecord));
 
     pair.next(pair.nextRecord);
     assertEquals(10, key.getTransactionId());
-    assertEquals(20, key.getBucketId());
+    assertEquals(20, key.getBucketProperty());
     assertEquals(30, key.getRowId());
     assertEquals(110, key.getCurrentTransactionId());
     assertEquals("second", value(pair.nextRecord));
 
     pair.next(pair.nextRecord);
     assertEquals(10, key.getTransactionId());
-    assertEquals(20, key.getBucketId());
+    assertEquals(20, key.getBucketProperty());
     assertEquals(40, key.getRowId());
     assertEquals(120, key.getCurrentTransactionId());
     assertEquals("third", value(pair.nextRecord));
 
     pair.next(pair.nextRecord);
     assertEquals(40, key.getTransactionId());
-    assertEquals(50, key.getBucketId());
+    assertEquals(50, key.getBucketProperty());
     assertEquals(60, key.getRowId());
     assertEquals(130, key.getCurrentTransactionId());
     assertEquals("fourth", value(pair.nextRecord));
 
     pair.next(pair.nextRecord);
     assertEquals(40, key.getTransactionId());
-    assertEquals(50, key.getBucketId());
+    assertEquals(50, key.getBucketProperty());
     assertEquals(61, key.getRowId());
     assertEquals(140, key.getCurrentTransactionId());
     assertEquals("fifth", value(pair.nextRecord));
@@ -302,14 +301,14 @@ public class TestOrcRawRecordMerger {
     pair.advnaceToMinKey();
     RecordReader recordReader = pair.recordReader;
     assertEquals(0, key.getTransactionId());
-    assertEquals(10, key.getBucketId());
+    assertEquals(10, key.getBucketProperty());
     assertEquals(2, key.getRowId());
     assertEquals(0, key.getCurrentTransactionId());
     assertEquals("third", value(pair.nextRecord));
 
     pair.next(pair.nextRecord);
     assertEquals(0, key.getTransactionId());
-    assertEquals(10, key.getBucketId());
+    assertEquals(10, key.getBucketProperty());
     assertEquals(3, key.getRowId());
     assertEquals(0, key.getCurrentTransactionId());
     assertEquals("fourth", value(pair.nextRecord));
@@ -337,35 +336,35 @@ public class TestOrcRawRecordMerger {
     pair.advnaceToMinKey();
     assertEquals("first", value(pair.nextRecord));
     assertEquals(0, key.getTransactionId());
-    assertEquals(10, key.getBucketId());
+    assertEquals(10, key.getBucketProperty());
     assertEquals(0, key.getRowId());
     assertEquals(0, key.getCurrentTransactionId());
 
     pair.next(pair.nextRecord);
     assertEquals("second", value(pair.nextRecord));
     assertEquals(0, key.getTransactionId());
-    assertEquals(10, key.getBucketId());
+    assertEquals(10, key.getBucketProperty());
     assertEquals(1, key.getRowId());
     assertEquals(0, key.getCurrentTransactionId());
 
     pair.next(pair.nextRecord);
     assertEquals("third", value(pair.nextRecord));
     assertEquals(0, key.getTransactionId());
-    assertEquals(10, key.getBucketId());
+    assertEquals(10, key.getBucketProperty());
     assertEquals(2, key.getRowId());
     assertEquals(0, key.getCurrentTransactionId());
 
     pair.next(pair.nextRecord);
     assertEquals("fourth", value(pair.nextRecord));
     assertEquals(0, key.getTransactionId());
-    assertEquals(10, key.getBucketId());
+    assertEquals(10, key.getBucketProperty());
     assertEquals(3, key.getRowId());
     assertEquals(0, key.getCurrentTransactionId());
 
     pair.next(pair.nextRecord);
     assertEquals("fifth", value(pair.nextRecord));
     assertEquals(0, key.getTransactionId());
-    assertEquals(10, key.getBucketId());
+    assertEquals(10, key.getBucketProperty());
     assertEquals(4, key.getRowId());
     assertEquals(0, key.getCurrentTransactionId());
 
@@ -448,13 +447,13 @@ public class TestOrcRawRecordMerger {
 
     assertEquals(true, merger.next(id, event));
     assertEquals(10, id.getTransactionId());
-    assertEquals(20, id.getBucketId());
+    assertEquals(20, id.getBucketProperty());
     assertEquals(40, id.getRowId());
     assertEquals("third", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(40, id.getTransactionId());
-    assertEquals(50, id.getBucketId());
+    assertEquals(50, id.getBucketProperty());
     assertEquals(60, id.getRowId());
     assertEquals("fourth", getValue(event));
 
@@ -580,6 +579,7 @@ public class TestOrcRawRecordMerger {
     // write the base
     AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
         .inspector(inspector).bucket(BUCKET).finalDestination(root);
+    final int BUCKET_PROPERTY = BucketCodec.V1.encode(options);
     if(!use130Format) {
       options.statementId(-1);
     }
@@ -593,11 +593,11 @@ public class TestOrcRawRecordMerger {
     // write a delta
    ru = of.getRecordUpdater(root, options.writingBase(false)
        .minimumTransactionId(200).maximumTransactionId(200).recordIdColumn(1));
-    ru.update(200, new MyRow("update 1", 0, 0, BUCKET));
-    ru.update(200, new MyRow("update 2", 2, 0, BUCKET));
-    ru.update(200, new MyRow("update 3", 3, 0, BUCKET));
-    ru.delete(200, new MyRow("", 7, 0, BUCKET));
-    ru.delete(200, new MyRow("", 8, 0, BUCKET));
+    ru.update(200, new MyRow("update 1", 0, 0, BUCKET_PROPERTY));
+    ru.update(200, new MyRow("update 2", 2, 0, BUCKET_PROPERTY));
+    ru.update(200, new MyRow("update 3", 3, 0, BUCKET_PROPERTY));
+    ru.delete(200, new MyRow("", 7, 0, BUCKET_PROPERTY));
+    ru.delete(200, new MyRow("", 8, 0, BUCKET_PROPERTY));
     ru.close(false);
 
     ValidTxnList txnList = new ValidReadTxnList("200:" + Long.MAX_VALUE);
@@ -629,64 +629,64 @@ public class TestOrcRawRecordMerger {
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.UPDATE_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 0, 200), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 0, 200), id);
     assertEquals("update 1", getValue(event));
     assertFalse(merger.isDelete(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 1, 0), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 1, 0), id);
     assertEquals("second", getValue(event));
     assertFalse(merger.isDelete(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.UPDATE_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 2, 200), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 2, 200), id);
     assertEquals("update 2", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.UPDATE_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 3, 200), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 3, 200), id);
     assertEquals("update 3", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 4, 0), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 4, 0), id);
     assertEquals("fifth", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 5, 0), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 5, 0), id);
     assertEquals("sixth", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 6, 0), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 6, 0), id);
     assertEquals("seventh", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.DELETE_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 7, 200), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 7, 200), id);
     assertNull(OrcRecordUpdater.getRow(event));
     assertTrue(merger.isDelete(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.DELETE_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 8, 200), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 8, 200), id);
     assertNull(OrcRecordUpdater.getRow(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 9, 0), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 9, 0), id);
     assertEquals("tenth", getValue(event));
 
     assertEquals(false, merger.next(id, event));
@@ -700,90 +700,90 @@ public class TestOrcRawRecordMerger {
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.UPDATE_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 0, 200), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 0, 200), id);
     assertEquals("update 1", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 0, 0), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 0, 0), id);
     assertEquals("first", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 1, 0), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 1, 0), id);
     assertEquals("second", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.UPDATE_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 2, 200), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 2, 200), id);
     assertEquals("update 2", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 2, 0), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 2, 0), id);
     assertEquals("third", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.UPDATE_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 3, 200), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 3, 200), id);
     assertEquals("update 3", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 3, 0), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 3, 0), id);
     assertEquals("fourth", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 4, 0), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 4, 0), id);
     assertEquals("fifth", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 5, 0), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 5, 0), id);
     assertEquals("sixth", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 6, 0), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 6, 0), id);
     assertEquals("seventh", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.DELETE_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 7, 200), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 7, 200), id);
     assertNull(OrcRecordUpdater.getRow(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 7, 0), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 7, 0), id);
     assertEquals("eighth", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.DELETE_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 8, 200), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 8, 200), id);
     assertNull(OrcRecordUpdater.getRow(event));
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 8, 0), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 8, 0), id);
     assertEquals("ninth", getValue(event));
 
     assertEquals(true, merger.next(id, event));
     assertEquals(OrcRecordUpdater.INSERT_OPERATION,
         OrcRecordUpdater.getOperation(event));
-    assertEquals(new ReaderKey(0, BUCKET, 9, 0), id);
+    assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 9, 0), id);
     assertEquals("tenth", getValue(event));
 
     assertEquals(false, merger.next(id, event));
@@ -800,7 +800,7 @@ public class TestOrcRawRecordMerger {
       LOG.info("id = " + id + "event = " + event);
       assertEquals(OrcRecordUpdater.INSERT_OPERATION,
           OrcRecordUpdater.getOperation(event));
-      assertEquals(new ReaderKey(0, BUCKET, i, 0), id);
+      assertEquals(new ReaderKey(0, BUCKET_PROPERTY, i, 0), id);
       assertEquals(values[i], getValue(event));
     }
 
@@ -988,6 +988,9 @@ public class TestOrcRawRecordMerger {
         new OrcRecordUpdater.OrcOptions(conf)
         .writingBase(true).minimumTransactionId(0).maximumTransactionId(0)
         .bucket(BUCKET).inspector(inspector).filesystem(fs);
+
+    final int BUCKET_PROPERTY = BucketCodec.V1.encode(options);
+
     options.orcOptions(OrcFile.writerOptions(conf)
       .stripeSize(1).blockPadding(false).compress(CompressionKind.NONE)
       .memory(mgr).batchSize(2));
@@ -1008,10 +1011,10 @@ public class TestOrcRawRecordMerger {
         "ignore.7"};
     for(int i=0; i < values.length; ++i) {
       if (values[i] != null) {
-        ru.update(1, new BigRow(i, i, values[i], i, i, i, 0, BUCKET));
+        ru.update(1, new BigRow(i, i, values[i], i, i, i, 0, BUCKET_PROPERTY));
       }
     }
-    ru.delete(100, new BigRow(9, 0, BUCKET));
+    ru.delete(100, new BigRow(9, 0, BUCKET_PROPERTY));
     ru.close(false);
 
     // write a delta
@@ -1020,10 +1023,10 @@ public class TestOrcRawRecordMerger {
     values = new String[]{null, null, "1.0", null, null, null, null, "3.1"};
     for(int i=0; i < values.length; ++i) {
       if (values[i] != null) {
-        ru.update(2, new BigRow(i, i, values[i], i, i, i, 0, BUCKET));
+        ru.update(2, new BigRow(i, i, values[i], i, i, i, 0, BUCKET_PROPERTY));
       }
     }
-    ru.delete(100, new BigRow(8, 0, BUCKET));
+    ru.delete(100, new BigRow(8, 0, BUCKET_PROPERTY));
     ru.close(false);
 
     InputFormat inf = new OrcInputFormat();
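
A note on the BUCKET_PROPERTY constant introduced in this file:
BucketCodec.V1.encode() reads the writer (bucket) id, and the statement id if
any, from the AcidOutputFormat.Options it is given. Assuming the statement id
contributes 0 for these options, the constant reduces to the same arithmetic
as the earlier sketch: BUCKET_PROPERTY == (1 << 29) | (BUCKET << 16).
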

http://git-wip-us.apache.org/repos/asf/hive/blob/6af30bf2/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
index 67c473e..be15517 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.io.BucketCodec;
 import org.apache.hadoop.hive.ql.io.RecordIdentifier;
 import org.apache.hadoop.hive.ql.io.RecordUpdater;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -143,27 +144,27 @@ public class TestOrcRecordUpdater {
         OrcRecordUpdater.getOperation(row));
     assertEquals(11, OrcRecordUpdater.getCurrentTransaction(row));
     assertEquals(11, OrcRecordUpdater.getOriginalTransaction(row));
-    assertEquals(10, OrcRecordUpdater.getBucket(row));
+    assertEquals(10, getBucketId(row));
     assertEquals(0, OrcRecordUpdater.getRowId(row));
     assertEquals("first",
         OrcRecordUpdater.getRow(row).getFieldValue(0).toString());
     assertEquals(true, rows.hasNext());
     row = (OrcStruct) rows.next(null);
     assertEquals(1, OrcRecordUpdater.getRowId(row));
-    assertEquals(10, OrcRecordUpdater.getBucket(row));
+    assertEquals(10, getBucketId(row));
     assertEquals("second",
         OrcRecordUpdater.getRow(row).getFieldValue(0).toString());
     assertEquals(true, rows.hasNext());
     row = (OrcStruct) rows.next(null);
     assertEquals(2, OrcRecordUpdater.getRowId(row));
-    assertEquals(10, OrcRecordUpdater.getBucket(row));
+    assertEquals(10, getBucketId(row));
     assertEquals("third",
         OrcRecordUpdater.getRow(row).getFieldValue(0).toString());
     assertEquals(true, rows.hasNext());
     row = (OrcStruct) rows.next(null);
     assertEquals(12, OrcRecordUpdater.getCurrentTransaction(row));
     assertEquals(12, OrcRecordUpdater.getOriginalTransaction(row));
-    assertEquals(10, OrcRecordUpdater.getBucket(row));
+    assertEquals(10, getBucketId(row));
     assertEquals(0, OrcRecordUpdater.getRowId(row));
     assertEquals("fourth",
         OrcRecordUpdater.getRow(row).getFieldValue(0).toString());
@@ -184,7 +185,11 @@ public class TestOrcRecordUpdater {
 
     assertEquals(false, fs.exists(sidePath));
   }
-
+  private static int getBucketId(OrcStruct row) {
+    int bucketValue = OrcRecordUpdater.getBucket(row);
+    return
+      BucketCodec.determineVersion(bucketValue).decodeWriterId(bucketValue);
+  }
   @Test
   public void testWriterTblProperties() throws Exception {
     Path root = new Path(workDir, "testWriterTblProperties");
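
The getBucketId() helper above recovers the writer id from an encoded bucket
property: BucketCodec.determineVersion() inspects the version bits, then
decodeWriterId() extracts the writerId field. The decode direction, under the
same assumed V1 layout as the earlier sketch:

    // Sketch only; the real logic lives in BucketCodec.
    static int decodeWriterIdV1(int bucketProperty) {
      return (bucketProperty >>> 16) & 0xFFF; // 12-bit writerId field
    }
    // decodeWriterIdV1(536936448) == 1
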

http://git-wip-us.apache.org/repos/asf/hive/blob/6af30bf2/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
index 73bc1ab..439ec9b 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.io.BucketCodec;
 import org.apache.hadoop.hive.ql.io.IOConstants;
 import org.apache.hadoop.hive.ql.io.RecordIdentifier;
 import org.apache.hadoop.hive.ql.io.RecordUpdater;
@@ -72,6 +73,7 @@ public class TestVectorizedOrcAcidRowBatchReader {
 
     DummyRow(long val, long rowId, long origTxn, int bucket) {
       field = new LongWritable(val);
+      bucket = BucketCodec.V1.encode(new AcidOutputFormat.Options(null).bucket(bucket));
       ROW__ID = new RecordIdentifier(origTxn, bucket, rowId);
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6af30bf2/ql/src/test/queries/clientpositive/acid_bucket_pruning.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_bucket_pruning.q b/ql/src/test/queries/clientpositive/acid_bucket_pruning.q
index 24f8de1..d8d59b2 100644
--- a/ql/src/test/queries/clientpositive/acid_bucket_pruning.q
+++ b/ql/src/test/queries/clientpositive/acid_bucket_pruning.q
@@ -18,4 +18,10 @@ INSERT INTO TABLE acidTblDefault VALUES (1);
 -- Exactly one of the buckets should be selected out of the 16 buckets
 -- by the following selection query.
 EXPLAIN EXTENDED
-SELECT * FROM acidTblDefault WHERE a = 1;
\ No newline at end of file
+SELECT * FROM acidTblDefault WHERE a = 1;
+
+select count(*) from acidTblDefault WHERE a = 1;
+
+set hive.tez.bucket.pruning=false;
+
+select count(*) from acidTblDefault WHERE a = 1;
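
The two added count(*) queries run the same lookup with hive.tez.bucket.pruning
left on and then switched off; the llap/acid_bucket_pruning.q.out update
further down shows both return 1, confirming that pruning selects the bucket
that actually holds the row.
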

http://git-wip-us.apache.org/repos/asf/hive/blob/6af30bf2/ql/src/test/results/clientpositive/acid_table_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/acid_table_stats.q.out b/ql/src/test/results/clientpositive/acid_table_stats.q.out
index 195278a..6ab6b43 100644
--- a/ql/src/test/results/clientpositive/acid_table_stats.q.out
+++ b/ql/src/test/results/clientpositive/acid_table_stats.q.out
@@ -98,7 +98,7 @@ Partition Parameters:
        numFiles                2                   
        numRows                 0                   
        rawDataSize             0                   
-       totalSize               3852                
+       totalSize               3950                
 #### A masked pattern was here ####
                 
 # Storage Information           
@@ -136,9 +136,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: acid
-            Statistics: Num rows: 1 Data size: 3852 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 3950 Basic stats: PARTIAL Column stats: NONE
             Select Operator
-              Statistics: Num rows: 1 Data size: 3852 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 1 Data size: 3950 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
                 aggregations: count()
                 mode: hash
@@ -215,7 +215,7 @@ Partition Parameters:
        numFiles                2                   
        numRows                 1000                
        rawDataSize             208000              
-       totalSize               3852                
+       totalSize               3950                
 #### A masked pattern was here ####
                 
 # Storage Information           
@@ -264,7 +264,7 @@ Partition Parameters:
        numFiles                2                   
        numRows                 1000                
        rawDataSize             208000              
-       totalSize               3852                
+       totalSize               3950                
 #### A masked pattern was here ####
                 
 # Storage Information           
@@ -391,7 +391,7 @@ Partition Parameters:
        numFiles                4                   
        numRows                 1000                
        rawDataSize             208000              
-       totalSize               7718                
+       totalSize               7904                
 #### A masked pattern was here ####
                 
 # Storage Information           
@@ -440,7 +440,7 @@ Partition Parameters:
        numFiles                4                   
        numRows                 2000                
        rawDataSize             416000              
-       totalSize               7718                
+       totalSize               7904                
 #### A masked pattern was here ####
                 
 # Storage Information           

http://git-wip-us.apache.org/repos/asf/hive/blob/6af30bf2/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
index c3ad192..fe3b9e5 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
@@ -201,7 +201,7 @@ Table Parameters:
        numFiles                2                   
        numRows                 0                   
        rawDataSize             0                   
-       totalSize               1724                
+       totalSize               1798                
        transactional           true                
 #### A masked pattern was here ####
                 
@@ -244,7 +244,7 @@ Table Parameters:
        numFiles                4                   
        numRows                 0                   
        rawDataSize             0                   
-       totalSize               2763                
+       totalSize               2909                
        transactional           true                
 #### A masked pattern was here ####
                 

http://git-wip-us.apache.org/repos/asf/hive/blob/6af30bf2/ql/src/test/results/clientpositive/insert_values_orig_table_use_metadata.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_values_orig_table_use_metadata.q.out b/ql/src/test/results/clientpositive/insert_values_orig_table_use_metadata.q.out
index bcf33d4..6df425f 100644
--- a/ql/src/test/results/clientpositive/insert_values_orig_table_use_metadata.q.out
+++ b/ql/src/test/results/clientpositive/insert_values_orig_table_use_metadata.q.out
@@ -171,7 +171,7 @@ Table Parameters:
        numFiles                1                   
        numRows                 0                   
        rawDataSize             0                   
-       totalSize               295426              
+       totalSize               295483              
        transactional           true                
 #### A masked pattern was here ####
                 
@@ -199,9 +199,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: acid_ivot
-            Statistics: Num rows: 1 Data size: 295426 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 295483 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              Statistics: Num rows: 1 Data size: 295426 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 295483 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 aggregations: count()
                 mode: hash
@@ -364,7 +364,7 @@ Table Parameters:
        numFiles                1                   
        numRows                 0                   
        rawDataSize             0                   
-       totalSize               1512                
+       totalSize               1554                
        transactional           true                
 #### A masked pattern was here ####
                 
@@ -392,9 +392,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: acid_ivot
-            Statistics: Num rows: 1 Data size: 1512 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 1554 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              Statistics: Num rows: 1 Data size: 1512 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 1554 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 aggregations: count()
                 mode: hash
@@ -486,7 +486,7 @@ Table Parameters:
        numFiles                2                   
        numRows                 0                   
        rawDataSize             0                   
-       totalSize               3024                
+       totalSize               3109                
        transactional           true                
 #### A masked pattern was here ####
                 
@@ -514,9 +514,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: acid_ivot
-            Statistics: Num rows: 1 Data size: 3024 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 3109 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              Statistics: Num rows: 1 Data size: 3024 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 3109 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 aggregations: count()
                 mode: hash
@@ -606,7 +606,7 @@ Table Parameters:
        numFiles                3                   
        numRows                 0                   
        rawDataSize             0                   
-       totalSize               298450              
+       totalSize               298592              
        transactional           true                
 #### A masked pattern was here ####
                 
@@ -634,9 +634,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: acid_ivot
-            Statistics: Num rows: 1 Data size: 298450 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 298592 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              Statistics: Num rows: 1 Data size: 298450 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 298592 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 aggregations: count()
                 mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/6af30bf2/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
index 357ae7b..97f8d6b 100644
--- a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
@@ -43,22 +43,22 @@ STAGE PLANS:
                   alias: acidtbldefault
                   filterExpr: (a = 1) (type: boolean)
                   buckets included: [1,] of 16
-                  Statistics: Num rows: 7972 Data size: 31888 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 8143 Data size: 32572 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
                     predicate: (a = 1) (type: boolean)
-                    Statistics: Num rows: 3986 Data size: 15944 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 4071 Data size: 16284 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: 1 (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 3986 Data size: 15944 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 4071 Data size: 16284 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
                         GlobalTableId: 0
 #### A masked pattern was here ####
                         NumFilesPerFileSink: 1
-                        Statistics: Num rows: 3986 Data size: 15944 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 4071 Data size: 16284 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -100,7 +100,7 @@ STAGE PLANS:
                     serialization.ddl struct acidtbldefault { i32 a}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                    totalSize 31888
+                    totalSize 32572
                     transactional true
                     transactional_properties default
 #### A masked pattern was here ####
@@ -123,7 +123,7 @@ STAGE PLANS:
                       serialization.ddl struct acidtbldefault { i32 a}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                      totalSize 31888
+                      totalSize 32572
                       transactional true
                       transactional_properties default
 #### A masked pattern was here ####
@@ -139,3 +139,21 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+PREHOOK: query: select count(*) from acidTblDefault WHERE a = 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acidtbldefault
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from acidTblDefault WHERE a = 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acidtbldefault
+#### A masked pattern was here ####
+1
+PREHOOK: query: select count(*) from acidTblDefault WHERE a = 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acidtbldefault
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from acidTblDefault WHERE a = 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acidtbldefault
+#### A masked pattern was here ####
+1

http://git-wip-us.apache.org/repos/asf/hive/blob/6af30bf2/ql/src/test/results/clientpositive/row__id.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/row__id.q.out b/ql/src/test/results/clientpositive/row__id.q.out
index 43c9b60..059ace9 100644
--- a/ql/src/test/results/clientpositive/row__id.q.out
+++ b/ql/src/test/results/clientpositive/row__id.q.out
@@ -56,23 +56,23 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: hello_acid
-            Statistics: Num rows: 1 Data size: 2936 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 3054 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: ROW__ID.transactionid (type: bigint)
               outputColumnNames: _col0
-              Statistics: Num rows: 1 Data size: 2936 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 1 Data size: 3054 Basic stats: PARTIAL Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: bigint)
                 sort order: +
-                Statistics: Num rows: 1 Data size: 2936 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 1 Data size: 3054 Basic stats: PARTIAL Column stats: NONE
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: bigint)
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 2936 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 3054 Basic stats: PARTIAL Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 2936 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 3054 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -117,17 +117,17 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: hello_acid
-            Statistics: Num rows: 1 Data size: 2936 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 3054 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
               predicate: (ROW__ID.transactionid = 3) (type: boolean)
-              Statistics: Num rows: 1 Data size: 2936 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 3054 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ROW__ID.transactionid (type: bigint)
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 2936 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 3054 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 2936 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 3054 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
