HIVE-17488 Move first set of classes to standalone metastore. This closes #244. (Alan Gates, reviewed by Owen O'Malley)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b0b6db73
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b0b6db73
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b0b6db73

Branch: refs/heads/master
Commit: b0b6db7307767710d58bdf59234959ffc17445b1
Parents: 1c84e0c
Author: Alan Gates <ga...@hortonworks.com>
Authored: Thu Sep 14 15:08:53 2017 -0700
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Thu Sep 14 15:08:53 2017 -0700

----------------------------------------------------------------------
 .gitignore                                      |    1 +
 .../org/apache/hive/beeline/HiveSchemaTool.java |    5 +-
 .../apache/hive/beeline/TestHiveSchemaTool.java |    9 +-
 .../common/classification/RetrySemantics.java   |   54 -
 .../hive/hcatalog/streaming/TestStreaming.java  |    2 +-
 .../hadoop/hive/metastore/TestFilterHooks.java  |    3 +-
 .../hive/ql/txn/compactor/TestCompactor.java    |   40 +-
 metastore/pom.xml                               |   39 -
 .../apache/hadoop/hive/metastore/Metastore.java | 1331 ------------------
 .../hive/metastore/AggregateStatsCache.java     |  573 --------
 .../hadoop/hive/metastore/DatabaseProduct.java  |   75 -
 .../hive/metastore/DefaultHiveMetaHook.java     |   51 -
 .../DefaultMetaStoreFilterHookImpl.java         |  102 --
 .../hadoop/hive/metastore/FileFormatProxy.java  |   64 -
 .../hive/metastore/HiveMetaException.java       |   42 -
 .../hadoop/hive/metastore/HiveMetaHook.java     |   96 --
 .../hive/metastore/HiveMetaHookLoader.java      |   39 -
 .../hadoop/hive/metastore/HiveMetaStore.java    |    2 +-
 .../hive/metastore/HiveMetaStoreClient.java     |    3 +-
 .../hive/metastore/HiveMetaStoreFsImpl.java     |   52 -
 .../hive/metastore/IExtrapolatePartStatus.java  |   85 --
 .../hadoop/hive/metastore/IHMSHandler.java      |   28 -
 .../hive/metastore/IMetaStoreSchemaInfo.java    |  107 --
 .../metastore/LinearExtrapolatePartStatus.java  |  106 --
 .../hive/metastore/LockComponentBuilder.java    |  121 --
 .../hive/metastore/LockRequestBuilder.java      |  168 ---
 .../hive/metastore/MetaStoreDirectSql.java      |    4 +-
 .../metastore/MetaStoreEndFunctionContext.java  |   59 -
 .../metastore/MetaStoreEndFunctionListener.java |   58 -
 .../hadoop/hive/metastore/MetaStoreFS.java      |   43 -
 .../hive/metastore/MetaStoreFilterHook.java     |  132 --
 .../hadoop/hive/metastore/MetaStoreInit.java    |  112 --
 .../hive/metastore/MetaStoreSchemaInfo.java     |  234 ---
 .../metastore/MetaStoreSchemaInfoFactory.java   |   65 -
 .../hadoop/hive/metastore/MetaStoreThread.java  |   62 -
 .../hive/metastore/PartitionDropOptions.java    |   54 -
 .../metastore/PartitionExpressionProxy.java     |   73 -
 .../hive/metastore/TServerSocketKeepAlive.java  |   47 -
 .../apache/hadoop/hive/metastore/TableType.java |   26 -
 .../hive/metastore/annotation/NoReconnect.java  |   29 -
 .../metastore/hooks/JDOConnectionURLHook.java   |   53 -
 .../spec/CompositePartitionSpecProxy.java       |  228 ---
 .../spec/PartitionListComposingSpecProxy.java   |  171 ---
 .../partition/spec/PartitionSpecProxy.java      |  199 ---
 .../spec/PartitionSpecWithSharedSDProxy.java    |  172 ---
 .../hive/metastore/tools/HiveSchemaHelper.java  |  640 ---------
 .../hadoop/hive/metastore/metastore.proto       |   29 -
 .../hive/metastore/TestAggregateStatsCache.java |  266 ----
 .../hive/metastore/TestLockRequestBuilder.java  |  584 --------
 .../metastore/TestMetaStoreSchemaFactory.java   |   67 -
 .../hive/metastore/TestMetaStoreSchemaInfo.java |   52 -
 .../AuthorizationMetaStoreFilterHook.java       |    4 +-
 .../hive/ql/txn/compactor/CompactorThread.java  |   23 +-
 .../apache/hadoop/hive/ql/TestTxnCommands2.java |   18 +-
 .../hive/ql/txn/compactor/CompactorTest.java    |    2 +-
 standalone-metastore/pom.xml                    |   50 +
 .../common/classification/RetrySemantics.java   |   57 +
 .../hive/metastore/AggregateStatsCache.java     |  567 ++++++++
 .../hadoop/hive/metastore/DatabaseProduct.java  |   75 +
 .../hive/metastore/DefaultHiveMetaHook.java     |   51 +
 .../DefaultMetaStoreFilterHookImpl.java         |  102 ++
 .../hadoop/hive/metastore/FileFormatProxy.java  |   64 +
 .../hive/metastore/HiveMetaException.java       |   42 +
 .../hadoop/hive/metastore/HiveMetaHook.java     |   95 ++
 .../hive/metastore/HiveMetaHookLoader.java      |   39 +
 .../hive/metastore/HiveMetaStoreFsImpl.java     |   53 +
 .../hive/metastore/IExtrapolatePartStatus.java  |   85 ++
 .../hadoop/hive/metastore/IHMSHandler.java      |   28 +
 .../hive/metastore/IMetaStoreSchemaInfo.java    |  108 ++
 .../metastore/LinearExtrapolatePartStatus.java  |  106 ++
 .../hive/metastore/LockComponentBuilder.java    |  121 ++
 .../hive/metastore/LockRequestBuilder.java      |  168 +++
 .../metastore/MetaStoreEndFunctionContext.java  |   59 +
 .../metastore/MetaStoreEndFunctionListener.java |   58 +
 .../hadoop/hive/metastore/MetaStoreFS.java      |   43 +
 .../hive/metastore/MetaStoreFilterHook.java     |  132 ++
 .../hadoop/hive/metastore/MetaStoreInit.java    |  109 ++
 .../hive/metastore/MetaStoreSchemaInfo.java     |  234 +++
 .../metastore/MetaStoreSchemaInfoFactory.java   |   64 +
 .../hadoop/hive/metastore/MetaStoreThread.java  |   57 +
 .../hive/metastore/PartitionDropOptions.java    |   54 +
 .../metastore/PartitionExpressionProxy.java     |   73 +
 .../hive/metastore/TServerSocketKeepAlive.java  |   47 +
 .../apache/hadoop/hive/metastore/TableType.java |   26 +
 .../annotation/MetastoreVersionAnnotation.java  |   85 ++
 .../hive/metastore/annotation/NoReconnect.java  |   29 +
 .../metastore/hooks/JDOConnectionURLHook.java   |   53 +
 .../spec/CompositePartitionSpecProxy.java       |  226 +++
 .../spec/PartitionListComposingSpecProxy.java   |  171 +++
 .../partition/spec/PartitionSpecProxy.java      |  199 +++
 .../spec/PartitionSpecWithSharedSDProxy.java    |  172 +++
 .../hive/metastore/tools/HiveSchemaHelper.java  |  641 +++++++++
 .../hadoop/hive/metastore/utils/FileUtils.java  |   66 +
 .../hadoop/hive/metastore/utils/JavaUtils.java  |   37 +
 .../hive/metastore/utils/MetaStoreUtils.java    |   41 +
 .../metastore/utils/MetastoreVersionInfo.java   |  133 ++
 .../hadoop/hive/metastore/metastore.proto       |   29 +
 .../src/main/resources/saveVersion.sh           |   91 ++
 .../hive/metastore/TestAggregateStatsCache.java |  267 ++++
 .../hive/metastore/TestLockRequestBuilder.java  |  584 ++++++++
 .../metastore/TestMetaStoreSchemaFactory.java   |   69 +
 .../hive/metastore/TestMetaStoreSchemaInfo.java |   52 +
 102 files changed, 5778 insertions(+), 6638 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index 89169b0..d56f31f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,3 +30,4 @@ conf/hive-default.xml.template
 itests/hive-blobstore/src/test/resources/blobstore-conf.xml
 .DS_Store
 patchprocess
+standalone-metastore/src/gen/org/apache/hadoop/hive/metastore/annotation/package-info.java

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
index 4d7373a..84963af 100644
--- a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
+++ b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hive.metastore.IMetaStoreSchemaInfo;
 import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfoFactory;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper;
 import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.MetaStoreConnectionInfo;
 import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.NestedScriptParser;
@@ -1016,8 +1017,8 @@ public class HiveSchemaTool {
     private String[] argsWith(String password) throws IOException {
       return new String[]
         {
-          "-u", url == null ? 
HiveSchemaHelper.getValidConfVar(ConfVars.METASTORECONNECTURLKEY, hiveConf) : 
url,
-          "-d", driver == null ? 
HiveSchemaHelper.getValidConfVar(ConfVars.METASTORE_CONNECTION_DRIVER, 
hiveConf) : driver,
+          "-u", url == null ? 
HiveSchemaHelper.getValidConfVar(MetastoreConf.ConfVars.CONNECTURLKEY, 
hiveConf) : url,
+          "-d", driver == null ? 
HiveSchemaHelper.getValidConfVar(MetastoreConf.ConfVars.CONNECTION_DRIVER, 
hiveConf) : driver,
           "-n", userName,
           "-p", password,
           "-f", sqlScriptFile

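For downstream callers, the lookup above switches from HiveConf keys to their MetastoreConf equivalents. A minimal sketch of the new pattern using the MetastoreConf.getVar accessor (the accessor and enum constants follow this patch; treat them as assumptions on other revisions):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

    public class ConnInfoSketch {
      // Resolve the JDBC URL and driver the way HiveSchemaTool now does,
      // via MetastoreConf rather than HiveConf.ConfVars.
      public static String[] jdbcSettings(Configuration conf) {
        String url = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECTURLKEY);
        String driver = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECTION_DRIVER);
        return new String[] {url, driver};
      }
    }
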
http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/beeline/src/test/org/apache/hive/beeline/TestHiveSchemaTool.java
----------------------------------------------------------------------
diff --git a/beeline/src/test/org/apache/hive/beeline/TestHiveSchemaTool.java b/beeline/src/test/org/apache/hive/beeline/TestHiveSchemaTool.java
index d9b8f42..03c9f7c 100644
--- a/beeline/src/test/org/apache/hive/beeline/TestHiveSchemaTool.java
+++ b/beeline/src/test/org/apache/hive/beeline/TestHiveSchemaTool.java
@@ -18,6 +18,7 @@
 package org.apache.hive.beeline;
 
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper;
 import org.junit.After;
 import org.junit.Before;
@@ -55,10 +56,10 @@ public class TestHiveSchemaTool {
   public void setup() throws IOException {
     mockStatic(HiveSchemaHelper.class);
     when(HiveSchemaHelper
-        .getValidConfVar(eq(HiveConf.ConfVars.METASTORECONNECTURLKEY), same(hiveConf)))
+        .getValidConfVar(eq(MetastoreConf.ConfVars.CONNECTURLKEY), same(hiveConf)))
         .thenReturn("someURL");
     when(HiveSchemaHelper
-        .getValidConfVar(eq(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER), same(hiveConf)))
+        .getValidConfVar(eq(MetastoreConf.ConfVars.CONNECTION_DRIVER), same(hiveConf)))
         .thenReturn("someDriver");
 
     File file = new File(scriptFile);
@@ -71,9 +72,9 @@ public class TestHiveSchemaTool {
   @After
   public void globalAssert() throws IOException {
     verifyStatic();
-    HiveSchemaHelper.getValidConfVar(eq(HiveConf.ConfVars.METASTORECONNECTURLKEY), same(hiveConf));
+    HiveSchemaHelper.getValidConfVar(eq(MetastoreConf.ConfVars.CONNECTURLKEY), same(hiveConf));
     HiveSchemaHelper
-        .getValidConfVar(eq(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER), same(hiveConf));
+        .getValidConfVar(eq(MetastoreConf.ConfVars.CONNECTION_DRIVER), same(hiveConf));
 
     new File(scriptFile).delete();
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/common/src/java/org/apache/hadoop/hive/common/classification/RetrySemantics.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/classification/RetrySemantics.java b/common/src/java/org/apache/hadoop/hive/common/classification/RetrySemantics.java
deleted file mode 100644
index f1c3946..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/classification/RetrySemantics.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.common.classification;
-
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-/**
- * These annotations are meant to indicate how to handle retry logic.
- * Initially meant for Metastore API when made across a network, i.e. asynchronously where
- * the response may not reach the caller and thus it cannot know if the operation was actually
- * performed on the server.
- */
-@InterfaceStability.Evolving
-@InterfaceAudience.LimitedPrivate("Hive developer")
-public class RetrySemantics {
-  @Retention(RetentionPolicy.RUNTIME)
-  @Target(ElementType.METHOD)
-  public @interface Idempotent {
-    String[] value() default "";
-    int maxRetryCount() default Integer.MAX_VALUE;
-    int delayMs() default 100;
-  }
-  @Retention(RetentionPolicy.RUNTIME)
-  @Target(ElementType.METHOD)
-  public @interface ReadOnly {/*trivially retry-able*/}
-  @Retention(RetentionPolicy.RUNTIME)
-  @Target(ElementType.METHOD)
-  public @interface CannotRetry {}
-  @Retention(RetentionPolicy.RUNTIME)
-  @Target(ElementType.METHOD)
-  public @interface SafeToRetry {
-    /*may not be Idempotent but is safe to retry*/
-    String[] value() default "";
-  }
-}

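RetrySemantics moves to standalone-metastore unchanged (the new copy appears in the stat list above). For context, a hypothetical consumer-side sketch of how these annotations are applied; the interface and its methods are illustrative only, not part of this patch:

    import org.apache.hadoop.hive.common.classification.RetrySemantics;

    // Hypothetical remote-facing interface; a retrying client proxy can
    // inspect these annotations to decide whether a failed call may be
    // re-issued after the response was lost.
    public interface MetastoreOps {
      @RetrySemantics.ReadOnly            // trivially retry-able
      String getTableLocation(String db, String table) throws Exception;

      @RetrySemantics.Idempotent(maxRetryCount = 3, delayMs = 200)
      void dropPartitionIfExists(String db, String table, String part) throws Exception;

      @RetrySemantics.CannotRetry         // a lost response leaves server state unknown
      void commitTxn(long txnId) throws Exception;
    }
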
http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index 49520ef..1e73a4b 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -414,7 +414,7 @@ public class TestStreaming {
     AtomicBoolean stop = new AtomicBoolean(true);
     Worker t = new Worker();
     t.setThreadId((int) t.getId());
-    t.setHiveConf(hiveConf);
+    t.setConf(hiveConf);
     AtomicBoolean looped = new AtomicBoolean();
     t.init(stop, looped);
     t.run();

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
index fc85d86..69efc98 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.fail;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.UtilsForTest;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -49,7 +50,7 @@ public class TestFilterHooks {
   public static class DummyMetaStoreFilterHookImpl extends DefaultMetaStoreFilterHookImpl {
     public static boolean blockResults = false;
 
-    public DummyMetaStoreFilterHookImpl(HiveConf conf) {
+    public DummyMetaStoreFilterHookImpl(Configuration conf) {
       super(conf);
     }
 

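The filter hook constructor now takes a plain Configuration, so hooks no longer depend on HiveConf. A minimal custom-hook sketch under the new signature (the filterDatabases override and MetaException come from the MetaStoreFilterHook interface as moved in this patch; verify against your revision):

    import java.util.List;
    import java.util.stream.Collectors;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl;
    import org.apache.hadoop.hive.metastore.api.MetaException;

    public class DefaultOnlyFilterHook extends DefaultMetaStoreFilterHookImpl {
      public DefaultOnlyFilterHook(Configuration conf) {  // Configuration, not HiveConf
        super(conf);
      }

      @Override
      public List<String> filterDatabases(List<String> dbList) throws MetaException {
        // Illustration: expose only the default database to callers.
        return dbList.stream().filter("default"::equals).collect(Collectors.toList());
      }
    }
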
http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
index 44b244b..4bfc5be 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
@@ -200,7 +200,7 @@ public class TestCompactor {
     Initiator initiator = new Initiator();
     initiator.setThreadId((int)initiator.getId());
     conf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 0);
-    initiator.setHiveConf(conf);
+    initiator.setConf(conf);
     AtomicBoolean stop = new AtomicBoolean();
     stop.set(true);
     initiator.init(stop, new AtomicBoolean());
@@ -303,7 +303,7 @@ public class TestCompactor {
     initiator.setThreadId((int)initiator.getId());
     // Set to 1 so insert doesn't set it off but update does
     conf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 1);
-    initiator.setHiveConf(conf);
+    initiator.setConf(conf);
     AtomicBoolean stop = new AtomicBoolean();
     stop.set(true);
     initiator.init(stop, new AtomicBoolean());
@@ -455,7 +455,7 @@ public class TestCompactor {
     txnHandler.compact(rqst);
     Worker t = new Worker();
     t.setThreadId((int) t.getId());
-    t.setHiveConf(conf);
+    t.setConf(conf);
     AtomicBoolean stop = new AtomicBoolean();
     AtomicBoolean looped = new AtomicBoolean();
     stop.set(true);
@@ -511,7 +511,7 @@ public class TestCompactor {
     Initiator initiator = new Initiator();
     initiator.setThreadId((int)initiator.getId());
     conf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 0);
-    initiator.setHiveConf(conf);
+    initiator.setConf(conf);
     AtomicBoolean stop = new AtomicBoolean();
     stop.set(true);
     initiator.init(stop, new AtomicBoolean());
@@ -551,7 +551,7 @@ public class TestCompactor {
     initiator.setThreadId((int)initiator.getId());
     // Set to 1 so insert doesn't set it off but update does
     conf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 1);
-    initiator.setHiveConf(conf);
+    initiator.setConf(conf);
     AtomicBoolean stop = new AtomicBoolean();
     stop.set(true);
     initiator.init(stop, new AtomicBoolean());
@@ -593,7 +593,7 @@ public class TestCompactor {
     initiator.setThreadId((int)initiator.getId());
     // Set to 2 so insert and update don't set it off but delete does
     conf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 2);
-    initiator.setHiveConf(conf);
+    initiator.setConf(conf);
     AtomicBoolean stop = new AtomicBoolean();
     stop.set(true);
     initiator.init(stop, new AtomicBoolean());
@@ -643,7 +643,7 @@ public class TestCompactor {
       txnHandler.compact(new CompactionRequest(dbName, tblName, CompactionType.MINOR));
       Worker t = new Worker();
       t.setThreadId((int) t.getId());
-      t.setHiveConf(conf);
+      t.setConf(conf);
       AtomicBoolean stop = new AtomicBoolean(true);
       AtomicBoolean looped = new AtomicBoolean();
       t.init(stop, looped);
@@ -705,7 +705,7 @@ public class TestCompactor {
       txnHandler.compact(new CompactionRequest(dbName, tblName, CompactionType.MAJOR));
       Worker t = new Worker();
       t.setThreadId((int) t.getId());
-      t.setHiveConf(conf);
+      t.setConf(conf);
       AtomicBoolean stop = new AtomicBoolean(true);
       AtomicBoolean looped = new AtomicBoolean();
       t.init(stop, looped);
@@ -762,7 +762,7 @@ public class TestCompactor {
       txnHandler.compact(new CompactionRequest(dbName, tblName, CompactionType.MINOR));
       Worker t = new Worker();
       t.setThreadId((int) t.getId());
-      t.setHiveConf(conf);
+      t.setConf(conf);
       AtomicBoolean stop = new AtomicBoolean(true);
       AtomicBoolean looped = new AtomicBoolean();
       t.init(stop, looped);
@@ -828,7 +828,7 @@ public class TestCompactor {
       txnHandler.compact(new CompactionRequest(dbName, tblName, CompactionType.MAJOR));
       Worker t = new Worker();
       t.setThreadId((int) t.getId());
-      t.setHiveConf(conf);
+      t.setConf(conf);
       AtomicBoolean stop = new AtomicBoolean(true);
       AtomicBoolean looped = new AtomicBoolean();
       t.init(stop, looped);
@@ -886,7 +886,7 @@ public class TestCompactor {
       txnHandler.compact(new CompactionRequest(dbName, tblName, CompactionType.MAJOR));
       Worker t = new Worker();
       t.setThreadId((int) t.getId());
-      t.setHiveConf(conf);
+      t.setConf(conf);
       AtomicBoolean stop = new AtomicBoolean(true);
       AtomicBoolean looped = new AtomicBoolean();
       t.init(stop, looped);
@@ -939,7 +939,7 @@ public class TestCompactor {
     txnHandler.compact(new CompactionRequest(dbName, tblName, CompactionType.MINOR));
     Worker t = new Worker();
     t.setThreadId((int) t.getId());
-    t.setHiveConf(conf);
+    t.setConf(conf);
     AtomicBoolean stop = new AtomicBoolean(true);
     AtomicBoolean looped = new AtomicBoolean();
     t.init(stop, looped);
@@ -1016,7 +1016,7 @@ public class TestCompactor {
     txnHandler.compact(new CompactionRequest(dbName, tblName, CompactionType.MINOR));
     Worker t = new Worker();
     t.setThreadId((int) t.getId());
-    t.setHiveConf(conf);
+    t.setConf(conf);
     AtomicBoolean stop = new AtomicBoolean(true);
     AtomicBoolean looped = new AtomicBoolean();
     t.init(stop, looped);
@@ -1095,7 +1095,7 @@ public class TestCompactor {
       txnHandler.compact(new CompactionRequest(dbName, tblName, CompactionType.MINOR));
       Worker t = new Worker();
       t.setThreadId((int) t.getId());
-      t.setHiveConf(conf);
+      t.setConf(conf);
       AtomicBoolean stop = new AtomicBoolean(true);
       AtomicBoolean looped = new AtomicBoolean();
       t.init(stop, looped);
@@ -1203,7 +1203,7 @@ public class TestCompactor {
     AtomicBoolean stop = new AtomicBoolean(true);
     Worker t = new Worker();
     t.setThreadId((int) t.getId());
-    t.setHiveConf(conf);
+    t.setConf(conf);
     AtomicBoolean looped = new AtomicBoolean();
     t.init(stop, looped);
     t.run();
@@ -1213,7 +1213,7 @@ public class TestCompactor {
     stop = new AtomicBoolean(true);
     t = new Worker();
     t.setThreadId((int) t.getId());
-    t.setHiveConf(conf);
+    t.setConf(conf);
     looped = new AtomicBoolean();
     t.init(stop, looped);
     t.run();
@@ -1265,7 +1265,7 @@ public class TestCompactor {
     stop = new AtomicBoolean(true);
     t = new Worker();
     t.setThreadId((int) t.getId());
-    t.setHiveConf(conf);
+    t.setConf(conf);
     looped = new AtomicBoolean();
     t.init(stop, looped);
     t.run();
@@ -1416,7 +1416,7 @@ public class TestCompactor {
     AtomicBoolean stop = new AtomicBoolean(true);
     Initiator t = new Initiator();
     t.setThreadId((int) t.getId());
-    t.setHiveConf(hiveConf);
+    t.setConf(hiveConf);
     AtomicBoolean looped = new AtomicBoolean();
     t.init(stop, looped);
     t.run();
@@ -1426,7 +1426,7 @@ public class TestCompactor {
     AtomicBoolean stop = new AtomicBoolean(true);
     Worker t = new Worker();
     t.setThreadId((int) t.getId());
-    t.setHiveConf(hiveConf);
+    t.setConf(hiveConf);
     AtomicBoolean looped = new AtomicBoolean();
     t.init(stop, looped);
     t.run();
@@ -1436,7 +1436,7 @@ public class TestCompactor {
     AtomicBoolean stop = new AtomicBoolean(true);
     Cleaner t = new Cleaner();
     t.setThreadId((int) t.getId());
-    t.setHiveConf(hiveConf);
+    t.setConf(hiveConf);
     AtomicBoolean looped = new AtomicBoolean();
     t.init(stop, looped);
     t.run();

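All of the test edits in this file are the same mechanical rename: the compactor threads (Worker, Initiator, Cleaner) now receive a plain Configuration via setConf, following the MetaStoreThread move in this patch, instead of a HiveConf via setHiveConf. The shared startup sequence, condensed from the tests above (single-pass mode; HiveConf stands in for any Configuration):

    import java.util.concurrent.atomic.AtomicBoolean;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.txn.compactor.Worker;

    public class CompactorRunSketch {
      public static void runWorkerOnce(HiveConf conf) throws Exception {
        Worker t = new Worker();
        t.setThreadId((int) t.getId());
        t.setConf(conf);                    // was t.setHiveConf(conf) before this patch
        AtomicBoolean stop = new AtomicBoolean(true);   // true => run one pass, then exit
        AtomicBoolean looped = new AtomicBoolean();
        t.init(stop, looped);
        t.run();
      }
    }
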
http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/pom.xml
----------------------------------------------------------------------
diff --git a/metastore/pom.xml b/metastore/pom.xml
index 62bddd1..8c35649 100644
--- a/metastore/pom.xml
+++ b/metastore/pom.xml
@@ -61,11 +61,6 @@
       <version>${guava.version}</version>
     </dependency>
     <dependency>
-      <groupId>com.google.protobuf</groupId>
-      <artifactId>protobuf-java</artifactId>
-      <version>${protobuf.version}</version>
-    </dependency>
-    <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-client</artifactId>
       <version>${hbase.version}</version>
@@ -265,39 +260,6 @@
   </dependencies>
 
   <profiles>
-    <profile>
-      <id>protobuf</id>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-antrun-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>generate-protobuf-sources</id>
-                <phase>generate-sources</phase>
-                <configuration>
-                  <target>
-                    <property name="protobuf.src.dir"  
location="${basedir}/src/protobuf"/>
-                    <property name="protobuf.build.dir"  
location="${basedir}/src/gen/protobuf/gen-java"/>
-                    <echo>Building HBase Metastore Protobuf</echo>
-                    <mkdir dir="${protobuf.build.dir}"/>
-                    <exec executable="protoc" failonerror="true">
-                      <arg value="--java_out=${protobuf.build.dir}"/>
-                      <arg 
value="-I=${protobuf.src.dir}/org/apache/hadoop/hive/metastore"/>
-                      <arg 
value="${protobuf.src.dir}/org/apache/hadoop/hive/metastore/metastore.proto"/>
-                    </exec>
-                  </target>
-                </configuration>
-                <goals>
-                  <goal>run</goal>
-                </goals>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
   </profiles>
 
   <build>
@@ -343,7 +305,6 @@
             <configuration>
               <sources>
                 <source>src/model</source>
-                <source>src/gen/protobuf/gen-java</source>
               </sources>
             </configuration>
           </execution>

http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/Metastore.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/Metastore.java b/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/Metastore.java
deleted file mode 100644
index ca928b9..0000000
--- a/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/Metastore.java
+++ /dev/null
@@ -1,1331 +0,0 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: metastore.proto
-
-package org.apache.hadoop.hive.metastore;
-
-public final class Metastore {
-  private Metastore() {}
-  public static void registerAllExtensions(
-      com.google.protobuf.ExtensionRegistry registry) {
-  }
-  public interface SplitInfoOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-
-    // required int64 offset = 1;
-    /**
-     * <code>required int64 offset = 1;</code>
-     */
-    boolean hasOffset();
-    /**
-     * <code>required int64 offset = 1;</code>
-     */
-    long getOffset();
-
-    // required int64 length = 2;
-    /**
-     * <code>required int64 length = 2;</code>
-     */
-    boolean hasLength();
-    /**
-     * <code>required int64 length = 2;</code>
-     */
-    long getLength();
-
-    // required int32 index = 3;
-    /**
-     * <code>required int32 index = 3;</code>
-     */
-    boolean hasIndex();
-    /**
-     * <code>required int32 index = 3;</code>
-     */
-    int getIndex();
-  }
-  /**
-   * Protobuf type {@code org.apache.hadoop.hive.metastore.SplitInfo}
-   */
-  public static final class SplitInfo extends
-      com.google.protobuf.GeneratedMessage
-      implements SplitInfoOrBuilder {
-    // Use SplitInfo.newBuilder() to construct.
-    private SplitInfo(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
-      super(builder);
-      this.unknownFields = builder.getUnknownFields();
-    }
-    private SplitInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-    private static final SplitInfo defaultInstance;
-    public static SplitInfo getDefaultInstance() {
-      return defaultInstance;
-    }
-
-    public SplitInfo getDefaultInstanceForType() {
-      return defaultInstance;
-    }
-
-    private final com.google.protobuf.UnknownFieldSet unknownFields;
-    @java.lang.Override
-    public final com.google.protobuf.UnknownFieldSet
-        getUnknownFields() {
-      return this.unknownFields;
-    }
-    private SplitInfo(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      initFields();
-      int mutable_bitField0_ = 0;
-      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-            case 8: {
-              bitField0_ |= 0x00000001;
-              offset_ = input.readInt64();
-              break;
-            }
-            case 16: {
-              bitField0_ |= 0x00000002;
-              length_ = input.readInt64();
-              break;
-            }
-            case 24: {
-              bitField0_ |= 0x00000004;
-              index_ = input.readInt32();
-              break;
-            }
-          }
-        }
-      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new com.google.protobuf.InvalidProtocolBufferException(
-            e.getMessage()).setUnfinishedMessage(this);
-      } finally {
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
-    }
-    public static final com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfo_descriptor;
-    }
-
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfo_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hive.metastore.Metastore.SplitInfo.class, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder.class);
-    }
-
-    public static com.google.protobuf.Parser<SplitInfo> PARSER =
-        new com.google.protobuf.AbstractParser<SplitInfo>() {
-      public SplitInfo parsePartialFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new SplitInfo(input, extensionRegistry);
-      }
-    };
-
-    @java.lang.Override
-    public com.google.protobuf.Parser<SplitInfo> getParserForType() {
-      return PARSER;
-    }
-
-    private int bitField0_;
-    // required int64 offset = 1;
-    public static final int OFFSET_FIELD_NUMBER = 1;
-    private long offset_;
-    /**
-     * <code>required int64 offset = 1;</code>
-     */
-    public boolean hasOffset() {
-      return ((bitField0_ & 0x00000001) == 0x00000001);
-    }
-    /**
-     * <code>required int64 offset = 1;</code>
-     */
-    public long getOffset() {
-      return offset_;
-    }
-
-    // required int64 length = 2;
-    public static final int LENGTH_FIELD_NUMBER = 2;
-    private long length_;
-    /**
-     * <code>required int64 length = 2;</code>
-     */
-    public boolean hasLength() {
-      return ((bitField0_ & 0x00000002) == 0x00000002);
-    }
-    /**
-     * <code>required int64 length = 2;</code>
-     */
-    public long getLength() {
-      return length_;
-    }
-
-    // required int32 index = 3;
-    public static final int INDEX_FIELD_NUMBER = 3;
-    private int index_;
-    /**
-     * <code>required int32 index = 3;</code>
-     */
-    public boolean hasIndex() {
-      return ((bitField0_ & 0x00000004) == 0x00000004);
-    }
-    /**
-     * <code>required int32 index = 3;</code>
-     */
-    public int getIndex() {
-      return index_;
-    }
-
-    private void initFields() {
-      offset_ = 0L;
-      length_ = 0L;
-      index_ = 0;
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
-
-      if (!hasOffset()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      if (!hasLength()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      if (!hasIndex()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      memoizedIsInitialized = 1;
-      return true;
-    }
-
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      getSerializedSize();
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeInt64(1, offset_);
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeInt64(2, length_);
-      }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        output.writeInt32(3, index_);
-      }
-      getUnknownFields().writeTo(output);
-    }
-
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
-
-      size = 0;
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeInt64Size(1, offset_);
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeInt64Size(2, length_);
-      }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeInt32Size(3, index_);
-      }
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
-    }
-
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
-
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(byte[] data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
-        byte[] data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseDelimitedFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
-        com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfo parseFrom(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-
-    public static Builder newBuilder() { return Builder.create(); }
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hive.metastore.Metastore.SplitInfo prototype) {
-      return newBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() { return newBuilder(this); }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     * Protobuf type {@code org.apache.hadoop.hive.metastore.SplitInfo}
-     */
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfo_descriptor;
-      }
-
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfo_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hive.metastore.Metastore.SplitInfo.class, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder.class);
-      }
-
-      // Construct using org.apache.hadoop.hive.metastore.Metastore.SplitInfo.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-
-      private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
-
-      public Builder clear() {
-        super.clear();
-        offset_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000001);
-        length_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000002);
-        index_ = 0;
-        bitField0_ = (bitField0_ & ~0x00000004);
-        return this;
-      }
-
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
-
-      public com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfo_descriptor;
-      }
-
-      public org.apache.hadoop.hive.metastore.Metastore.SplitInfo getDefaultInstanceForType() {
-        return org.apache.hadoop.hive.metastore.Metastore.SplitInfo.getDefaultInstance();
-      }
-
-      public org.apache.hadoop.hive.metastore.Metastore.SplitInfo build() {
-        org.apache.hadoop.hive.metastore.Metastore.SplitInfo result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-
-      public org.apache.hadoop.hive.metastore.Metastore.SplitInfo buildPartial() {
-        org.apache.hadoop.hive.metastore.Metastore.SplitInfo result = new org.apache.hadoop.hive.metastore.Metastore.SplitInfo(this);
-        int from_bitField0_ = bitField0_;
-        int to_bitField0_ = 0;
-        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
-          to_bitField0_ |= 0x00000001;
-        }
-        result.offset_ = offset_;
-        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
-          to_bitField0_ |= 0x00000002;
-        }
-        result.length_ = length_;
-        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
-          to_bitField0_ |= 0x00000004;
-        }
-        result.index_ = index_;
-        result.bitField0_ = to_bitField0_;
-        onBuilt();
-        return result;
-      }
-
-      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hive.metastore.Metastore.SplitInfo) {
-          return mergeFrom((org.apache.hadoop.hive.metastore.Metastore.SplitInfo)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-
-      public Builder mergeFrom(org.apache.hadoop.hive.metastore.Metastore.SplitInfo other) {
-        if (other == org.apache.hadoop.hive.metastore.Metastore.SplitInfo.getDefaultInstance()) return this;
-        if (other.hasOffset()) {
-          setOffset(other.getOffset());
-        }
-        if (other.hasLength()) {
-          setLength(other.getLength());
-        }
-        if (other.hasIndex()) {
-          setIndex(other.getIndex());
-        }
-        this.mergeUnknownFields(other.getUnknownFields());
-        return this;
-      }
-
-      public final boolean isInitialized() {
-        if (!hasOffset()) {
-          
-          return false;
-        }
-        if (!hasLength()) {
-          
-          return false;
-        }
-        if (!hasIndex()) {
-          
-          return false;
-        }
-        return true;
-      }
-
-      public Builder mergeFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.hadoop.hive.metastore.Metastore.SplitInfo parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hive.metastore.Metastore.SplitInfo) e.getUnfinishedMessage();
-          throw e;
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-      private int bitField0_;
-
-      // required int64 offset = 1;
-      private long offset_ ;
-      /**
-       * <code>required int64 offset = 1;</code>
-       */
-      public boolean hasOffset() {
-        return ((bitField0_ & 0x00000001) == 0x00000001);
-      }
-      /**
-       * <code>required int64 offset = 1;</code>
-       */
-      public long getOffset() {
-        return offset_;
-      }
-      /**
-       * <code>required int64 offset = 1;</code>
-       */
-      public Builder setOffset(long value) {
-        bitField0_ |= 0x00000001;
-        offset_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>required int64 offset = 1;</code>
-       */
-      public Builder clearOffset() {
-        bitField0_ = (bitField0_ & ~0x00000001);
-        offset_ = 0L;
-        onChanged();
-        return this;
-      }
-
-      // required int64 length = 2;
-      private long length_ ;
-      /**
-       * <code>required int64 length = 2;</code>
-       */
-      public boolean hasLength() {
-        return ((bitField0_ & 0x00000002) == 0x00000002);
-      }
-      /**
-       * <code>required int64 length = 2;</code>
-       */
-      public long getLength() {
-        return length_;
-      }
-      /**
-       * <code>required int64 length = 2;</code>
-       */
-      public Builder setLength(long value) {
-        bitField0_ |= 0x00000002;
-        length_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>required int64 length = 2;</code>
-       */
-      public Builder clearLength() {
-        bitField0_ = (bitField0_ & ~0x00000002);
-        length_ = 0L;
-        onChanged();
-        return this;
-      }
-
-      // required int32 index = 3;
-      private int index_ ;
-      /**
-       * <code>required int32 index = 3;</code>
-       */
-      public boolean hasIndex() {
-        return ((bitField0_ & 0x00000004) == 0x00000004);
-      }
-      /**
-       * <code>required int32 index = 3;</code>
-       */
-      public int getIndex() {
-        return index_;
-      }
-      /**
-       * <code>required int32 index = 3;</code>
-       */
-      public Builder setIndex(int value) {
-        bitField0_ |= 0x00000004;
-        index_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>required int32 index = 3;</code>
-       */
-      public Builder clearIndex() {
-        bitField0_ = (bitField0_ & ~0x00000004);
-        index_ = 0;
-        onChanged();
-        return this;
-      }
-
-      // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.SplitInfo)
-    }
-
-    static {
-      defaultInstance = new SplitInfo(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.SplitInfo)
-  }
-
-  public interface SplitInfosOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-
-    // repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
-    /**
-     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-     */
-    java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo> 
-        getInfosList();
-    /**
-     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-     */
-    org.apache.hadoop.hive.metastore.Metastore.SplitInfo getInfos(int index);
-    /**
-     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-     */
-    int getInfosCount();
-    /**
-     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-     */
-    java.util.List<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder> 
-        getInfosOrBuilderList();
-    /**
-     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-     */
-    org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder getInfosOrBuilder(
-        int index);
-  }
-  /**
-   * Protobuf type {@code org.apache.hadoop.hive.metastore.SplitInfos}
-   */
-  public static final class SplitInfos extends
-      com.google.protobuf.GeneratedMessage
-      implements SplitInfosOrBuilder {
-    // Use SplitInfos.newBuilder() to construct.
-    private SplitInfos(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
-      super(builder);
-      this.unknownFields = builder.getUnknownFields();
-    }
-    private SplitInfos(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-    private static final SplitInfos defaultInstance;
-    public static SplitInfos getDefaultInstance() {
-      return defaultInstance;
-    }
-
-    public SplitInfos getDefaultInstanceForType() {
-      return defaultInstance;
-    }
-
-    private final com.google.protobuf.UnknownFieldSet unknownFields;
-    @java.lang.Override
-    public final com.google.protobuf.UnknownFieldSet
-        getUnknownFields() {
-      return this.unknownFields;
-    }
-    private SplitInfos(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      initFields();
-      int mutable_bitField0_ = 0;
-      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-            case 10: {
-              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
-                infos_ = new java.util.ArrayList<org.apache.hadoop.hive.metastore.Metastore.SplitInfo>();
-                mutable_bitField0_ |= 0x00000001;
-              }
-              infos_.add(input.readMessage(org.apache.hadoop.hive.metastore.Metastore.SplitInfo.PARSER, extensionRegistry));
-              break;
-            }
-          }
-        }
-      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new com.google.protobuf.InvalidProtocolBufferException(
-            e.getMessage()).setUnfinishedMessage(this);
-      } finally {
-        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
-          infos_ = java.util.Collections.unmodifiableList(infos_);
-        }
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
-    }
-    public static final com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfos_descriptor;
-    }
-
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfos_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hive.metastore.Metastore.SplitInfos.class, org.apache.hadoop.hive.metastore.Metastore.SplitInfos.Builder.class);
-    }
-
-    public static com.google.protobuf.Parser<SplitInfos> PARSER =
-        new com.google.protobuf.AbstractParser<SplitInfos>() {
-      public SplitInfos parsePartialFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new SplitInfos(input, extensionRegistry);
-      }
-    };
-
-    @java.lang.Override
-    public com.google.protobuf.Parser<SplitInfos> getParserForType() {
-      return PARSER;
-    }
-
-    // repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
-    public static final int INFOS_FIELD_NUMBER = 1;
-    private java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo> infos_;
-    /**
-     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-     */
-    public java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo> getInfosList() {
-      return infos_;
-    }
-    /**
-     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-     */
-    public java.util.List<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder> 
-        getInfosOrBuilderList() {
-      return infos_;
-    }
-    /**
-     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-     */
-    public int getInfosCount() {
-      return infos_.size();
-    }
-    /**
-     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-     */
-    public org.apache.hadoop.hive.metastore.Metastore.SplitInfo getInfos(int index) {
-      return infos_.get(index);
-    }
-    /**
-     * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-     */
-    public org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder getInfosOrBuilder(
-        int index) {
-      return infos_.get(index);
-    }
-
-    private void initFields() {
-      infos_ = java.util.Collections.emptyList();
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
-
-      for (int i = 0; i < getInfosCount(); i++) {
-        if (!getInfos(i).isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
-      }
-      memoizedIsInitialized = 1;
-      return true;
-    }
-
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      getSerializedSize();
-      for (int i = 0; i < infos_.size(); i++) {
-        output.writeMessage(1, infos_.get(i));
-      }
-      getUnknownFields().writeTo(output);
-    }
-
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
-
-      size = 0;
-      for (int i = 0; i < infos_.size(); i++) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, infos_.get(i));
-      }
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
-    }
-
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
-
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(byte[] data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
-        byte[] data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseDelimitedFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
-        com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hive.metastore.Metastore.SplitInfos parseFrom(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-
-    public static Builder newBuilder() { return Builder.create(); }
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hive.metastore.Metastore.SplitInfos prototype) {
-      return newBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() { return newBuilder(this); }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     * Protobuf type {@code org.apache.hadoop.hive.metastore.SplitInfos}
-     */
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hive.metastore.Metastore.SplitInfosOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfos_descriptor;
-      }
-
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfos_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hive.metastore.Metastore.SplitInfos.class, org.apache.hadoop.hive.metastore.Metastore.SplitInfos.Builder.class);
-      }
-
-      // Construct using org.apache.hadoop.hive.metastore.Metastore.SplitInfos.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-
-      private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-          getInfosFieldBuilder();
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
-
-      public Builder clear() {
-        super.clear();
-        if (infosBuilder_ == null) {
-          infos_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000001);
-        } else {
-          infosBuilder_.clear();
-        }
-        return this;
-      }
-
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
-
-      public com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hive.metastore.Metastore.internal_static_org_apache_hadoop_hive_metastore_SplitInfos_descriptor;
-      }
-
-      public org.apache.hadoop.hive.metastore.Metastore.SplitInfos getDefaultInstanceForType() {
-        return org.apache.hadoop.hive.metastore.Metastore.SplitInfos.getDefaultInstance();
-      }
-
-      public org.apache.hadoop.hive.metastore.Metastore.SplitInfos build() {
-        org.apache.hadoop.hive.metastore.Metastore.SplitInfos result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-
-      public org.apache.hadoop.hive.metastore.Metastore.SplitInfos buildPartial() {
-        org.apache.hadoop.hive.metastore.Metastore.SplitInfos result = new org.apache.hadoop.hive.metastore.Metastore.SplitInfos(this);
-        int from_bitField0_ = bitField0_;
-        if (infosBuilder_ == null) {
-          if (((bitField0_ & 0x00000001) == 0x00000001)) {
-            infos_ = java.util.Collections.unmodifiableList(infos_);
-            bitField0_ = (bitField0_ & ~0x00000001);
-          }
-          result.infos_ = infos_;
-        } else {
-          result.infos_ = infosBuilder_.build();
-        }
-        onBuilt();
-        return result;
-      }
-
-      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hive.metastore.Metastore.SplitInfos) {
-          return mergeFrom((org.apache.hadoop.hive.metastore.Metastore.SplitInfos)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-
-      public Builder mergeFrom(org.apache.hadoop.hive.metastore.Metastore.SplitInfos other) {
-        if (other == org.apache.hadoop.hive.metastore.Metastore.SplitInfos.getDefaultInstance()) return this;
-        if (infosBuilder_ == null) {
-          if (!other.infos_.isEmpty()) {
-            if (infos_.isEmpty()) {
-              infos_ = other.infos_;
-              bitField0_ = (bitField0_ & ~0x00000001);
-            } else {
-              ensureInfosIsMutable();
-              infos_.addAll(other.infos_);
-            }
-            onChanged();
-          }
-        } else {
-          if (!other.infos_.isEmpty()) {
-            if (infosBuilder_.isEmpty()) {
-              infosBuilder_.dispose();
-              infosBuilder_ = null;
-              infos_ = other.infos_;
-              bitField0_ = (bitField0_ & ~0x00000001);
-              infosBuilder_ = 
-                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
-                   getInfosFieldBuilder() : null;
-            } else {
-              infosBuilder_.addAllMessages(other.infos_);
-            }
-          }
-        }
-        this.mergeUnknownFields(other.getUnknownFields());
-        return this;
-      }
-
-      public final boolean isInitialized() {
-        for (int i = 0; i < getInfosCount(); i++) {
-          if (!getInfos(i).isInitialized()) {
-            
-            return false;
-          }
-        }
-        return true;
-      }
-
-      public Builder mergeFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.hadoop.hive.metastore.Metastore.SplitInfos parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hive.metastore.Metastore.SplitInfos) e.getUnfinishedMessage();
-          throw e;
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-      private int bitField0_;
-
-      // repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;
-      private java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo> infos_ =
-        java.util.Collections.emptyList();
-      private void ensureInfosIsMutable() {
-        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
-          infos_ = new java.util.ArrayList<org.apache.hadoop.hive.metastore.Metastore.SplitInfo>(infos_);
-          bitField0_ |= 0x00000001;
-         }
-      }
-
-      private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.hadoop.hive.metastore.Metastore.SplitInfo, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder, org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder> infosBuilder_;
-
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo> getInfosList() {
-        if (infosBuilder_ == null) {
-          return java.util.Collections.unmodifiableList(infos_);
-        } else {
-          return infosBuilder_.getMessageList();
-        }
-      }
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public int getInfosCount() {
-        if (infosBuilder_ == null) {
-          return infos_.size();
-        } else {
-          return infosBuilder_.getCount();
-        }
-      }
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public org.apache.hadoop.hive.metastore.Metastore.SplitInfo getInfos(int index) {
-        if (infosBuilder_ == null) {
-          return infos_.get(index);
-        } else {
-          return infosBuilder_.getMessage(index);
-        }
-      }
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public Builder setInfos(
-          int index, org.apache.hadoop.hive.metastore.Metastore.SplitInfo value) {
-        if (infosBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureInfosIsMutable();
-          infos_.set(index, value);
-          onChanged();
-        } else {
-          infosBuilder_.setMessage(index, value);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public Builder setInfos(
-          int index, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder builderForValue) {
-        if (infosBuilder_ == null) {
-          ensureInfosIsMutable();
-          infos_.set(index, builderForValue.build());
-          onChanged();
-        } else {
-          infosBuilder_.setMessage(index, builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public Builder addInfos(org.apache.hadoop.hive.metastore.Metastore.SplitInfo value) {
-        if (infosBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureInfosIsMutable();
-          infos_.add(value);
-          onChanged();
-        } else {
-          infosBuilder_.addMessage(value);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public Builder addInfos(
-          int index, org.apache.hadoop.hive.metastore.Metastore.SplitInfo value) {
-        if (infosBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureInfosIsMutable();
-          infos_.add(index, value);
-          onChanged();
-        } else {
-          infosBuilder_.addMessage(index, value);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public Builder addInfos(
-          org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder builderForValue) {
-        if (infosBuilder_ == null) {
-          ensureInfosIsMutable();
-          infos_.add(builderForValue.build());
-          onChanged();
-        } else {
-          infosBuilder_.addMessage(builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public Builder addInfos(
-          int index, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder builderForValue) {
-        if (infosBuilder_ == null) {
-          ensureInfosIsMutable();
-          infos_.add(index, builderForValue.build());
-          onChanged();
-        } else {
-          infosBuilder_.addMessage(index, builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public Builder addAllInfos(
-          java.lang.Iterable<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfo> values) {
-        if (infosBuilder_ == null) {
-          ensureInfosIsMutable();
-          super.addAll(values, infos_);
-          onChanged();
-        } else {
-          infosBuilder_.addAllMessages(values);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public Builder clearInfos() {
-        if (infosBuilder_ == null) {
-          infos_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000001);
-          onChanged();
-        } else {
-          infosBuilder_.clear();
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public Builder removeInfos(int index) {
-        if (infosBuilder_ == null) {
-          ensureInfosIsMutable();
-          infos_.remove(index);
-          onChanged();
-        } else {
-          infosBuilder_.remove(index);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder getInfosBuilder(
-          int index) {
-        return getInfosFieldBuilder().getBuilder(index);
-      }
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder getInfosOrBuilder(
-          int index) {
-        if (infosBuilder_ == null) {
-          return infos_.get(index);  } else {
-          return infosBuilder_.getMessageOrBuilder(index);
-        }
-      }
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public java.util.List<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder>
-           getInfosOrBuilderList() {
-        if (infosBuilder_ != null) {
-          return infosBuilder_.getMessageOrBuilderList();
-        } else {
-          return java.util.Collections.unmodifiableList(infos_);
-        }
-      }
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder addInfosBuilder() {
-        return getInfosFieldBuilder().addBuilder(
-            org.apache.hadoop.hive.metastore.Metastore.SplitInfo.getDefaultInstance());
-      }
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder addInfosBuilder(
-          int index) {
-        return getInfosFieldBuilder().addBuilder(
-            index, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.getDefaultInstance());
-      }
-      /**
-       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
-       */
-      public java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder>
-           getInfosBuilderList() {
-        return getInfosFieldBuilder().getBuilderList();
-      }
-      private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.hadoop.hive.metastore.Metastore.SplitInfo, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder, org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder>
-          getInfosFieldBuilder() {
-        if (infosBuilder_ == null) {
-          infosBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
-              org.apache.hadoop.hive.metastore.Metastore.SplitInfo, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder, org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder>(
-                  infos_,
-                  ((bitField0_ & 0x00000001) == 0x00000001),
-                  getParentForChildren(),
-                  isClean());
-          infos_ = null;
-        }
-        return infosBuilder_;
-      }
-
-      // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.SplitInfos)
-    }
-
-    static {
-      defaultInstance = new SplitInfos(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.SplitInfos)
-  }
-
-  private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_org_apache_hadoop_hive_metastore_SplitInfo_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_org_apache_hadoop_hive_metastore_SplitInfo_fieldAccessorTable;
-  private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_org_apache_hadoop_hive_metastore_SplitInfos_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_org_apache_hadoop_hive_metastore_SplitInfos_fieldAccessorTable;
-
-  public static com.google.protobuf.Descriptors.FileDescriptor
-      getDescriptor() {
-    return descriptor;
-  }
-  private static com.google.protobuf.Descriptors.FileDescriptor
-      descriptor;
-  static {
-    java.lang.String[] descriptorData = {
-      "\n\017metastore.proto\022 org.apache.hadoop.hiv" +
-      "e.metastore\":\n\tSplitInfo\022\016\n\006offset\030\001 \002(\003" +
-      "\022\016\n\006length\030\002 \002(\003\022\r\n\005index\030\003 
\002(\005\"H\n\nSplit" +
-      "Infos\022:\n\005infos\030\001 \003(\0132+.org.apache.hadoop" +
-      ".hive.metastore.SplitInfo"
-    };
-    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
-      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
-        public com.google.protobuf.ExtensionRegistry assignDescriptors(
-            com.google.protobuf.Descriptors.FileDescriptor root) {
-          descriptor = root;
-          internal_static_org_apache_hadoop_hive_metastore_SplitInfo_descriptor =
-            getDescriptor().getMessageTypes().get(0);
-          internal_static_org_apache_hadoop_hive_metastore_SplitInfo_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_org_apache_hadoop_hive_metastore_SplitInfo_descriptor,
-              new java.lang.String[] { "Offset", "Length", "Index", });
-          internal_static_org_apache_hadoop_hive_metastore_SplitInfos_descriptor =
-            getDescriptor().getMessageTypes().get(1);
-          internal_static_org_apache_hadoop_hive_metastore_SplitInfos_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_org_apache_hadoop_hive_metastore_SplitInfos_descriptor,
-              new java.lang.String[] { "Infos", });
-          return null;
-        }
-      };
-    com.google.protobuf.Descriptors.FileDescriptor
-      .internalBuildGeneratedFileFrom(descriptorData,
-        new com.google.protobuf.Descriptors.FileDescriptor[] {
-        }, assigner);
-  }
-
-  // @@protoc_insertion_point(outer_class_scope)
-}
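
For readers tracing this removal, a minimal sketch of how the generated SplitInfos API above is typically exercised. The setOffset/setLength/setIndex setters are assumed from standard protobuf 2.x codegen for the offset/length/index fields declared in the descriptor string; the class and variable names below are illustrative only, not part of this commit.

    // Sketch: round-tripping a SplitInfos message through the generated API.
    import org.apache.hadoop.hive.metastore.Metastore.SplitInfo;
    import org.apache.hadoop.hive.metastore.Metastore.SplitInfos;

    public class SplitInfosRoundTrip {
      public static void main(String[] args) throws Exception {
        // Build two entries via the repeated-field builder API (addInfos).
        SplitInfos infos = SplitInfos.newBuilder()
            .addInfos(SplitInfo.newBuilder().setOffset(0L).setLength(1024L).setIndex(0))
            .addInfos(SplitInfo.newBuilder().setOffset(1024L).setLength(2048L).setIndex(1))
            .build();

        // Serialize, then parse back with the generated parseFrom(byte[]) overload.
        SplitInfos parsed = SplitInfos.parseFrom(infos.toByteArray());
        System.out.println(parsed.getInfosCount());  // prints 2
      }
    }

Note that build() enforces the required offset/length/index fields through isInitialized(), which is why the sketch sets all three before building.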
