http://git-wip-us.apache.org/repos/asf/hbase/blob/b4a729ed/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ZooKeeperProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ZooKeeperProtos.java
index 9e2bd9c..d7b5221 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ZooKeeperProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ZooKeeperProtos.java
@@ -6,118 +6,115 @@ package org.apache.hadoop.hbase.shaded.protobuf.generated;
 public final class ZooKeeperProtos {
   private ZooKeeperProtos() {}
   public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistryLite registry) {
+  }
+
+  public static void registerAllExtensions(
       com.google.protobuf.ExtensionRegistry registry) {
+    registerAllExtensions(
+        (com.google.protobuf.ExtensionRegistryLite) registry);
   }
-  public interface MetaRegionServerOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
+  public interface MetaRegionServerOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.MetaRegionServer)
+      com.google.protobuf.MessageOrBuilder {
 
-    // required .hbase.pb.ServerName server = 1;
     /**
-     * <code>required .hbase.pb.ServerName server = 1;</code>
-     *
      * <pre>
      * The ServerName hosting the meta region currently, or destination server,
      * if meta region is in transition.
      * </pre>
+     *
+     * <code>required .hbase.pb.ServerName server = 1;</code>
      */
     boolean hasServer();
     /**
-     * <code>required .hbase.pb.ServerName server = 1;</code>
-     *
      * <pre>
      * The ServerName hosting the meta region currently, or destination server,
      * if meta region is in transition.
      * </pre>
+     *
+     * <code>required .hbase.pb.ServerName server = 1;</code>
      */
     org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getServer();
     /**
-     * <code>required .hbase.pb.ServerName server = 1;</code>
-     *
      * <pre>
      * The ServerName hosting the meta region currently, or destination server,
      * if meta region is in transition.
      * </pre>
+     *
+     * <code>required .hbase.pb.ServerName server = 1;</code>
     */
     org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder();
 
-    // optional uint32 rpc_version = 2;
     /**
-     * <code>optional uint32 rpc_version = 2;</code>
-     *
      * <pre>
      * The major version of the rpc the server speaks.  This is used so that
     * clients connecting to the cluster can have prior knowledge of what version
      * to send to a RegionServer.  AsyncHBase will use this to detect versions.
      * </pre>
+     *
+     * <code>optional uint32 rpc_version = 2;</code>
      */
     boolean hasRpcVersion();
     /**
-     * <code>optional uint32 rpc_version = 2;</code>
-     *
      * <pre>
      * The major version of the rpc the server speaks.  This is used so that
     * clients connecting to the cluster can have prior knowledge of what version
      * to send to a RegionServer.  AsyncHBase will use this to detect versions.
      * </pre>
+     *
+     * <code>optional uint32 rpc_version = 2;</code>
      */
     int getRpcVersion();
 
-    // optional .hbase.pb.RegionState.State state = 3;
     /**
-     * <code>optional .hbase.pb.RegionState.State state = 3;</code>
-     *
      * <pre>
     * State of the region transition. OPEN means fully operational 'hbase:meta'
      * </pre>
+     *
+     * <code>optional .hbase.pb.RegionState.State state = 3;</code>
      */
     boolean hasState();
     /**
-     * <code>optional .hbase.pb.RegionState.State state = 3;</code>
-     *
      * <pre>
     * State of the region transition. OPEN means fully operational 'hbase:meta'
      * </pre>
+     *
+     * <code>optional .hbase.pb.RegionState.State state = 3;</code>
     */
     org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State getState();
   }
   /**
-   * Protobuf type {@code hbase.pb.MetaRegionServer}
-   *
    * <pre>
    **
    * Content of the meta-region-server znode.
    * </pre>
+   *
+   * Protobuf type {@code hbase.pb.MetaRegionServer}
    */
-  public static final class MetaRegionServer extends
-      com.google.protobuf.GeneratedMessage
-      implements MetaRegionServerOrBuilder {
+  public  static final class MetaRegionServer extends
+      com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.MetaRegionServer)
+      MetaRegionServerOrBuilder {
     // Use MetaRegionServer.newBuilder() to construct.
-    private MetaRegionServer(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+    private MetaRegionServer(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
       super(builder);
-      this.unknownFields = builder.getUnknownFields();
     }
-    private MetaRegionServer(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-    private static final MetaRegionServer defaultInstance;
-    public static MetaRegionServer getDefaultInstance() {
-      return defaultInstance;
-    }
-
-    public MetaRegionServer getDefaultInstanceForType() {
-      return defaultInstance;
+    private MetaRegionServer() {
+      rpcVersion_ = 0;
+      state_ = 0;
     }
 
-    private final com.google.protobuf.UnknownFieldSet unknownFields;
     @java.lang.Override
     public final com.google.protobuf.UnknownFieldSet
-        getUnknownFields() {
+    getUnknownFields() {
       return this.unknownFields;
     }
     private MetaRegionServer(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
-      initFields();
+      this();
       int mutable_bitField0_ = 0;
       com.google.protobuf.UnknownFieldSet.Builder unknownFields =
           com.google.protobuf.UnknownFieldSet.newBuilder();
@@ -161,7 +158,7 @@ public final class ZooKeeperProtos {
                 unknownFields.mergeVarintField(3, rawValue);
               } else {
                 bitField0_ |= 0x00000004;
-                state_ = value;
+                state_ = rawValue;
               }
               break;
             }
@@ -171,7 +168,7 @@ public final class ZooKeeperProtos {
         throw e.setUnfinishedMessage(this);
       } catch (java.io.IOException e) {
         throw new com.google.protobuf.InvalidProtocolBufferException(
-            e.getMessage()).setUnfinishedMessage(this);
+            e).setUnfinishedMessage(this);
       } finally {
         this.unknownFields = unknownFields.build();
         makeExtensionsImmutable();
@@ -182,127 +179,106 @@ public final class ZooKeeperProtos {
      return org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_MetaRegionServer_descriptor;
     }
 
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
         internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_MetaRegionServer_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer.Builder.class);
     }
 
-    public static com.google.protobuf.Parser<MetaRegionServer> PARSER =
-        new com.google.protobuf.AbstractParser<MetaRegionServer>() {
-      public MetaRegionServer parsePartialFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new MetaRegionServer(input, extensionRegistry);
-      }
-    };
-
-    @java.lang.Override
-    public com.google.protobuf.Parser<MetaRegionServer> getParserForType() {
-      return PARSER;
-    }
-
     private int bitField0_;
-    // required .hbase.pb.ServerName server = 1;
     public static final int SERVER_FIELD_NUMBER = 1;
    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName server_;
     /**
-     * <code>required .hbase.pb.ServerName server = 1;</code>
-     *
      * <pre>
      * The ServerName hosting the meta region currently, or destination server,
      * if meta region is in transition.
      * </pre>
+     *
+     * <code>required .hbase.pb.ServerName server = 1;</code>
      */
     public boolean hasServer() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * <code>required .hbase.pb.ServerName server = 1;</code>
-     *
      * <pre>
      * The ServerName hosting the meta region currently, or destination server,
      * if meta region is in transition.
      * </pre>
+     *
+     * <code>required .hbase.pb.ServerName server = 1;</code>
      */
    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getServer() {
-      return server_;
+      return server_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : server_;
     }
     /**
-     * <code>required .hbase.pb.ServerName server = 1;</code>
-     *
      * <pre>
      * The ServerName hosting the meta region currently, or destination server,
      * if meta region is in transition.
      * </pre>
+     *
+     * <code>required .hbase.pb.ServerName server = 1;</code>
      */
    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() {
-      return server_;
+      return server_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : server_;
     }
 
-    // optional uint32 rpc_version = 2;
     public static final int RPC_VERSION_FIELD_NUMBER = 2;
     private int rpcVersion_;
     /**
-     * <code>optional uint32 rpc_version = 2;</code>
-     *
      * <pre>
      * The major version of the rpc the server speaks.  This is used so that
     * clients connecting to the cluster can have prior knowledge of what version
      * to send to a RegionServer.  AsyncHBase will use this to detect versions.
      * </pre>
+     *
+     * <code>optional uint32 rpc_version = 2;</code>
      */
     public boolean hasRpcVersion() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
     }
     /**
-     * <code>optional uint32 rpc_version = 2;</code>
-     *
      * <pre>
      * The major version of the rpc the server speaks.  This is used so that
     * clients connecting to the cluster can have prior knowledge of what version
      * to send to a RegionServer.  AsyncHBase will use this to detect versions.
      * </pre>
+     *
+     * <code>optional uint32 rpc_version = 2;</code>
      */
     public int getRpcVersion() {
       return rpcVersion_;
     }
 
-    // optional .hbase.pb.RegionState.State state = 3;
     public static final int STATE_FIELD_NUMBER = 3;
-    private org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State state_;
+    private int state_;
     /**
-     * <code>optional .hbase.pb.RegionState.State state = 3;</code>
-     *
      * <pre>
     * State of the region transition. OPEN means fully operational 'hbase:meta'
      * </pre>
+     *
+     * <code>optional .hbase.pb.RegionState.State state = 3;</code>
      */
     public boolean hasState() {
       return ((bitField0_ & 0x00000004) == 0x00000004);
     }
     /**
-     * <code>optional .hbase.pb.RegionState.State state = 3;</code>
-     *
      * <pre>
     * State of the region transition. OPEN means fully operational 'hbase:meta'
      * </pre>
+     *
+     * <code>optional .hbase.pb.RegionState.State state = 3;</code>
      */
    public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State getState() {
-      return state_;
+      org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State result = org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State.valueOf(state_);
+      return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE : result;
     }
 
-    private void initFields() {
-      server_ = org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
-      rpcVersion_ = 0;
-      state_ = org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
-    }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
 
       if (!hasServer()) {
         memoizedIsInitialized = 0;
@@ -318,28 +294,26 @@ public final class ZooKeeperProtos {
 
     public void writeTo(com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
-      getSerializedSize();
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeMessage(1, server_);
+        output.writeMessage(1, getServer());
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         output.writeUInt32(2, rpcVersion_);
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        output.writeEnum(3, state_.getNumber());
+        output.writeEnum(3, state_);
       }
-      getUnknownFields().writeTo(output);
+      unknownFields.writeTo(output);
     }
 
-    private int memoizedSerializedSize = -1;
     public int getSerializedSize() {
-      int size = memoizedSerializedSize;
+      int size = memoizedSize;
       if (size != -1) return size;
 
       size = 0;
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, server_);
+          .computeMessageSize(1, getServer());
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         size += com.google.protobuf.CodedOutputStream
@@ -347,21 +321,15 @@ public final class ZooKeeperProtos {
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeEnumSize(3, state_.getNumber());
+          .computeEnumSize(3, state_);
       }
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
       return size;
     }
 
     private static final long serialVersionUID = 0L;
     @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
-
-    @java.lang.Override
     public boolean equals(final java.lang.Object obj) {
       if (obj == this) {
        return true;
@@ -384,15 +352,12 @@ public final class ZooKeeperProtos {
       }
       result = result && (hasState() == other.hasState());
       if (hasState()) {
-        result = result &&
-            (getState() == other.getState());
+        result = result && state_ == other.state_;
       }
-      result = result &&
-          getUnknownFields().equals(other.getUnknownFields());
+      result = result && unknownFields.equals(other.unknownFields);
       return result;
     }
 
-    private int memoizedHashCode = 0;
     @java.lang.Override
     public int hashCode() {
       if (memoizedHashCode != 0) {
@@ -410,9 +375,9 @@ public final class ZooKeeperProtos {
       }
       if (hasState()) {
         hash = (37 * hash) + STATE_FIELD_NUMBER;
-        hash = (53 * hash) + hashEnum(getState());
+        hash = (53 * hash) + state_;
       }
-      hash = (29 * hash) + getUnknownFields().hashCode();
+      hash = (29 * hash) + unknownFields.hashCode();
       memoizedHashCode = hash;
       return hash;
     }
@@ -440,66 +405,78 @@ public final class ZooKeeperProtos {
     }
    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(java.io.InputStream input)
         throws java.io.IOException {
-      return PARSER.parseFrom(input);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
     }
    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
     }
    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
     }
    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseDelimitedFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
     }
    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
         com.google.protobuf.CodedInputStream input)
         throws java.io.IOException {
-      return PARSER.parseFrom(input);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
     }
    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
     }
 
-    public static Builder newBuilder() { return Builder.create(); }
     public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer prototype) {
-      return newBuilder().mergeFrom(prototype);
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
     }
-    public Builder toBuilder() { return newBuilder(this); }
 
     @java.lang.Override
     protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
       Builder builder = new Builder(parent);
       return builder;
     }
     /**
-     * Protobuf type {@code hbase.pb.MetaRegionServer}
-     *
      * <pre>
      **
      * Content of the meta-region-server znode.
      * </pre>
+     *
+     * Protobuf type {@code hbase.pb.MetaRegionServer}
      */
     public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServerOrBuilder {
+        com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.MetaRegionServer)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServerOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
        return org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_MetaRegionServer_descriptor;
       }
 
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
           internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_MetaRegionServer_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
@@ -512,38 +489,31 @@ public final class ZooKeeperProtos {
       }
 
       private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+          com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
       private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        if (com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
           getServerFieldBuilder();
         }
       }
-      private static Builder create() {
-        return new Builder();
-      }
-
       public Builder clear() {
         super.clear();
         if (serverBuilder_ == null) {
-          server_ = org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+          server_ = null;
         } else {
           serverBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000001);
         rpcVersion_ = 0;
         bitField0_ = (bitField0_ & ~0x00000002);
-        state_ = org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
+        state_ = 0;
         bitField0_ = (bitField0_ & ~0x00000004);
         return this;
       }
 
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
-
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
        return org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_MetaRegionServer_descriptor;
@@ -586,6 +556,32 @@ public final class ZooKeeperProtos {
         return result;
       }
 
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
       public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer) {
          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer)other);
@@ -606,17 +602,16 @@ public final class ZooKeeperProtos {
         if (other.hasState()) {
           setState(other.getState());
         }
-        this.mergeUnknownFields(other.getUnknownFields());
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
         return this;
       }
 
       public final boolean isInitialized() {
         if (!hasServer()) {
-          
           return false;
         }
         if (!getServer().isInitialized()) {
-          
           return false;
         }
         return true;
@@ -631,7 +626,7 @@ public final class ZooKeeperProtos {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer) e.getUnfinishedMessage();
-          throw e;
+          throw e.unwrapIOException();
         } finally {
           if (parsedMessage != null) {
             mergeFrom(parsedMessage);
@@ -641,43 +636,42 @@ public final class ZooKeeperProtos {
       }
       private int bitField0_;
 
-      // required .hbase.pb.ServerName server = 1;
-      private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName server_ = org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
-      private com.google.protobuf.SingleFieldBuilder<
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName server_ = null;
+      private com.google.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverBuilder_;
       /**
-       * <code>required .hbase.pb.ServerName server = 1;</code>
-       *
        * <pre>
       * The ServerName hosting the meta region currently, or destination server,
        * if meta region is in transition.
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName server = 1;</code>
        */
       public boolean hasServer() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
       /**
-       * <code>required .hbase.pb.ServerName server = 1;</code>
-       *
        * <pre>
       * The ServerName hosting the meta region currently, or destination server,
        * if meta region is in transition.
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName server = 1;</code>
        */
      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getServer() {
        if (serverBuilder_ == null) {
-          return server_;
+          return server_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : server_;
         } else {
           return serverBuilder_.getMessage();
         }
       }
       /**
-       * <code>required .hbase.pb.ServerName server = 1;</code>
-       *
        * <pre>
       * The ServerName hosting the meta region currently, or destination server,
        * if meta region is in transition.
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName server = 1;</code>
        */
      public Builder setServer(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) {
         if (serverBuilder_ == null) {
@@ -693,12 +687,12 @@ public final class ZooKeeperProtos {
         return this;
       }
       /**
-       * <code>required .hbase.pb.ServerName server = 1;</code>
-       *
        * <pre>
       * The ServerName hosting the meta region currently, or destination server,
        * if meta region is in transition.
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName server = 1;</code>
        */
      public Builder setServer(
          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
@@ -712,16 +706,17 @@ public final class ZooKeeperProtos {
         return this;
       }
       /**
-       * <code>required .hbase.pb.ServerName server = 1;</code>
-       *
        * <pre>
       * The ServerName hosting the meta region currently, or destination server,
        * if meta region is in transition.
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName server = 1;</code>
        */
      public Builder mergeServer(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) {
         if (serverBuilder_ == null) {
           if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              server_ != null &&
              server_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
            server_ =
              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.newBuilder(server_).mergeFrom(value).buildPartial();
@@ -736,16 +731,16 @@ public final class ZooKeeperProtos {
         return this;
       }
       /**
-       * <code>required .hbase.pb.ServerName server = 1;</code>
-       *
        * <pre>
       * The ServerName hosting the meta region currently, or destination server,
        * if meta region is in transition.
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName server = 1;</code>
        */
       public Builder clearServer() {
         if (serverBuilder_ == null) {
-          server_ = org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+          server_ = null;
           onChanged();
         } else {
           serverBuilder_.clear();
@@ -754,12 +749,12 @@ public final class ZooKeeperProtos {
         return this;
       }
       /**
-       * <code>required .hbase.pb.ServerName server = 1;</code>
-       *
        * <pre>
       * The ServerName hosting the meta region currently, or destination server,
        * if meta region is in transition.
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName server = 1;</code>
        */
      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder getServerBuilder() {
         bitField0_ |= 0x00000001;
@@ -767,35 +762,36 @@ public final class ZooKeeperProtos {
         return getServerFieldBuilder().getBuilder();
       }
       /**
-       * <code>required .hbase.pb.ServerName server = 1;</code>
-       *
        * <pre>
       * The ServerName hosting the meta region currently, or destination server,
        * if meta region is in transition.
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName server = 1;</code>
        */
      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() {
         if (serverBuilder_ != null) {
           return serverBuilder_.getMessageOrBuilder();
         } else {
-          return server_;
+          return server_ == null ?
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : server_;
         }
       }
       /**
-       * <code>required .hbase.pb.ServerName server = 1;</code>
-       *
        * <pre>
       * The ServerName hosting the meta region currently, or destination server,
        * if meta region is in transition.
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName server = 1;</code>
        */
-      private com.google.protobuf.SingleFieldBuilder<
+      private com.google.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
          getServerFieldBuilder() {
         if (serverBuilder_ == null) {
-          serverBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+          serverBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
-                  server_,
+                  getServer(),
                   getParentForChildren(),
                   isClean());
           server_ = null;
@@ -803,40 +799,39 @@ public final class ZooKeeperProtos {
         return serverBuilder_;
       }
 
-      // optional uint32 rpc_version = 2;
       private int rpcVersion_ ;
       /**
-       * <code>optional uint32 rpc_version = 2;</code>
-       *
        * <pre>
        * The major version of the rpc the server speaks.  This is used so that
       * clients connecting to the cluster can have prior knowledge of what version
       * to send to a RegionServer.  AsyncHBase will use this to detect versions.
        * </pre>
+       *
+       * <code>optional uint32 rpc_version = 2;</code>
        */
       public boolean hasRpcVersion() {
         return ((bitField0_ & 0x00000002) == 0x00000002);
       }
       /**
-       * <code>optional uint32 rpc_version = 2;</code>
-       *
        * <pre>
        * The major version of the rpc the server speaks.  This is used so that
       * clients connecting to the cluster can have prior knowledge of what version
       * to send to a RegionServer.  AsyncHBase will use this to detect versions.
        * </pre>
+       *
+       * <code>optional uint32 rpc_version = 2;</code>
        */
       public int getRpcVersion() {
         return rpcVersion_;
       }
       /**
-       * <code>optional uint32 rpc_version = 2;</code>
-       *
        * <pre>
        * The major version of the rpc the server speaks.  This is used so that
       * clients connecting to the cluster can have prior knowledge of what version
       * to send to a RegionServer.  AsyncHBase will use this to detect versions.
        * </pre>
+       *
+       * <code>optional uint32 rpc_version = 2;</code>
        */
       public Builder setRpcVersion(int value) {
         bitField0_ |= 0x00000002;
@@ -845,13 +840,13 @@ public final class ZooKeeperProtos {
         return this;
       }
       /**
-       * <code>optional uint32 rpc_version = 2;</code>
-       *
        * <pre>
        * The major version of the rpc the server speaks.  This is used so that
       * clients connecting to the cluster can have prior knowledge of what version
       * to send to a RegionServer.  AsyncHBase will use this to detect versions.
        * </pre>
+       *
+       * <code>optional uint32 rpc_version = 2;</code>
        */
       public Builder clearRpcVersion() {
         bitField0_ = (bitField0_ & ~0x00000002);
@@ -860,117 +855,152 @@ public final class ZooKeeperProtos {
         return this;
       }
 
-      // optional .hbase.pb.RegionState.State state = 3;
-      private org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State state_ = org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
+      private int state_ = 0;
       /**
-       * <code>optional .hbase.pb.RegionState.State state = 3;</code>
-       *
        * <pre>
       * State of the region transition. OPEN means fully operational 'hbase:meta'
        * </pre>
+       *
+       * <code>optional .hbase.pb.RegionState.State state = 3;</code>
        */
       public boolean hasState() {
         return ((bitField0_ & 0x00000004) == 0x00000004);
       }
       /**
-       * <code>optional .hbase.pb.RegionState.State state = 3;</code>
-       *
        * <pre>
       * State of the region transition. OPEN means fully operational 'hbase:meta'
        * </pre>
+       *
+       * <code>optional .hbase.pb.RegionState.State state = 3;</code>
        */
      public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State getState() {
-        return state_;
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State result = org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State.valueOf(state_);
+        return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE : result;
       }
       /**
-       * <code>optional .hbase.pb.RegionState.State state = 3;</code>
-       *
        * <pre>
       * State of the region transition. OPEN means fully operational 'hbase:meta'
        * </pre>
+       *
+       * <code>optional .hbase.pb.RegionState.State state = 3;</code>
        */
      public Builder setState(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State value) {
         if (value == null) {
           throw new NullPointerException();
         }
         bitField0_ |= 0x00000004;
-        state_ = value;
+        state_ = value.getNumber();
         onChanged();
         return this;
       }
       /**
-       * <code>optional .hbase.pb.RegionState.State state = 3;</code>
-       *
        * <pre>
       * State of the region transition. OPEN means fully operational 'hbase:meta'
        * </pre>
+       *
+       * <code>optional .hbase.pb.RegionState.State state = 3;</code>
        */
       public Builder clearState() {
         bitField0_ = (bitField0_ & ~0x00000004);
-        state_ = org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
+        state_ = 0;
         onChanged();
         return this;
       }
+      public final Builder setUnknownFields(
+          final com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
 
       // @@protoc_insertion_point(builder_scope:hbase.pb.MetaRegionServer)
     }
 
+    // @@protoc_insertion_point(class_scope:hbase.pb.MetaRegionServer)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer DEFAULT_INSTANCE;
     static {
-      defaultInstance = new MetaRegionServer(true);
-      defaultInstance.initFields();
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final com.google.protobuf.Parser<MetaRegionServer>
+        PARSER = new com.google.protobuf.AbstractParser<MetaRegionServer>() {
+      public MetaRegionServer parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+          return new MetaRegionServer(input, extensionRegistry);
+      }
+    };
+
+    public static com.google.protobuf.Parser<MetaRegionServer> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<MetaRegionServer> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
     }
 
-    // @@protoc_insertion_point(class_scope:hbase.pb.MetaRegionServer)
   }
 
-  public interface MasterOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
+  public interface MasterOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.Master)
+      com.google.protobuf.MessageOrBuilder {
 
-    // required .hbase.pb.ServerName master = 1;
     /**
-     * <code>required .hbase.pb.ServerName master = 1;</code>
-     *
      * <pre>
      * The ServerName of the current Master
      * </pre>
+     *
+     * <code>required .hbase.pb.ServerName master = 1;</code>
      */
     boolean hasMaster();
     /**
-     * <code>required .hbase.pb.ServerName master = 1;</code>
-     *
      * <pre>
      * The ServerName of the current Master
      * </pre>
+     *
+     * <code>required .hbase.pb.ServerName master = 1;</code>
      */
     org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getMaster();
     /**
-     * <code>required .hbase.pb.ServerName master = 1;</code>
-     *
      * <pre>
      * The ServerName of the current Master
      * </pre>
+     *
+     * <code>required .hbase.pb.ServerName master = 1;</code>
     */
     org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder();
 
-    // optional uint32 rpc_version = 2;
     /**
-     * <code>optional uint32 rpc_version = 2;</code>
-     *
      * <pre>
     * Major RPC version so that clients can know what version the master can accept.
      * </pre>
+     *
+     * <code>optional uint32 rpc_version = 2;</code>
      */
     boolean hasRpcVersion();
     /**
-     * <code>optional uint32 rpc_version = 2;</code>
-     *
      * <pre>
     * Major RPC version so that clients can know what version the master can accept.
      * </pre>
+     *
+     * <code>optional uint32 rpc_version = 2;</code>
      */
     int getRpcVersion();
 
-    // optional uint32 info_port = 3;
     /**
      * <code>optional uint32 info_port = 3;</code>
      */
@@ -981,43 +1011,36 @@ public final class ZooKeeperProtos {
     int getInfoPort();
   }
   /**
-   * Protobuf type {@code hbase.pb.Master}
-   *
    * <pre>
    **
    * Content of the master znode.
    * </pre>
+   *
+   * Protobuf type {@code hbase.pb.Master}
    */
-  public static final class Master extends
-      com.google.protobuf.GeneratedMessage
-      implements MasterOrBuilder {
+  public  static final class Master extends
+      com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.Master)
+      MasterOrBuilder {
     // Use Master.newBuilder() to construct.
-    private Master(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+    private Master(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
       super(builder);
-      this.unknownFields = builder.getUnknownFields();
     }
-    private Master(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-    private static final Master defaultInstance;
-    public static Master getDefaultInstance() {
-      return defaultInstance;
-    }
-
-    public Master getDefaultInstanceForType() {
-      return defaultInstance;
+    private Master() {
+      rpcVersion_ = 0;
+      infoPort_ = 0;
     }
 
-    private final com.google.protobuf.UnknownFieldSet unknownFields;
     @java.lang.Override
     public final com.google.protobuf.UnknownFieldSet
-        getUnknownFields() {
+    getUnknownFields() {
       return this.unknownFields;
     }
     private Master(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
-      initFields();
+      this();
       int mutable_bitField0_ = 0;
       com.google.protobuf.UnknownFieldSet.Builder unknownFields =
           com.google.protobuf.UnknownFieldSet.newBuilder();
@@ -1065,7 +1088,7 @@ public final class ZooKeeperProtos {
         throw e.setUnfinishedMessage(this);
       } catch (java.io.IOException e) {
         throw new com.google.protobuf.InvalidProtocolBufferException(
-            e.getMessage()).setUnfinishedMessage(this);
+            e).setUnfinishedMessage(this);
       } finally {
         this.unknownFields = unknownFields.build();
         makeExtensionsImmutable();
@@ -1076,88 +1099,70 @@ public final class ZooKeeperProtos {
      return org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Master_descriptor;
     }
 
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
         internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Master_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.Master.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.Master.Builder.class);
     }
 
-    public static com.google.protobuf.Parser<Master> PARSER =
-        new com.google.protobuf.AbstractParser<Master>() {
-      public Master parsePartialFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new Master(input, extensionRegistry);
-      }
-    };
-
-    @java.lang.Override
-    public com.google.protobuf.Parser<Master> getParserForType() {
-      return PARSER;
-    }
-
     private int bitField0_;
-    // required .hbase.pb.ServerName master = 1;
     public static final int MASTER_FIELD_NUMBER = 1;
    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName master_;
     /**
-     * <code>required .hbase.pb.ServerName master = 1;</code>
-     *
      * <pre>
      * The ServerName of the current Master
      * </pre>
+     *
+     * <code>required .hbase.pb.ServerName master = 1;</code>
      */
     public boolean hasMaster() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * <code>required .hbase.pb.ServerName master = 1;</code>
-     *
      * <pre>
      * The ServerName of the current Master
      * </pre>
+     *
+     * <code>required .hbase.pb.ServerName master = 1;</code>
      */
    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getMaster() {
-      return master_;
+      return master_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : master_;
     }
     /**
-     * <code>required .hbase.pb.ServerName master = 1;</code>
-     *
      * <pre>
      * The ServerName of the current Master
      * </pre>
+     *
+     * <code>required .hbase.pb.ServerName master = 1;</code>
      */
    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder() {
-      return master_;
+      return master_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : master_;
     }
 
-    // optional uint32 rpc_version = 2;
     public static final int RPC_VERSION_FIELD_NUMBER = 2;
     private int rpcVersion_;
     /**
-     * <code>optional uint32 rpc_version = 2;</code>
-     *
      * <pre>
     * Major RPC version so that clients can know what version the master can accept.
      * </pre>
+     *
+     * <code>optional uint32 rpc_version = 2;</code>
      */
     public boolean hasRpcVersion() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
     }
     /**
-     * <code>optional uint32 rpc_version = 2;</code>
-     *
      * <pre>
     * Major RPC version so that clients can know what version the master can accept.
      * </pre>
+     *
+     * <code>optional uint32 rpc_version = 2;</code>
      */
     public int getRpcVersion() {
       return rpcVersion_;
     }
 
-    // optional uint32 info_port = 3;
     public static final int INFO_PORT_FIELD_NUMBER = 3;
     private int infoPort_;
     /**
@@ -1173,15 +1178,11 @@ public final class ZooKeeperProtos {
       return infoPort_;
     }
 
-    private void initFields() {
-      master_ = org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
-      rpcVersion_ = 0;
-      infoPort_ = 0;
-    }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
 
       if (!hasMaster()) {
         memoizedIsInitialized = 0;
@@ -1197,9 +1198,8 @@ public final class ZooKeeperProtos {
 
     public void writeTo(com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
-      getSerializedSize();
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeMessage(1, master_);
+        output.writeMessage(1, getMaster());
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         output.writeUInt32(2, rpcVersion_);
@@ -1207,18 +1207,17 @@ public final class ZooKeeperProtos {
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
         output.writeUInt32(3, infoPort_);
       }
-      getUnknownFields().writeTo(output);
+      unknownFields.writeTo(output);
     }
 
-    private int memoizedSerializedSize = -1;
     public int getSerializedSize() {
-      int size = memoizedSerializedSize;
+      int size = memoizedSize;
       if (size != -1) return size;
 
       size = 0;
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, master_);
+          .computeMessageSize(1, getMaster());
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         size += com.google.protobuf.CodedOutputStream
@@ -1228,19 +1227,13 @@ public final class ZooKeeperProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeUInt32Size(3, infoPort_);
       }
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
       return size;
     }
 
     private static final long serialVersionUID = 0L;
     @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
-
-    @java.lang.Override
     public boolean equals(final java.lang.Object obj) {
       if (obj == this) {
        return true;
@@ -1266,12 +1259,10 @@ public final class ZooKeeperProtos {
         result = result && (getInfoPort()
             == other.getInfoPort());
       }
-      result = result &&
-          getUnknownFields().equals(other.getUnknownFields());
+      result = result && unknownFields.equals(other.unknownFields);
       return result;
     }
 
-    private int memoizedHashCode = 0;
     @java.lang.Override
     public int hashCode() {
       if (memoizedHashCode != 0) {
@@ -1291,7 +1282,7 @@ public final class ZooKeeperProtos {
         hash = (37 * hash) + INFO_PORT_FIELD_NUMBER;
         hash = (53 * hash) + getInfoPort();
       }
-      hash = (29 * hash) + getUnknownFields().hashCode();
+      hash = (29 * hash) + unknownFields.hashCode();
       memoizedHashCode = hash;
       return hash;
     }
@@ -1319,66 +1310,78 @@ public final class ZooKeeperProtos {
     }
    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.Master parseFrom(java.io.InputStream input)
         throws java.io.IOException {
-      return PARSER.parseFrom(input);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
     }
    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.Master parseFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
     }
    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.Master parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
     }
    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.Master parseDelimitedFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
     }
    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.Master parseFrom(
         com.google.protobuf.CodedInputStream input)
         throws java.io.IOException {
-      return PARSER.parseFrom(input);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
     }
    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.Master parseFrom(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
     }
 
-    public static Builder newBuilder() { return Builder.create(); }
     public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.Master prototype) {
-      return newBuilder().mergeFrom(prototype);
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
     }
-    public Builder toBuilder() { return newBuilder(this); }
 
     @java.lang.Override
     protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
       Builder builder = new Builder(parent);
       return builder;
     }
     /**
-     * Protobuf type {@code hbase.pb.Master}
-     *
      * <pre>
      **
      * Content of the master znode.
      * </pre>
+     *
+     * Protobuf type {@code hbase.pb.Master}
      */
     public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MasterOrBuilder {
+        com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.Master)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MasterOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
        return org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Master_descriptor;
       }
 
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
           internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Master_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
@@ -1391,23 +1394,20 @@ public final class ZooKeeperProtos {
       }
 
       private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+          com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
       private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        if (com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
           getMasterFieldBuilder();
         }
       }
-      private static Builder create() {
-        return new Builder();
-      }
-
       public Builder clear() {
         super.clear();
         if (masterBuilder_ == null) {
-          master_ = 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+          master_ = null;
         } else {
           masterBuilder_.clear();
         }
@@ -1419,10 +1419,6 @@ public final class ZooKeeperProtos {
         return this;
       }
 
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
-
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
         return 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Master_descriptor;
@@ -1465,6 +1461,32 @@ public final class ZooKeeperProtos {
         return result;
       }
 
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
       public Builder mergeFrom(com.google.protobuf.Message other) {
         if (other instanceof 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.Master) {
           return 
mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.Master)other);
@@ -1485,17 +1507,16 @@ public final class ZooKeeperProtos {
         if (other.hasInfoPort()) {
           setInfoPort(other.getInfoPort());
         }
-        this.mergeUnknownFields(other.getUnknownFields());
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
         return this;
       }
 
       public final boolean isInitialized() {
         if (!hasMaster()) {
-          
           return false;
         }
         if (!getMaster().isInitialized()) {
-          
           return false;
         }
         return true;
@@ -1510,7 +1531,7 @@ public final class ZooKeeperProtos {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
           parsedMessage = 
(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.Master) 
e.getUnfinishedMessage();
-          throw e;
+          throw e.unwrapIOException();
         } finally {
           if (parsedMessage != null) {
             mergeFrom(parsedMessage);
@@ -1520,40 +1541,39 @@ public final class ZooKeeperProtos {
       }
       private int bitField0_;
 
-      // required .hbase.pb.ServerName master = 1;
-      private 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName 
master_ = 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
-      private com.google.protobuf.SingleFieldBuilder<
+      private 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName 
master_ = null;
+      private com.google.protobuf.SingleFieldBuilderV3<
           
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
 masterBuilder_;
       /**
-       * <code>required .hbase.pb.ServerName master = 1;</code>
-       *
        * <pre>
        * The ServerName of the current Master
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName master = 1;</code>
        */
       public boolean hasMaster() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
       /**
-       * <code>required .hbase.pb.ServerName master = 1;</code>
-       *
        * <pre>
        * The ServerName of the current Master
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName master = 1;</code>
        */
       public 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName 
getMaster() {
         if (masterBuilder_ == null) {
-          return master_;
+          return master_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()
 : master_;
         } else {
           return masterBuilder_.getMessage();
         }
       }
       /**
-       * <code>required .hbase.pb.ServerName master = 1;</code>
-       *
        * <pre>
        * The ServerName of the current Master
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName master = 1;</code>
        */
       public Builder 
setMaster(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName
 value) {
         if (masterBuilder_ == null) {
@@ -1569,11 +1589,11 @@ public final class ZooKeeperProtos {
         return this;
       }
       /**
-       * <code>required .hbase.pb.ServerName master = 1;</code>
-       *
        * <pre>
        * The ServerName of the current Master
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName master = 1;</code>
        */
       public Builder setMaster(
           
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder
 builderForValue) {
@@ -1587,15 +1607,16 @@ public final class ZooKeeperProtos {
         return this;
       }
       /**
-       * <code>required .hbase.pb.ServerName master = 1;</code>
-       *
        * <pre>
        * The ServerName of the current Master
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName master = 1;</code>
        */
       public Builder 
mergeMaster(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName
 value) {
         if (masterBuilder_ == null) {
           if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              master_ != null &&
               master_ != 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance())
 {
             master_ =
               
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.newBuilder(master_).mergeFrom(value).buildPartial();
@@ -1610,15 +1631,15 @@ public final class ZooKeeperProtos {
         return this;
       }
       /**
-       * <code>required .hbase.pb.ServerName master = 1;</code>
-       *
        * <pre>
        * The ServerName of the current Master
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName master = 1;</code>
        */
       public Builder clearMaster() {
         if (masterBuilder_ == null) {
-          master_ = 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+          master_ = null;
           onChanged();
         } else {
           masterBuilder_.clear();
@@ -1627,11 +1648,11 @@ public final class ZooKeeperProtos {
         return this;
       }
       /**
-       * <code>required .hbase.pb.ServerName master = 1;</code>
-       *
        * <pre>
        * The ServerName of the current Master
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName master = 1;</code>
        */
       public 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder
 getMasterBuilder() {
         bitField0_ |= 0x00000001;
@@ -1639,33 +1660,34 @@ public final class ZooKeeperProtos {
         return getMasterFieldBuilder().getBuilder();
       }
       /**
-       * <code>required .hbase.pb.ServerName master = 1;</code>
-       *
        * <pre>
        * The ServerName of the current Master
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName master = 1;</code>
        */
       public 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder
 getMasterOrBuilder() {
         if (masterBuilder_ != null) {
           return masterBuilder_.getMessageOrBuilder();
         } else {
-          return master_;
+          return master_ == null ?
+              
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()
 : master_;
         }
       }
       /**
-       * <code>required .hbase.pb.ServerName master = 1;</code>
-       *
        * <pre>
        * The ServerName of the current Master
        * </pre>
+       *
+       * <code>required .hbase.pb.ServerName master = 1;</code>
        */
-      private com.google.protobuf.SingleFieldBuilder<
+      private com.google.protobuf.SingleFieldBuilderV3<
           
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
 
           getMasterFieldBuilder() {
         if (masterBuilder_ == null) {
-          masterBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+          masterBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
               
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
-                  master_,
+                  getMaster(),
                   getParentForChildren(),
                   isClean());
           master_ = null;
@@ -1673,34 +1695,33 @@ public final class ZooKeeperProtos {
         return masterBuilder_;
       }
 
-      // optional uint32 rpc_version = 2;
       private int rpcVersion_ ;
       /**
-       * <code>optional uint32 rpc_version = 2;</code>
-       *
        * <pre>
        * Major RPC version so that clients can know what version the master 
can accept.
        * </pre>
+       *
+       * <code>optional uint32 rpc_version = 2;</code>
        */
       public boolean hasRpcVersion() {
         return ((bitField0_ & 0x00000002) == 0x00000002);
       }
       /**
-       * <code>optional uint32 rpc_version = 2;</code>
-       *
        * <pre>
        * Major RPC version so that clients can know what version the master 
can accept.
        * </pre>
+       *
+       * <code>optional uint32 rpc_version = 2;</code>
        */
       public int getRpcVersion() {
         return rpcVersion_;
       }
       /**
-       * <code>optional uint32 rpc_version = 2;</code>
-       *
        * <pre>
        * Major RPC version so that clients can know what version the master 
can accept.
        * </pre>
+       *
+       * <code>optional uint32 rpc_version = 2;</code>
        */
       public Builder setRpcVersion(int value) {
         bitField0_ |= 0x00000002;
@@ -1709,11 +1730,11 @@ public final class ZooKeeperProtos {
         return this;
       }
       /**
-       * <code>optional uint32 rpc_version = 2;</code>
-       *
        * <pre>
        * Major RPC version so that clients can know what version the master 
can accept.
        * </pre>
+       *
+       * <code>optional uint32 rpc_version = 2;</code>
        */
       public Builder clearRpcVersion() {
         bitField0_ = (bitField0_ & ~0x00000002);
@@ -1722,7 +1743,6 @@ public final class ZooKeeperProtos {
         return this;
       }
 
-      // optional uint32 info_port = 3;
       private int infoPort_ ;
       /**
        * <code>optional uint32 info_port = 3;</code>
@@ -1754,89 +1774,118 @@ public final class ZooKeeperProtos {
         onChanged();
         return this;
       }
+      public final Builder setUnknownFields(
+          final com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
 
       // @@protoc_insertion_point(builder_scope:hbase.pb.Master)
     }
 
+    // @@protoc_insertion_point(class_scope:hbase.pb.Master)
+    private static final 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.Master 
DEFAULT_INSTANCE;
     static {
-      defaultInstance = new Master(true);
-      defaultInstance.initFields();
+      DEFAULT_INSTANCE = new 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.Master();
+    }
+
+    public static 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.Master 
getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final 
com.google.protobuf.Parser<Master>
+        PARSER = new com.google.protobuf.AbstractParser<Master>() {
+      public Master parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+          return new Master(input, extensionRegistry);
+      }
+    };
+
+    public static com.google.protobuf.Parser<Master> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<Master> getParserForType() {
+      return PARSER;
+    }
+
+    public 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.Master 
getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
     }
 
-    // @@protoc_insertion_point(class_scope:hbase.pb.Master)
   }
 
-  public interface ClusterUpOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
+  public interface ClusterUpOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.ClusterUp)
+      com.google.protobuf.MessageOrBuilder {
 
-    // required string start_date = 1;
     /**
-     * <code>required string start_date = 1;</code>
-     *
      * <pre>
      * If this znode is present, cluster is up.  Currently
      * the data is cluster start_date.
      * </pre>
+     *
+     * <code>required string start_date = 1;</code>
      */
     boolean hasStartDate();
     /**
-     * <code>required string start_date = 1;</code>
-     *
      * <pre>
      * If this znode is present, cluster is up.  Currently
      * the data is cluster start_date.
      * </pre>
+     *
+     * <code>required string start_date = 1;</code>
      */
     java.lang.String getStartDate();
     /**
-     * <code>required string start_date = 1;</code>
-     *
      * <pre>
      * If this znode is present, cluster is up.  Currently
      * the data is cluster start_date.
      * </pre>
+     *
+     * <code>required string start_date = 1;</code>
      */
     com.google.protobuf.ByteString
         getStartDateBytes();
   }
   /**
-   * Protobuf type {@code hbase.pb.ClusterUp}
-   *
    * <pre>
    **
    * Content of the '/hbase/running', cluster state, znode.
    * </pre>
+   *
+   * Protobuf type {@code hbase.pb.ClusterUp}
    */
-  public static final class ClusterUp extends
-      com.google.protobuf.GeneratedMessage
-      implements ClusterUpOrBuilder {
+  public  static final class ClusterUp extends
+      com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.ClusterUp)
+      ClusterUpOrBuilder {
     // Use ClusterUp.newBuilder() to construct.
-    private ClusterUp(com.google.protobuf.GeneratedMessage.Builder<?> builder) 
{
+    private ClusterUp(com.google.protobuf.GeneratedMessageV3.Builder<?> 
builder) {
       super(builder);
-      this.unknownFields = builder.getUnknownFields();
-    }
-    private ClusterUp(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-    private static final ClusterUp defaultInstance;
-    public static ClusterUp getDefaultInstance() {
-      return defaultInstance;
     }
-
-    public ClusterUp getDefaultInstanceForType() {
-      return defaultInstance;
+    private ClusterUp() {
+      startDate_ = "";
     }
 
-    private final com.google.protobuf.UnknownFieldSet unknownFields;
     @java.lang.Override
     public final com.google.protobuf.UnknownFieldSet
-        getUnknownFields() {
+    getUnknownFields() {
       return this.unknownFields;
     }
     private ClusterUp(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
-      initFields();
+      this();
       int mutable_bitField0_ = 0;
       com.google.protobuf.UnknownFieldSet.Builder unknownFields =
           com.google.protobuf.UnknownFieldSet.newBuilder();
@@ -1856,8 +1905,9 @@ public final class ZooKeeperProtos {
               break;
             }
             case 10: {
+              com.google.protobuf.ByteString bs = input.readBytes();
               bitField0_ |= 0x00000001;
-              startDate_ = input.readBytes();
+              startDate_ = bs;
               break;
             }
           }
@@ -1866,7 +1916,7 @@ public final class ZooKeeperProtos {
         throw e.setUnfinishedMessage(this);
       } catch (java.io.IOException e) {
         throw new com.google.protobuf.InvalidProtocolBufferException(
-            e.getMessage()).setUnfinishedMessage(this);
+            e).setUnfinishedMessage(this);
       } finally {
         this.unknownFields = unknownFields.build();
         makeExtensionsImmutable();
@@ -1877,50 +1927,34 @@ public final class ZooKeeperProtos {
       return 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ClusterUp_descriptor;
     }
 
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
         internalGetFieldAccessorTable() {
       return 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ClusterUp_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
               
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ClusterUp.class,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ClusterUp.Builder.class);
     }
 
-    public static com.google.protobuf.Parser<ClusterUp> PARSER =
-        new com.google.protobuf.AbstractParser<ClusterUp>() {
-      public ClusterUp parsePartialFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new ClusterUp(input, extensionRegistry);
-      }
-    };
-
-    @java.lang.Override
-    public com.google.protobuf.Parser<ClusterUp> getParserForType() {
-      return PARSER;
-    }
-
     private int bitField0_;
-    // required string start_date = 1;
     public static final int START_DATE_FIELD_NUMBER = 1;
-    private java.lang.Object startDate_;
+    private volatile java.lang.Object startDate_;
     /**
-     * <code>required string start_date = 1;</code>
-     *
      * <pre>
      * If this znode is present, cluster is up.  Currently
      * the data is cluster start_date.
      * </pre>
+     *
+     * <code>required string start_date = 1;</code>
      */
     public boolean hasStartDate() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * <code>required string start_date = 1;</code>
-     *
      * <pre>
      * If this znode is present, cluster is up.  Currently
      * the data is cluster start_date.
      * </pre>
+     *
+     * <code>required string start_date = 1;</code>
      */
     public java.lang.String getStartDate() {
       java.lang.Object ref = startDate_;
@@ -1937,12 +1971,12 @@ public final class ZooKeeperProtos {
       }
     }
     /**
-     * <code>required string start_date = 1;</code>
-     *
      * <pre>
      * If this znode is present, cluster is up.  Currently
      * the data is cluster start_date.
      * </pre>
+     *
+     * <code>required string start_date = 1;</code>
      */
     public com.google.protobuf.ByteString
         getStartDateBytes() {
@@ -1958,13 +1992,11 @@ public final class ZooKeeperProtos {
       }
     }
 
-    private void initFields() {
-      startDate_ = "";
-    }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
 
       if (!hasStartDate()) {
         memoizedIsInitialized = 0;
@@ -1976,36 +2008,27 @@ public final class ZooKeeperProtos {
 
     public void writeTo(com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
-      getSerializedSize();
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeBytes(1, getStartDateBytes());
+        com.google.protobuf.GeneratedMessageV3.writeString(output, 1, 
startDate_);
       }
-      getUnknownFields().writeTo(output);
+      unknownFields.writeTo(output);
     }
 
-    private int memoizedSerializedSize = -1;
     public int getSerializedSize() {
-      int size = memoizedSerializedSize;
+      int size = memoizedSize;
       if (size != -1) return size;
 
       size = 0;
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeBytesSize(1, getStartDateBytes());
+        size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, 
startDate_);
       }
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
       return size;
     }
 
     private static final long serialVersionUID = 0L;
     @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
-
-    @java.lang.Override
     public boolean equals(final java.lang.Object obj) {
       if (obj == this) {
        return true;
@@ -2021,12 +2044,10 @@ public final class ZooKeeperProtos {
         result = result && getStartDate()
             .equals(other.getStartDate());
       }
-      result = result &&
-          getUnknownFields().equals(other.getUnknownFields());
+      result = result && unknownFields.equals(other.unknownFields);
       return result;
     }
 
-    private int memoizedHashCode = 0;
     @java.lang.Override
     public int hashCode() {
       if (memoizedHashCode != 0) {
@@ -2038,7 +2059,7 @@ public final class ZooKeeperProtos {
         hash = (37 * hash) + START_DATE_FIELD_NUMBER;
         hash = (53 * hash) + getStartDate().hashCode();
       }
-      hash = (29 * hash) + getUnknownFields().hashCode();
+      hash = (29 * hash) + unknownFields.hashCode();
       memoizedHashCode = hash;
       return hash;
     }
@@ -2066,66 +2087,78 @@ public final class ZooKeeperProtos {
     }
     public static 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ClusterUp 
parseFrom(java.io.InputStream input)
         throws java.io.IOException {
-      return PARSER.parseFrom(input);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
     }
     public static 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ClusterUp 
parseFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
     }
     public static 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ClusterUp 
parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
     }
     public static 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ClusterUp 
parseDelimitedFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
     }
     public static 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ClusterUp 
parseFrom(
         com.google.protobuf.CodedInputStream input)
         throws java.io.IOException {
-      return PARSER.parseFrom(input);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
     }
     public static 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ClusterUp 
parseFrom(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
+      return com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
     }
 
-    public static Builder newBuilder() { return Builder.create(); }
     public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
     public static Builder 
newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ClusterUp
 prototype) {
-      return newBuilder().mergeFrom(prototype);
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
     }
-    public Builder toBuilder() { return newBuilder(this); }
 
     @java.lang.Override
     protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
       Builder builder = new Builder(parent);
       return builder;
     }
     /**
-     * Protobuf type {@code hbase.pb.ClusterUp}
-     *
      * <pre>
      **
      * Content of the '/hbase/running', cluster state, znode.
      * </pre>
+     *
+     * Protobuf type {@code hbase.pb.ClusterUp}
      */
     public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ClusterUpOrBuilder
 {
+        com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.ClusterUp)
+        
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ClusterUpOrBuilder
 {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
         return 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ClusterUp_descriptor;
       }
 
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
           internalGetFieldAccessorTable() {
         return 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ClusterUp_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
@@ -2138,18 +2171,15 @@ public final class ZooKeeperProtos {
       }
 
       private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+          com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
         super(parent);
         maybeForceBuilderInitialization();
       }
       private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        if (com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
         }
       }
-      private static Builder create() {
-        return new Builder();
-      }
-
       public Builder clear() {
         super.clear();
         startDate_ = "";
@@ -2157,10 +2187,6 @@ public final class ZooKeeperProtos {
         return this;
       }
 
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
-
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
         return 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ClusterUp_descriptor;
@@ -2191,6 +2217,32 @@ public final class ZooKeeperProtos {
         return result;
       }
 
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
       public Builder mergeFrom(com.google.protobuf.Message other) {
         if (other instanceof 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ClusterUp) {
           return 
mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ClusterUp)other);
@@ -2207,13 +2259,13 @@ public final class ZooKeeperProtos {
           startDate_ = other.startDate_;
           onChanged();
         }
-        this.mergeUnknownFields(other.getUnknownFields());
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
         return this;
       }
 
       public final boolean isInitialized() {
         if (!hasStartDate()) {
-          
           return false;
         }
         return true;
@@ -2228,7 +2280,7 @@ public final class ZooKeeperProtos {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
           parsedMessage = 
(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ClusterUp) 
e.getUnfinishedMessage();
-          throw e;
+          throw e.unwrapIOException();
         } finally {
           if (parsedMessage != null) {
             mergeFrom(parsedMessage);
@@ -2238,45 +2290,47 @@ public final class ZooKeeperProtos {
       }
       private int bitField0_;
 
-      // required string start_date = 1;
       private java.lang.Object startDate_ = "";
       /**
-       * <code>required string start_date = 1;</code>
-       *
        * <pre>
        * If this znode is present, cluster is up.  Currently
        * the data is cluster start_date.
        * </pre>
+       *
+       * <code>required string start_date = 1;</code>
        */
       public boolean hasStartDate() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
    

<TRUNCATED>

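Not part of the commit above, but for orientation: a minimal, hypothetical caller-side sketch of the regenerated hbase.pb.Master API after the move to GeneratedMessageV3. The ServerName setters (setHostName/setPort/setStartCode) and the getHostName() getter are assumed from the HBase ServerName proto and are not shown in this diff; the rest (newBuilder, setMaster, setRpcVersion, setInfoPort, parseFrom) appears in the generated code above.

    import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;

    public class MasterZNodeExample {
      public static void main(String[] args) throws Exception {
        // Assumed ServerName field names; check HBase.proto for the actual ones.
        HBaseProtos.ServerName sn = HBaseProtos.ServerName.newBuilder()
            .setHostName("master.example.org")
            .setPort(16000)
            .setStartCode(System.currentTimeMillis())
            .build();

        // newBuilder() now delegates to DEFAULT_INSTANCE.toBuilder(), as in the diff.
        ZooKeeperProtos.Master master = ZooKeeperProtos.Master.newBuilder()
            .setMaster(sn)
            .setRpcVersion(0)
            .setInfoPort(16010)
            .build();

        // Round-trip through the V3 parser wired up by this change.
        byte[] data = master.toByteArray();
        ZooKeeperProtos.Master parsed = ZooKeeperProtos.Master.parseFrom(data);
        System.out.println(parsed.getMaster().getHostName()); // assumed getter
      }
    }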