Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 32bc800bf -> 75ab445eb


HBASE-18448 Added refresh HFiles coprocessor endpoint

Signed-off-by: Michael Stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/75ab445e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/75ab445e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/75ab445e

Branch: refs/heads/branch-1.4
Commit: 75ab445eb1f99f1f44382a71b5f681aeb395c2db
Parents: 32bc800
Author: Ajay Jadhav <jadha...@amazon.com>
Authored: Mon Aug 21 16:31:15 2017 -0700
Committer: Michael Stack <st...@apache.org>
Committed: Thu Aug 24 16:38:52 2017 -0700

----------------------------------------------------------------------
 hbase-examples/pom.xml                          |   1 +
 .../client/example/RefreshHFilesClient.java     |  95 ++
 .../example/RefreshHFilesEndpoint.java          |  86 ++
 .../protobuf/generated/RefreshHFilesProtos.java | 973 +++++++++++++++++++
 .../src/main/protobuf/RefreshHFiles.proto       |  36 +
 .../example/TestRefreshHFilesEndpoint.java      | 180 ++++
 .../hadoop/hbase/HBaseTestingUtility.java       |  18 +
 7 files changed, 1389 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/75ab445e/hbase-examples/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml
index a3d16e0..0a8be15 100644
--- a/hbase-examples/pom.xml
+++ b/hbase-examples/pom.xml
@@ -305,6 +305,7 @@ if we can combine these profiles somehow -->
                     <includes>
                       <include>BulkDelete.proto</include>
                       <include>Examples.proto</include>
+                      <include>RefreshHFiles.proto</include>
                     </includes>
                   </source>
                   <output>${basedir}/src/main/java/</output>

http://git-wip-us.apache.org/repos/asf/hbase/blob/75ab445e/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/RefreshHFilesClient.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/RefreshHFilesClient.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/RefreshHFilesClient.java
new file mode 100644
index 0000000..38f0362
--- /dev/null
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/RefreshHFilesClient.java
@@ -0,0 +1,95 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client.example;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+/**
+ * This client class is for invoking the refresh HFile function deployed on the
+ * Region Server side via the RefreshHFilesService.
+ */
+public class RefreshHFilesClient implements Closeable {
+  private static final Log LOG = LogFactory.getLog(RefreshHFilesClient.class);
+  private final Connection connection;
+
+  /**
+   * Constructor with Conf object
+   *
+   * @param cfg the HBase configuration used to create the underlying connection
+   */
+  public RefreshHFilesClient(Configuration cfg) {
+    try {
+      this.connection = ConnectionFactory.createConnection(cfg);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (this.connection != null && !this.connection.isClosed()) {
+      this.connection.close();
+    }
+  }
+
+  public void refreshHFiles(final TableName tableName) throws Throwable {
+    try (Table table = connection.getTable(tableName)) {
+      refreshHFiles(table);
+    }
+  }
+
+  public void refreshHFiles(final Table table) throws Throwable {
+    final RefreshHFilesProtos.RefreshHFilesRequest request = RefreshHFilesProtos.RefreshHFilesRequest
+                                                               .getDefaultInstance();
+    table.coprocessorService(RefreshHFilesProtos.RefreshHFilesService.class, HConstants.EMPTY_START_ROW,
+                             HConstants.EMPTY_END_ROW,
+                             new Batch.Call<RefreshHFilesProtos.RefreshHFilesService,
+                                             RefreshHFilesProtos.RefreshHFilesResponse>() {
+                               @Override
+                               public RefreshHFilesProtos.RefreshHFilesResponse call(
+                                 RefreshHFilesProtos.RefreshHFilesService refreshHFilesService)
+                                 throws IOException {
+                                 ServerRpcController controller = new ServerRpcController();
+                                 BlockingRpcCallback<RefreshHFilesProtos.RefreshHFilesResponse> rpcCallback =
+                                   new BlockingRpcCallback<>();
+                                 refreshHFilesService.refreshHFiles(controller, request, rpcCallback);
+                                 if (controller.failedOnException()) {
+                                   throw controller.getFailedOn();
+                                 }
+                                 return rpcCallback.get();
+                               }
+                             });
+    LOG.debug("Done refreshing HFiles");
+  }
+}
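
For reference, a minimal caller of this client might look like the sketch
below. It is illustrative only and not part of the commit; the
RefreshHFilesDemo class and "demo_table" name are placeholders.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.example.RefreshHFilesClient;

  public class RefreshHFilesDemo {
    public static void main(String[] args) throws Throwable {
      Configuration conf = HBaseConfiguration.create();
      // RefreshHFilesClient is Closeable, so try-with-resources releases
      // the underlying connection.
      try (RefreshHFilesClient client = new RefreshHFilesClient(conf)) {
        // refreshHFiles() fans the endpoint RPC out to every region of
        // the table; "demo_table" is a placeholder name.
        client.refreshHFiles(TableName.valueOf("demo_table"));
      }
    }
  }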

http://git-wip-us.apache.org/repos/asf/hbase/blob/75ab445e/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RefreshHFilesEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RefreshHFilesEndpoint.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RefreshHFilesEndpoint.java
new file mode 100644
index 0000000..b49ea43
--- /dev/null
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RefreshHFilesEndpoint.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.coprocessor.example;
+
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.protobuf.ResponseConverter;
+import org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos;
+import org.apache.hadoop.hbase.regionserver.Store;
+
+import java.io.IOException;
+
+/**
+ * Coprocessor endpoint to refresh HFiles on replica.
+ * <p>
+ * For the protocol buffer definition of the RefreshHFilesService, see the source file located under
+ * hbase-examples/src/main/protobuf/RefreshHFiles.proto.
+ * </p>
+ */
+public class RefreshHFilesEndpoint extends RefreshHFilesProtos.RefreshHFilesService
+  implements Coprocessor, CoprocessorService {
+  protected static final Log LOG = LogFactory.getLog(RefreshHFilesEndpoint.class);
+  private RegionCoprocessorEnvironment env;
+
+  public RefreshHFilesEndpoint() {
+  }
+
+  @Override
+  public Service getService() {
+    return this;
+  }
+
+  @Override
+  public void refreshHFiles(RpcController controller, RefreshHFilesProtos.RefreshHFilesRequest request,
+                            RpcCallback<RefreshHFilesProtos.RefreshHFilesResponse> done) {
+    try {
+      for (Store store : env.getRegion().getStores()) {
+        LOG.debug("Refreshing HFiles for region: " + store.getRegionInfo().getRegionNameAsString() +
+                    " and store: " + store.getColumnFamilyName() + " class: " + store.getClass());
+        store.refreshStoreFiles();
+      }
+    } catch (IOException ioe) {
+      LOG.error("Exception while trying to refresh store files: ", ioe);
+      ResponseConverter.setControllerException(controller, ioe);
+    }
+    done.run(RefreshHFilesProtos.RefreshHFilesResponse.getDefaultInstance());
+  }
+
+  @Override
+  public void start(CoprocessorEnvironment env) throws IOException {
+    if (env instanceof RegionCoprocessorEnvironment) {
+      this.env = (RegionCoprocessorEnvironment) env;
+    } else {
+      throw new CoprocessorException("Must be loaded on a table region!");
+    }
+  }
+
+  @Override
+  public void stop(CoprocessorEnvironment env) throws IOException {
+  }
+}
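
The endpoint can be wired up either cluster-wide via
hbase.coprocessor.region.classes in hbase-site.xml (the test below uses the
equivalent CoprocessorHost.REGION_COPROCESSOR_CONF_KEY) or per table through
the table descriptor. A per-table sketch, assuming a hypothetical
"demo_table" with a single column family:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HColumnDescriptor;
  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class AttachRefreshHFilesEndpoint {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo_table"));
        desc.addFamily(new HColumnDescriptor("family"));
        // Attach the example endpoint so its service is exposed on every
        // region of this table.
        desc.addCoprocessor(
          "org.apache.hadoop.hbase.coprocessor.example.RefreshHFilesEndpoint");
        admin.createTable(desc);
      }
    }
  }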

http://git-wip-us.apache.org/repos/asf/hbase/blob/75ab445e/hbase-examples/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RefreshHFilesProtos.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RefreshHFilesProtos.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RefreshHFilesProtos.java
new file mode 100644
index 0000000..7430e75
--- /dev/null
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RefreshHFilesProtos.java
@@ -0,0 +1,973 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: RefreshHFiles.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class RefreshHFilesProtos {
+  private RefreshHFilesProtos() {}
+  public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface RefreshHFilesRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.RefreshHFilesRequest}
+   */
+  public static final class RefreshHFilesRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements RefreshHFilesRequestOrBuilder {
+    // Use RefreshHFilesRequest.newBuilder() to construct.
+    private RefreshHFilesRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private RefreshHFilesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final RefreshHFilesRequest defaultInstance;
+    public static RefreshHFilesRequest getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public RefreshHFilesRequest getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private RefreshHFilesRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.internal_static_hbase_pb_RefreshHFilesRequest_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.internal_static_hbase_pb_RefreshHFilesRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest.class, org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<RefreshHFilesRequest> PARSER =
+        new com.google.protobuf.AbstractParser<RefreshHFilesRequest>() {
+      public RefreshHFilesRequest parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new RefreshHFilesRequest(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<RefreshHFilesRequest> getParserForType() {
+      return PARSER;
+    }
+
+    private void initFields() {
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest other = (org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest) obj;
+
+      boolean result = true;
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.RefreshHFilesRequest}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequestOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.internal_static_hbase_pb_RefreshHFilesRequest_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.internal_static_hbase_pb_RefreshHFilesRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest.class, org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.internal_static_hbase_pb_RefreshHFilesRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest build() {
+        org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest result = new org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.RefreshHFilesRequest)
+    }
+
+    static {
+      defaultInstance = new RefreshHFilesRequest(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.RefreshHFilesRequest)
+  }
+
+  public interface RefreshHFilesResponseOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.RefreshHFilesResponse}
+   */
+  public static final class RefreshHFilesResponse extends
+      com.google.protobuf.GeneratedMessage
+      implements RefreshHFilesResponseOrBuilder {
+    // Use RefreshHFilesResponse.newBuilder() to construct.
+    private RefreshHFilesResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private RefreshHFilesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final RefreshHFilesResponse defaultInstance;
+    public static RefreshHFilesResponse getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public RefreshHFilesResponse getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private RefreshHFilesResponse(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.internal_static_hbase_pb_RefreshHFilesResponse_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.internal_static_hbase_pb_RefreshHFilesResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse.class, org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<RefreshHFilesResponse> PARSER =
+        new com.google.protobuf.AbstractParser<RefreshHFilesResponse>() {
+      public RefreshHFilesResponse parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new RefreshHFilesResponse(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<RefreshHFilesResponse> getParserForType() {
+      return PARSER;
+    }
+
+    private void initFields() {
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse other = (org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse) obj;
+
+      boolean result = true;
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.RefreshHFilesResponse}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponseOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.internal_static_hbase_pb_RefreshHFilesResponse_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.internal_static_hbase_pb_RefreshHFilesResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse.class, org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.internal_static_hbase_pb_RefreshHFilesResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse build() {
+        org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse result = new org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.RefreshHFilesResponse)
+    }
+
+    static {
+      defaultInstance = new RefreshHFilesResponse(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.RefreshHFilesResponse)
+  }
+
+  /**
+   * Protobuf service {@code hbase.pb.RefreshHFilesService}
+   */
+  public static abstract class RefreshHFilesService
+      implements com.google.protobuf.Service {
+    protected RefreshHFilesService() {}
+
+    public interface Interface {
+      /**
+       * <code>rpc refreshHFiles(.hbase.pb.RefreshHFilesRequest) returns (.hbase.pb.RefreshHFilesResponse);</code>
+       */
+      public abstract void refreshHFiles(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse> done);
+
+    }
+
+    public static com.google.protobuf.Service newReflectiveService(
+        final Interface impl) {
+      return new RefreshHFilesService() {
+        @java.lang.Override
+        public  void refreshHFiles(
+            com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse> done) {
+          impl.refreshHFiles(controller, request, done);
+        }
+
+      };
+    }
+
+    public static com.google.protobuf.BlockingService
+        newReflectiveBlockingService(final BlockingInterface impl) {
+      return new com.google.protobuf.BlockingService() {
+        public final com.google.protobuf.Descriptors.ServiceDescriptor
+            getDescriptorForType() {
+          return getDescriptor();
+        }
+
+        public final com.google.protobuf.Message callBlockingMethod(
+            com.google.protobuf.Descriptors.MethodDescriptor method,
+            com.google.protobuf.RpcController controller,
+            com.google.protobuf.Message request)
+            throws com.google.protobuf.ServiceException {
+          if (method.getService() != getDescriptor()) {
+            throw new java.lang.IllegalArgumentException(
+              "Service.callBlockingMethod() given method descriptor for " +
+              "wrong service type.");
+          }
+          switch(method.getIndex()) {
+            case 0:
+              return impl.refreshHFiles(controller, (org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest)request);
+            default:
+              throw new java.lang.AssertionError("Can't get here.");
+          }
+        }
+
+        public final com.google.protobuf.Message
+            getRequestPrototype(
+            com.google.protobuf.Descriptors.MethodDescriptor method) {
+          if (method.getService() != getDescriptor()) {
+            throw new java.lang.IllegalArgumentException(
+              "Service.getRequestPrototype() given method " +
+              "descriptor for wrong service type.");
+          }
+          switch(method.getIndex()) {
+            case 0:
+              return org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest.getDefaultInstance();
+            default:
+              throw new java.lang.AssertionError("Can't get here.");
+          }
+        }
+
+        public final com.google.protobuf.Message
+            getResponsePrototype(
+            com.google.protobuf.Descriptors.MethodDescriptor method) {
+          if (method.getService() != getDescriptor()) {
+            throw new java.lang.IllegalArgumentException(
+              "Service.getResponsePrototype() given method " +
+              "descriptor for wrong service type.");
+          }
+          switch(method.getIndex()) {
+            case 0:
+              return org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse.getDefaultInstance();
+            default:
+              throw new java.lang.AssertionError("Can't get here.");
+          }
+        }
+
+      };
+    }
+
+    /**
+     * <code>rpc refreshHFiles(.hbase.pb.RefreshHFilesRequest) returns (.hbase.pb.RefreshHFilesResponse);</code>
+     */
+    public abstract void refreshHFiles(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse> done);
+
+    public static final
+        com.google.protobuf.Descriptors.ServiceDescriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.getDescriptor().getServices().get(0);
+    }
+    public final com.google.protobuf.Descriptors.ServiceDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+
+    public final void callMethod(
+        com.google.protobuf.Descriptors.MethodDescriptor method,
+        com.google.protobuf.RpcController controller,
+        com.google.protobuf.Message request,
+        com.google.protobuf.RpcCallback<
+          com.google.protobuf.Message> done) {
+      if (method.getService() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "Service.callMethod() given method descriptor for wrong " +
+          "service type.");
+      }
+      switch(method.getIndex()) {
+        case 0:
+          this.refreshHFiles(controller, (org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse>specializeCallback(
+              done));
+          return;
+        default:
+          throw new java.lang.AssertionError("Can't get here.");
+      }
+    }
+
+    public final com.google.protobuf.Message
+        getRequestPrototype(
+        com.google.protobuf.Descriptors.MethodDescriptor method) {
+      if (method.getService() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "Service.getRequestPrototype() given method " +
+          "descriptor for wrong service type.");
+      }
+      switch(method.getIndex()) {
+        case 0:
+          return org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest.getDefaultInstance();
+        default:
+          throw new java.lang.AssertionError("Can't get here.");
+      }
+    }
+
+    public final com.google.protobuf.Message
+        getResponsePrototype(
+        com.google.protobuf.Descriptors.MethodDescriptor method) {
+      if (method.getService() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "Service.getResponsePrototype() given method " +
+          "descriptor for wrong service type.");
+      }
+      switch(method.getIndex()) {
+        case 0:
+          return org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse.getDefaultInstance();
+        default:
+          throw new java.lang.AssertionError("Can't get here.");
+      }
+    }
+
+    public static Stub newStub(
+        com.google.protobuf.RpcChannel channel) {
+      return new Stub(channel);
+    }
+
+    public static final class Stub extends org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesService implements Interface {
+      private Stub(com.google.protobuf.RpcChannel channel) {
+        this.channel = channel;
+      }
+
+      private final com.google.protobuf.RpcChannel channel;
+
+      public com.google.protobuf.RpcChannel getChannel() {
+        return channel;
+      }
+
+      public  void refreshHFiles(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(0),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse.class,
+            org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse.getDefaultInstance()));
+      }
+    }
+
+    public static BlockingInterface newBlockingStub(
+        com.google.protobuf.BlockingRpcChannel channel) {
+      return new BlockingStub(channel);
+    }
+
+    public interface BlockingInterface {
+      public org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse refreshHFiles(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest request)
+          throws com.google.protobuf.ServiceException;
+    }
+
+    private static final class BlockingStub implements BlockingInterface {
+      private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
+        this.channel = channel;
+      }
+
+      private final com.google.protobuf.BlockingRpcChannel channel;
+
+      public org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse refreshHFiles(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(0),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesResponse.getDefaultInstance());
+      }
+
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.RefreshHFilesService)
+  }
+
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_RefreshHFilesRequest_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_RefreshHFilesRequest_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_RefreshHFilesResponse_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_RefreshHFilesResponse_fieldAccessorTable;
+
+  public static com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\023RefreshHFiles.proto\022\010hbase.pb\"\026\n\024Refre" +
+      "shHFilesRequest\"\027\n\025RefreshHFilesResponse" +
+      "2h\n\024RefreshHFilesService\022P\n\rrefreshHFile" +
+      "s\022\036.hbase.pb.RefreshHFilesRequest\032\037.hbas" +
+      "e.pb.RefreshHFilesResponseBI\n*org.apache" +
+      ".hadoop.hbase.protobuf.generatedB\023Refres" +
+      "hHFilesProtosH\001\210\001\001\240\001\001"
+    };
+    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+        public com.google.protobuf.ExtensionRegistry assignDescriptors(
+            com.google.protobuf.Descriptors.FileDescriptor root) {
+          descriptor = root;
+          internal_static_hbase_pb_RefreshHFilesRequest_descriptor =
+            getDescriptor().getMessageTypes().get(0);
+          internal_static_hbase_pb_RefreshHFilesRequest_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_RefreshHFilesRequest_descriptor,
+              new java.lang.String[] { });
+          internal_static_hbase_pb_RefreshHFilesResponse_descriptor =
+            getDescriptor().getMessageTypes().get(1);
+          internal_static_hbase_pb_RefreshHFilesResponse_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_RefreshHFilesResponse_descriptor,
+              new java.lang.String[] { });
+          return null;
+        }
+      };
+    com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new com.google.protobuf.Descriptors.FileDescriptor[] {
+        }, assigner);
+  }
+
+  // @@protoc_insertion_point(outer_class_scope)
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/75ab445e/hbase-examples/src/main/protobuf/RefreshHFiles.proto
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/protobuf/RefreshHFiles.proto b/hbase-examples/src/main/protobuf/RefreshHFiles.proto
new file mode 100644
index 0000000..11cbab0
--- /dev/null
+++ b/hbase-examples/src/main/protobuf/RefreshHFiles.proto
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "RefreshHFilesProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+message RefreshHFilesRequest {
+}
+
+message RefreshHFilesResponse {
+}
+
+service RefreshHFilesService {
+    rpc refreshHFiles(RefreshHFilesRequest)
+      returns (RefreshHFilesResponse);
+}
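
Besides the Batch.Call path used by RefreshHFilesClient above, the generated
service can also be invoked through its blocking stub against the single
region hosting a given row. A sketch under that assumption, with placeholder
table and row names:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
  import org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos;
  import org.apache.hadoop.hbase.util.Bytes;

  public class SingleRegionRefresh {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Table table = conn.getTable(TableName.valueOf("demo_table"))) {
        // The channel routes the call to the region that hosts this row.
        CoprocessorRpcChannel channel = table.coprocessorService(Bytes.toBytes("row-key"));
        RefreshHFilesProtos.RefreshHFilesService.BlockingInterface stub =
          RefreshHFilesProtos.RefreshHFilesService.newBlockingStub(channel);
        stub.refreshHFiles(null, RefreshHFilesProtos.RefreshHFilesRequest.getDefaultInstance());
      }
    }
  }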

http://git-wip-us.apache.org/repos/asf/hbase/blob/75ab445e/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRefreshHFilesEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRefreshHFilesEndpoint.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRefreshHFilesEndpoint.java
new file mode 100644
index 0000000..9a4e293
--- /dev/null
+++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRefreshHFilesEndpoint.java
@@ -0,0 +1,180 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.coprocessor.example;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.RetriesExhaustedException;
+import org.apache.hadoop.hbase.client.example.RefreshHFilesClient;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.HFileTestUtil;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.junit.After;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+@Category(MediumTests.class)
+public class TestRefreshHFilesEndpoint {
+  private static final Log LOG = LogFactory.getLog(TestRefreshHFilesEndpoint.class);
+  private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
+  private static final int NUM_MASTER = 1;
+  private static final int NUM_RS = 2;
+  private static final TableName TABLE_NAME = TableName.valueOf("testRefreshRegionHFilesEP");
+  private static final byte[] FAMILY = Bytes.toBytes("family");
+  private static final byte[] QUALIFIER = Bytes.toBytes("qualifier");
+  private static final byte[][] SPLIT_KEY = new byte[][] { Bytes.toBytes("30") };
+  private static final int NUM_ROWS = 5;
+  private static final String HFILE_NAME = "123abcdef";
+
+  private static Configuration CONF = HTU.getConfiguration();
+  private static MiniHBaseCluster cluster;
+  private static HTableDescriptor desc;
+  private static Admin hbaseAdmin;
+  private static HTable table;
+
+  public static void setUp(String regionImpl) {
+    try {
+      CONF.set(HConstants.REGION_IMPL, regionImpl);
+      CONF.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
+
+      CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, RefreshHFilesEndpoint.class.getName());
+      cluster = HTU.startMiniCluster(NUM_MASTER, NUM_RS);
+
+      // Create table
+      desc = new HTableDescriptor(TABLE_NAME);
+      desc.addFamily(new HColumnDescriptor(FAMILY));
+      hbaseAdmin = cluster.getMaster().getConnection().getAdmin();
+      hbaseAdmin.createTable(desc, SPLIT_KEY);
+      table = new HTable(HTU.getConfiguration(), TABLE_NAME);
+
+      // this will create 2 regions spread across slaves
+      HTU.loadNumericRows(table, FAMILY, 1, 20);
+      HTU.flush(TABLE_NAME);
+    } catch (Exception ex) {
+      LOG.error("Couldn't finish setup", ex);
+      fail("Couldn't finish setup");
+    }
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    HTU.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testRefreshRegionHFilesEndpoint() throws Exception {
+    setUp(HRegion.class.getName());
+    Path tableDir = desc.getTableDir(HTU.getDefaultRootDirPath(), TABLE_NAME.toBytes());
+    for (Region region : cluster.getRegions(TABLE_NAME)) {
+      Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName());
+      Path familyDir = new Path(regionDir, Bytes.toString(FAMILY));
+      HFileTestUtil
+        .createHFile(HTU.getConfiguration(), HTU.getTestFileSystem(), new Path(familyDir, HFILE_NAME), FAMILY,
+                     QUALIFIER, Bytes.toBytes("50"), Bytes.toBytes("60"), NUM_ROWS);
+    }
+    assertEquals(2, HTU.getNumHFiles(TABLE_NAME, FAMILY));
+    callRefreshRegionHFilesEndPoint();
+    assertEquals(4, HTU.getNumHFiles(TABLE_NAME, FAMILY));
+  }
+
+  @Test(expected = IOException.class)
+  public void testRefreshRegionHFilesEndpointWithException() throws IOException {
+    setUp(HRegionForRefreshHFilesEP.class.getName());
+    callRefreshRegionHFilesEndPoint();
+  }
+
+  private void callRefreshRegionHFilesEndPoint() throws IOException {
+    try {
+      RefreshHFilesClient refreshHFilesClient = new RefreshHFilesClient(CONF);
+      refreshHFilesClient.refreshHFiles(TABLE_NAME);
+    } catch (RetriesExhaustedException rex) {
+      if (rex.getCause() instanceof IOException)
+        throw new IOException(rex.getCause());
+    } catch (Throwable ex) {
+      LOG.error(ex);
+      fail("Couldn't call the RefreshRegionHFilesEndpoint");
+    }
+  }
+
+  public static class HRegionForRefreshHFilesEP extends HRegion {
+    HStoreWithFaultyRefreshHFilesAPI store;
+
+    public HRegionForRefreshHFilesEP(final Path tableDir, final WAL wal, final FileSystem fs,
+                                     final Configuration confParam, final HRegionInfo regionInfo,
+                                     final HTableDescriptor htd, final RegionServerServices rsServices) {
+      super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices);
+    }
+
+    @Override
+    public List<Store> getStores() {
+      List<Store> list = new ArrayList<Store>(stores.size());
+      /**
+       * This is used to trigger the custom definition (faulty)
+       * of refresh HFiles API.
+       */
+      try {
+        if (this.store == null)
+          store = new HStoreWithFaultyRefreshHFilesAPI(this, new HColumnDescriptor(FAMILY), this.conf);
+        list.add(store);
+      } catch (IOException ioe) {
+        LOG.info("Couldn't instantiate custom store implementation", ioe);
+      }
+
+      list.addAll(stores.values());
+      return list;
+    }
+  }
+
+  public static class HStoreWithFaultyRefreshHFilesAPI extends HStore {
+    public HStoreWithFaultyRefreshHFilesAPI(final HRegion region, final HColumnDescriptor family,
+                                            final Configuration confParam) throws IOException {
+      super(region, family, confParam);
+    }
+
+    @Override
+    public void refreshStoreFiles() throws IOException {
+      throw new IOException();
+    }
+  }
+}
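
The test's fault injection hinges on HConstants.REGION_IMPL: the region server reflectively instantiates whatever HRegion subclass that key names, so the test can substitute a region whose store throws from refreshStoreFiles(). Below is a minimal sketch of wiring that up in isolation; the class and method names (RegionImplFaultInjectionSketch, faultyRegionConf) are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class RegionImplFaultInjectionSketch {
  // Builds a configuration under which every region the (mini) cluster opens
  // is an instance of the faulty subclass above, so any call reaching
  // refreshStoreFiles() surfaces an IOException to the endpoint client.
  static Configuration faultyRegionConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.set(HConstants.REGION_IMPL,
        TestRefreshHFilesEndpoint.HRegionForRefreshHFilesEP.class.getName());
    // Keep client retries low so the expected failure is reported quickly,
    // mirroring the setUp() above.
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
    return conf;
  }
}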

http://git-wip-us.apache.org/repos/asf/hbase/blob/75ab445e/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index f60be66..0e17a96 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -4337,4 +4337,22 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     HBaseKerberosUtils.setKeytabFileForTesting(keytabFile.getAbsolutePath());
     return kdc;
   }
+
+  public int getNumHFiles(final TableName tableName, final byte[] family) {
+    int numHFiles = 0;
+    for (RegionServerThread regionServerThread : getMiniHBaseCluster().getRegionServerThreads()) {
+      numHFiles += getNumHFilesForRS(regionServerThread.getRegionServer(), tableName,
+                                     family);
+    }
+    return numHFiles;
+  }
+
+  public int getNumHFilesForRS(final HRegionServer rs, final TableName tableName,
+                               final byte[] family) {
+    int numHFiles = 0;
+    for (Region region : rs.getOnlineRegions(tableName)) {
+      numHFiles += region.getStore(family).getStorefilesCount();
+    }
+    return numHFiles;
+  }
 }
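
These helpers reduce the before/after assertions in the test above to one-liners. A hedged usage sketch follows; it assumes a running mini cluster, and the class and method names (NumHFilesUsageSketch, countAroundRefresh) plus the "family" column family are illustrative only.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.example.RefreshHFilesClient;
import org.apache.hadoop.hbase.util.Bytes;

public class NumHFilesUsageSketch {
  // Counts the store files of one family across all region servers of a
  // running mini cluster, refreshes them through the new endpoint, and
  // counts again; side-loaded HFiles become visible after the refresh.
  static void countAroundRefresh(HBaseTestingUtility htu, TableName tableName) throws Throwable {
    byte[] family = Bytes.toBytes("family"); // hypothetical family name
    int before = htu.getNumHFiles(tableName, family);
    new RefreshHFilesClient(htu.getConfiguration()).refreshHFiles(tableName);
    int after = htu.getNumHFiles(tableName, family);
    assert after >= before;
  }
}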
