This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1 by this push:
     new d6a3e6b  HBASE-21679 Port HBASE-6028 (Start/Stop compactions at region server level) to branch-1
d6a3e6b is described below

commit d6a3e6b404a775edaf28cefb326e5186fd76b9dd
Author: Andrew Purtell <apurt...@apache.org>
AuthorDate: Thu Jan 10 18:07:36 2019 -0800

    HBASE-21679 Port HBASE-6028 (Start/Stop compactions at region server level) to branch-1
    
    HBASE-6028 Start/Stop compactions at region server level
    
    Add switching on/off of compactions.
    
    Adds a "compaction_switch" command to the hbase shell. Switching off
    compactions will also interrupt any currently ongoing compactions. State
    set from the shell is lost on restart. To persist the change across
    restarts, set hbase.regionserver.compaction.enabled in hbase-site.xml
    and restart.
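    
    For illustration, shell usage might look like the following (the exact
    argument forms are defined by the new compaction_switch.rb command; the
    server name below is a hypothetical example):
    
      hbase> compaction_switch false
      hbase> compaction_switch true 'rs1.example.com,16020,1511900928'
    
    Each invocation returns the previous compaction state of each targeted
    region server.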
---
 .../java/org/apache/hadoop/hbase/client/Admin.java |   13 +
 .../org/apache/hadoop/hbase/client/HBaseAdmin.java |   47 +
 hbase-common/src/main/resources/hbase-default.xml  |    7 +
 .../hbase/protobuf/generated/AdminProtos.java      | 1268 +++++++++++++++++---
 hbase-protocol/src/main/protobuf/Admin.proto       |   11 +
 .../hbase/regionserver/CompactSplitThread.java     |  149 ++-
 .../hbase/regionserver/CompactionRequestor.java    |    5 +
 .../apache/hadoop/hbase/regionserver/HRegion.java  |   23 +-
 .../hadoop/hbase/regionserver/RSRpcServices.java   |   22 +
 .../apache/hadoop/hbase/regionserver/Region.java   |    2 +-
 .../hadoop/hbase/master/MockRegionServer.java      |    8 +
 .../hadoop/hbase/regionserver/TestCompaction.java  |   86 +-
 .../replication/regionserver/TestReplicator.java   |    6 +
 hbase-shell/src/main/ruby/hbase/admin.rb           |   17 +-
 hbase-shell/src/main/ruby/shell.rb                 |    1 +
 .../main/ruby/shell/commands/compaction_switch.rb  |   53 +
 src/main/asciidoc/_chapters/architecture.adoc      |    8 +
 17 files changed, 1517 insertions(+), 209 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 8f5e150..ea87457 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -634,6 +634,19 @@ public interface Admin extends Abortable, Closeable {
     throws IOException;
 
   /**
+   * Turn compactions on or off. Disabling compactions will also interrupt any currently ongoing
+   * compactions. The switch is ephemeral: it is lost on restart of the server. Compactions can
+   * also be enabled or disabled persistently by setting hbase.regionserver.compaction.enabled
+   * in hbase-site.xml.
+   *
+   * @param switchState     Set to <code>true</code> to enable, <code>false</code> to disable.
+   * @param serverNamesList list of region servers to switch; if empty, the switch is applied to all region servers.
+   * @return Previous compaction states for the region servers
+   */
+  Map<ServerName, Boolean> compactionSwitch(boolean switchState, List<String> serverNamesList)
+      throws IOException;
+
+ /**
    * Compact all regions on the region server
    * @param sn the region server name
    * @param major if it's major compaction
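
A minimal Java usage sketch of the interface method added above (not part of
the patch): it assumes an open Connection named "conn" and the usual client
imports (java.util.Collections, java.util.Map, org.apache.hadoop.hbase.ServerName,
org.apache.hadoop.hbase.client.*). Per the HBaseAdmin implementation below,
an empty server list applies the switch to all region servers.

    try (Admin admin = conn.getAdmin()) {
      // Disable compactions cluster-wide; the returned map records each
      // region server's previous on/off state so it can be restored later.
      Map<ServerName, Boolean> prevStates =
          admin.compactionSwitch(false, Collections.<String>emptyList());
      for (Map.Entry<ServerName, Boolean> entry : prevStates.entrySet()) {
        System.out.println(entry.getKey() + " previously enabled: " + entry.getValue());
      }
    }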
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index fafc9fe..7e624cc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -83,6 +83,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
@@ -2052,6 +2054,51 @@ public class HBaseAdmin implements Admin {
    * {@inheritDoc}
    */
   @Override
+  public Map<ServerName, Boolean> compactionSwitch(boolean switchState, List<String>
+      serverNamesList) throws IOException {
+    List<ServerName> serverList = new ArrayList<>();
+    if (serverNamesList.isEmpty()) {
+      ClusterStatus status = getClusterStatus();
+      serverList.addAll(status.getServers());
+    } else {
+      for (String regionServerName : serverNamesList) {
+        ServerName serverName = null;
+        try {
+          serverName = ServerName.valueOf(regionServerName);
+        } catch (Exception e) {
+          throw new IllegalArgumentException(String.format("Invalid format for regionserver:%s ",
+            regionServerName));
+        }
+        if (serverName == null) {
+          throw new IllegalArgumentException(String.format("Invalid regionserver:%s ",
+            regionServerName));
+        }
+        serverList.add(serverName);
+      }
+    }
+    HBaseRpcController controller = rpcControllerFactory.newController();
+    Map<ServerName, Boolean> res = new HashMap<>(serverList.size());
+    for (ServerName serverName : serverList) {
+      AdminService.BlockingInterface admin = this.connection.getAdmin(serverName);
+      try {
+        CompactionSwitchRequest request =
+            CompactionSwitchRequest.newBuilder().setEnabled(switchState).build();
+        CompactionSwitchResponse compactionSwitchResponse =
+            admin.compactionSwitch(controller, request);
+        boolean prevState = compactionSwitchResponse.getPrevState();
+        res.put(serverName, prevState);
+      } catch (ServiceException se) {
+        throw ProtobufUtil.getRemoteException(se);
+      }
+    }
+    return res;
+  }
+
+  /**
+   *
+   * {@inheritDoc}
+   */
+  @Override
   public void compactRegionServer(final ServerName sn, boolean major)
   throws IOException, InterruptedException {
     for (HRegionInfo region : getOnlineRegions(sn)) {
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index 78237b1..6835d84 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -721,6 +721,13 @@ possible configurations would overwhelm and obscure the important.
     put off compaction but when it runs, it takes longer to complete.</description>
   </property>
   <property>
+    <name>hbase.regionserver.compaction.enabled</name>
+    <value>true</value>
+    <description>Enable or disable compactions by setting this to true or false.
+      Compactions can also be switched dynamically with the
+      compaction_switch shell command.</description>
+  </property>
+  <property>
     <name>hbase.hstore.flusher.count</name>
     <value>2</value>
     <description>
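
To persist the switch across region server restarts, as the commit message
notes, the same property can be set in hbase-site.xml; a minimal sketch with
compactions disabled:

  <property>
    <name>hbase.regionserver.compaction.enabled</name>
    <value>false</value>
  </property>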
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
index 1c59ea6..b69b157 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
@@ -12506,6 +12506,890 @@ public final class AdminProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.CompactRegionResponse)
   }
 
+  public interface CompactionSwitchRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required bool enabled = 1;
+    /**
+     * <code>required bool enabled = 1;</code>
+     */
+    boolean hasEnabled();
+    /**
+     * <code>required bool enabled = 1;</code>
+     */
+    boolean getEnabled();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.CompactionSwitchRequest}
+   */
+  public static final class CompactionSwitchRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements CompactionSwitchRequestOrBuilder {
+    // Use CompactionSwitchRequest.newBuilder() to construct.
+    private CompactionSwitchRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private CompactionSwitchRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final CompactionSwitchRequest defaultInstance;
+    public static CompactionSwitchRequest getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public CompactionSwitchRequest getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private CompactionSwitchRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              enabled_ = input.readBool();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_CompactionSwitchRequest_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_CompactionSwitchRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<CompactionSwitchRequest> PARSER =
+        new com.google.protobuf.AbstractParser<CompactionSwitchRequest>() {
+      public CompactionSwitchRequest parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new CompactionSwitchRequest(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<CompactionSwitchRequest> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required bool enabled = 1;
+    public static final int ENABLED_FIELD_NUMBER = 1;
+    private boolean enabled_;
+    /**
+     * <code>required bool enabled = 1;</code>
+     */
+    public boolean hasEnabled() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required bool enabled = 1;</code>
+     */
+    public boolean getEnabled() {
+      return enabled_;
+    }
+
+    private void initFields() {
+      enabled_ = false;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasEnabled()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBool(1, enabled_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(1, enabled_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest) obj;
+
+      boolean result = true;
+      result = result && (hasEnabled() == other.hasEnabled());
+      if (hasEnabled()) {
+        result = result && (getEnabled()
+            == other.getEnabled());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasEnabled()) {
+        hash = (37 * hash) + ENABLED_FIELD_NUMBER;
+        hash = (53 * hash) + hashBoolean(getEnabled());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.CompactionSwitchRequest}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequestOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_CompactionSwitchRequest_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_CompactionSwitchRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        enabled_ = false;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_CompactionSwitchRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest build() {
+        org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.enabled_ = enabled_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest.getDefaultInstance()) return this;
+        if (other.hasEnabled()) {
+          setEnabled(other.getEnabled());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasEnabled()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required bool enabled = 1;
+      private boolean enabled_ ;
+      /**
+       * <code>required bool enabled = 1;</code>
+       */
+      public boolean hasEnabled() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required bool enabled = 1;</code>
+       */
+      public boolean getEnabled() {
+        return enabled_;
+      }
+      /**
+       * <code>required bool enabled = 1;</code>
+       */
+      public Builder setEnabled(boolean value) {
+        bitField0_ |= 0x00000001;
+        enabled_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required bool enabled = 1;</code>
+       */
+      public Builder clearEnabled() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        enabled_ = false;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.CompactionSwitchRequest)
+    }
+
+    static {
+      defaultInstance = new CompactionSwitchRequest(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.CompactionSwitchRequest)
+  }
+
+  public interface CompactionSwitchResponseOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required bool prev_state = 1;
+    /**
+     * <code>required bool prev_state = 1;</code>
+     */
+    boolean hasPrevState();
+    /**
+     * <code>required bool prev_state = 1;</code>
+     */
+    boolean getPrevState();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.CompactionSwitchResponse}
+   */
+  public static final class CompactionSwitchResponse extends
+      com.google.protobuf.GeneratedMessage
+      implements CompactionSwitchResponseOrBuilder {
+    // Use CompactionSwitchResponse.newBuilder() to construct.
+    private CompactionSwitchResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private CompactionSwitchResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final CompactionSwitchResponse defaultInstance;
+    public static CompactionSwitchResponse getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public CompactionSwitchResponse getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private CompactionSwitchResponse(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              prevState_ = input.readBool();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_CompactionSwitchResponse_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_CompactionSwitchResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<CompactionSwitchResponse> PARSER =
+        new com.google.protobuf.AbstractParser<CompactionSwitchResponse>() {
+      public CompactionSwitchResponse parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new CompactionSwitchResponse(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<CompactionSwitchResponse> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required bool prev_state = 1;
+    public static final int PREV_STATE_FIELD_NUMBER = 1;
+    private boolean prevState_;
+    /**
+     * <code>required bool prev_state = 1;</code>
+     */
+    public boolean hasPrevState() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required bool prev_state = 1;</code>
+     */
+    public boolean getPrevState() {
+      return prevState_;
+    }
+
+    private void initFields() {
+      prevState_ = false;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasPrevState()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBool(1, prevState_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(1, prevState_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse) obj;
+
+      boolean result = true;
+      result = result && (hasPrevState() == other.hasPrevState());
+      if (hasPrevState()) {
+        result = result && (getPrevState()
+            == other.getPrevState());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasPrevState()) {
+        hash = (37 * hash) + PREV_STATE_FIELD_NUMBER;
+        hash = (53 * hash) + hashBoolean(getPrevState());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.CompactionSwitchResponse}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponseOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_CompactionSwitchResponse_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_CompactionSwitchResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        prevState_ = false;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_CompactionSwitchResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse build() {
+        org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.prevState_ = prevState_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse.getDefaultInstance()) return this;
+        if (other.hasPrevState()) {
+          setPrevState(other.getPrevState());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasPrevState()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required bool prev_state = 1;
+      private boolean prevState_ ;
+      /**
+       * <code>required bool prev_state = 1;</code>
+       */
+      public boolean hasPrevState() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required bool prev_state = 1;</code>
+       */
+      public boolean getPrevState() {
+        return prevState_;
+      }
+      /**
+       * <code>required bool prev_state = 1;</code>
+       */
+      public Builder setPrevState(boolean value) {
+        bitField0_ |= 0x00000001;
+        prevState_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required bool prev_state = 1;</code>
+       */
+      public Builder clearPrevState() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        prevState_ = false;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.CompactionSwitchResponse)
+    }
+
+    static {
+      defaultInstance = new CompactionSwitchResponse(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.CompactionSwitchResponse)
+  }
+
   public interface UpdateFavoredNodesRequestOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
@@ -22493,6 +23377,14 @@ public final class AdminProtos {
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse> done);
 
       /**
+       * <code>rpc CompactionSwitch(.hbase.pb.CompactionSwitchRequest) returns (.hbase.pb.CompactionSwitchResponse);</code>
+       */
+      public abstract void compactionSwitch(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse> done);
+
+      /**
       * <code>rpc CompactRegion(.hbase.pb.CompactRegionRequest) returns (.hbase.pb.CompactRegionResponse);</code>
        */
       public abstract void compactRegion(
@@ -22634,6 +23526,14 @@ public final class AdminProtos {
         }
 
         @java.lang.Override
+        public  void compactionSwitch(
+            com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse> done) {
+          impl.compactionSwitch(controller, request, done);
+        }
+
+        @java.lang.Override
         public  void compactRegion(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest request,
@@ -22744,22 +23644,24 @@ public final class AdminProtos {
            case 7:
              return impl.splitRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest)request);
            case 8:
-              return impl.compactRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest)request);
+              return impl.compactionSwitch(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest)request);
            case 9:
-              return impl.mergeRegions(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest)request);
+              return impl.compactRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest)request);
            case 10:
-              return impl.replicateWALEntry(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request);
+              return impl.mergeRegions(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest)request);
            case 11:
-              return impl.replay(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request);
+              return impl.replicateWALEntry(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request);
            case 12:
-              return impl.rollWALWriter(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest)request);
+              return impl.replay(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request);
            case 13:
-              return impl.getServerInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest)request);
+              return impl.rollWALWriter(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest)request);
            case 14:
-              return impl.stopServer(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest)request);
+              return impl.getServerInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest)request);
            case 15:
-              return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request);
+              return impl.stopServer(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest)request);
            case 16:
+              return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request);
+            case 17:
              return impl.updateConfiguration(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -22792,22 +23694,24 @@ public final class AdminProtos {
            case 7:
              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance();
            case 8:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest.getDefaultInstance();
            case 9:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance();
            case 10:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance();
            case 11:
              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance();
            case 12:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance();
            case 13:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance();
            case 14:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance();
            case 15:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance();
            case 16:
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance();
+            case 17:
              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -22840,22 +23744,24 @@ public final class AdminProtos {
            case 7:
              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance();
            case 8:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse.getDefaultInstance();
            case 9:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance();
            case 10:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance();
            case 11:
              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance();
            case 12:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance();
            case 13:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance();
            case 14:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance();
            case 15:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance();
            case 16:
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance();
+            case 17:
              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -22930,6 +23836,14 @@ public final class AdminProtos {
        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse> done);
 
     /**
+     * <code>rpc CompactionSwitch(.hbase.pb.CompactionSwitchRequest) returns (.hbase.pb.CompactionSwitchResponse);</code>
+     */
+    public abstract void compactionSwitch(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse> done);
+
+    /**
     * <code>rpc CompactRegion(.hbase.pb.CompactRegionRequest) returns (.hbase.pb.CompactRegionResponse);</code>
      */
     public abstract void compactRegion(
@@ -23064,46 +23978,51 @@ public final class AdminProtos {
              done));
          return;
        case 8:
+          this.compactionSwitch(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse>specializeCallback(
+              done));
+          return;
+        case 9:
          this.compactRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse>specializeCallback(
              done));
          return;
-        case 9:
+        case 10:
          this.mergeRegions(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse>specializeCallback(
              done));
          return;
-        case 10:
+        case 11:
          this.replicateWALEntry(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse>specializeCallback(
              done));
          return;
-        case 11:
+        case 12:
          this.replay(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse>specializeCallback(
              done));
          return;
-        case 12:
+        case 13:
          this.rollWALWriter(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse>specializeCallback(
              done));
          return;
-        case 13:
+        case 14:
          this.getServerInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse>specializeCallback(
              done));
          return;
-        case 14:
+        case 15:
          this.stopServer(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse>specializeCallback(
              done));
          return;
-        case 15:
+        case 16:
          this.updateFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse>specializeCallback(
              done));
          return;
-        case 16:
+        case 17:
          this.updateConfiguration(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse>specializeCallback(
              done));
@@ -23139,22 +24058,24 @@ public final class AdminProtos {
         case 7:
           return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance();
         case 8:
-          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance();
+          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest.getDefaultInstance();
         case 9:
-          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance();
+          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance();
         case 10:
-          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance();
+          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance();
         case 11:
           return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance();
         case 12:
-          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance();
+          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance();
         case 13:
-          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance();
+          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance();
         case 14:
-          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance();
+          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance();
         case 15:
-          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance();
+          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance();
         case 16:
+          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance();
+        case 17:
           return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -23187,22 +24108,24 @@ public final class AdminProtos {
         case 7:
           return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance();
         case 8:
-          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance();
+          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse.getDefaultInstance();
         case 9:
-          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance();
+          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance();
         case 10:
-          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance();
+          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance();
         case 11:
           return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance();
         case 12:
-          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance();
+          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance();
         case 13:
-          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance();
+          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance();
         case 14:
-          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance();
+          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance();
         case 15:
-          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance();
+          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance();
         case 16:
+          return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance();
+        case 17:
           return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -23345,12 +24268,27 @@ public final class AdminProtos {
             
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance()));
       }
 
+      public  void compactionSwitch(
+          com.google.protobuf.RpcController controller,
+          
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest 
request,
+          
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse>
 done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(8),
+          controller,
+          request,
+          
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse.class,
+            
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse.getDefaultInstance()));
+      }
+
       public  void compactRegion(
           com.google.protobuf.RpcController controller,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest 
request,
           
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse>
 done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(8),
+          getDescriptor().getMethods().get(9),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(),
@@ -23365,7 +24303,7 @@ public final class AdminProtos {
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest 
request,
           
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse>
 done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(9),
+          getDescriptor().getMethods().get(10),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(),
@@ -23380,7 +24318,7 @@ public final class AdminProtos {
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest 
request,
           
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse>
 done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(10),
+          getDescriptor().getMethods().get(11),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(),
@@ -23395,7 +24333,7 @@ public final class AdminProtos {
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest 
request,
           
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse>
 done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(11),
+          getDescriptor().getMethods().get(12),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(),
@@ -23410,7 +24348,7 @@ public final class AdminProtos {
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest 
request,
           
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse>
 done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(12),
+          getDescriptor().getMethods().get(13),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(),
@@ -23425,7 +24363,7 @@ public final class AdminProtos {
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest 
request,
           
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse>
 done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(13),
+          getDescriptor().getMethods().get(14),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(),
@@ -23440,7 +24378,7 @@ public final class AdminProtos {
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest 
request,
           
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse>
 done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(14),
+          getDescriptor().getMethods().get(15),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(),
@@ -23455,7 +24393,7 @@ public final class AdminProtos {
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest
 request,
           
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse>
 done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(15),
+          getDescriptor().getMethods().get(16),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(),
@@ -23470,7 +24408,7 @@ public final class AdminProtos {
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest
 request,
           
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse>
 done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(16),
+          getDescriptor().getMethods().get(17),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(),
@@ -23527,6 +24465,11 @@ public final class AdminProtos {
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest 
request)
           throws com.google.protobuf.ServiceException;
 
+      public 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse 
compactionSwitch(
+          com.google.protobuf.RpcController controller,
+          
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest 
request)
+          throws com.google.protobuf.ServiceException;
+
       public 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse 
compactRegion(
           com.google.protobuf.RpcController controller,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest 
request)
@@ -23676,12 +24619,24 @@ public final class AdminProtos {
       }
 
 
+      public 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse 
compactionSwitch(
+          com.google.protobuf.RpcController controller,
+          
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest 
request)
+          throws com.google.protobuf.ServiceException {
+        return 
(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse)
 channel.callBlockingMethod(
+          getDescriptor().getMethods().get(8),
+          controller,
+          request,
+          
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse.getDefaultInstance());
+      }
+
+
       public 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse 
compactRegion(
           com.google.protobuf.RpcController controller,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest 
request)
           throws com.google.protobuf.ServiceException {
         return 
(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse) 
channel.callBlockingMethod(
-          getDescriptor().getMethods().get(8),
+          getDescriptor().getMethods().get(9),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance());
@@ -23693,7 +24648,7 @@ public final class AdminProtos {
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest 
request)
           throws com.google.protobuf.ServiceException {
         return 
(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse) 
channel.callBlockingMethod(
-          getDescriptor().getMethods().get(9),
+          getDescriptor().getMethods().get(10),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance());
@@ -23705,7 +24660,7 @@ public final class AdminProtos {
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest 
request)
           throws com.google.protobuf.ServiceException {
         return 
(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse)
 channel.callBlockingMethod(
-          getDescriptor().getMethods().get(10),
+          getDescriptor().getMethods().get(11),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance());
@@ -23717,7 +24672,7 @@ public final class AdminProtos {
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest 
request)
           throws com.google.protobuf.ServiceException {
         return 
(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse)
 channel.callBlockingMethod(
-          getDescriptor().getMethods().get(11),
+          getDescriptor().getMethods().get(12),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance());
@@ -23729,7 +24684,7 @@ public final class AdminProtos {
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest 
request)
           throws com.google.protobuf.ServiceException {
         return 
(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse) 
channel.callBlockingMethod(
-          getDescriptor().getMethods().get(12),
+          getDescriptor().getMethods().get(13),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance());
@@ -23741,7 +24696,7 @@ public final class AdminProtos {
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest 
request)
           throws com.google.protobuf.ServiceException {
         return 
(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse) 
channel.callBlockingMethod(
-          getDescriptor().getMethods().get(13),
+          getDescriptor().getMethods().get(14),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance());
@@ -23753,7 +24708,7 @@ public final class AdminProtos {
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest 
request)
           throws com.google.protobuf.ServiceException {
         return 
(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse) 
channel.callBlockingMethod(
-          getDescriptor().getMethods().get(14),
+          getDescriptor().getMethods().get(15),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance());
@@ -23765,7 +24720,7 @@ public final class AdminProtos {
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest
 request)
           throws com.google.protobuf.ServiceException {
         return 
(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse)
 channel.callBlockingMethod(
-          getDescriptor().getMethods().get(15),
+          getDescriptor().getMethods().get(16),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance());
@@ -23777,7 +24732,7 @@ public final class AdminProtos {
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest
 request)
           throws com.google.protobuf.ServiceException {
         return 
(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse)
 channel.callBlockingMethod(
-          getDescriptor().getMethods().get(16),
+          getDescriptor().getMethods().get(17),
           controller,
           request,
           
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance());
@@ -23884,6 +24839,16 @@ public final class AdminProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_CompactRegionResponse_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_CompactionSwitchRequest_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_CompactionSwitchRequest_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_CompactionSwitchResponse_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_CompactionSwitchResponse_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_UpdateFavoredNodesRequest_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -24024,71 +24989,76 @@ public final class AdminProtos {
       "CompactRegionRequest\022)\n\006region\030\001 \002(\0132\031.h" +
       "base.pb.RegionSpecifier\022\r\n\005major\030\002 \001(\010\022\016" +
       "\n\006family\030\003 \001(\014\"\027\n\025CompactRegionResponse\"" +
-      "\315\001\n\031UpdateFavoredNodesRequest\022I\n\013update_" +
-      "info\030\001 \003(\01324.hbase.pb.UpdateFavoredNodes" +
-      "Request.RegionUpdateInfo\032e\n\020RegionUpdate",
-      "Info\022$\n\006region\030\001 \002(\0132\024.hbase.pb.RegionIn" +
-      "fo\022+\n\rfavored_nodes\030\002 \003(\0132\024.hbase.pb.Ser" +
-      "verName\".\n\032UpdateFavoredNodesResponse\022\020\n" +
-      "\010response\030\001 \001(\r\"\244\001\n\023MergeRegionsRequest\022" +
-      "+\n\010region_a\030\001 \002(\0132\031.hbase.pb.RegionSpeci" +
-      "fier\022+\n\010region_b\030\002 \002(\0132\031.hbase.pb.Region" +
-      "Specifier\022\027\n\010forcible\030\003 
\001(\010:\005false\022\032\n\022ma" +
-      "ster_system_time\030\004 \001(\004\"\026\n\024MergeRegionsRe" +
-      "sponse\"a\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020.hbase." 
+
-      "pb.WALKey\022\027\n\017key_value_bytes\030\002 
\003(\014\022\035\n\025as",
-      "sociated_cell_count\030\003 \001(\005\"\242\001\n\030ReplicateW" +
-      "ALEntryRequest\022!\n\005entry\030\001 \003(\0132\022.hbase.pb" +
-      ".WALEntry\022\034\n\024replicationClusterId\030\002 \001(\t\022" +
-      "\"\n\032sourceBaseNamespaceDirPath\030\003 \001(\t\022!\n\031s" +
-      "ourceHFileArchiveDirPath\030\004 \001(\t\"\033\n\031Replic" +
-      "ateWALEntryResponse\"\026\n\024RollWALWriterRequ" +
-      "est\"0\n\025RollWALWriterResponse\022\027\n\017region_t" +
-      "o_flush\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006re" +
-      "ason\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n\024GetS" +
-      "erverInfoRequest\"K\n\nServerInfo\022)\n\013server",
-      "_name\030\001 \002(\0132\024.hbase.pb.ServerName\022\022\n\nweb" +
-      "ui_port\030\002 \001(\r\"B\n\025GetServerInfoResponse\022)" +
-      "\n\013server_info\030\001 \002(\0132\024.hbase.pb.ServerInf" +
-      "o\"\034\n\032UpdateConfigurationRequest\"\035\n\033Updat" +
-      "eConfigurationResponse2\207\013\n\014AdminService\022" +
-      "P\n\rGetRegionInfo\022\036.hbase.pb.GetRegionInf" +
-      "oRequest\032\037.hbase.pb.GetRegionInfoRespons" +
-      "e\022M\n\014GetStoreFile\022\035.hbase.pb.GetStoreFil" +
-      "eRequest\032\036.hbase.pb.GetStoreFileResponse" +
-      "\022V\n\017GetOnlineRegion\022 .hbase.pb.GetOnline",
-      "RegionRequest\032!.hbase.pb.GetOnlineRegion" +
-      "Response\022G\n\nOpenRegion\022\033.hbase.pb.OpenRe" +
-      "gionRequest\032\034.hbase.pb.OpenRegionRespons" +
-      "e\022M\n\014WarmupRegion\022\035.hbase.pb.WarmupRegio" +
-      "nRequest\032\036.hbase.pb.WarmupRegionResponse" +
-      "\022J\n\013CloseRegion\022\034.hbase.pb.CloseRegionRe" +
-      "quest\032\035.hbase.pb.CloseRegionResponse\022J\n\013" +
-      "FlushRegion\022\034.hbase.pb.FlushRegionReques" +
-      "t\032\035.hbase.pb.FlushRegionResponse\022J\n\013Spli" +
-      "tRegion\022\034.hbase.pb.SplitRegionRequest\032\035.",
-      "hbase.pb.SplitRegionResponse\022P\n\rCompactR" +
-      "egion\022\036.hbase.pb.CompactRegionRequest\032\037." +
-      "hbase.pb.CompactRegionResponse\022M\n\014MergeR" +
-      "egions\022\035.hbase.pb.MergeRegionsRequest\032\036." +
-      "hbase.pb.MergeRegionsResponse\022\\\n\021Replica" +
-      "teWALEntry\022\".hbase.pb.ReplicateWALEntryR" +
-      "equest\032#.hbase.pb.ReplicateWALEntryRespo" +
-      "nse\022Q\n\006Replay\022\".hbase.pb.ReplicateWALEnt" +
-      "ryRequest\032#.hbase.pb.ReplicateWALEntryRe" +
-      "sponse\022P\n\rRollWALWriter\022\036.hbase.pb.RollW",
-      "ALWriterRequest\032\037.hbase.pb.RollWALWriter" +
-      "Response\022P\n\rGetServerInfo\022\036.hbase.pb.Get" +
-      "ServerInfoRequest\032\037.hbase.pb.GetServerIn" +
-      "foResponse\022G\n\nStopServer\022\033.hbase.pb.Stop" +
-      "ServerRequest\032\034.hbase.pb.StopServerRespo" +
-      "nse\022_\n\022UpdateFavoredNodes\022#.hbase.pb.Upd" +
-      "ateFavoredNodesRequest\032$.hbase.pb.Update" +
-      "FavoredNodesResponse\022b\n\023UpdateConfigurat" +
-      "ion\022$.hbase.pb.UpdateConfigurationReques" +
-      "t\032%.hbase.pb.UpdateConfigurationResponse",
-      "BA\n*org.apache.hadoop.hbase.protobuf.gen" +
-      "eratedB\013AdminProtosH\001\210\001\001\240\001\001"
+      "*\n\027CompactionSwitchRequest\022\017\n\007enabled\030\001 " +
+      "\002(\010\".\n\030CompactionSwitchResponse\022\022\n\nprev_" +
+      "state\030\001 \002(\010\"\315\001\n\031UpdateFavoredNodesReques",
+      "t\022I\n\013update_info\030\001 \003(\01324.hbase.pb.Update" +
+      "FavoredNodesRequest.RegionUpdateInfo\032e\n\020" +
+      "RegionUpdateInfo\022$\n\006region\030\001 \002(\0132\024.hbase" +
+      ".pb.RegionInfo\022+\n\rfavored_nodes\030\002 \003(\0132\024." +
+      "hbase.pb.ServerName\".\n\032UpdateFavoredNode" +
+      "sResponse\022\020\n\010response\030\001 
\001(\r\"\244\001\n\023MergeReg" +
+      "ionsRequest\022+\n\010region_a\030\001 \002(\0132\031.hbase.pb" +
+      ".RegionSpecifier\022+\n\010region_b\030\002 \002(\0132\031.hba" +
+      "se.pb.RegionSpecifier\022\027\n\010forcible\030\003 \001(\010:" +
+      "\005false\022\032\n\022master_system_time\030\004 
\001(\004\"\026\n\024Me",
+      "rgeRegionsResponse\"a\n\010WALEntry\022\035\n\003key\030\001 " +
+      "\002(\0132\020.hbase.pb.WALKey\022\027\n\017key_value_bytes" +
+      "\030\002 \003(\014\022\035\n\025associated_cell_count\030\003 
\001(\005\"\242\001" +
+      "\n\030ReplicateWALEntryRequest\022!\n\005entry\030\001 \003(" +
+      "\0132\022.hbase.pb.WALEntry\022\034\n\024replicationClus" +
+      "terId\030\002 \001(\t\022\"\n\032sourceBaseNamespaceDirPat" +
+      "h\030\003 \001(\t\022!\n\031sourceHFileArchiveDirPath\030\004 \001" +
+      "(\t\"\033\n\031ReplicateWALEntryResponse\"\026\n\024RollW" +
+      "ALWriterRequest\"0\n\025RollWALWriterResponse" +
+      "\022\027\n\017region_to_flush\030\001 \003(\014\"#\n\021StopServerR",
+      "equest\022\016\n\006reason\030\001 \002(\t\"\024\n\022StopServerResp" +
+      "onse\"\026\n\024GetServerInfoRequest\"K\n\nServerIn" +
+      "fo\022)\n\013server_name\030\001 \002(\0132\024.hbase.pb.Serve" +
+      "rName\022\022\n\nwebui_port\030\002 \001(\r\"B\n\025GetServerIn" +
+      "foResponse\022)\n\013server_info\030\001 \002(\0132\024.hbase." +
+      "pb.ServerInfo\"\034\n\032UpdateConfigurationRequ" +
+      "est\"\035\n\033UpdateConfigurationResponse2\342\013\n\014A" +
+      "dminService\022P\n\rGetRegionInfo\022\036.hbase.pb." +
+      "GetRegionInfoRequest\032\037.hbase.pb.GetRegio" +
+      "nInfoResponse\022M\n\014GetStoreFile\022\035.hbase.pb",
+      ".GetStoreFileRequest\032\036.hbase.pb.GetStore" +
+      "FileResponse\022V\n\017GetOnlineRegion\022 .hbase." +
+      "pb.GetOnlineRegionRequest\032!.hbase.pb.Get" +
+      "OnlineRegionResponse\022G\n\nOpenRegion\022\033.hba" +
+      "se.pb.OpenRegionRequest\032\034.hbase.pb.OpenR" +
+      "egionResponse\022M\n\014WarmupRegion\022\035.hbase.pb" +
+      ".WarmupRegionRequest\032\036.hbase.pb.WarmupRe" +
+      "gionResponse\022J\n\013CloseRegion\022\034.hbase.pb.C" +
+      "loseRegionRequest\032\035.hbase.pb.CloseRegion" +
+      "Response\022J\n\013FlushRegion\022\034.hbase.pb.Flush",
+      "RegionRequest\032\035.hbase.pb.FlushRegionResp" +
+      "onse\022J\n\013SplitRegion\022\034.hbase.pb.SplitRegi" +
+      "onRequest\032\035.hbase.pb.SplitRegionResponse" +
+      "\022Y\n\020CompactionSwitch\022!.hbase.pb.Compacti" +
+      "onSwitchRequest\032\".hbase.pb.CompactionSwi" +
+      "tchResponse\022P\n\rCompactRegion\022\036.hbase.pb." +
+      "CompactRegionRequest\032\037.hbase.pb.CompactR" +
+      "egionResponse\022M\n\014MergeRegions\022\035.hbase.pb" +
+      ".MergeRegionsRequest\032\036.hbase.pb.MergeReg" +
+      "ionsResponse\022\\\n\021ReplicateWALEntry\022\".hbas",
+      "e.pb.ReplicateWALEntryRequest\032#.hbase.pb" +
+      ".ReplicateWALEntryResponse\022Q\n\006Replay\022\".h" +
+      "base.pb.ReplicateWALEntryRequest\032#.hbase" +
+      ".pb.ReplicateWALEntryResponse\022P\n\rRollWAL" +
+      "Writer\022\036.hbase.pb.RollWALWriterRequest\032\037" +
+      ".hbase.pb.RollWALWriterResponse\022P\n\rGetSe" +
+      "rverInfo\022\036.hbase.pb.GetServerInfoRequest" +
+      "\032\037.hbase.pb.GetServerInfoResponse\022G\n\nSto" +
+      "pServer\022\033.hbase.pb.StopServerRequest\032\034.h" +
+      "base.pb.StopServerResponse\022_\n\022UpdateFavo",
+      "redNodes\022#.hbase.pb.UpdateFavoredNodesRe" +
+      "quest\032$.hbase.pb.UpdateFavoredNodesRespo" +
+      "nse\022b\n\023UpdateConfiguration\022$.hbase.pb.Up" +
+      "dateConfigurationRequest\032%.hbase.pb.Upda" +
+      "teConfigurationResponseBA\n*org.apache.ha" +
+      "doop.hbase.protobuf.generatedB\013AdminProt" +
+      "osH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner 
assigner =
       new 
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -24209,8 +25179,20 @@ public final class AdminProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_CompactRegionResponse_descriptor,
               new java.lang.String[] { });
-          internal_static_hbase_pb_UpdateFavoredNodesRequest_descriptor =
+          internal_static_hbase_pb_CompactionSwitchRequest_descriptor =
             getDescriptor().getMessageTypes().get(18);
+          internal_static_hbase_pb_CompactionSwitchRequest_fieldAccessorTable 
= new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_CompactionSwitchRequest_descriptor,
+              new java.lang.String[] { "Enabled", });
+          internal_static_hbase_pb_CompactionSwitchResponse_descriptor =
+            getDescriptor().getMessageTypes().get(19);
+          internal_static_hbase_pb_CompactionSwitchResponse_fieldAccessorTable 
= new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_CompactionSwitchResponse_descriptor,
+              new java.lang.String[] { "PrevState", });
+          internal_static_hbase_pb_UpdateFavoredNodesRequest_descriptor =
+            getDescriptor().getMessageTypes().get(20);
           
internal_static_hbase_pb_UpdateFavoredNodesRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_UpdateFavoredNodesRequest_descriptor,
@@ -24222,91 +25204,91 @@ public final class AdminProtos {
               
internal_static_hbase_pb_UpdateFavoredNodesRequest_RegionUpdateInfo_descriptor,
               new java.lang.String[] { "Region", "FavoredNodes", });
           internal_static_hbase_pb_UpdateFavoredNodesResponse_descriptor =
-            getDescriptor().getMessageTypes().get(19);
+            getDescriptor().getMessageTypes().get(21);
           
internal_static_hbase_pb_UpdateFavoredNodesResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_UpdateFavoredNodesResponse_descriptor,
               new java.lang.String[] { "Response", });
           internal_static_hbase_pb_MergeRegionsRequest_descriptor =
-            getDescriptor().getMessageTypes().get(20);
+            getDescriptor().getMessageTypes().get(22);
           internal_static_hbase_pb_MergeRegionsRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_MergeRegionsRequest_descriptor,
               new java.lang.String[] { "RegionA", "RegionB", "Forcible", 
"MasterSystemTime", });
           internal_static_hbase_pb_MergeRegionsResponse_descriptor =
-            getDescriptor().getMessageTypes().get(21);
+            getDescriptor().getMessageTypes().get(23);
           internal_static_hbase_pb_MergeRegionsResponse_fieldAccessorTable = 
new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_MergeRegionsResponse_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_WALEntry_descriptor =
-            getDescriptor().getMessageTypes().get(22);
+            getDescriptor().getMessageTypes().get(24);
           internal_static_hbase_pb_WALEntry_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_WALEntry_descriptor,
               new java.lang.String[] { "Key", "KeyValueBytes", 
"AssociatedCellCount", });
           internal_static_hbase_pb_ReplicateWALEntryRequest_descriptor =
-            getDescriptor().getMessageTypes().get(23);
+            getDescriptor().getMessageTypes().get(25);
           internal_static_hbase_pb_ReplicateWALEntryRequest_fieldAccessorTable 
= new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ReplicateWALEntryRequest_descriptor,
               new java.lang.String[] { "Entry", "ReplicationClusterId", 
"SourceBaseNamespaceDirPath", "SourceHFileArchiveDirPath", });
           internal_static_hbase_pb_ReplicateWALEntryResponse_descriptor =
-            getDescriptor().getMessageTypes().get(24);
+            getDescriptor().getMessageTypes().get(26);
           
internal_static_hbase_pb_ReplicateWALEntryResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ReplicateWALEntryResponse_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_RollWALWriterRequest_descriptor =
-            getDescriptor().getMessageTypes().get(25);
+            getDescriptor().getMessageTypes().get(27);
           internal_static_hbase_pb_RollWALWriterRequest_fieldAccessorTable = 
new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RollWALWriterRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_RollWALWriterResponse_descriptor =
-            getDescriptor().getMessageTypes().get(26);
+            getDescriptor().getMessageTypes().get(28);
           internal_static_hbase_pb_RollWALWriterResponse_fieldAccessorTable = 
new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RollWALWriterResponse_descriptor,
               new java.lang.String[] { "RegionToFlush", });
           internal_static_hbase_pb_StopServerRequest_descriptor =
-            getDescriptor().getMessageTypes().get(27);
+            getDescriptor().getMessageTypes().get(29);
           internal_static_hbase_pb_StopServerRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_StopServerRequest_descriptor,
               new java.lang.String[] { "Reason", });
           internal_static_hbase_pb_StopServerResponse_descriptor =
-            getDescriptor().getMessageTypes().get(28);
+            getDescriptor().getMessageTypes().get(30);
           internal_static_hbase_pb_StopServerResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_StopServerResponse_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_GetServerInfoRequest_descriptor =
-            getDescriptor().getMessageTypes().get(29);
+            getDescriptor().getMessageTypes().get(31);
           internal_static_hbase_pb_GetServerInfoRequest_fieldAccessorTable = 
new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetServerInfoRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_ServerInfo_descriptor =
-            getDescriptor().getMessageTypes().get(30);
+            getDescriptor().getMessageTypes().get(32);
           internal_static_hbase_pb_ServerInfo_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ServerInfo_descriptor,
               new java.lang.String[] { "ServerName", "WebuiPort", });
           internal_static_hbase_pb_GetServerInfoResponse_descriptor =
-            getDescriptor().getMessageTypes().get(31);
+            getDescriptor().getMessageTypes().get(33);
           internal_static_hbase_pb_GetServerInfoResponse_fieldAccessorTable = 
new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetServerInfoResponse_descriptor,
               new java.lang.String[] { "ServerInfo", });
           internal_static_hbase_pb_UpdateConfigurationRequest_descriptor =
-            getDescriptor().getMessageTypes().get(32);
+            getDescriptor().getMessageTypes().get(34);
           
internal_static_hbase_pb_UpdateConfigurationRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_UpdateConfigurationRequest_descriptor,
               new java.lang.String[] { });
           internal_static_hbase_pb_UpdateConfigurationResponse_descriptor =
-            getDescriptor().getMessageTypes().get(33);
+            getDescriptor().getMessageTypes().get(35);
           
internal_static_hbase_pb_UpdateConfigurationResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_UpdateConfigurationResponse_descriptor,
diff --git a/hbase-protocol/src/main/protobuf/Admin.proto 
b/hbase-protocol/src/main/protobuf/Admin.proto
index a1905a4..7011bd3 100644
--- a/hbase-protocol/src/main/protobuf/Admin.proto
+++ b/hbase-protocol/src/main/protobuf/Admin.proto
@@ -164,6 +164,14 @@ message CompactRegionRequest {
 message CompactRegionResponse {
 }
 
+message CompactionSwitchRequest {
+  required bool enabled = 1;
+}
+
+message CompactionSwitchResponse {
+  required bool prev_state = 1;
+}
+
 message UpdateFavoredNodesRequest {
   repeated RegionUpdateInfo update_info = 1;
 
@@ -281,6 +289,9 @@ service AdminService {
   rpc SplitRegion(SplitRegionRequest)
     returns(SplitRegionResponse);
 
+  rpc CompactionSwitch(CompactionSwitchRequest)
+    returns(CompactionSwitchResponse);
+
   rpc CompactRegion(CompactRegionRequest)
     returns(CompactRegionResponse);
 
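For illustration only, a minimal client sketch of the new switch as exposed through the Admin API; the connection setup is ordinary boilerplate and the server name is a placeholder in the usual host,port,startcode form:

    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionSwitchExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          List<String> servers = Arrays.asList("rs1.example.com,16020,1547161656000");
          // Switch compactions off; any compaction already running on the
          // server is interrupted. The returned map carries each server's
          // previous state so it can be restored afterwards.
          Map<ServerName, Boolean> prevStates = admin.compactionSwitch(false, servers);
          System.out.println("Previous states: " + prevStates);
          // ... maintenance window ...
          admin.compactionSwitch(true, servers);
        }
      }
    }

Because CompactSplitThread reads hbase.regionserver.compaction.enabled at construction time, a restarted region server falls back to whatever its configuration says; the switch itself is ephemeral.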
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
index d35d620..540bbdb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
@@ -79,18 +79,22 @@ public class CompactSplitThread implements 
CompactionRequestor, PropagatingConfi
 
   public static final String REGION_SERVER_REGION_SPLIT_LIMIT =
       "hbase.regionserver.regionSplitLimit";
-  public static final int DEFAULT_REGION_SERVER_REGION_SPLIT_LIMIT= 1000;
-  
+  public static final int DEFAULT_REGION_SERVER_REGION_SPLIT_LIMIT = 1000;
+  public static final String HBASE_REGION_SERVER_ENABLE_COMPACTION =
+      "hbase.regionserver.compaction.enabled";
+
   private final HRegionServer server;
   private final Configuration conf;
 
-  private final ThreadPoolExecutor longCompactions;
-  private final ThreadPoolExecutor shortCompactions;
-  private final ThreadPoolExecutor splits;
+  private volatile ThreadPoolExecutor longCompactions;
+  private volatile ThreadPoolExecutor shortCompactions;
+  private volatile ThreadPoolExecutor splits;
   private final ThreadPoolExecutor mergePool;
 
   private volatile ThroughputController compactionThroughputController;
 
+  private volatile boolean compactionsEnabled;
+
   /**
    * Splitting should not take place if the total number of regions exceed 
this.
    * This is not a hard limit to the number of regions but it is a guideline to
@@ -103,66 +107,75 @@ public class CompactSplitThread implements 
CompactionRequestor, PropagatingConfi
     super();
     this.server = server;
     this.conf = server.getConfiguration();
-    this.regionSplitLimit = conf.getInt(REGION_SERVER_REGION_SPLIT_LIMIT,
-        DEFAULT_REGION_SERVER_REGION_SPLIT_LIMIT);
 
-    int largeThreads = Math.max(1, conf.getInt(
-        LARGE_COMPACTION_THREADS, LARGE_COMPACTION_THREADS_DEFAULT));
-    int smallThreads = conf.getInt(
-        SMALL_COMPACTION_THREADS, SMALL_COMPACTION_THREADS_DEFAULT);
+    this.compactionsEnabled = 
this.conf.getBoolean(HBASE_REGION_SERVER_ENABLE_COMPACTION, true);
+    createCompactionExecutors();
+    createSplitExecutors();
+
+    final String n = Thread.currentThread().getName();
+    int mergeThreads = conf.getInt(MERGE_THREADS, MERGE_THREADS_DEFAULT);
+    this.mergePool = (ThreadPoolExecutor) Executors.newFixedThreadPool(
+      mergeThreads, new ThreadFactory() {
+        @Override
+        public Thread newThread(Runnable r) {
+          String name = n + "-merges-" + System.currentTimeMillis();
+          return new Thread(r, name);
+        }
+      });
+
+    // compaction throughput controller
+    this.compactionThroughputController =
+        CompactionThroughputControllerFactory.create(server, conf);
+  }
 
+  private void createSplitExecutors() {
+    final String n = Thread.currentThread().getName();
     int splitThreads = conf.getInt(SPLIT_THREADS, SPLIT_THREADS_DEFAULT);
+    this.splits =
+        (ThreadPoolExecutor) Executors.newFixedThreadPool(splitThreads, new 
ThreadFactory() {
+          @Override
+          public Thread newThread(Runnable r) {
+            String name = n + "-splits-" + System.currentTimeMillis();
+            return new Thread(r, name);
+          }
+        });
+  }
+
+  private void createCompactionExecutors() {
+    this.regionSplitLimit =
+        conf.getInt(REGION_SERVER_REGION_SPLIT_LIMIT, 
DEFAULT_REGION_SERVER_REGION_SPLIT_LIMIT);
+
+    int largeThreads =
+        Math.max(1, conf.getInt(LARGE_COMPACTION_THREADS, 
LARGE_COMPACTION_THREADS_DEFAULT));
+    int smallThreads = conf.getInt(SMALL_COMPACTION_THREADS, 
SMALL_COMPACTION_THREADS_DEFAULT);
 
     // if we have throttle threads, make sure the user also specified size
     Preconditions.checkArgument(largeThreads > 0 && smallThreads > 0);
 
     final String n = Thread.currentThread().getName();
 
-    StealJobQueue<Runnable> stealJobQueue = new StealJobQueue<>();
-    this.longCompactions = new ThreadPoolExecutor(largeThreads, largeThreads,
-        60, TimeUnit.SECONDS, stealJobQueue,
+    StealJobQueue<Runnable> stealJobQueue = new StealJobQueue<Runnable>();
+    this.longCompactions = new ThreadPoolExecutor(largeThreads, largeThreads, 
60,
+        TimeUnit.SECONDS, stealJobQueue,
         new ThreadFactory() {
           @Override
           public Thread newThread(Runnable r) {
             String name = n + "-longCompactions-" + System.currentTimeMillis();
             return new Thread(r, name);
           }
-      });
+        });
     this.longCompactions.setRejectedExecutionHandler(new Rejection());
     this.longCompactions.prestartAllCoreThreads();
-    this.shortCompactions = new ThreadPoolExecutor(smallThreads, smallThreads,
-        60, TimeUnit.SECONDS, stealJobQueue.getStealFromQueue(),
+    this.shortCompactions = new ThreadPoolExecutor(smallThreads, smallThreads, 
60,
+        TimeUnit.SECONDS, stealJobQueue.getStealFromQueue(),
         new ThreadFactory() {
           @Override
           public Thread newThread(Runnable r) {
             String name = n + "-shortCompactions-" + 
System.currentTimeMillis();
             return new Thread(r, name);
           }
-      });
-    this.shortCompactions
-        .setRejectedExecutionHandler(new Rejection());
-    this.splits = (ThreadPoolExecutor)
-        Executors.newFixedThreadPool(splitThreads,
-            new ThreadFactory() {
-          @Override
-          public Thread newThread(Runnable r) {
-            String name = n + "-splits-" + System.currentTimeMillis();
-            return new Thread(r, name);
-          }
-      });
-    int mergeThreads = conf.getInt(MERGE_THREADS, MERGE_THREADS_DEFAULT);
-    this.mergePool = (ThreadPoolExecutor) Executors.newFixedThreadPool(
-        mergeThreads, new ThreadFactory() {
-          @Override
-          public Thread newThread(Runnable r) {
-            String name = n + "-merges-" + System.currentTimeMillis();
-            return new Thread(r, name);
-          }
         });
-
-    // compaction throughput controller
-    this.compactionThroughputController =
-        CompactionThroughputControllerFactory.create(server, conf);
+    this.shortCompactions.setRejectedExecutionHandler(new Rejection());
   }
 
   @Override
@@ -330,6 +343,30 @@ public class CompactSplitThread implements 
CompactionRequestor, PropagatingConfi
     requestCompactionInternal(r, s, why, Store.NO_PRIORITY, null, false, null);
   }
 
+  private void reInitializeCompactionsExecutors() {
+    createCompactionExecutors();
+  }
+
+  private void interrupt() {
+    longCompactions.shutdownNow();
+    shortCompactions.shutdownNow();
+  }
+
+  @Override
+  public void switchCompaction(boolean onOrOff) {
+    if (onOrOff) {
+      // re-create the executor pools if compactions were previously disabled
+      if (!isCompactionsEnabled()) {
+        LOG.info("Re-initializing compactions because user switched on compactions");
+        reInitializeCompactionsExecutors();
+      }
+    } else {
+      LOG.info("Interrupting running compactions because user switched off compactions");
+      interrupt();
+    }
+    setCompactionsEnabled(onOrOff);
+  }
+
   /**
    * @param r region store belongs to
    * @param s Store to request compaction on
@@ -368,6 +405,13 @@ public class CompactSplitThread implements 
CompactionRequestor, PropagatingConfi
 
   private CompactionContext selectCompaction(final Region r, final Store s,
       int priority, CompactionRequest request, User user) throws IOException {
+    // don't even select for compaction if compactions have been switched off
+    if (!isCompactionsEnabled()) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("User has disabled compactions");
+      }
+      return null;
+    }
     CompactionContext compaction = s.requestCompaction(priority, request, 
user);
     if (compaction == null) {
       if(LOG.isDebugEnabled() && r.getRegionInfo() != null) {
@@ -738,4 +782,27 @@ public class CompactSplitThread implements 
CompactionRequestor, PropagatingConfi
   void shutdownLongCompactions(){
     this.longCompactions.shutdown();
   }
+
+  public boolean isCompactionsEnabled() {
+    return compactionsEnabled;
+  }
+
+  public void setCompactionsEnabled(boolean compactionsEnabled) {
+    this.compactionsEnabled = compactionsEnabled;
+    this.conf.set(HBASE_REGION_SERVER_ENABLE_COMPACTION, String.valueOf(compactionsEnabled));
+  }
+
+  /**
+   * @return the longCompactions thread pool executor
+   */
+  ThreadPoolExecutor getLongCompactions() {
+    return longCompactions;
+  }
+
+  /**
+   * @return the shortCompactions thread pool executor
+   */
+  ThreadPoolExecutor getShortCompactions() {
+    return shortCompactions;
+  }
 }
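Worth noting: the switch tears the compaction pools down and rebuilds them rather than pausing them. Turning compactions off calls shutdownNow() on both compaction executors, interrupting in-flight work; turning them back on constructs fresh executors, which is why the pool fields above became volatile. A stripped-down, self-contained analog of that pattern, with invented names, purely for illustration:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ThreadPoolExecutor;

    // Illustrative analog of the enable/disable pattern above; not HBase code.
    class SwitchablePool {
      private volatile ThreadPoolExecutor pool = newPool();
      private volatile boolean enabled = true;

      private static ThreadPoolExecutor newPool() {
        return (ThreadPoolExecutor) Executors.newFixedThreadPool(2);
      }

      synchronized void switchOn(boolean on) {
        if (on && !enabled) {
          pool = newPool();        // rebuild executors, as switchCompaction does
        } else if (!on && enabled) {
          pool.shutdownNow();      // interrupt running tasks, as interrupt() does
        }
        enabled = on;
      }

      boolean submit(Runnable task) {
        if (!enabled) {
          return false;            // mirrors selectCompaction returning null
        }
        pool.execute(task);
        return true;
      }
    }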
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
index c39f310..8cfa9e3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
@@ -96,4 +96,9 @@ public interface CompactionRequestor {
   CompactionRequest requestCompaction(
     final Region r, final Store s, final String why, int pri, 
CompactionRequest request, User user
   ) throws IOException;
+
+  /**
+   * Turn compactions on or off. Switching compactions off also interrupts any currently
+   * running compaction.
+   */
+  void switchCompaction(boolean onOrOff);
 }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 85f5b73..0f7bd9f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -58,7 +58,6 @@ import java.util.NavigableSet;
 import java.util.RandomAccess;
 import java.util.Set;
 import java.util.TreeMap;
-import java.util.TreeSet;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
 import java.util.concurrent.ConcurrentHashMap;
@@ -5793,7 +5792,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
    * Determines whether multiple column families are present
    * Precondition: familyPaths is not null
    *
-   * @param familyPaths List of Pair<byte[] column family, String hfilePath>
+   * @param familyPaths List of Pair&lt;byte[] column family, String hfilePath&gt;
    */
   private static boolean hasMultipleColumnFamilies(Collection<Pair<byte[], 
String>> familyPaths) {
     boolean multipleFamilies = false;
@@ -6591,9 +6590,9 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 
     /**
      * This function is to maintain backward compatibility for 0.94 filters. 
HBASE-6429 combines
-     * both filterRow & filterRow(List<KeyValue> kvs) functions. While 0.94 
code or older, it may
+     * both filterRow & filterRow(List&lt;KeyValue&gt; kvs) functions. With 0.94 code or older, it may
      * not implement hasFilterRow as HBase-6429 expects because 0.94 
hasFilterRow() only returns
-     * true when filterRow(List<KeyValue> kvs) is overridden not the 
filterRow(). Therefore, the
+     * true when filterRow(List&lt;KeyValue&gt; kvs) is overridden, not the filterRow(). Therefore, the
      * filterRow() will be skipped.
      */
     private boolean filterRow() throws IOException {
@@ -7739,20 +7738,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   }
 
   /**
-   * @param cell
-   * @param tags
-   * @return The passed-in List<Tag> but with the tags from <code>cell</code> 
added.
-   */
-  private static List<Tag> carryForwardTags(final Cell cell, final List<Tag> 
tags) {
-    if (cell.getTagsLength() <= 0) return tags;
-    List<Tag> newTags = tags == null? new ArrayList<Tag>(): /*Append 
Tags*/tags;
-    Iterator<Tag> i =
-        CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(), 
cell.getTagsLength());
-    while (i.hasNext()) newTags.add(i.next());
-    return newTags;
-  }
-
-  /**
    * Run a Get against passed in <code>store</code> on passed 
<code>row</code>, etc.
    * @param store
    * @param row
@@ -8668,7 +8653,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
       break;
     }
     if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION
-        || op == Operation.COMPACT_REGION) {
+        || op == Operation.COMPACT_REGION || op == Operation.COMPACT_SWITCH) {
       // split, merge or compact region doesn't need to check the 
closing/closed state or lock the
       // region
       return;
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 4dd8061..32f9d03 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -112,6 +112,8 @@ import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest
 import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
@@ -1517,6 +1519,26 @@ public class RSRpcServices implements 
HBaseRPCErrorHandler,
     }
   }
 
+  @Override
+  public CompactionSwitchResponse compactionSwitch(RpcController controller,
+      CompactionSwitchRequest request) throws ServiceException {
+    try {
+      checkOpen();
+      requestCount.increment();
+      boolean prevState = 
regionServer.compactSplitThread.isCompactionsEnabled();
+      CompactionSwitchResponse response =
+          
CompactionSwitchResponse.newBuilder().setPrevState(prevState).build();
+      if (prevState == request.getEnabled()) {
+        // requested state matches the current state; no action required
+        return response;
+      }
+      regionServer.compactSplitThread.switchCompaction(request.getEnabled());
+      return response;
+    } catch (IOException ie) {
+      throw new ServiceException(ie);
+    }
+  }
+
   /**
    * Flush a region on the region server.
    *
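On the wire this is one AdminService round trip per region server, and the prev_state field in the response is what lets the Admin implementation report per-server previous states. A rough sketch of invoking the new method through the generated blocking stub, with a null controller and error handling omitted as simplifications:

    import com.google.protobuf.ServiceException;

    import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
    import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest;
    import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse;

    final class CompactionSwitchRpcSketch {
      // Issue the new RPC against one region server's admin stub and return
      // the compaction state the server was in before the call (prev_state).
      static boolean switchOnServer(AdminService.BlockingInterface admin, boolean enable)
          throws ServiceException {
        CompactionSwitchRequest request =
            CompactionSwitchRequest.newBuilder().setEnabled(enable).build();
        CompactionSwitchResponse response = admin.compactionSwitch(null, request);
        return response.getPrevState();
      }
    }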
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index 6642220..6a06bd8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -221,7 +221,7 @@ public interface Region extends ConfigurationObserver {
    */
   enum Operation {
     ANY, GET, PUT, DELETE, SCAN, APPEND, INCREMENT, SPLIT_REGION, 
MERGE_REGION, BATCH_MUTATE,
-    REPLAY_BATCH_MUTATE, COMPACT_REGION, REPLAY_EVENT, SNAPSHOT
+    REPLAY_BATCH_MUTATE, COMPACT_REGION, REPLAY_EVENT, SNAPSHOT, COMPACT_SWITCH
   }
 
   /**
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 42c04e4..c0a49a9 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -56,6 +56,8 @@ import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest
 import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
@@ -509,6 +511,12 @@ ClientProtos.ClientService.BlockingInterface, 
RegionServerServices {
   }
 
   @Override
+  public CompactionSwitchResponse compactionSwitch(RpcController controller,
+      CompactionSwitchRequest request) throws ServiceException {
+    return null;
+  }
+
+  @Override
   public CompactRegionResponse compactRegion(RpcController controller,
       CompactRegionRequest request) throws ServiceException {
     // TODO Auto-generated method stub
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index b9d1dd3..e8025b2 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -38,8 +38,6 @@ import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -87,7 +85,6 @@ import org.mockito.stubbing.Answer;
 @Category(MediumTests.class)
 public class TestCompaction {
   @Rule public TestName name = new TestName();
-  private static final Log LOG = 
LogFactory.getLog(TestCompaction.class.getName());
   private static final HBaseTestingUtility UTIL = 
HBaseTestingUtility.createLocalHTU();
   protected Configuration conf = UTIL.getConfiguration();
 
@@ -357,6 +354,73 @@ public class TestCompaction {
   }
 
   /**
+   * Test that no new compaction requests are generated after stopping compactions
+   */
+  @Test
+  public void testStopStartCompaction() throws IOException {
+    // setup a compact/split thread on a mock server
+    HRegionServer mockServer = Mockito.mock(HRegionServer.class);
+    Mockito.when(mockServer.getConfiguration()).thenReturn(r.getBaseConf());
+    CompactSplitThread thread = new CompactSplitThread(mockServer);
+    Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread);
+    // setup a region/store with some files
+    Store store = r.getStore(COLUMN_FAMILY);
+    createStoreFile(r);
+    for (int i = 0; i < HStore.DEFAULT_BLOCKING_STOREFILE_COUNT - 1; i++) {
+      createStoreFile(r);
+    }
+    thread.switchCompaction(false);
+    thread.requestCompaction(r, store, "test", Store.PRIORITY_USER, new CompactionRequest(), null);
+    assertEquals(false, thread.isCompactionsEnabled());
+    assertEquals(0, thread.getLongCompactions().getActiveCount() + thread.getShortCompactions()
+      .getActiveCount());
+    thread.switchCompaction(true);
+    assertEquals(true, thread.isCompactionsEnabled());
+    thread.requestCompaction(r, store, "test", Store.PRIORITY_USER, new CompactionRequest(), null);
+    assertEquals(1, thread.getLongCompactions().getActiveCount() + thread.getShortCompactions()
+      .getActiveCount());
+  }
+
+  @Test
+  public void testInterruptingRunningCompactions() throws Exception {
+    // setup a compact/split thread on a mock server
+    conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
+      WaitThroughPutController.class.getName());
+    HRegionServer mockServer = Mockito.mock(HRegionServer.class);
+    Mockito.when(mockServer.getConfiguration()).thenReturn(r.getBaseConf());
+    CompactSplitThread thread = new CompactSplitThread(mockServer);
+
+    Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread);
+
+    // setup a region/store with some files
+    Store store = r.getStore(COLUMN_FAMILY);
+    int jmax = (int) Math.ceil(15.0 / compactionThreshold);
+    byte[] pad = new byte[1000]; // 1 KB chunk
+    for (int i = 0; i < compactionThreshold; i++) {
+      HRegionIncommon loader = new HRegionIncommon(r);
+      Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i)));
+      p.setDurability(Durability.SKIP_WAL);
+      for (int j = 0; j < jmax; j++) {
+        p.addColumn(COLUMN_FAMILY, Bytes.toBytes(j), pad);
+      }
+      HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY));
+      loader.put(p);
+      r.flush(true);
+    }
+    Store s = r.getStore(COLUMN_FAMILY);
+    int initialFiles = s.getStorefilesCount();
+
+    thread.requestCompaction(r, store, "test custom compaction", Store.PRIORITY_USER,
+      new CompactionRequest(), null);
+
+    Thread.sleep(3000);
+    thread.switchCompaction(false);
+    assertEquals(initialFiles, s.getStorefilesCount());
+    // don't mess up future tests
+    thread.switchCompaction(true);
+  }
+
+  /**
    * HBASE-7947: Regression test to ensure adding to the correct list in the
    * {@link CompactSplitThread}
    * @throws Exception on failure
@@ -715,4 +779,20 @@ public class TestCompaction {
       this.done.countDown();
     }
   }
+
+  /**
+   * Simple {@link NoLimitThroughputController} whose {@link #control} blocks for a long time,
+   * keeping a requested compaction running until it is interrupted.
+   */
+  public static class WaitThroughPutController extends NoLimitThroughputController {
+
+    public WaitThroughPutController() {
+    }
+
+    @Override
+    public long control(String compactionName, long size) throws InterruptedException {
+      Thread.sleep(6000000);
+      return 6000000;
+    }
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
index 4b5d331..ffad6e8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
@@ -345,6 +345,12 @@ public class TestReplicator extends TestReplicationBase {
       }
 
       @Override
+      public CompactionSwitchResponse compactionSwitch(RpcController controller,
+          CompactionSwitchRequest request) throws ServiceException {
+        return null;
+      }
+
+      @Override
       public CompactRegionResponse compactRegion(RpcController controller,
           CompactRegionRequest request) throws ServiceException {
         return delegate.compactRegion(controller, request);
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 468832d..36d6af9 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -67,6 +67,19 @@ module Hbase
     end
 
     #----------------------------------------------------------------------------------------------
+    # Switch compaction on/off at runtime on a region server
+    def compaction_switch(on_or_off, regionserver_names)
+      region_servers = regionserver_names.flatten.compact
+      servers = java.util.ArrayList.new
+      if region_servers.any?
+        region_servers.each do |s|
+          servers.add(s)
+        end
+      end
+      @admin.compactionSwitch(java.lang.Boolean.valueOf(on_or_off), servers)
+    end
+
+    #----------------------------------------------------------------------------------------------
     # Gets compaction state for specified table
     def getCompactionState(table_name)
       @admin.getCompactionState(TableName.valueOf(table_name)).name()
@@ -490,8 +503,8 @@ module Hbase
       locator = @connection.getRegionLocator(TableName.valueOf(table_name))
       begin
         splits = locator.getAllRegionLocations().
-            map{|i| Bytes.toStringBinary(i.getRegionInfo().getStartKey)}.
-            delete_if{|k| k == ""}.to_java :String
+                 map{|i| Bytes.toStringBinary(i.getRegionInfo().getStartKey)}.
+                 delete_if{|k| k == ""}.to_java :String
         splits = org.apache.hadoop.hbase.util.Bytes.toBinaryByteArrays(splits)
       ensure
         locator.close()
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index 02ad8f7..ad3bcd8 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -332,6 +332,7 @@ Shell.load_command_group(
     is_in_maintenance_mode
     close_region
     compact
+    compaction_switch
     flush
     major_compact
     move
diff --git a/hbase-shell/src/main/ruby/shell/commands/compaction_switch.rb b/hbase-shell/src/main/ruby/shell/commands/compaction_switch.rb
new file mode 100644
index 0000000..143e244
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/compaction_switch.rb
@@ -0,0 +1,53 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    # Switch compaction for a region server
+    class CompactionSwitch < Command
+      def help
+        return <<-EOF
+          Turn compactions on or off on region servers. Disabling compactions will also interrupt
+          any currently ongoing compactions. This state is ephemeral; the setting will be lost on
+          restart of the server. Compaction can also be enabled or disabled by modifying the configuration
+          hbase.regionserver.compaction.enabled in hbase-site.xml.
+          Examples:
+            To enable compactions on all region servers
+            hbase> compaction_switch true
+            To disable compactions on all region servers
+            hbase> compaction_switch false
+            To enable compactions on specific region servers
+            hbase> compaction_switch true 'server2','server1'
+            To disable compactions on specific region servers
+            hbase> compaction_switch false 'server2','server1'
+          NOTE: A server name is its host, port plus startcode. For example:
+          host187.example.com,60020,1289493121758
+        EOF
+      end
+
+      def command(enable_disable, *server)
+        now = Time.now
+        formatter.header(%w[SERVER PREV_STATE])
+        prev_state = admin.compaction_switch(enable_disable, server)
+        prev_state.each { |k, v| formatter.row([k.getServerName, java.lang.String.valueOf(v)]) }
+        formatter.footer(now, prev_state.size)
+      end
+    end
+  end
+end
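
The shell command above is a thin wrapper over the new Admin#compactionSwitch API. For
programmatic use, a minimal Java sketch (connection setup is boilerplate; the server name reuses
the illustrative one from the help text, and an empty list targets all region servers):

    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionSwitchExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Disable compactions on one region server and capture the previous states.
          List<String> servers = Arrays.asList("host187.example.com,60020,1289493121758");
          Map<ServerName, Boolean> prevStates = admin.compactionSwitch(false, servers);
          for (Map.Entry<ServerName, Boolean> e : prevStates.entrySet()) {
            System.out.println(e.getKey().getServerName() + " previously "
                + (e.getValue() ? "enabled" : "disabled"));
          }
        }
      }
    }

Remember that the switch is ephemeral; re-enable with compactionSwitch(true, servers), or persist
the state through configuration as described below.
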
diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc
index 0aac442..7309410 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -1667,6 +1667,14 @@ See <<managed.compactions>>.
 Compactions do not perform region merges.
 See <<ops.regionmgt.merge>> for more information on region merging.
 
+.Compaction Switch
+Compactions can be switched on and off dynamically at the region server level using the
+"compaction_switch" command from the HBase shell. Switching compactions off also interrupts any
+currently running compactions. A switch set from the shell is ephemeral and is lost on restart of
+the server. To persist the change across region servers, set the configuration property
+hbase.regionserver.compaction.enabled in hbase-site.xml and restart HBase.
+
+
 [[compaction.file.selection]]
 ===== Compaction Policy - HBase 0.96.x and newer
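
Where the documentation above says to persist the setting in hbase-site.xml, the property block
would look like the following sketch (a value of false disables compactions on that server):

    <property>
      <name>hbase.regionserver.compaction.enabled</name>
      <value>false</value>
    </property>
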
 
