http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index e3ba2fa..7d72b88 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -22,13 +22,12 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.CompactionState;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Increment;
@@ -41,34 +40,17 @@ import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.conf.ConfigurationObserver;
-import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
 import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.Service;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall;
-
 /**
- * Regions store data for a certain region of a table.  It stores all columns
- * for each row. A given table consists of one or more Regions.
- *
- * <p>An Region is defined by its table and its key extent.
- *
- * <p>Locking at the Region level serves only one purpose: preventing the
- * region from being closed (and consequently split) while other operations
- * are ongoing. Each row level operation obtains both a row lock and a region
- * read lock for the duration of the operation. While a scanner is being
- * constructed, getScanner holds a read lock. If the scanner is successfully
- * constructed, it holds a read lock until it is closed. A close takes out a
- * write lock and consequently will block for ongoing operations and will block
- * new operations from starting while the close is in progress.
+ * Region is a subset of HRegion with operations required for the {@link RegionCoprocessor
+ * Coprocessors}. The operations include ability to do mutations, requesting compaction, getting
+ * different counters/sizes, locking rows and getting access to {@linkplain Store}s.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
 @InterfaceStability.Evolving
@@ -131,24 +113,11 @@ public interface Region extends ConfigurationObserver {
    */
   boolean refreshStoreFiles() throws IOException;
 
-  /** @return the latest sequence number that was read from storage when this region was opened */
-  long getOpenSeqNum();
-
   /** @return the max sequence id of flushed data on this region; no edit in memory will have
    * a sequence id that is less that what is returned here.
    */
   long getMaxFlushedSeqId();
 
-  /** @return the oldest flushed sequence id for the given family; can be beyond
-   * {@link #getMaxFlushedSeqId()} in case where we've flushed a subset of a regions column
-   * families
-   * @deprecated Since version 1.2.0. Exposes too much about our internals; shutting it down.
-   * Do not use.
-   */
-  @VisibleForTesting
-  @Deprecated
-  public long getOldestSeqIdOfStore(byte[] familyName);
-
   /**
    * This can be used to determine the last time all files of this region were major compacted.
    * @param majorCompactionOnly Only consider HFile that are the result of major compaction
@@ -162,20 +131,10 @@ public interface Region extends ConfigurationObserver {
    */
   public Map<byte[], Long> getMaxStoreSeqId();
 
-  /** @return true if loading column families on demand by default */
-  boolean isLoadingCfsOnDemandDefault();
-
   /** @return readpoint considering given IsolationLevel; pass null for default*/
   long getReadPoint(IsolationLevel isolationLevel);
 
   /**
-   * @return readpoint considering given IsolationLevel
-   * @deprecated Since 1.2.0. Use {@link #getReadPoint(IsolationLevel)} instead.
-   */
-  @Deprecated
-  long getReadpoint(IsolationLevel isolationLevel);
-
-  /**
    * @return The earliest time a store in the region was flushed. All
    *         other stores in the region would have been flushed either at, or
    *         after this time.
@@ -188,12 +147,6 @@ public interface Region extends ConfigurationObserver {
   /** @return read requests count for this region */
   long getReadRequestsCount();
 
-  /**
-   * Update the read request count for this region
-   * @param i increment
-   */
-  void updateReadRequestsCount(long i);
-
   /** @return filtered read requests count for this region */
   long getFilteredReadRequestsCount();
 
@@ -201,21 +154,12 @@ public interface Region extends ConfigurationObserver {
   long getWriteRequestsCount();
 
   /**
-   * Update the write request count for this region
-   * @param i increment
-   */
-  void updateWriteRequestsCount(long i);
-
-  /**
    * @return memstore size for this region, in bytes. It just accounts data size of cells added to
    *         the memstores of this Region. Means size in bytes for key, value and tags within Cells.
    *         It wont consider any java heap overhead for the cell objects or any other.
    */
   long getMemStoreSize();
 
-  /** @return store services for this region, to access services required by store level needs */
-  RegionServicesForStores getRegionServicesForStores();
-  RegionServicesForStores getRegionServicesForStores();
-
   /** @return the number of mutations processed bypassing the WAL */
   long getNumMutationsWithoutWAL();
 
@@ -231,12 +175,6 @@ public interface Region extends ConfigurationObserver {
   /** @return the number of failed checkAndMutate guards */
   long getCheckAndMutateChecksFailed();
 
-  /** @return the MetricsRegion for this region */
-  MetricsRegion getMetrics();
-
-  /** @return the block distribution for all Stores managed by this region */
-  HDFSBlocksDistribution getHDFSBlocksDistribution();
-
   ///////////////////////////////////////////////////////////////////////////
   // Locking
 
@@ -259,6 +197,8 @@ public interface Region extends ConfigurationObserver {
    * the operation has completed, whether it succeeded or failed.
    * @throws IOException
    */
+  // TODO Exposing this and closeRegionOperation() as we have getRowLock() exposed.
+  // Remove if we get rid of exposing getRowLock().
   void startRegionOperation() throws IOException;
 
   /**
@@ -308,7 +248,9 @@ public interface Region extends ConfigurationObserver {
    *
    * Before calling this function make sure that a region operation has already been
    * started (the calling thread has already acquired the region-close-guard lock).
-   *
+   * <p>
+   * The obtained locks should be released after use by {@link RowLock#release()}
+   * <p>
    * NOTE: the boolean passed here has changed. It used to be a boolean that
    * stated whether or not to wait on the lock. Now it is whether it an exclusive
    * lock is requested.
@@ -319,51 +261,34 @@ public interface Region extends ConfigurationObserver {
    * @see #startRegionOperation()
    * @see #startRegionOperation(Operation)
    */
+  // TODO this needs to be exposed as we have RowProcessor now. If RowProcessor is removed, we can
+  // remove this too..
   RowLock getRowLock(byte[] row, boolean readLock) throws IOException;
 
-  /**
-   * If the given list of row locks is not null, releases all locks.
-   */
-  void releaseRowLocks(List<RowLock> rowLocks);
-
   ///////////////////////////////////////////////////////////////////////////
   // Region operations
 
   /**
    * Perform one or more append operations on a row.
    * @param append
-   * @param nonceGroup
-   * @param nonce
    * @return result of the operation
    * @throws IOException
    */
-  Result append(Append append, long nonceGroup, long nonce) throws IOException;
+  Result append(Append append) throws IOException;
 
   /**
    * Perform a batch of mutations.
    * <p>
    * Note this supports only Put and Delete mutations and will ignore other types passed.
    * @param mutations the list of mutations
-   * @param nonceGroup
-   * @param nonce
    * @return an array of OperationStatus which internally contains the
    *         OperationStatusCode and the exceptionMessage if any.
    * @throws IOException
    */
-  OperationStatus[] batchMutate(Mutation[] mutations, long nonceGroup, long nonce)
+  OperationStatus[] batchMutate(Mutation[] mutations)
+  OperationStatus[] batchMutate(Mutation[] mutations)
       throws IOException;
 
   /**
-   * Replay a batch of mutations.
-   * @param mutations mutations to replay.
-   * @param replaySeqId
-   * @return an array of OperationStatus which internally contains the
-   *         OperationStatusCode and the exceptionMessage if any.
-   * @throws IOException
-   */
-   OperationStatus[] batchReplay(MutationReplay[] mutations, long replaySeqId) throws IOException;
-
-  /**
    * Atomically checks if a row/family/qualifier value matches the expected value and if it does,
    * it performs the mutation. If the passed value is null, the lack of column value
    * (ie: non-existence) is used. See checkAndRowMutate to do many checkAndPuts at a time on a
@@ -424,17 +349,6 @@ public interface Region extends ConfigurationObserver {
   List<Cell> get(Get get, boolean withCoprocessor) throws IOException;
 
   /**
-   * Do a get for duplicate non-idempotent operation.
-   * @param get query parameters.
-   * @param withCoprocessor
-   * @param nonceGroup Nonce group.
-   * @param nonce Nonce.
-   * @return list of cells resulting from the operation
-   * @throws IOException
-   */
-  List<Cell> get(Get get, boolean withCoprocessor, long nonceGroup, long nonce) throws IOException;
-
-  /**
    * Return an iterator that scans over the HRegion, returning the indicated
    * columns and rows specified by the {@link Scan}.
    * <p>
@@ -467,12 +381,10 @@ public interface Region extends ConfigurationObserver {
   /**
    * Perform one or more increment operations on a row.
    * @param increment
-   * @param nonceGroup
-   * @param nonce
    * @return result of the operation
    * @throws IOException
    */
-  Result increment(Increment increment, long nonceGroup, long nonce) throws IOException;
+  Result increment(Increment increment) throws IOException;
 
   /**
    * Performs multiple mutations atomically on a single row. Currently
@@ -496,6 +408,8 @@ public interface Region extends ConfigurationObserver {
    * <code>rowsToLock</code> is sorted in order to avoid deadlocks.
    * @throws IOException
    */
+  // TODO Should not be exposing with params nonceGroup, nonce. Change when doing the jira for
+  // Changing processRowsWithLocks and RowProcessor
   void mutateRowsWithLocks(Collection<Mutation> mutations, Collection<byte[]> rowsToLock,
       long nonceGroup, long nonce) throws IOException;
 
@@ -513,6 +427,8 @@ public interface Region extends ConfigurationObserver {
    * @param nonceGroup Optional nonce group of the operation (client Id)
    * @param nonce Optional nonce of the operation (unique random id to ensure "more idempotence")
    */
+  // TODO Should not be exposing with params nonceGroup, nonce. Change when doing the jira for
+  // Changing processRowsWithLocks and RowProcessor
   void processRowsWithLocks(RowProcessor<?,?> processor, long nonceGroup, long nonce)
       throws IOException;
 
@@ -525,6 +441,8 @@ public interface Region extends ConfigurationObserver {
    * @param nonceGroup Optional nonce group of the operation (client Id)
    * @param nonce Optional nonce of the operation (unique random id to ensure "more idempotence")
    */
+  // TODO Should not be exposing with params nonceGroup, nonce. Change when doing the jira for
+  // Changing processRowsWithLocks and RowProcessor
   void processRowsWithLocks(RowProcessor<?,?> processor, long timeout, long nonceGroup, long nonce)
       throws IOException;
 
@@ -535,216 +453,14 @@ public interface Region extends ConfigurationObserver {
    */
   void put(Put put) throws IOException;
 
-  /**
-   * Listener class to enable callers of
-   * bulkLoadHFile() to perform any necessary
-   * pre/post processing of a given bulkload call
-   */
-  interface BulkLoadListener {
-    /**
-     * Called before an HFile is actually loaded
-     * @param family family being loaded to
-     * @param srcPath path of HFile
-     * @return final path to be used for actual loading
-     * @throws IOException
-     */
-    String prepareBulkLoad(byte[] family, String srcPath, boolean copyFile)
-        throws IOException;
-
-    /**
-     * Called after a successful HFile load
-     * @param family family being loaded to
-     * @param srcPath path of HFile
-     * @throws IOException
-     */
-    void doneBulkLoad(byte[] family, String srcPath) throws IOException;
-
-    /**
-     * Called after a failed HFile load
-     * @param family family being loaded to
-     * @param srcPath path of HFile
-     * @throws IOException
-     */
-    void failedBulkLoad(byte[] family, String srcPath) throws IOException;
-  }
-
-  /**
-   * Attempts to atomically load a group of hfiles.  This is critical for loading
-   * rows with multiple column families atomically.
-   *
-   * @param familyPaths List of Pair&lt;byte[] column family, String hfilePath&gt;
-   * @param bulkLoadListener Internal hooks enabling massaging/preparation of a
-   * file about to be bulk loaded
-   * @param assignSeqId
-   * @return Map from family to List of store file paths if successful, null if failed recoverably
-   * @throws IOException if failed unrecoverably.
-   */
-  Map<byte[], List<Path>> bulkLoadHFiles(Collection<Pair<byte[], String>> familyPaths,
-      boolean assignSeqId, BulkLoadListener bulkLoadListener) throws IOException;
-
-  /**
-   * Attempts to atomically load a group of hfiles.  This is critical for loading
-   * rows with multiple column families atomically.
-   *
-   * @param familyPaths List of Pair&lt;byte[] column family, String hfilePath&gt;
-   * @param assignSeqId
-   * @param bulkLoadListener Internal hooks enabling massaging/preparation of a
-   * file about to be bulk loaded
-   * @param copyFile always copy hfiles if true
-   * @return Map from family to List of store file paths if successful, null if failed recoverably
-   * @throws IOException if failed unrecoverably.
-   */
-  Map<byte[], List<Path>> bulkLoadHFiles(Collection<Pair<byte[], String>> familyPaths,
-      boolean assignSeqId, BulkLoadListener bulkLoadListener, boolean copyFile) throws IOException;
-
-  ///////////////////////////////////////////////////////////////////////////
-  // Coprocessors
-
   /** @return the coprocessor host */
+  // TODO To be removed by HBASE-18954
   RegionCoprocessorHost getCoprocessorHost();
 
-  /**
-   * Executes a single protocol buffer coprocessor endpoint {@link Service} method using
-   * the registered protocol handlers.  {@link Service} implementations must be registered via the
-   * {@link Region#registerService(com.google.protobuf.Service)}
-   * method before they are available.
-   *
-   * @param controller an {@code RpcContoller} implementation to pass to the invoked service
-   * @param call a {@code CoprocessorServiceCall} instance identifying the service, method,
-   *     and parameters for the method invocation
-   * @return a protocol buffer {@code Message} instance containing the method's result
-   * @throws IOException if no registered service handler is found or an error
-   *     occurs during the invocation
-   * @see org.apache.hadoop.hbase.regionserver.Region#registerService(com.google.protobuf.Service)
-   */
-  com.google.protobuf.Message execService(com.google.protobuf.RpcController controller,
-      CoprocessorServiceCall call)
-  throws IOException;
-
-  /**
-   * Registers a new protocol buffer {@link Service} subclass as a coprocessor endpoint to
-   * be available for handling Region#execService(com.google.protobuf.RpcController,
-   *    org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall) calls.
-   *
-   * <p>
-   * Only a single instance may be registered per region for a given {@link Service} subclass (the
-   * instances are keyed on {@link com.google.protobuf.Descriptors.ServiceDescriptor#getFullName()}.
-   * After the first registration, subsequent calls with the same service name will fail with
-   * a return value of {@code false}.
-   * </p>
-   * @param instance the {@code Service} subclass instance to expose as a coprocessor endpoint
-   * @return {@code true} if the registration was successful, {@code false}
-   * otherwise
-   */
-  boolean registerService(com.google.protobuf.Service instance);
-
-  ///////////////////////////////////////////////////////////////////////////
-  // RowMutation processor support
-
-  /**
-   * Check the collection of families for validity.
-   * @param families
-   * @throws NoSuchColumnFamilyException
-   */
-  void checkFamilies(Collection<byte[]> families) throws NoSuchColumnFamilyException;
-
-  /**
-   * Check the collection of families for valid timestamps
-   * @param familyMap
-   * @param now current timestamp
-   * @throws FailedSanityCheckException
-   */
-  void checkTimestamps(Map<byte[], List<Cell>> familyMap, long now)
-      throws FailedSanityCheckException;
-
-  /**
-   * Prepare a delete for a row mutation processor
-   * @param delete The passed delete is modified by this method. WARNING!
-   * @throws IOException
-   */
-  void prepareDelete(Delete delete) throws IOException;
-
-  /**
-   * Set up correct timestamps in the KVs in Delete object.
-   * <p>Caller should have the row and region locks.
-   * @param mutation
-   * @param familyCellMap
-   * @param now
-   * @throws IOException
-   */
-  void prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<Cell>> familyCellMap,
-      byte[] now) throws IOException;
-
-  /**
-   * Replace any cell timestamps set to {@link org.apache.hadoop.hbase.HConstants#LATEST_TIMESTAMP}
-   * provided current timestamp.
-   * @param values
-   * @param now
-   */
-  void updateCellTimestamps(final Iterable<List<Cell>> values, final byte[] now)
-      throws IOException;
-
   ///////////////////////////////////////////////////////////////////////////
   // Flushes, compactions, splits, etc.
   // Wizards only, please
 
-  interface FlushResult {
-    enum Result {
-      FLUSHED_NO_COMPACTION_NEEDED,
-      FLUSHED_COMPACTION_NEEDED,
-      // Special case where a flush didn't run because there's nothing in the memstores. Used when
-      // bulk loading to know when we can still load even if a flush didn't happen.
-      CANNOT_FLUSH_MEMSTORE_EMPTY,
-      CANNOT_FLUSH
-    }
-
-    /** @return the detailed result code */
-    Result getResult();
-
-    /** @return true if the memstores were flushed, else false */
-    boolean isFlushSucceeded();
-
-    /** @return True if the flush requested a compaction, else false */
-    boolean isCompactionNeeded();
-  }
-
-  /**
-   * Flush the cache.
-   *
-   * <p>When this method is called the cache will be flushed unless:
-   * <ol>
-   *   <li>the cache is empty</li>
-   *   <li>the region is closed.</li>
-   *   <li>a flush is already in progress</li>
-   *   <li>writes are disabled</li>
-   * </ol>
-   *
-   * <p>This method may block for some time, so it should not be called from a
-   * time-sensitive thread.
-   * @param force whether we want to force a flush of all stores
-   * @return FlushResult indicating whether the flush was successful or not and if
-   * the region needs compacting
-   *
-   * @throws IOException general io exceptions
-   * because a snapshot was not properly persisted.
-   */
-  FlushResult flush(boolean force) throws IOException;
-
-  /**
-   * Synchronously compact all stores in the region.
-   * <p>This operation could block for a long time, so don't call it from a
-   * time-sensitive thread.
-   * <p>Note that no locks are taken to prevent possible conflicts between
-   * compaction and splitting activities. The regionserver does not normally 
compact
-   * and split in parallel. However by calling this method you may introduce
-   * unexpected and unhandled concurrency. Don't do this unless you know what
-   * you are doing.
-   *
-   * @param majorCompaction True to force a major compaction regardless of thresholds
-   * @throws IOException
-   */
-  void compact(final boolean majorCompaction) throws IOException;
-
   /**
    * Trigger major compaction on all stores in the region.
    * <p>
@@ -770,11 +486,4 @@ public interface Region extends ConfigurationObserver {
    */
   void requestCompaction(byte[] family, String why, int priority,
       CompactionLifeCycleTracker tracker, User user) throws IOException;
-
-  /** Wait for all current flushes and compactions of the region to complete */
-  void waitForFlushesAndCompactions();
-
-  /** Wait for all current flushes of the region to complete
-   */
-  void waitForFlushes();
 }
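
A note on the narrowed interface above: coprocessors reach Region through
RegionCoprocessorEnvironment#getRegion(), and the surface left in this file is all
they get. A minimal sketch of driving it, assuming illustrative ROW/FAMILY/QUALIFIER
constants and the usual client imports (Put, Mutation, Bytes):

  // Hypothetical coprocessor-side usage of the slimmed-down Region interface.
  void bumpCell(Region region) throws IOException {
    region.startRegionOperation();                        // guard against a concurrent close
    Region.RowLock lock = region.getRowLock(ROW, false);  // false = shared (read) lock
    try {
      Put put = new Put(ROW);
      put.addColumn(FAMILY, QUALIFIER, Bytes.toBytes(1L));
      region.batchMutate(new Mutation[] { put });         // nonce params no longer exposed
    } finally {
      lock.release();                                     // per the new RowLock#release() note
      region.closeRegionOperation();
    }
  }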

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index 035c8d1..c204b32 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -201,7 +201,7 @@ public class RegionCoprocessorHost
   /** The region server services */
   RegionServerServices rsServices;
   /** The region */
-  Region region;
+  HRegion region;
 
   /**
    * Constructor
@@ -209,7 +209,7 @@ public class RegionCoprocessorHost
    * @param rsServices interface to available region server functionality
    * @param conf the configuration
    */
-  public RegionCoprocessorHost(final Region region,
+  public RegionCoprocessorHost(final HRegion region,
       final RegionServerServices rsServices, final Configuration conf) {
     super(rsServices);
     this.conf = conf;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
index 1779e5c..f9e93a1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
@@ -22,9 +22,11 @@ import java.security.PrivilegedAction;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices.RegionStateTransitionContext;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -91,7 +93,8 @@ class RegionMergeRequest implements Runnable {
     // The parent region will be unassigned and the two new regions will be assigned.
     // hri_a and hri_b objects may not reflect the regions that will be created, those objectes
     // are created just to pass the information to the reportRegionStateTransition().
-    if (!server.reportRegionStateTransition(TransitionCode.READY_TO_MERGE, merged, region_a, region_b)) {
+    if (!server.reportRegionStateTransition(new RegionStateTransitionContext(
+        TransitionCode.READY_TO_MERGE, HConstants.NO_SEQNUM, -1, merged, region_a, region_b))) {
       LOG.error("Unable to ask master to merge: " + region_a + ", " + 
region_b);
     }
   }
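
The same mechanical rewrite applies at every former call site of the deprecated
varargs overloads; a before/after sketch (NO_SEQNUM and -1 fill the slots the old
overload never carried):

  // Before: deprecated varargs overload.
  server.reportRegionStateTransition(TransitionCode.READY_TO_MERGE, merged, region_a, region_b);

  // After: one overload, all arguments bundled into the context object.
  server.reportRegionStateTransition(new RegionStateTransitionContext(
      TransitionCode.READY_TO_MERGE, HConstants.NO_SEQNUM, -1, merged, region_a, region_b));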

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index b21d55a..d04f382 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -85,15 +85,15 @@ public interface RegionServerServices
    * Context for postOpenDeployTasks().
    */
   class PostOpenDeployContext {
-    private final Region region;
+    private final HRegion region;
     private final long masterSystemTime;
 
     @InterfaceAudience.Private
-    public PostOpenDeployContext(Region region, long masterSystemTime) {
+    public PostOpenDeployContext(HRegion region, long masterSystemTime) {
       this.region = region;
       this.masterSystemTime = masterSystemTime;
     }
-    public Region getRegion() {
+    public HRegion getRegion() {
       return region;
     }
     public long getMasterSystemTime() {
@@ -111,18 +111,6 @@ public interface RegionServerServices
    */
   void postOpenDeployTasks(final PostOpenDeployContext context) throws KeeperException, IOException;
 
-  /**
-   * Tasks to perform after region open to complete deploy of region on
-   * regionserver
-   *
-   * @param r Region to open.
-   * @throws KeeperException
-   * @throws IOException
-   * @deprecated use {@link #postOpenDeployTasks(PostOpenDeployContext)}
-   */
-  @Deprecated
-  void postOpenDeployTasks(final Region r) throws KeeperException, IOException;
-
   class RegionStateTransitionContext {
     private final TransitionCode code;
     private final long openSeqNum;
@@ -157,20 +145,6 @@ public interface RegionServerServices
   boolean reportRegionStateTransition(final RegionStateTransitionContext context);
 
   /**
-   * Notify master that a handler requests to change a region state
-   * @deprecated use {@link #reportRegionStateTransition(RegionStateTransitionContext)}
-   */
-  @Deprecated
-  boolean reportRegionStateTransition(TransitionCode code, long openSeqNum, RegionInfo... hris);
-
-  /**
-   * Notify master that a handler requests to change a region state
-   * @deprecated use {@link #reportRegionStateTransition(RegionStateTransitionContext)}
-   */
-  @Deprecated
-  boolean reportRegionStateTransition(TransitionCode code, RegionInfo... hris);
-
-  /**
    * Returns a reference to the region server's RPC server
    */
   RpcServerInterface getRpcServer();
@@ -194,7 +168,7 @@ public interface RegionServerServices
   /**
    * @return set of recovering regions on the hosting region server
    */
-  Map<String, Region> getRecoveringRegions();
+  Map<String, HRegion> getRecoveringRegions();
 
   /**
    * Only required for "old" log replay; if it's removed, remove this.
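
Callers of postOpenDeployTasks() migrate the same way; a sketch, assuming an opened
HRegion and a masterSystemTime are in hand:

  // Before (removed overload): rsServices.postOpenDeployTasks(region);
  // After: arguments travel in the context object, which is now typed to HRegion.
  rsServices.postOpenDeployTasks(new PostOpenDeployContext(region, masterSystemTime));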

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
index 296e98c..c358f6c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.ipc.RpcServer;
-import org.apache.hadoop.hbase.regionserver.Region.BulkLoadListener;
+import org.apache.hadoop.hbase.regionserver.HRegion.BulkLoadListener;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.token.FsDelegationToken;
@@ -157,7 +157,7 @@ public class SecureBulkLoadManager {
     LOG.info("Cleaned up " + path + " successfully.");
   }
 
-  public Map<byte[], List<Path>> secureBulkLoadHFiles(final Region region,
+  public Map<byte[], List<Path>> secureBulkLoadHFiles(final HRegion region,
       final BulkLoadHFileRequest request) throws IOException {
     final List<Pair<byte[], String>> familyPaths = new ArrayList<>(request.getFamilyPathCount());
     for(ClientProtos.BulkLoadHFileRequest.FamilyPath el : request.getFamilyPathList()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
index 0227f5b..4b1ae31 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
@@ -22,9 +22,11 @@ import java.security.PrivilegedAction;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices.RegionStateTransitionContext;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -85,7 +87,8 @@ class SplitRequest implements Runnable {
     // The parent region will be unassigned and the two new regions will be assigned.
     // hri_a and hri_b objects may not reflect the regions that will be created, those objects
     // are created just to pass the information to the reportRegionStateTransition().
-    if (!server.reportRegionStateTransition(TransitionCode.READY_TO_SPLIT, parent, hri_a, hri_b)) {
+    if (!server.reportRegionStateTransition(new RegionStateTransitionContext(
+        TransitionCode.READY_TO_SPLIT, HConstants.NO_SEQNUM, -1, parent, hri_a, hri_b))) {
       LOG.error("Unable to ask master to split " + 
parent.getRegionNameAsString());
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java
index 8c5ba9f..f48ee92 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -29,6 +30,7 @@ import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices.RegionStateTransitionContext;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
@@ -117,7 +119,8 @@ public class CloseRegionHandler extends EventHandler {
       }
 
       this.rsServices.removeRegion(region, destination);
-      rsServices.reportRegionStateTransition(TransitionCode.CLOSED, regionInfo);
+      rsServices.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.CLOSED,
+          HConstants.NO_SEQNUM, -1, regionInfo));
 
       // Done!  Region is closed on this RS
       LOG.debug("Closed " + region.getRegionInfo().getRegionNameAsString());

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
index 147317c..e664cd5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
@@ -23,6 +23,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices.PostOpenDeployContext;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices.RegionStateTransitionContext;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -160,7 +162,8 @@ public class OpenRegionHandler extends EventHandler {
         cleanupFailedOpen(region);
       }
     } finally {
-      rsServices.reportRegionStateTransition(TransitionCode.FAILED_OPEN, regionInfo);
+      rsServices.reportRegionStateTransition(new RegionStateTransitionContext(
+          TransitionCode.FAILED_OPEN, HConstants.NO_SEQNUM, -1, regionInfo));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java
index 21963d8..13ab8c8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java
@@ -31,8 +31,7 @@ import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.procedure.ProcedureMember;
 import org.apache.hadoop.hbase.procedure.Subprocedure;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.Region.FlushResult;
+import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;
 import org.apache.hadoop.hbase.regionserver.Region.Operation;
 import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager.SnapshotSubprocedurePool;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
@@ -50,7 +49,7 @@ import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 public class FlushSnapshotSubprocedure extends Subprocedure {
   private static final Log LOG = LogFactory.getLog(FlushSnapshotSubprocedure.class);
 
-  private final List<Region> regions;
+  private final List<HRegion> regions;
   private final SnapshotDescription snapshot;
   private final SnapshotSubprocedurePool taskManager;
   private boolean snapshotSkipFlush = false;
@@ -60,7 +59,7 @@ public class FlushSnapshotSubprocedure extends Subprocedure {
 
   public FlushSnapshotSubprocedure(ProcedureMember member,
       ForeignExceptionDispatcher errorListener, long wakeFrequency, long timeout,
-      List<Region> regions, SnapshotDescription snapshot,
+      List<HRegion> regions, SnapshotDescription snapshot,
       SnapshotSubprocedurePool taskManager) {
     super(member, snapshot.getName(), errorListener, wakeFrequency, timeout);
     this.snapshot = snapshot;
@@ -76,12 +75,12 @@ public class FlushSnapshotSubprocedure extends Subprocedure {
    * Callable for adding files to snapshot manifest working dir.  Ready for multithreading.
    */
   public static class RegionSnapshotTask implements Callable<Void> {
-    private Region region;
+    private HRegion region;
     private boolean skipFlush;
     private ForeignExceptionDispatcher monitor;
     private SnapshotDescription snapshotDesc;
 
-    public RegionSnapshotTask(Region region, SnapshotDescription snapshotDesc,
+    public RegionSnapshotTask(HRegion region, SnapshotDescription snapshotDesc,
         boolean skipFlush, ForeignExceptionDispatcher monitor) {
       this.region = region;
       this.skipFlush = skipFlush;
@@ -111,7 +110,7 @@ public class FlushSnapshotSubprocedure extends Subprocedure {
         } else {
           LOG.debug("Flush Snapshotting region " + region.toString() + " 
started...");
           boolean succeeded = false;
-          long readPt = region.getReadpoint(IsolationLevel.READ_COMMITTED);
+          long readPt = region.getReadPoint(IsolationLevel.READ_COMMITTED);
           for (int i = 0; i < MAX_RETRIES; i++) {
             FlushResult res = region.flush(true);
             if (res.getResult() == FlushResult.Result.CANNOT_FLUSH) {
@@ -132,7 +131,7 @@ public class FlushSnapshotSubprocedure extends Subprocedure {
             throw new IOException("Unable to complete flush after " + 
MAX_RETRIES + " attempts");
           }
         }
-        ((HRegion)region).addRegionToSnapshot(snapshotDesc, monitor);
+        region.addRegionToSnapshot(snapshotDesc, monitor);
         if (skipFlush) {
           LOG.debug("... SkipFlush Snapshotting region " + region.toString() + 
" completed.");
         } else {
@@ -162,7 +161,7 @@ public class FlushSnapshotSubprocedure extends Subprocedure {
     }
 
     // Add all hfiles already existing in region.
-    for (Region region : regions) {
+    for (HRegion region : regions) {
       // submit one task per region for parallelize by region.
       taskManager.submitTask(new RegionSnapshotTask(region, snapshot, snapshotSkipFlush, monitor));
       monitor.rethrowException();

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
index 775d63f..a4b4387 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
 import org.apache.hadoop.hbase.procedure.Subprocedure;
 import org.apache.hadoop.hbase.procedure.SubprocedureFactory;
 import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@@ -162,7 +163,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager {
 
     // check to see if this server is hosting any regions for the snapshots
     // check to see if we have regions for the snapshot
-    List<Region> involvedRegions;
+    List<HRegion> involvedRegions;
     try {
       involvedRegions = getRegionsToSnapshot(snapshot);
     } catch (IOException e1) {
@@ -222,12 +223,13 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager {
    *         the given snapshot.
    * @throws IOException
    */
-  private List<Region> getRegionsToSnapshot(SnapshotDescription snapshot) throws IOException {
-    List<Region> onlineRegions = rss.getRegions(TableName.valueOf(snapshot.getTable()));
-    Iterator<Region> iterator = onlineRegions.iterator();
+  private List<HRegion> getRegionsToSnapshot(SnapshotDescription snapshot) throws IOException {
+    List<HRegion> onlineRegions = (List<HRegion>) rss
+        .getRegions(TableName.valueOf(snapshot.getTable()));
+    Iterator<HRegion> iterator = onlineRegions.iterator();
     // remove the non-default regions
     while (iterator.hasNext()) {
-      Region r = iterator.next();
+      HRegion r = iterator.next();
       if (!RegionReplicaUtil.isDefaultReplica(r.getRegionInfo())) {
         iterator.remove();
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
index 26620c1..8a5265d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
@@ -307,7 +307,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService
   private boolean mutateLabelsRegion(List<Mutation> mutations, OperationStatus[] finalOpStatus)
       throws IOException {
     OperationStatus[] opStatus = this.labelsRegion.batchMutate(mutations
-      .toArray(new Mutation[mutations.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE);
+      .toArray(new Mutation[mutations.size()]));
     int i = 0;
     boolean updateZk = false;
     for (OperationStatus status : opStatus) {
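
This hunk is the template for any internal caller of the old three-argument
batchMutate(): where HConstants.NO_NONCE was being passed anyway, the trailing
arguments are simply dropped (mutationArray below stands in for the toArray(...)
expression above):

  // Before: nonces threaded through even when unused.
  labelsRegion.batchMutate(mutationArray, HConstants.NO_NONCE, HConstants.NO_NONCE);
  // After: the coprocessor-facing signature carries only the mutations.
  labelsRegion.batchMutate(mutationArray);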

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 59ad6de..30ba66c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -3947,8 +3947,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
           if (server.equals(rs.getServerName())) {
             continue;
           }
-          Collection<Region> hrs = rs.getOnlineRegionsLocalContext();
-          for (Region r: hrs) {
+          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
+          for (HRegion r: hrs) {
             assertTrue("Region should not be double assigned",
               r.getRegionInfo().getRegionId() != hri.getRegionId());
           }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
index 59a66ec..fe4119a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
@@ -615,7 +615,7 @@ public class MiniHBaseCluster extends HBaseCluster {
   public void flushcache() throws IOException {
     for (JVMClusterUtil.RegionServerThread t:
         this.hbaseCluster.getRegionServers()) {
-      for(Region r: t.getRegionServer().getOnlineRegionsLocalContext()) {
+      for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) {
         r.flush(true);
       }
     }
@@ -628,7 +628,7 @@ public class MiniHBaseCluster extends HBaseCluster {
   public void flushcache(TableName tableName) throws IOException {
     for (JVMClusterUtil.RegionServerThread t:
         this.hbaseCluster.getRegionServers()) {
-      for(Region r: t.getRegionServer().getOnlineRegionsLocalContext()) {
+      for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) {
         if(r.getTableDescriptor().getTableName().equals(tableName)) {
           r.flush(true);
         }
@@ -643,7 +643,7 @@ public class MiniHBaseCluster extends HBaseCluster {
   public void compact(boolean major) throws IOException {
     for (JVMClusterUtil.RegionServerThread t:
         this.hbaseCluster.getRegionServers()) {
-      for(Region r: t.getRegionServer().getOnlineRegionsLocalContext()) {
+      for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) {
         r.compact(major);
       }
     }
@@ -656,7 +656,7 @@ public class MiniHBaseCluster extends HBaseCluster {
   public void compact(TableName tableName, boolean major) throws IOException {
     for (JVMClusterUtil.RegionServerThread t:
         this.hbaseCluster.getRegionServers()) {
-      for(Region r: t.getRegionServer().getOnlineRegionsLocalContext()) {
+      for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) {
         if(r.getTableDescriptor().getTableName().equals(tableName)) {
           r.compact(major);
         }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index e453be2..4d2a8cc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
 import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HeapMemoryManager;
 import org.apache.hadoop.hbase.regionserver.Leases;
 import org.apache.hadoop.hbase.regionserver.MetricsRegionServer;
@@ -56,7 +57,6 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 
 import com.google.protobuf.Service;
 
@@ -96,7 +96,7 @@ public class MockRegionServerServices implements RegionServerServices {
   }
 
   @Override
-  public boolean removeRegion(Region r, ServerName destination) {
+  public boolean removeRegion(HRegion r, ServerName destination) {
     return this.regions.remove(r.getRegionInfo().getEncodedName()) != null;
   }
 
@@ -121,16 +121,11 @@ public class MockRegionServerServices implements RegionServerServices {
   }
 
   @Override
-  public void addRegion(Region r) {
+  public void addRegion(HRegion r) {
     this.regions.put(r.getRegionInfo().getEncodedName(), r);
   }
 
   @Override
-  public void postOpenDeployTasks(Region r) throws KeeperException, IOException {
-    addRegion(r);
-  }
-
-  @Override
   public void postOpenDeployTasks(PostOpenDeployContext context) throws KeeperException,
       IOException {
     addRegion(context.getRegion());
@@ -269,7 +264,7 @@ public class MockRegionServerServices implements RegionServerServices {
   }
 
   @Override
-  public Map<String, Region> getRecoveringRegions() {
+  public Map<String, HRegion> getRecoveringRegions() {
     // TODO Auto-generated method stub
     return null;
   }
@@ -281,18 +276,6 @@ public class MockRegionServerServices implements RegionServerServices {
   }
 
   @Override
-  public boolean reportRegionStateTransition(TransitionCode code, long openSeqNum,
-      RegionInfo... hris) {
-    return false;
-  }
-
-  @Override
-  public boolean reportRegionStateTransition(TransitionCode code,
-      RegionInfo... hris) {
-    return false;
-  }
-
-  @Override
   public boolean reportRegionStateTransition(RegionStateTransitionContext context) {
     return false;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
index 202ea4b..34e8c3c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -109,7 +109,7 @@ public class TestGlobalMemStoreSize {
 
       for (RegionInfo regionInfo :
           ProtobufUtil.getOnlineRegions(null, server.getRSRpcServices())) {
-        Region r = server.getRegion(regionInfo.getEncodedName());
+        HRegion r = server.getRegion(regionInfo.getEncodedName());
         flush(r, server);
       }
       LOG.info("Post flush on " + server.getServerName());
@@ -125,7 +125,7 @@ public class TestGlobalMemStoreSize {
         // our test was running....
         for (RegionInfo regionInfo :
             ProtobufUtil.getOnlineRegions(null, server.getRSRpcServices())) {
-          Region r = server.getRegion(regionInfo.getEncodedName());
+          HRegion r = server.getRegion(regionInfo.getEncodedName());
           long l = r.getMemStoreSize();
           if (l > 0) {
             // Only meta could have edits at this stage.  Give it another flush
@@ -150,7 +150,7 @@ public class TestGlobalMemStoreSize {
    * @param server
    * @throws IOException
    */
-  private void flush(final Region r, final HRegionServer server)
+  private void flush(final HRegion r, final HRegionServer server)
   throws IOException {
     LOG.info("Flush " + r.toString() + " on " + server.getServerName() +
       ", " +  r.flush(true) + ", size=" +

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
index 327fc89..c95f7b3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
@@ -230,7 +230,7 @@ public class TestHFileArchiving {
     List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(tableName);
     // make sure we only have 1 region serving this table
     assertEquals(1, servingRegions.size());
-    Region region = servingRegions.get(0);
+    HRegion region = servingRegions.get(0);
 
     // get the parent RS and monitor
     HRegionServer hrs = UTIL.getRSForFirstRegionInTable(tableName);
@@ -241,7 +241,7 @@ public class TestHFileArchiving {
     UTIL.loadRegion(region, TEST_FAM);
 
     // get the hfiles in the region
-    List<Region> regions = hrs.getRegions(tableName);
+    List<HRegion> regions = hrs.getRegions(tableName);
     assertEquals("More that 1 region for test table.", 1, regions.size());
 
     region = regions.get(0);
@@ -309,7 +309,7 @@ public class TestHFileArchiving {
     List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(tableName);
     // make sure we only have 1 region serving this table
     assertEquals(1, servingRegions.size());
-    Region region = servingRegions.get(0);
+    HRegion region = servingRegions.get(0);
 
     // get the parent RS and monitor
     HRegionServer hrs = UTIL.getRSForFirstRegionInTable(tableName);
@@ -320,7 +320,7 @@ public class TestHFileArchiving {
     UTIL.loadRegion(region, TEST_FAM);
 
     // get the hfiles in the region
-    List<Region> regions = hrs.getRegions(tableName);
+    List<HRegion> regions = hrs.getRegions(tableName);
     assertEquals("More that 1 region for test table.", 1, regions.size());
 
     region = regions.get(0);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
index 3e8d42e..20cb513 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
@@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -48,7 +47,6 @@ import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -179,9 +177,9 @@ public class TestZooKeeperTableArchiveClient {
     // create the region
     ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM);
     HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
-    List<Region> regions = new ArrayList<>();
+    List<HRegion> regions = new ArrayList<>();
     regions.add(region);
-    when(rss.getRegions()).thenReturn(regions);
+    Mockito.doReturn(regions).when(rss).getRegions();
     final CompactedHFilesDischarger compactionCleaner =
         new CompactedHFilesDischarger(100, stop, rss, false);
     loadFlushAndCompact(region, TEST_FAM);
@@ -232,9 +230,9 @@ public class TestZooKeeperTableArchiveClient {
     // create the region
     ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM);
     HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
-    List<Region> regions = new ArrayList<>();
+    List<HRegion> regions = new ArrayList<>();
     regions.add(region);
-    when(rss.getRegions()).thenReturn(regions);
+    Mockito.doReturn(regions).when(rss).getRegions();
     final CompactedHFilesDischarger compactionCleaner =
         new CompactedHFilesDischarger(100, stop, rss, false);
     loadFlushAndCompact(region, TEST_FAM);
@@ -244,7 +242,7 @@ public class TestZooKeeperTableArchiveClient {
     HRegion otherRegion = UTIL.createTestRegion(otherTable, hcd);
     regions = new ArrayList<>();
     regions.add(otherRegion);
-    when(rss.getRegions()).thenReturn(regions);
+    Mockito.doReturn(regions).when(rss).getRegions();
     final CompactedHFilesDischarger compactionCleaner1 = new CompactedHFilesDischarger(100, stop,
         rss, false);
     loadFlushAndCompact(otherRegion, TEST_FAM);
@@ -422,7 +420,7 @@ public class TestZooKeeperTableArchiveClient {
    * @param columnFamily family for which to add data
    * @throws IOException
    */
-  private void createHFileInRegion(Region region, byte[] columnFamily) throws IOException {
+  private void createHFileInRegion(HRegion region, byte[] columnFamily) throws IOException {
     // put one row in the region
     Put p = new Put(Bytes.toBytes("row"));
     p.addColumn(columnFamily, Bytes.toBytes("Qual"), Bytes.toBytes("v1"));

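The switch from when(...).thenReturn(...) to Mockito.doReturn(...).when(...) above is the usual fix when a stubbed method's generic return type no longer matches the stub value at compile time: thenReturn is checked against the declared return type (presumably a bounded wildcard such as List<? extends Region> here, which rejects a List<HRegion>), while doReturn is only checked at runtime. A sketch under that assumption; the class and method names are made up:

import static org.mockito.Mockito.mock;

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.mockito.Mockito;

public class StubbingSketch {
  static RegionServerServices stubbedServices(HRegion region) {
    RegionServerServices rss = mock(RegionServerServices.class);
    List<HRegion> regions = new ArrayList<>();
    regions.add(region);
    // when(rss.getRegions()).thenReturn(regions) would fail to compile against
    // a wildcard return type; doReturn defers the type check to runtime.
    Mockito.doReturn(regions).when(rss).getRegions();
    return rss;
  }
}
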
http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index 30cd8bf..4ece5c0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -472,7 +473,7 @@ public class TestAdmin2 {
        + AbstractFSWALProvider.getNumRolledLogFiles(regionServer.getWAL(null)) + " log files");
 
     // flush all regions
-    for (Region r : regionServer.getOnlineRegionsLocalContext()) {
+    for (HRegion r : regionServer.getOnlineRegionsLocalContext()) {
       r.flush(true);
     }
     admin.rollWALWriter(regionServer.getServerName());

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
index 6307210..c173a7f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
@@ -41,8 +41,8 @@ import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -140,7 +140,7 @@ public class TestAsyncClusterAdminApi extends TestAsyncAdminBase {
        + AbstractFSWALProvider.getNumRolledLogFiles(regionServer.getWAL(null)) + " log files");
 
     // flush all regions
-    for (Region r : regionServer.getOnlineRegionsLocalContext()) {
+    for (HRegion r : regionServer.getOnlineRegionsLocalContext()) {
       r.flush(true);
     }
     admin.rollWALWriter(regionServer.getServerName()).join();

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 400e109..d887e7b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -110,7 +110,6 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
-import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.ScanInfo;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreScanner;
@@ -4535,7 +4534,7 @@ public class TestFromClientSide {
      // set block size to 64 to making 2 kvs into one block, bypassing the walkForwardInSingleRow
       // in Store.rowAtOrBeforeFromStoreFile
      String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-      Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
+      HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
       Put put1 = new Put(firstRow);
       Put put2 = new Put(secondRow);
       Put put3 = new Put(thirdRow);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
index ca0a5ea..c3772db 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
@@ -963,7 +963,7 @@ public class TestFromClientSide3 {
   private static Region find(final TableName tableName)
       throws IOException, InterruptedException {
     HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(tableName);
-    List<Region> regions = rs.getRegions(tableName);
+    List<HRegion> regions = rs.getRegions(tableName);
     assertEquals(1, regions.size());
     return regions.get(0);
   }

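Note that find() above still returns the Region interface even though the list is now List<HRegion>: HRegion implements Region, so the element widens to the interface with no cast. That is why declarations only had to change where HRegion-specific methods are actually called. A small illustration; the class and method names are hypothetical:

import java.util.List;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Region;

public class WideningSketch {
  // Callers that only need the coprocessor-facing API can keep accepting the
  // interface; the concrete HRegion is widened implicitly.
  static Region first(List<HRegion> regions) {
    return regions.get(0); // HRegion -> Region, no cast required
  }
}
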
http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
index 1a67457..e2bdaf4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
@@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
@@ -1347,7 +1348,7 @@ public class TestHCM {
       assertTrue(!destServerName.equals(metaServerName));
 
        //find another row in the cur server that is less than ROW_X
-      List<Region> regions = curServer.getRegions(TABLE_NAME3);
+      List<HRegion> regions = curServer.getRegions(TABLE_NAME3);
       byte[] otherRow = null;
        for (Region region : regions) {
         if (!region.getRegionInfo().getEncodedName().equals(toMove.getRegionInfo().getEncodedName())

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java
index 3b12845..e7aa60f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.quotas.ThrottlingException;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -248,11 +247,6 @@ public class TestMetaCache {
       exceptions.throwOnScan(this, request);
       return super.scan(controller, request);
     }
-
-    public Region getRegion(
-        final HBaseProtos.RegionSpecifier regionSpecifier) throws IOException {
-      return super.getRegion(regionSpecifier);
-    }
   }
 
   public static abstract class ExceptionInjector {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
index ecb3f24..9fa9aa8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
@@ -56,7 +56,6 @@ import org.apache.hadoop.hbase.regionserver.ChunkCreator;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
-import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
@@ -289,7 +288,7 @@ public class TestCoprocessorInterface {
     byte [][] families = { fam1, fam2, fam3 };
 
     Configuration hc = initConfig();
-    Region region = initHRegion(tableName, name.getMethodName(), hc, new Class<?>[]{}, families);
+    HRegion region = initHRegion(tableName, name.getMethodName(), hc, new Class<?>[]{}, families);
 
     for (int i = 0; i < 3; i++) {
       HBaseTestCase.addContent(region, fam3);
@@ -351,7 +350,7 @@ public class TestCoprocessorInterface {
     byte [][] families = { fam1, fam2, fam3 };
 
     Configuration hc = initConfig();
-    Region region = initHRegion(tableName, name.getMethodName(), hc,
+    HRegion region = initHRegion(tableName, name.getMethodName(), hc,
       new Class<?>[]{CoprocessorImpl.class}, families);
     for (int i = 0; i < 3; i++) {
       HBaseTestCase.addContent(region, fam3);
@@ -378,10 +377,10 @@ public class TestCoprocessorInterface {
     assertTrue(((CoprocessorImpl)c).wasCompacted());
   }
 
-  Region reopenRegion(final Region closedRegion, Class<?> ... implClasses)
+  HRegion reopenRegion(final HRegion closedRegion, Class<?> ... implClasses)
       throws IOException {
     //RegionInfo info = new RegionInfo(tableName, null, null, false);
-    Region r = HRegion.openHRegion(closedRegion, null);
+    HRegion r = HRegion.openHRegion(closedRegion, null);
 
     // this following piece is a hack. currently a coprocessorHost
     // is secretly loaded at OpenRegionHandler. we don't really
@@ -389,7 +388,7 @@ public class TestCoprocessorInterface {
     // and set it to region.
     Configuration conf = TEST_UTIL.getConfiguration();
     RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
-    ((HRegion)r).setCoprocessorHost(host);
+    r.setCoprocessorHost(host);
 
     for (Class<?> implClass : implClasses) {
       host.load((Class<? extends RegionCoprocessor>) implClass, Coprocessor.PRIORITY_USER, conf);
@@ -405,7 +404,7 @@ public class TestCoprocessorInterface {
     return r;
   }
 
-  Region initHRegion (TableName tableName, String callingMethod,
+  HRegion initHRegion (TableName tableName, String callingMethod,
       Configuration conf, Class<?> [] implClasses, byte [][] families)
       throws IOException {
     HTableDescriptor htd = new HTableDescriptor(tableName);
@@ -419,11 +418,11 @@ public class TestCoprocessorInterface {
         .setSplit(false)
         .build();
     Path path = new Path(DIR + callingMethod);
-    Region r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd);
+    HRegion r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd);
 
     // this following piece is a hack.
     RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
-    ((HRegion)r).setCoprocessorHost(host);
+    r.setCoprocessorHost(host);
 
     for (Class<?> implClass : implClasses) {
       host.load((Class<? extends RegionCoprocessor>) implClass, Coprocessor.PRIORITY_USER, conf);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
index 0a95f41..6099381 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
@@ -186,7 +186,7 @@ public class TestRegionObserverScannerOpenHook {
     }
   }
 
-  Region initHRegion(byte[] tableName, String callingMethod, Configuration conf,
+  HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf,
       byte[]... families) throws IOException {
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
     for (byte[] family : families) {
@@ -239,7 +239,7 @@ public class TestRegionObserverScannerOpenHook {
     byte[][] FAMILIES = new byte[][] { A };
 
     Configuration conf = HBaseConfiguration.create();
-    Region region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
+    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
     RegionCoprocessorHost h = region.getCoprocessorHost();
     h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
     h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
@@ -323,7 +323,7 @@ public class TestRegionObserverScannerOpenHook {
     table.put(put);
 
     HRegionServer rs = UTIL.getRSForFirstRegionInTable(desc.getTableName());
-    List<Region> regions = rs.getRegions(desc.getTableName());
+    List<HRegion> regions = rs.getRegions(desc.getTableName());
     assertEquals("More than 1 region serving test table with 1 row", 1, 
regions.size());
     Region region = regions.get(0);
     admin.flushRegion(region.getRegionInfo().getRegionName());

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
index 2624d95..c68921e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
@@ -71,7 +71,7 @@ import org.junit.rules.TestName;
 @Category({FilterTests.class, SmallTests.class})
 public class TestFilter {
   private final static Log LOG = LogFactory.getLog(TestFilter.class);
-  private Region region;
+  private HRegion region;
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 
   @Rule
@@ -1499,7 +1499,7 @@ public class TestFilter {
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
     htd.addFamily(new HColumnDescriptor(family));
     HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
-    Region testRegion = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
+    HRegion testRegion = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
         TEST_UTIL.getConfiguration(), htd);
 
     for(int i=0; i<5; i++) {
@@ -2060,7 +2060,7 @@ public class TestFilter {
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
     htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
     HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
-    Region testRegion = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
+    HRegion testRegion = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
         TEST_UTIL.getConfiguration(), htd);
     for(int i=0; i<10; i++) {
       Put p = new Put(Bytes.toBytes("row" + i));

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java
index 159769e..b6bc2f1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.FilterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -61,7 +60,7 @@ public class TestInvocationRecordFilter {
   private static final String VALUE_PREFIX = "value";
 
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private Region region;
+  private HRegion region;
 
   @Before
   public void setUp() throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
index 706c463..a8c45dd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
@@ -292,7 +292,7 @@ public class TestBlockReorder {
 
     int nbTest = 0;
     while (nbTest < 10) {
-      final List<Region> regions = targetRs.getRegions(h.getName());
+      final List<HRegion> regions = targetRs.getRegions(h.getName());
       final CountDownLatch latch = new CountDownLatch(regions.size());
       // listen for successful log rolls
       final WALActionsListener listener = new WALActionsListener.Base() {
@@ -301,8 +301,8 @@ public class TestBlockReorder {
               latch.countDown();
             }
           };
-      for (Region region : regions) {
-        ((HRegion)region).getWAL().registerWALActionsListener(listener);
+      for (HRegion region : regions) {
+        region.getWAL().registerWALActionsListener(listener);
       }
 
       htu.getAdmin().rollWALWriter(targetRs.getServerName());

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
index efdf765..5c1baca 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
 import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -114,7 +115,7 @@ public class TestEncodedSeekers {
         setBlocksize(BLOCK_SIZE).
         setBloomFilterType(BloomType.NONE).
         setCompressTags(compressTags);
-    Region region = testUtil.createTestRegion(TABLE_NAME, hcd);
+    HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd);
 
     //write the data, but leave some in the memstore
     doPuts(region);
@@ -138,7 +139,7 @@ public class TestEncodedSeekers {
   }
 
 
-  private void doPuts(Region region) throws IOException{
+  private void doPuts(HRegion region) throws IOException{
     LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(MIN_VALUE_SIZE, MAX_VALUE_SIZE);
      for (int i = 0; i < NUM_ROWS; ++i) {
       byte[] key = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java
index 6b13899..273f82d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.testclassification.IOTests;
@@ -65,7 +66,7 @@ public class TestPrefixTree {
 
   private final HBaseTestingUtility testUtil = new HBaseTestingUtility();
 
-  private Region region;
+  private HRegion region;
 
   @Before
   public void setUp() throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java
index 3bf189d..e0d2a9b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
-import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -48,7 +48,7 @@ import org.junit.experimental.categories.Category;
 public class TestSeekBeforeWithReverseScan {
   private final HBaseTestingUtility testUtil = new HBaseTestingUtility();
 
-  private Region region;
+  private HRegion region;
 
   private byte[] cfName = Bytes.toBytes("a");
   private byte[] cqName = Bytes.toBytes("b");

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
index 950beec..a86a551 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
@@ -411,7 +411,7 @@ public class TestCacheOnWrite {
     final String cf = "myCF";
     final byte[] cfBytes = Bytes.toBytes(cf);
     final int maxVersions = 3;
-    Region region = TEST_UTIL.createTestRegion(table, 
+    HRegion region = TEST_UTIL.createTestRegion(table,
         new HColumnDescriptor(cf)
             .setCompressionType(compress)
             .setBloomFilterType(BLOOM_TYPE)

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
index e94859a..9983e1d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
@@ -127,7 +127,7 @@ public class TestForceCacheImportantBlocks {
     else assertTrue(stats.getMissCount() > missCount);
   }
 
-  private void writeTestData(Region region) throws IOException {
+  private void writeTestData(HRegion region) throws IOException {
     for (int i = 0; i < NUM_ROWS; ++i) {
       Put put = new Put(Bytes.toBytes("row" + i));
       for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
index c834fca..b1ae855 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.IOTests;
@@ -99,7 +100,7 @@ public class TestScannerSelectionUsingKeyRange {
     HTableDescriptor htd = new HTableDescriptor(TABLE);
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(TABLE);
-    Region region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), conf,
+    HRegion region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), conf,
         htd);
 
     for (int iFile = 0; iFile < NUM_FILES; ++iFile) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b212bf93/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
index 4af48ce..459deeb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
@@ -35,9 +35,9 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -106,7 +106,7 @@ public class TestScannerSelectionUsingTTL {
     HTableDescriptor htd = new HTableDescriptor(TABLE);
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(TABLE);
-    Region region = HBaseTestingUtility.createRegionAndWAL(info,
+    HRegion region = HBaseTestingUtility.createRegionAndWAL(info,
       TEST_UTIL.getDataTestDir(info.getEncodedName()), conf, htd);
 
     long ts = EnvironmentEdgeManager.currentTime();
@@ -150,7 +150,7 @@ public class TestScannerSelectionUsingTTL {
 
     // Exercise both compaction codepaths.
     if (explicitCompaction) {
-      HStore store = (HStore)region.getStore(FAMILY_BYTES);
+      HStore store = region.getStore(FAMILY_BYTES);
       store.compactRecentForTestingAssumingDefaultPolicy(totalNumFiles);
     } else {
       region.compact(false);
