HBASE-20790 Fix the style issues on branch HBASE-19064 before merging back to 
master


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0789e15b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0789e15b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0789e15b

Branch: refs/heads/master
Commit: 0789e15b5e34dcafd6adfb08b4474ea1b4628265
Parents: a84cdbd
Author: zhangduo <zhang...@apache.org>
Authored: Wed Jun 27 16:45:53 2018 +0800
Committer: zhangduo <zhang...@apache.org>
Committed: Thu Jun 28 18:08:43 2018 +0800

----------------------------------------------------------------------
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  26 ++--
 .../replication/ReplicationPeerConfig.java      |   1 +
 .../hadoop/hbase/regionserver/HRegion.java      | 148 +++++++++----------
 .../replication/TestSyncReplicationStandBy.java |   3 +-
 4 files changed, 89 insertions(+), 89 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/0789e15b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 963cca7..6dc0555 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -1594,30 +1594,28 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
 
   @Override
   public CompletableFuture<ReplicationPeerConfig> 
getReplicationPeerConfig(String peerId) {
-    return this
-        .<ReplicationPeerConfig> newMasterCaller()
-        .action(
-          (controller, stub) -> this
-              .<GetReplicationPeerConfigRequest, 
GetReplicationPeerConfigResponse, ReplicationPeerConfig> call(
-                controller, stub, 
RequestConverter.buildGetReplicationPeerConfigRequest(peerId), (
-                    s, c, req, done) -> s.getReplicationPeerConfig(c, req, 
done),
-                (resp) -> 
ReplicationPeerConfigUtil.convert(resp.getPeerConfig()))).call();
+    return this.<ReplicationPeerConfig> newMasterCaller().action((controller, 
stub) -> this
+      .<GetReplicationPeerConfigRequest, GetReplicationPeerConfigResponse, 
ReplicationPeerConfig> call(
+        controller, stub, 
RequestConverter.buildGetReplicationPeerConfigRequest(peerId),
+        (s, c, req, done) -> s.getReplicationPeerConfig(c, req, done),
+        (resp) -> ReplicationPeerConfigUtil.convert(resp.getPeerConfig())))
+      .call();
   }
 
   @Override
   public CompletableFuture<Void> updateReplicationPeerConfig(String peerId,
       ReplicationPeerConfig peerConfig) {
     return this
-        .<UpdateReplicationPeerConfigRequest, 
UpdateReplicationPeerConfigResponse> procedureCall(
-          RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, 
peerConfig),
-          (s, c, req, done) -> s.updateReplicationPeerConfig(c, req, done),
-          (resp) -> resp.getProcId(),
-          new ReplicationProcedureBiConsumer(peerId, () -> 
"UPDATE_REPLICATION_PEER_CONFIG"));
+      .<UpdateReplicationPeerConfigRequest, 
UpdateReplicationPeerConfigResponse> procedureCall(
+        RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, 
peerConfig),
+        (s, c, req, done) -> s.updateReplicationPeerConfig(c, req, done),
+        (resp) -> resp.getProcId(),
+        new ReplicationProcedureBiConsumer(peerId, () -> 
"UPDATE_REPLICATION_PEER_CONFIG"));
   }
 
   @Override
   public CompletableFuture<Void> 
transitReplicationPeerSyncReplicationState(String peerId,
-    SyncReplicationState clusterState) {
+      SyncReplicationState clusterState) {
     return this
       .<TransitReplicationPeerSyncReplicationStateRequest, 
TransitReplicationPeerSyncReplicationStateResponse> procedureCall(
         
RequestConverter.buildTransitReplicationPeerSyncReplicationStateRequest(peerId,

http://git-wip-us.apache.org/repos/asf/hbase/blob/0789e15b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index cc7b4bc..86c829b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -344,6 +344,7 @@ public class ReplicationPeerConfig {
       return this;
     }
 
+    @Override
     public ReplicationPeerConfigBuilder setRemoteWALDir(String dir) {
       this.remoteWALDir = dir;
       return this;

http://git-wip-us.apache.org/repos/asf/hbase/blob/0789e15b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index e3c2ca3..7771ad0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2001,6 +2001,80 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
     return false;
   }
 
+  /**
+   * We are trying to remove / relax the region read lock for compaction.
+   * Let's see what are the potential race conditions among the operations 
(user scan,
+   * region split, region close and region bulk load).
+   *
+   *  user scan ---> region read lock
+   *  region split --> region close first --> region write lock
+   *  region close --> region write lock
+   *  region bulk load --> region write lock
+   *
+   * read lock is compatible with read lock. ---> no problem with user 
scan/read
+   * region bulk load does not cause problem for compaction (no consistency 
problem, store lock
+   * will help the store file accounting).
+   * They can run almost concurrently at the region level.
+   *
+   * The only remaining race condition is between the region close and 
compaction.
+   * So we will evaluate, below, how region close intervenes with compaction 
if compaction does
+   * not acquire region read lock.
+   *
+   * Here are the steps for compaction:
+   * 1. obtain list of StoreFile's
+   * 2. create StoreFileScanner's based on list from #1
+   * 3. perform compaction and save resulting files under tmp dir
+   * 4. swap in compacted files
+   *
+   * #1 is guarded by store lock. This patch does not change this --> no worse 
or better
+   * For #2, we obtain smallest read point (for region) across all the 
Scanners (for both default
+   * compactor and stripe compactor).
+   * The read points are for user scans. Region keeps the read points for all 
currently open
+   * user scanners.
+   * Compaction needs to know the smallest read point so that during re-write 
of the hfiles,
+   * it can remove the mvcc points for the cells if their mvccs are older than 
the smallest
+   * since they are not needed anymore.
+   * This will not conflict with compaction.
+   * For #3, it can be performed in parallel to other operations.
+   * For #4 bulk load and compaction don't conflict with each other on the 
region level
+   *   (for multi-family atomicity).
+   * Region close and compaction are guarded pretty well by the 'writestate'.
+   * In HRegion#doClose(), we have :
+   * synchronized (writestate) {
+   *   // Disable compacting and flushing by background threads for this
+   *   // region.
+   *   canFlush = !writestate.readOnly;
+   *   writestate.writesEnabled = false;
+   *   LOG.debug("Closing " + this + ": disabling compactions & flushes");
+   *   waitForFlushesAndCompactions();
+   * }
+   * waitForFlushesAndCompactions() would wait for writestate.compacting to 
come down to 0.
+   * and in HRegion.compact()
+   *  try {
+   *    synchronized (writestate) {
+   *    if (writestate.writesEnabled) {
+   *      wasStateSet = true;
+   *      ++writestate.compacting;
+   *    } else {
+   *      String msg = "NOT compacting region " + this + ". Writes disabled.";
+   *      LOG.info(msg);
+   *      status.abort(msg);
+   *      return false;
+   *    }
+   *  }
+   * Also in compactor.performCompaction():
+   * check periodically to see if a system stop is requested
+   * if (closeCheckInterval > 0) {
+   *   bytesWritten += len;
+   *   if (bytesWritten > closeCheckInterval) {
+   *     bytesWritten = 0;
+   *     if (!store.areWritesEnabled()) {
+   *       progress.cancel();
+   *       return false;
+   *     }
+   *   }
+   * }
+   */
   public boolean compact(CompactionContext compaction, HStore store,
       ThroughputController throughputController, User user) throws IOException 
{
     assert compaction != null && compaction.hasSelection();
@@ -2021,80 +2095,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 
     MonitoredTask status = null;
     boolean requestNeedsCancellation = true;
-    /*
-     * We are trying to remove / relax the region read lock for compaction.
-     * Let's see what are the potential race conditions among the operations 
(user scan,
-     * region split, region close and region bulk load).
-     *
-     *  user scan ---> region read lock
-     *  region split --> region close first --> region write lock
-     *  region close --> region write lock
-     *  region bulk load --> region write lock
-     *
-     * read lock is compatible with read lock. ---> no problem with user 
scan/read
-     * region bulk load does not cause problem for compaction (no consistency 
problem, store lock
-     *  will help the store file accounting).
-     * They can run almost concurrently at the region level.
-     *
-     * The only remaining race condition is between the region close and 
compaction.
-     * So we will evaluate, below, how region close intervenes with compaction 
if compaction does
-     * not acquire region read lock.
-     *
-     * Here are the steps for compaction:
-     * 1. obtain list of StoreFile's
-     * 2. create StoreFileScanner's based on list from #1
-     * 3. perform compaction and save resulting files under tmp dir
-     * 4. swap in compacted files
-     *
-     * #1 is guarded by store lock. This patch does not change this --> no 
worse or better
-     * For #2, we obtain smallest read point (for region) across all the 
Scanners (for both default
-     * compactor and stripe compactor).
-     * The read points are for user scans. Region keeps the read points for 
all currently open
-     * user scanners.
-     * Compaction needs to know the smallest read point so that during 
re-write of the hfiles,
-     * it can remove the mvcc points for the cells if their mvccs are older 
than the smallest
-     * since they are not needed anymore.
-     * This will not conflict with compaction.
-     * For #3, it can be performed in parallel to other operations.
-     * For #4 bulk load and compaction don't conflict with each other on the 
region level
-     *   (for multi-family atomicity).
-     * Region close and compaction are guarded pretty well by the 'writestate'.
-     * In HRegion#doClose(), we have :
-     * synchronized (writestate) {
-     *   // Disable compacting and flushing by background threads for this
-     *   // region.
-     *   canFlush = !writestate.readOnly;
-     *   writestate.writesEnabled = false;
-     *   LOG.debug("Closing " + this + ": disabling compactions & flushes");
-     *   waitForFlushesAndCompactions();
-     * }
-     * waitForFlushesAndCompactions() would wait for writestate.compacting to 
come down to 0.
-     * and in HRegion.compact()
-     *  try {
-     *    synchronized (writestate) {
-     *    if (writestate.writesEnabled) {
-     *      wasStateSet = true;
-     *      ++writestate.compacting;
-     *    } else {
-     *      String msg = "NOT compacting region " + this + ". Writes 
disabled.";
-     *      LOG.info(msg);
-     *      status.abort(msg);
-     *      return false;
-     *    }
-     *  }
-     * Also in compactor.performCompaction():
-     * check periodically to see if a system stop is requested
-     * if (closeCheckInterval > 0) {
-     *   bytesWritten += len;
-     *   if (bytesWritten > closeCheckInterval) {
-     *     bytesWritten = 0;
-     *     if (!store.areWritesEnabled()) {
-     *       progress.cancel();
-     *       return false;
-     *     }
-     *   }
-     * }
-     */
     try {
       byte[] cf = Bytes.toBytes(store.getColumnFamilyName());
       if (stores.get(cf) != store) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0789e15b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java
index de409fc..3bfd9a8 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java
@@ -21,10 +21,10 @@ import static org.hamcrest.CoreMatchers.containsString;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Arrays;
-
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -62,6 +62,7 @@ public class TestSyncReplicationStandBy extends 
SyncReplicationTestBase {
   private void assertDisallow(Table table, TableAction action) throws 
IOException {
     try {
       action.call(table);
+      fail("Should not allow the action");
     } catch (DoNotRetryIOException | RetriesExhaustedException e) {
       // expected
       assertThat(e.getMessage(), containsString("STANDBY"));

Reply via email to