hbase git commit: HBASE-16837 Implement checkAndPut and checkAndDelete
Repository: hbase Updated Branches: refs/heads/master ef8c65e54 -> acc606571 HBASE-16837 Implement checkAndPut and checkAndDelete Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/acc60657 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/acc60657 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/acc60657 Branch: refs/heads/master Commit: acc606571bca4e82aec7136cff64bf6187dd71f7 Parents: ef8c65e Author: zhangduoAuthored: Tue Oct 18 21:46:27 2016 +0800 Committer: zhangduo Committed: Wed Oct 19 13:32:24 2016 +0800 -- .../apache/hadoop/hbase/client/AsyncTable.java | 67 .../hadoop/hbase/client/AsyncTableImpl.java | 36 ++- .../hadoop/hbase/client/TestAsyncTable.java | 58 + 3 files changed, 159 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/acc60657/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java index 2ed3c26..6019bdc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java @@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ReflectionUtils; @@ -204,4 +205,70 @@ public interface AsyncTable { new Increment(row).addColumn(family, qualifier, amount).setDurability(durability)) .thenApply(r -> Bytes.toLong(r.getValue(family, qualifier))); } + + /** + * Atomically checks if a row/family/qualifier value equals to the expected value. 
If it does, it + * adds the put. If the passed value is null, the check is for the lack of column (ie: + * non-existence) + * @param row to check + * @param family column family to check + * @param qualifier column qualifier to check + * @param value the expected value + * @param put data to put if check succeeds + * @return true if the new put was executed, false otherwise. The return value will be wrapped by + * a {@link CompletableFuture}. + */ + default CompletableFuture checkAndPut(byte[] row, byte[] family, byte[] qualifier, + byte[] value, Put put) { +return checkAndPut(row, family, qualifier, CompareOp.EQUAL, value, put); + } + + /** + * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it + * adds the put. If the passed value is null, the check is for the lack of column (ie: + * non-existence) + * @param row to check + * @param family column family to check + * @param qualifier column qualifier to check + * @param compareOp comparison operator to use + * @param value the expected value + * @param put data to put if check succeeds + * @return true if the new put was executed, false otherwise. The return value will be wrapped by + * a {@link CompletableFuture}. + */ + CompletableFuture checkAndPut(byte[] row, byte[] family, byte[] qualifier, + CompareOp compareOp, byte[] value, Put put); + + /** + * Atomically checks if a row/family/qualifier value equals to the expected value. If it does, it + * adds the delete. If the passed value is null, the check is for the lack of column (ie: + * non-existence) + * @param row to check + * @param family column family to check + * @param qualifier column qualifier to check + * @param value the expected value + * @param delete data to delete if check succeeds + * @return true if the new delete was executed, false otherwise. The return value will be wrapped + * by a {@link CompletableFuture}. 
+ */ + default CompletableFuture checkAndDelete(byte[] row, byte[] family, byte[] qualifier, + byte[] value, Delete delete) { +return checkAndDelete(row, family, qualifier, CompareOp.EQUAL, value, delete); + } + + /** + * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it + * adds the delete. If the passed value is null, the check is for the lack of column (ie: + * non-existence) + * @param row to check + * @param family column family to check + * @param qualifier column qualifier to check + * @param compareOp comparison operator to use + * @param value
[4/5] hbase git commit: HBASE-16824 Writer.flush() can be called on already closed streams in WAL roll
HBASE-16824 Writer.flush() can be called on already closed streams in WAL roll Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/57181442 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/57181442 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/57181442 Branch: refs/heads/branch-1.2 Commit: 57181442577c36689114334b011a6e72de4ae785 Parents: bcc74e5 Author: Enis SoztutarAuthored: Tue Oct 18 18:46:02 2016 -0700 Committer: Enis Soztutar Committed: Tue Oct 18 19:19:12 2016 -0700 -- .../hadoop/hbase/regionserver/wal/FSHLog.java | 31 -- .../wal/TestLogRollingNoCluster.java| 43 ++-- 2 files changed, 57 insertions(+), 17 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/57181442/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 79ff1bc..7e3d82b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -1127,6 +1127,7 @@ public class FSHLog implements WAL { private volatile long sequence; // Keep around last exception thrown. Clear on successful sync. private final BlockingQueue syncFutures; +private volatile SyncFuture takeSyncFuture = null; /** * UPDATE! @@ -1216,13 +1217,21 @@ public class FSHLog implements WAL { return sequence; } +boolean areSyncFuturesReleased() { + // check whether there is no sync futures offered, and no in-flight sync futures that is being + // processed. 
+ return syncFutures.size() <= 0 + && takeSyncFuture == null; +} + public void run() { long currentSequence; while (!isInterrupted()) { int syncCount = 0; -SyncFuture takeSyncFuture; + try { while (true) { +takeSyncFuture = null; // We have to process what we 'take' from the queue takeSyncFuture = this.syncFutures.take(); currentSequence = this.sequence; @@ -1733,9 +1742,21 @@ public class FSHLog implements WAL { * @return True if outstanding sync futures still */ private boolean isOutstandingSyncs() { + // Look at SyncFutures in the EventHandler for (int i = 0; i < this.syncFuturesCount; i++) { if (!this.syncFutures[i].isDone()) return true; } + + return false; +} + +private boolean isOutstandingSyncsFromRunners() { + // Look at SyncFutures in the SyncRunners + for (SyncRunner syncRunner: syncRunners) { +if(syncRunner.isAlive() && !syncRunner.areSyncFuturesReleased()) { + return true; +} + } return false; } @@ -1846,11 +1867,13 @@ public class FSHLog implements WAL { // Wait on outstanding syncers; wait for them to finish syncing (unless we've been // shutdown or unless our latch has been thrown because we have been aborted or unless // this WAL is broken and we can't get a sync/append to complete). -while (!this.shutdown && this.zigzagLatch.isCocked() && -highestSyncedSequence.get() < currentSequence && +while ((!this.shutdown && this.zigzagLatch.isCocked() +&& highestSyncedSequence.get() < currentSequence && // We could be in here and all syncs are failing or failed. Check for this. Otherwise // we'll just be stuck here for ever. In other words, ensure there syncs running. 
-isOutstandingSyncs()) { +isOutstandingSyncs()) +// Wait for all SyncRunners to finish their work so that we can replace the writer +|| isOutstandingSyncsFromRunners()) { synchronized (this.safePointWaiter) { this.safePointWaiter.wait(0, 1); } http://git-wip-us.apache.org/repos/asf/hbase/blob/57181442/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java index 1c36552..034ddcd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java +++
[5/5] hbase git commit: HBASE-16824 Writer.flush() can be called on already closed streams in WAL roll
HBASE-16824 Writer.flush() can be called on already closed streams in WAL roll Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4e304b3f Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4e304b3f Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4e304b3f Branch: refs/heads/branch-1.1 Commit: 4e304b3f919a9000e15fd66df190ab97e63bc07d Parents: 382f88a Author: Enis SoztutarAuthored: Tue Oct 18 18:46:02 2016 -0700 Committer: Enis Soztutar Committed: Tue Oct 18 19:41:04 2016 -0700 -- .../hadoop/hbase/regionserver/wal/FSHLog.java | 31 -- .../wal/TestLogRollingNoCluster.java| 44 +--- 2 files changed, 57 insertions(+), 18 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/4e304b3f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 53545ed..76d09c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -1278,6 +1278,7 @@ public class FSHLog implements WAL { private volatile long sequence; // Keep around last exception thrown. Clear on successful sync. private final BlockingQueue syncFutures; +private volatile SyncFuture takeSyncFuture = null; /** * UPDATE! @@ -1367,13 +1368,21 @@ public class FSHLog implements WAL { return sequence; } +boolean areSyncFuturesReleased() { + // check whether there is no sync futures offered, and no in-flight sync futures that is being + // processed. 
+ return syncFutures.size() <= 0 + && takeSyncFuture == null; +} + public void run() { long currentSequence; while (!isInterrupted()) { int syncCount = 0; -SyncFuture takeSyncFuture; + try { while (true) { +takeSyncFuture = null; // We have to process what we 'take' from the queue takeSyncFuture = this.syncFutures.take(); currentSequence = this.sequence; @@ -2010,9 +2019,21 @@ public class FSHLog implements WAL { * @return True if outstanding sync futures still */ private boolean isOutstandingSyncs() { + // Look at SyncFutures in the EventHandler for (int i = 0; i < this.syncFuturesCount; i++) { if (!this.syncFutures[i].isDone()) return true; } + + return false; +} + +private boolean isOutstandingSyncsFromRunners() { + // Look at SyncFutures in the SyncRunners + for (SyncRunner syncRunner: syncRunners) { +if(syncRunner.isAlive() && !syncRunner.areSyncFuturesReleased()) { + return true; +} + } return false; } @@ -2123,11 +2144,13 @@ public class FSHLog implements WAL { // Wait on outstanding syncers; wait for them to finish syncing (unless we've been // shutdown or unless our latch has been thrown because we have been aborted or unless // this WAL is broken and we can't get a sync/append to complete). -while (!this.shutdown && this.zigzagLatch.isCocked() && -highestSyncedSequence.get() < currentSequence && +while ((!this.shutdown && this.zigzagLatch.isCocked() +&& highestSyncedSequence.get() < currentSequence && // We could be in here and all syncs are failing or failed. Check for this. Otherwise // we'll just be stuck here for ever. In other words, ensure there syncs running. 
-isOutstandingSyncs()) { +isOutstandingSyncs()) +// Wait for all SyncRunners to finish their work so that we can replace the writer +|| isOutstandingSyncsFromRunners()) { synchronized (this.safePointWaiter) { this.safePointWaiter.wait(0, 1); } http://git-wip-us.apache.org/repos/asf/hbase/blob/4e304b3f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java index 8727e23..722c218 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java +++
[2/5] hbase git commit: HBASE-16824 Writer.flush() can be called on already closed streams in WAL roll
HBASE-16824 Writer.flush() can be called on already closed streams in WAL roll Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/019c7f93 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/019c7f93 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/019c7f93 Branch: refs/heads/branch-1 Commit: 019c7f9303a7242b7c5d6713bed414b180b5c84a Parents: 6694191 Author: Enis SoztutarAuthored: Tue Oct 18 18:46:02 2016 -0700 Committer: Enis Soztutar Committed: Tue Oct 18 19:14:20 2016 -0700 -- .../hadoop/hbase/regionserver/wal/FSHLog.java | 31 -- .../wal/TestLogRollingNoCluster.java| 43 ++-- 2 files changed, 57 insertions(+), 17 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/019c7f93/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 11ebfef..a8b0372 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -1138,6 +1138,7 @@ public class FSHLog implements WAL { private volatile long sequence; // Keep around last exception thrown. Clear on successful sync. private final BlockingQueue syncFutures; +private volatile SyncFuture takeSyncFuture = null; /** * UPDATE! @@ -1227,13 +1228,21 @@ public class FSHLog implements WAL { return sequence; } +boolean areSyncFuturesReleased() { + // check whether there is no sync futures offered, and no in-flight sync futures that is being + // processed. 
+ return syncFutures.size() <= 0 + && takeSyncFuture == null; +} + public void run() { long currentSequence; while (!isInterrupted()) { int syncCount = 0; -SyncFuture takeSyncFuture; + try { while (true) { +takeSyncFuture = null; // We have to process what we 'take' from the queue takeSyncFuture = this.syncFutures.take(); currentSequence = this.sequence; @@ -1744,9 +1753,21 @@ public class FSHLog implements WAL { * @return True if outstanding sync futures still */ private boolean isOutstandingSyncs() { + // Look at SyncFutures in the EventHandler for (int i = 0; i < this.syncFuturesCount; i++) { if (!this.syncFutures[i].isDone()) return true; } + + return false; +} + +private boolean isOutstandingSyncsFromRunners() { + // Look at SyncFutures in the SyncRunners + for (SyncRunner syncRunner: syncRunners) { +if(syncRunner.isAlive() && !syncRunner.areSyncFuturesReleased()) { + return true; +} + } return false; } @@ -1857,11 +1878,13 @@ public class FSHLog implements WAL { // Wait on outstanding syncers; wait for them to finish syncing (unless we've been // shutdown or unless our latch has been thrown because we have been aborted or unless // this WAL is broken and we can't get a sync/append to complete). -while (!this.shutdown && this.zigzagLatch.isCocked() && -highestSyncedSequence.get() < currentSequence && +while ((!this.shutdown && this.zigzagLatch.isCocked() +&& highestSyncedSequence.get() < currentSequence && // We could be in here and all syncs are failing or failed. Check for this. Otherwise // we'll just be stuck here for ever. In other words, ensure there syncs running. 
-isOutstandingSyncs()) { +isOutstandingSyncs()) +// Wait for all SyncRunners to finish their work so that we can replace the writer +|| isOutstandingSyncsFromRunners()) { synchronized (this.safePointWaiter) { this.safePointWaiter.wait(0, 1); } http://git-wip-us.apache.org/repos/asf/hbase/blob/019c7f93/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java index 7ce3615..bca4a7e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java +++
[3/5] hbase git commit: HBASE-16824 Writer.flush() can be called on already closed streams in WAL roll
HBASE-16824 Writer.flush() can be called on already closed streams in WAL roll Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c5172262 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c5172262 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c5172262 Branch: refs/heads/branch-1.3 Commit: c51722629418b8b5e3a6e688219ee7d806f251c7 Parents: d38310a Author: Enis SoztutarAuthored: Tue Oct 18 18:46:02 2016 -0700 Committer: Enis Soztutar Committed: Tue Oct 18 19:16:31 2016 -0700 -- .../hadoop/hbase/regionserver/wal/FSHLog.java | 31 -- .../wal/TestLogRollingNoCluster.java| 43 ++-- 2 files changed, 57 insertions(+), 17 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/c5172262/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 097101b..9993d62 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -1131,6 +1131,7 @@ public class FSHLog implements WAL { private volatile long sequence; // Keep around last exception thrown. Clear on successful sync. private final BlockingQueue syncFutures; +private volatile SyncFuture takeSyncFuture = null; /** * UPDATE! @@ -1220,13 +1221,21 @@ public class FSHLog implements WAL { return sequence; } +boolean areSyncFuturesReleased() { + // check whether there is no sync futures offered, and no in-flight sync futures that is being + // processed. 
+ return syncFutures.size() <= 0 + && takeSyncFuture == null; +} + public void run() { long currentSequence; while (!isInterrupted()) { int syncCount = 0; -SyncFuture takeSyncFuture; + try { while (true) { +takeSyncFuture = null; // We have to process what we 'take' from the queue takeSyncFuture = this.syncFutures.take(); currentSequence = this.sequence; @@ -1737,9 +1746,21 @@ public class FSHLog implements WAL { * @return True if outstanding sync futures still */ private boolean isOutstandingSyncs() { + // Look at SyncFutures in the EventHandler for (int i = 0; i < this.syncFuturesCount; i++) { if (!this.syncFutures[i].isDone()) return true; } + + return false; +} + +private boolean isOutstandingSyncsFromRunners() { + // Look at SyncFutures in the SyncRunners + for (SyncRunner syncRunner: syncRunners) { +if(syncRunner.isAlive() && !syncRunner.areSyncFuturesReleased()) { + return true; +} + } return false; } @@ -1850,11 +1871,13 @@ public class FSHLog implements WAL { // Wait on outstanding syncers; wait for them to finish syncing (unless we've been // shutdown or unless our latch has been thrown because we have been aborted or unless // this WAL is broken and we can't get a sync/append to complete). -while (!this.shutdown && this.zigzagLatch.isCocked() && -highestSyncedSequence.get() < currentSequence && +while ((!this.shutdown && this.zigzagLatch.isCocked() +&& highestSyncedSequence.get() < currentSequence && // We could be in here and all syncs are failing or failed. Check for this. Otherwise // we'll just be stuck here for ever. In other words, ensure there syncs running. 
-isOutstandingSyncs()) { +isOutstandingSyncs()) +// Wait for all SyncRunners to finish their work so that we can replace the writer +|| isOutstandingSyncsFromRunners()) { synchronized (this.safePointWaiter) { this.safePointWaiter.wait(0, 1); } http://git-wip-us.apache.org/repos/asf/hbase/blob/c5172262/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java index 7ce3615..bca4a7e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java +++
hbase git commit: HBASE-16863 Move Backup constants from HConstants to BackupRestoreConstants (Vladimir Rodionov)
Repository: hbase Updated Branches: refs/heads/HBASE-7912 914d162ca -> 2e67f72f5 HBASE-16863 Move Backup constants from HConstants to BackupRestoreConstants (Vladimir Rodionov) Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2e67f72f Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2e67f72f Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2e67f72f Branch: refs/heads/HBASE-7912 Commit: 2e67f72f535e3039db7c096d8b53ba4d14bbd263 Parents: 914d162 Author: tedyuAuthored: Tue Oct 18 14:13:04 2016 -0700 Committer: tedyu Committed: Tue Oct 18 14:13:04 2016 -0700 -- .../org/apache/hadoop/hbase/HConstants.java | 7 --- .../hadoop/hbase/backup/BackupDriver.java | 2 +- .../hbase/backup/BackupRestoreConstants.java| 56 .../hadoop/hbase/backup/RestoreDriver.java | 1 - .../hbase/backup/impl/BackupCommands.java | 3 +- .../hadoop/hbase/backup/impl/BackupManager.java | 7 +-- .../backup/impl/BackupRestoreConstants.java | 47 .../hbase/backup/impl/BackupSystemTable.java| 4 +- .../backup/impl/FullTableBackupClient.java | 2 +- .../hbase/backup/impl/HBaseBackupAdmin.java | 1 + .../hbase/backup/master/BackupLogCleaner.java | 4 +- .../hbase/backup/util/BackupClientUtil.java | 2 +- .../hbase/backup/util/BackupServerUtil.java | 2 +- .../org/apache/hadoop/hbase/master/HMaster.java | 2 +- 14 files changed, 74 insertions(+), 66 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/2e67f72f/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java -- diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 59948ca..ac02c38 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1260,13 +1260,6 @@ public final class HConstants { public static final String DEFAULT_TEMPORARY_HDFS_DIRECTORY = "/user/" + 
System.getProperty("user.name") + "/hbase-staging"; - /** - * Backup/Restore constants - */ - public final static String BACKUP_ENABLE_KEY = "hbase.backup.enable"; - public final static boolean BACKUP_ENABLE_DEFAULT = true; - public final static String BACKUP_SYSTEM_TTL_KEY = "hbase.backup.system.ttl"; - public final static int BACKUP_SYSTEM_TTL_DEFAULT = FOREVER; private HConstants() { // Can't be instantiated with this ctor. http://git-wip-us.apache.org/repos/asf/hbase/blob/2e67f72f/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java index 57c7a12..99b7460 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java @@ -26,8 +26,8 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand; import org.apache.hadoop.hbase.backup.impl.BackupCommands; -import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.util.AbstractHBaseTool; http://git-wip-us.apache.org/repos/asf/hbase/blob/2e67f72f/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java new file mode 100644 index 000..76f0e75 --- /dev/null +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may
hbase git commit: HBASE-16866 Avoid NPE in AsyncRequestFutureImpl#updateStats (ChiaPing Tsai)
Repository: hbase Updated Branches: refs/heads/master bb6cc4d43 -> 6c89c6251 HBASE-16866 Avoid NPE in AsyncRequestFutureImpl#updateStats (ChiaPing Tsai) Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6c89c625 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6c89c625 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6c89c625 Branch: refs/heads/master Commit: 6c89c6251ff611c2f10bfd6f8c9f8a2d717dc71b Parents: bb6cc4d Author: tedyuAuthored: Tue Oct 18 11:10:33 2016 -0700 Committer: tedyu Committed: Tue Oct 18 11:10:33 2016 -0700 -- .../org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java | 5 + 1 file changed, 5 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/6c89c625/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java index d48179b..6b6b99a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java @@ -977,6 +977,11 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { for (Map.Entry regionStats : results.entrySet()) { byte[] regionName = regionStats.getKey(); ClientProtos.RegionLoadStats stat = regionStats.getValue().getStat(); + if (stat == null) { +LOG.error("No ClientProtos.RegionLoadStats found for server=" + server + + ", region=" + Bytes.toStringBinary(regionName)); +continue; + } RegionLoadStats regionLoadstats = ProtobufUtil.createRegionLoadStats(stat); ResultStatsUtil.updateStats(asyncProcess.connection.getStatisticsTracker(), server, regionName, regionLoadstats);
hbase git commit: HBASE-16854 Refactor the org.apache.hadoop.hbase.client.Action (ChiaPing Tsai)
Repository: hbase Updated Branches: refs/heads/master 317136e27 -> bb6cc4d43 HBASE-16854 Refactor the org.apache.hadoop.hbase.client.Action (ChiaPing Tsai) Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bb6cc4d4 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bb6cc4d4 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bb6cc4d4 Branch: refs/heads/master Commit: bb6cc4d43e36fa1d558c6ece2c5c6d1ed414db0c Parents: 317136e Author: tedyuAuthored: Tue Oct 18 09:47:25 2016 -0700 Committer: tedyu Committed: Tue Oct 18 09:47:25 2016 -0700 -- .../org/apache/hadoop/hbase/client/Action.java | 29 +++--- .../hadoop/hbase/client/AsyncProcess.java | 30 +++--- .../hbase/client/AsyncRequestFutureImpl.java| 104 +-- .../hadoop/hbase/client/DelayingRunner.java | 10 +- .../hadoop/hbase/client/HTableMultiplexer.java | 8 +- .../apache/hadoop/hbase/client/MultiAction.java | 12 +-- .../hbase/client/MultiServerCallable.java | 14 +-- .../hbase/shaded/protobuf/RequestConverter.java | 12 +-- .../hadoop/hbase/client/TestAsyncProcess.java | 40 +-- .../hadoop/hbase/client/TestDelayingRunner.java | 4 +- .../hadoop/hbase/CoordinatedStateManager.java | 2 +- 11 files changed, 140 insertions(+), 125 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/bb6cc4d4/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java index 1c38349..ef05912 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java @@ -22,21 +22,18 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.classification.InterfaceAudience; /** - * A Get, Put, Increment, Append, or Delete associated with it's region. 
Used internally by + * A Get, Put, Increment, Append, or Delete associated with it's region. Used internally by * {@link Table#batch} to associate the action with it's region and maintain - * the index from the original request. + * the index from the original request. */ @InterfaceAudience.Private -//TODO: R is never used -public class Action implements Comparable { - // TODO: This class should not be visible outside of the client package. - private Row action; - private int originalIndex; +public class Action implements Comparable { + private final Row action; + private final int originalIndex; private long nonce = HConstants.NO_NONCE; private int replicaId = RegionReplicaUtil.DEFAULT_REPLICA_ID; public Action(Row action, int originalIndex) { -super(); this.action = action; this.originalIndex = originalIndex; } @@ -46,15 +43,13 @@ public class Action implements Comparable { * @param action Original action. * @param replicaId Replica id for the new action. */ - public Action(Action action, int replicaId) { -super(); + public Action(Action action, int replicaId) { this.action = action.action; this.nonce = action.nonce; this.originalIndex = action.originalIndex; this.replicaId = replicaId; } - public void setNonce(long nonce) { this.nonce = nonce; } @@ -75,10 +70,9 @@ public class Action implements Comparable { return replicaId; } - @SuppressWarnings("rawtypes") @Override - public int compareTo(Object o) { -return action.compareTo(((Action) o).getAction()); + public int compareTo(Action other) { +return action.compareTo(other.getAction()); } @Override @@ -89,9 +83,10 @@ public class Action implements Comparable { @Override public boolean equals(Object obj) { if (this == obj) return true; -if (obj == null || getClass() != obj.getClass()) return false; -Action other = (Action) obj; -return compareTo(other) == 0; +if (obj instanceof Action) { + return compareTo((Action) obj) == 0; +} +return false; } public long getNonce() { 
http://git-wip-us.apache.org/repos/asf/hbase/blob/bb6cc4d4/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index abefc46..e653c80 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
hbase git commit: HBASE-16754 All WALSplitter OutputSinks should process compaction events
Repository: hbase Updated Branches: refs/heads/master 5bc518b38 -> 317136e27 HBASE-16754 All WALSplitter OutputSinks should process compaction events Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/317136e2 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/317136e2 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/317136e2 Branch: refs/heads/master Commit: 317136e2721f0722104283cba326bde4c59d0742 Parents: 5bc518b Author: Gary Helmling Authored: Thu Oct 13 15:31:42 2016 -0700 Committer: Gary Helmling Committed: Tue Oct 18 09:37:37 2016 -0700 -- .../hadoop/hbase/regionserver/wal/WALEdit.java | 11 +++- .../RegionReplicaReplicationEndpoint.java | 2 +- .../apache/hadoop/hbase/wal/WALSplitter.java| 19 --- .../apache/hadoop/hbase/wal/TestWALSplit.java | 54 4 files changed, 77 insertions(+), 9 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/317136e2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java index f92db13..75c1c3e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java @@ -363,13 +363,22 @@ public class WALEdit implements Writable, HeapSize { * @return deserialized CompactionDescriptor or null. 
*/ public static CompactionDescriptor getCompaction(Cell kv) throws IOException { -if (CellUtil.matchingColumn(kv, METAFAMILY, COMPACTION)) { +if (isCompactionMarker(kv)) { return CompactionDescriptor.parseFrom(CellUtil.cloneValue(kv)); } return null; } /** + * Returns true if the given cell is a serialized {@link CompactionDescriptor} + * + * @see #getCompaction(Cell) + */ + public static boolean isCompactionMarker(Cell cell) { +return CellUtil.matchingColumn(cell, METAFAMILY, COMPACTION); + } + + /** * Create a bulk loader WALEdit * * @param hriThe HRegionInfo for the region in which we are bulk loading http://git-wip-us.apache.org/repos/asf/hbase/blob/317136e2/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java index 0e33e55..dc4fad0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java @@ -361,7 +361,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { } @Override -public boolean keepRegionEvents() { +public boolean keepRegionEvent(Entry entry) { return true; } http://git-wip-us.apache.org/repos/asf/hbase/blob/317136e2/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index 0483651..2fe9f38 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ 
-358,7 +358,7 @@ public class WALSplitter { continue; } // Don't send Compaction/Close/Open region events to recovered edit type sinks. -if (entry.getEdit().isMetaEdit() && !outputSink.keepRegionEvents()) { +if (entry.getEdit().isMetaEdit() && !outputSink.keepRegionEvent(entry)) { editsSkipped++; continue; } @@ -1277,12 +1277,11 @@ public class WALSplitter { /** * Some WALEdit's contain only KV's for account on what happened to a region. - * Not all sinks will want to get those edits. + * Not all sinks will want to get all of those edits. * - * @return Return true if this sink wants to get all WALEdit's regardless of if it's a region - * event. + * @return Return true if this sink wants to accept this region-level WALEdit. */ -public abstract
hbase git commit: HBASE-16774 [shell] Add coverage to TestShell when ZooKeeper is not reachable
Repository: hbase Updated Branches: refs/heads/master 0d40a52ee -> 5bc518b38 HBASE-16774 [shell] Add coverage to TestShell when ZooKeeper is not reachable Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5bc518b3 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5bc518b3 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5bc518b3 Branch: refs/heads/master Commit: 5bc518b38717413c844968a29f817b5c99b9a136 Parents: 0d40a52 Author: Esteban Gutierrez Authored: Wed Oct 5 15:25:54 2016 -0700 Committer: Esteban Gutierrez Committed: Tue Oct 18 09:08:33 2016 -0700 -- .../hbase/client/ConnectionImplementation.java | 2 +- .../apache/hadoop/hbase/client/HBaseAdmin.java | 40 ++--- .../apache/hadoop/hbase/client/Registry.java| 4 +- .../hadoop/hbase/client/ZooKeeperRegistry.java | 7 +- .../hadoop/hbase/mapred/TableOutputFormat.java | 13 +++ .../hadoop/hbase/client/TestClientTimeouts.java | 8 +- .../hadoop/hbase/client/TestShellNoCluster.java | 60 + .../ruby/hbase/test_connection_no_cluster.rb| 46 ++ .../src/test/ruby/no_cluster_tests_runner.rb| 92 9 files changed, 247 insertions(+), 25 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/5bc518b3/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index 922168d..53eb522 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -439,7 +439,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable { protected String clusterId = null; - protected void retrieveClusterId() throws IOException { + protected void retrieveClusterId() { if (clusterId != null) { return; 
} http://git-wip-us.apache.org/repos/asf/hbase/blob/5bc518b3/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index e7f6929..51d07e3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -2112,10 +2112,12 @@ public class HBaseAdmin implements Admin { /** * Is HBase available? Throw an exception if not. * @param conf system configuration - * @throws ZooKeeperConnectionException if unable to connect to zookeeper] + * @throws MasterNotRunningException if the master is not running. + * @throws ZooKeeperConnectionException if unable to connect to zookeeper. + * // TODO do not expose ZKConnectionException. */ public static void available(final Configuration conf) - throws ZooKeeperConnectionException, InterruptedIOException { + throws MasterNotRunningException, ZooKeeperConnectionException, IOException { Configuration copyOfConf = HBaseConfiguration.create(conf); // We set it to make it fail as soon as possible if HBase is not available copyOfConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); @@ -2124,19 +2126,29 @@ public class HBaseAdmin implements Admin { // Check ZK first. // If the connection exists, we may have a connection to ZK that does not work anymore try (ClusterConnection connection = - (ClusterConnection) ConnectionFactory.createConnection(copyOfConf); - ZooKeeperKeepAliveConnection zkw = ((ConnectionImplementation) connection). - getKeepAliveZooKeeperWatcher();) { - // This is NASTY. FIX Dependent on internal implementation! TODO - zkw.getRecoverableZooKeeper().getZooKeeper().exists(zkw.znodePaths.baseZNode, false); +(ClusterConnection) ConnectionFactory.createConnection(copyOfConf)) { + // Check ZK first. 
+ // If the connection exists, we may have a connection to ZK that does not work anymore + ZooKeeperKeepAliveConnection zkw = null; + try { +// This is NASTY. FIX Dependent on internal implementation! TODO +zkw = ((ConnectionImplementation) connection) +.getKeepAliveZooKeeperWatcher(); + zkw.getRecoverableZooKeeper().getZooKeeper().exists(zkw.znodePaths.baseZNode, false); + } catch (IOException e) { +throw new
[1/2] hbase git commit: Revert "Revert "HBASE-16698 Performance issue: handlers stuck waiting for CountDownLatch inside WALKey#getWriteEntry under high writing workload""
Repository: hbase Updated Branches: refs/heads/master b4f6ebde2 -> 0d40a52ee Revert "Revert "HBASE-16698 Performance issue: handlers stuck waiting for CountDownLatch inside WALKey#getWriteEntry under high writing workload"" This reverts commit 13baf4d37a7d3b4b0194dc616c8ac15959efa18f. This is a revert of a revert. Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ec1adb7b Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ec1adb7b Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ec1adb7b Branch: refs/heads/master Commit: ec1adb7baaca5b89ff11a24f26f49fec63e754d8 Parents: b4f6ebd Author: Michael Stack Authored: Tue Oct 18 08:34:29 2016 -0700 Committer: Michael Stack Committed: Tue Oct 18 08:34:29 2016 -0700 -- .../hadoop/hbase/regionserver/HRegion.java | 85 +++- .../hbase/regionserver/wal/FSWALEntry.java | 19 +++-- .../org/apache/hadoop/hbase/wal/WALKey.java | 26 ++ .../hadoop/hbase/regionserver/TestHRegion.java | 7 +- 4 files changed, 108 insertions(+), 29 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/ec1adb7b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 2cf55b5..311937b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -64,6 +64,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.LongAdder; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.commons.logging.Log; @@ -197,6 +198,10 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi public static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY = "hbase.hregion.scan.loadColumnFamiliesOnDemand"; + /** Config key for using mvcc pre-assign feature for put */ + public static final String HREGION_MVCC_PRE_ASSIGN = "hbase.hregion.mvcc.preassign"; + public static final boolean DEFAULT_HREGION_MVCC_PRE_ASSIGN = true; + /** * This is the global default value for durability. All tables/mutations not * defining a durability or using USE_DEFAULT will default to this value. @@ -585,6 +590,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // that has non-default scope private final NavigableMap replicationScope = new TreeMap ( Bytes.BYTES_COMPARATOR); + // flag and lock for MVCC preassign + private final boolean mvccPreAssign; + private final ReentrantLock preAssignMvccLock; /** * HRegion constructor. This constructor should only be used for testing and @@ -744,6 +752,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi false : conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE, HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE); + +// get mvcc pre-assign flag and lock +this.mvccPreAssign = conf.getBoolean(HREGION_MVCC_PRE_ASSIGN, DEFAULT_HREGION_MVCC_PRE_ASSIGN); +if (this.mvccPreAssign) { + this.preAssignMvccLock = new ReentrantLock(); +} else { + this.preAssignMvccLock = null; +} } void setHTableSpecificConf() { @@ -3214,36 +3230,61 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // STEP 4. Append the final edit to WAL and sync. 
Mutation mutation = batchOp.getMutation(firstIndex); WALKey walKey = null; + long txid; if (replay) { // use wal key from the original walKey = new ReplayHLogKey(this.getRegionInfo().getEncodedNameAsBytes(), this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, now, mutation.getClusterIds(), currentNonceGroup, currentNonce, mvcc); walKey.setOrigLogSeqNum(batchOp.getReplaySequenceId()); - } - // Not sure what is going on here when replay is going on... does the below append get - // called for replayed edits? Am afraid to change it without test. - if (!walEdit.isEmpty()) { -if (!replay) { - // we use HLogKey here instead of WALKey directly to support legacy coprocessors. - walKey = new HLogKey(this.getRegionInfo().getEncodedNameAsBytes(), - this.htableDescriptor.getTableName(),
[2/2] hbase git commit: Revert "Revert "HBASE-16698 Performance issue: handlers stuck waiting for CountDownLatch inside WALKey#getWriteEntry under high writing workload; ADDENDUM. Fix findbugs""
Revert "Revert "HBASE-16698 Performance issue: handlers stuck waiting for CountDownLatch inside WALKey#getWriteEntry under high writing workload; ADDENDUM. Fix findbugs"" This reverts commit f555b5be9c4574be7969c734270bd8922f522391. A revert of a revert Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0d40a52e Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0d40a52e Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0d40a52e Branch: refs/heads/master Commit: 0d40a52ee82651866ad124183367edb4d9c52dda Parents: ec1adb7 Author: Michael Stack Authored: Tue Oct 18 08:35:45 2016 -0700 Committer: Michael Stack Committed: Tue Oct 18 08:35:45 2016 -0700 -- .../src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java | 1 - 1 file changed, 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/0d40a52e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 311937b..71cc247 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -3309,7 +3309,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi this.mvcc.advanceTo(batchOp.getReplaySequenceId()); } else { // writeEntry won't be empty if not in replay mode -assert writeEntry != null; mvcc.completeAndWait(writeEntry); writeEntry = null; }
hbase git commit: HBASE-16836 Implement increment and append
Repository: hbase Updated Branches: refs/heads/master 1b005f30e -> b4f6ebde2 HBASE-16836 Implement increment and append Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b4f6ebde Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b4f6ebde Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b4f6ebde Branch: refs/heads/master Commit: b4f6ebde24ef60b35564734067e300c8fc6258af Parents: 1b005f3 Author: zhangduo Authored: Tue Oct 18 13:17:57 2016 +0800 Committer: zhangduo Committed: Tue Oct 18 22:34:14 2016 +0800 -- .../hbase/client/AsyncConnectionImpl.java | 15 +++ .../apache/hadoop/hbase/client/AsyncTable.java | 91 +- .../hadoop/hbase/client/AsyncTableImpl.java | 87 + .../hbase/client/ConnectionImplementation.java | 21 +--- .../hadoop/hbase/client/ConnectionUtils.java| 20 +++ .../org/apache/hadoop/hbase/client/HTable.java | 27 ++--- .../hadoop/hbase/client/NonceGenerator.java | 6 +- .../client/PerClientRandomNonceGenerator.java | 20 ++- .../hbase/client/CoprocessorHConnection.java| 7 +- .../hadoop/hbase/client/TestAsyncTable.java | 92 +++--- .../hbase/client/TestAsyncTableNoncedRetry.java | 121 +++ .../hadoop/hbase/client/TestMultiParallel.java | 15 ++- .../master/TestDistributedLogSplitting.java | 11 +- 13 files changed, 439 insertions(+), 94 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/b4f6ebde/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java index c50e244..7a8fd9a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java @@ -20,7 +20,9 @@ package org.apache.hadoop.hbase.client; import static 
org.apache.hadoop.hbase.HConstants.CLUSTER_ID_DEFAULT; import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_RPC_TIMEOUT; import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_TIMEOUT_KEY; +import static org.apache.hadoop.hbase.client.ConnectionUtils.NO_NONCE_GENERATOR; import static org.apache.hadoop.hbase.client.ConnectionUtils.getStubKey; +import static org.apache.hadoop.hbase.client.NonceGenerator.CLIENT_NONCES_ENABLED_KEY; import io.netty.util.HashedWheelTimer; @@ -80,8 +82,11 @@ class AsyncConnectionImpl implements AsyncConnection { final AsyncRpcRetryingCallerFactory callerFactory; + private final NonceGenerator nonceGenerator; + private final ConcurrentMap rsStubs = new ConcurrentHashMap<>(); + @SuppressWarnings("deprecation") public AsyncConnectionImpl(Configuration conf, User user) throws IOException { this.conf = conf; this.user = user; @@ -103,6 +108,11 @@ class AsyncConnectionImpl implements AsyncConnection { this.hostnameCanChange = conf.getBoolean(RESOLVE_HOSTNAME_ON_FAIL_KEY, true); this.rpcTimeout = conf.getInt(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT); this.callerFactory = new AsyncRpcRetryingCallerFactory(this, RETRY_TIMER); +if (conf.getBoolean(CLIENT_NONCES_ENABLED_KEY, true)) { + nonceGenerator = PerClientRandomNonceGenerator.get(); +} else { + nonceGenerator = NO_NONCE_GENERATOR; +} } @Override @@ -127,6 +137,11 @@ class AsyncConnectionImpl implements AsyncConnection { return locator; } + // ditto + public NonceGenerator getNonceGenerator() { +return nonceGenerator; + } + private ClientService.Interface createRegionServerStub(ServerName serverName) throws IOException { return ClientService.newStub(rpcClient.createRpcChannel(serverName, user, rpcTimeout)); } http://git-wip-us.apache.org/repos/asf/hbase/blob/b4f6ebde/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java index c4e7cec..2ed3c26 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.client; +import com.google.common.base.Preconditions; + import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; @@ -24,6 +26,8 @@
hbase git commit: HBASE-16733 add hadoop 3.0.0-alpha1 to precommit checks
Repository: hbase Updated Branches: refs/heads/master 10840a51e -> 1b005f30e HBASE-16733 add hadoop 3.0.0-alpha1 to precommit checks Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1b005f30 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1b005f30 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1b005f30 Branch: refs/heads/master Commit: 1b005f30e45c279bb74c8994cdd5628fa6d2fdf2 Parents: 10840a5 Author: Jonathan M Hsieh Authored: Thu Sep 29 14:56:01 2016 -0700 Committer: Jonathan M Hsieh Committed: Tue Oct 18 01:30:03 2016 -0700 -- dev-support/hbase-personality.sh | 21 ++--- 1 file changed, 18 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/1b005f30/dev-support/hbase-personality.sh -- diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index af397f0..469359c 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -53,7 +53,8 @@ function personality_globals # TODO use PATCH_BRANCH to select hadoop versions to use. # All supported Hadoop versions that we want to test the compilation with - HBASE_HADOOP_VERSIONS="2.4.0 2.4.1 2.5.0 2.5.1 2.5.2 2.6.1 2.6.2 2.6.3 2.7.1" + HBASE_HADOOP2_VERSIONS="2.4.0 2.4.1 2.5.0 2.5.1 2.5.2 2.6.1 2.6.2 2.6.3 2.7.1" + HBASE_HADOOP3_VERSIONS="3.0.0-alpha1" # TODO use PATCH_BRANCH to select jdk versions to use. 
@@ -188,7 +189,7 @@ function hadoopcheck_rebuild big_console_header "Compiling against various Hadoop versions" export MAVEN_OPTS="${MAVEN_OPTS}" - for hadoopver in ${HBASE_HADOOP_VERSIONS}; do + for hadoopver in ${HBASE_HADOOP2_VERSIONS}; do logfile="${PATCH_DIR}/patch-javac-${hadoopver}.txt" echo_and_redirect "${logfile}" \ "${MAVEN}" clean install \ @@ -201,11 +202,25 @@ function hadoopcheck_rebuild fi done + for hadoopver in ${HBASE_HADOOP3_VERSIONS}; do +logfile="${PATCH_DIR}/patch-javac-${hadoopver}.txt" +echo_and_redirect "${logfile}" \ + "${MAVEN}" clean install \ +-DskipTests -DHBasePatchProcess \ +-Dhadoop-three.version="${hadoopver} \ +-Dhadoop.profile=3.0" +count=$(${GREP} -c ERROR "${logfile}") +if [[ ${count} -gt 0 ]]; then + add_vote_table -1 hadoopcheck "${BUILDMODEMSG} causes ${count} errors with Hadoop v${hadoopver}." + ((result=result+1)) +fi + done + if [[ ${result} -gt 0 ]]; then return 1 fi - add_vote_table +1 hadoopcheck "Patch does not cause any errors with Hadoop ${HBASE_HADOOP_VERSIONS}." + add_vote_table +1 hadoopcheck "Patch does not cause any errors with Hadoop ${HBASE_HADOOP2_VERSIONS} or ${HBASE_HADOOP3_VERSIONS}." return 0 }