hbase git commit: HBASE-17584 Expose ScanMetrics with ResultScanner rather than Scan

2017-03-20 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-1 02d9bf0c5 -> b973d3fd4


HBASE-17584 Expose ScanMetrics with ResultScanner rather than Scan


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b973d3fd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b973d3fd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b973d3fd

Branch: refs/heads/branch-1
Commit: b973d3fd467b4382198aefc35d2c4e9b4c41ee6d
Parents: 02d9bf0
Author: zhangduo 
Authored: Fri Feb 24 14:08:10 2017 +0800
Committer: zhangduo 
Committed: Mon Mar 20 16:32:20 2017 +0800

--
 .../hadoop/hbase/client/AbstractClientScanner.java | 17 -
 .../apache/hadoop/hbase/client/ClientScanner.java  | 11 ++-
 .../apache/hadoop/hbase/client/ResultScanner.java  | 14 +-
 .../java/org/apache/hadoop/hbase/client/Scan.java  |  6 +-
 .../client/metrics/ServerSideScanMetrics.java  | 13 +++--
 .../apache/hadoop/hbase/protobuf/ProtobufUtil.java |  4 ++--
 .../hadoop/hbase/rest/client/RemoteHTable.java | 11 +++
 .../hbase/client/ClientSideRegionScanner.java  |  1 -
 .../hbase/mapreduce/TableRecordReaderImpl.java |  4 ++--
 .../TestServerSideScanMetricsFromClientSide.java   | 14 +++---
 10 files changed, 61 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b973d3fd/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
index f926fa9..f9ab7e9 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
@@ -42,13 +42,11 @@ public abstract class AbstractClientScanner implements 
ResultScanner {
   }
 
   /**
-   * Used internally accumulating metrics on scan. To
-   * enable collection of metrics on a Scanner, call {@link 
Scan#setScanMetricsEnabled(boolean)}.
-   * These metrics are cleared at key transition points. Metrics are 
accumulated in the
-   * {@link Scan} object itself.
-   * @see Scan#getScanMetrics()
+   * Used internally accumulating metrics on scan. To enable collection of 
metrics on a Scanner,
+   * call {@link Scan#setScanMetricsEnabled(boolean)}.
* @return Returns the running {@link ScanMetrics} instance or null if scan 
metrics not enabled.
*/
+  @Override
   public ScanMetrics getScanMetrics() {
 return scanMetrics;
   }
@@ -63,7 +61,7 @@ public abstract class AbstractClientScanner implements 
ResultScanner {
* @throws IOException
*/
   @Override
-  public Result [] next(int nbRows) throws IOException {
+  public Result[] next(int nbRows) throws IOException {
 // Collect values to be returned here
 ArrayList<Result> resultSets = new ArrayList<Result>(nbRows);
 for(int i = 0; i < nbRows; i++) {
@@ -124,11 +122,4 @@ public abstract class AbstractClientScanner implements 
ResultScanner {
   }
 };
   }
-  /**
-   * Allow the client to renew the scanner's lease on the server.
-   * @return true if the lease was successfully renewed, false otherwise.
-   */
-  // Note that this method should be on ResultScanner, but that is marked 
stable.
-  // Callers have to cast their instance of ResultScanner to 
AbstractClientScanner to use this.
-  public abstract boolean renewLease();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b973d3fd/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index 57586d8..abcb67e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -47,7 +47,6 @@ import 
org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
 import org.apache.hadoop.hbase.exceptions.ScannerResetException;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -291,15 +290,17 @@ public abstract class ClientScanner extends 
AbstractClientScanner {
* for scan/map reduce scenarios, we will have multiple scans running at the 
same time. By
* defaul

hbase git commit: HBASE-17691 Add ScanMetrics support for async scan

2017-03-20 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 7c03a213f -> 5b4bb8217


HBASE-17691 Add ScanMetrics support for async scan


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5b4bb821
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5b4bb821
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5b4bb821

Branch: refs/heads/master
Commit: 5b4bb8217dd4327a89fa29c93ac37bc887d96c2c
Parents: 7c03a21
Author: zhangduo 
Authored: Mon Mar 20 17:12:53 2017 +0800
Committer: zhangduo 
Committed: Mon Mar 20 20:54:04 2017 +0800

--
 .../hadoop/hbase/client/AsyncClientScanner.java |  34 +++-
 .../client/AsyncRpcRetryingCallerFactory.java   |  24 ++-
 .../AsyncScanSingleRegionRpcRetryingCaller.java |  35 ++--
 .../hadoop/hbase/client/AsyncTableBase.java |   9 +-
 .../hadoop/hbase/client/AsyncTableImpl.java |   1 +
 .../hbase/client/AsyncTableResultScanner.java   |   9 +-
 .../hadoop/hbase/client/ClientScanner.java  |   8 +-
 .../hadoop/hbase/client/ConnectionUtils.java|  75 +
 .../hbase/client/RawScanResultConsumer.java |  10 ++
 .../hbase/client/ReversedScannerCallable.java   |  10 +-
 .../hadoop/hbase/client/ScanResultConsumer.java |   9 ++
 .../hadoop/hbase/client/ScannerCallable.java|  88 ++
 .../client/SimpleRawScanResultConsumer.java |  84 ++
 .../hbase/client/SimpleScanResultConsumer.java  |  75 +
 .../hadoop/hbase/client/TestAsyncTableScan.java |  42 -
 .../hbase/client/TestAsyncTableScanMetrics.java | 159 +++
 .../hbase/client/TestRawAsyncTableScan.java |  52 --
 17 files changed, 526 insertions(+), 198 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5b4bb821/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
index fa7aa81..2c1693d 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
@@ -19,8 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.HConstants.EMPTY_END_ROW;
 import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
-import static 
org.apache.hadoop.hbase.client.ConnectionUtils.createScanResultCache;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.getLocateType;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.*;
 
 import java.io.IOException;
 import java.util.concurrent.CompletableFuture;
@@ -29,6 +28,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
@@ -51,6 +51,8 @@ class AsyncClientScanner {
   // AsyncScanSingleRegionRpcRetryingCaller will modify this scan object 
directly.
   private final Scan scan;
 
+  private final ScanMetrics scanMetrics;
+
   private final RawScanResultConsumer consumer;
 
   private final TableName tableName;
@@ -88,29 +90,46 @@ class AsyncClientScanner {
 this.rpcTimeoutNs = rpcTimeoutNs;
 this.startLogErrorsCnt = startLogErrorsCnt;
 this.resultCache = createScanResultCache(scan);
+if (scan.isScanMetricsEnabled()) {
+  this.scanMetrics = new ScanMetrics();
+  consumer.onScanMetricsCreated(scanMetrics);
+} else {
+  this.scanMetrics = null;
+}
   }
 
   private static final class OpenScannerResponse {
 
 public final HRegionLocation loc;
 
+public final boolean isRegionServerRemote;
+
 public final ClientService.Interface stub;
 
 public final HBaseRpcController controller;
 
 public final ScanResponse resp;
 
-public OpenScannerResponse(HRegionLocation loc, Interface stub, 
HBaseRpcController controller,
-ScanResponse resp) {
+public OpenScannerResponse(HRegionLocation loc, boolean 
isRegionServerRemote, Interface stub,
+HBaseRpcController controller, ScanResponse resp) {
   this.loc = loc;
+  this.isRegionServerRemote = isRegionServerRemote;
   this.stub = stub;
   this.controller = controller;
   this.resp = resp;
 }
   }
 
+  private int openScannerTries;
+
   private CompletableFuture 
callOpenScanner(HBaseRpcController controller,
   HRegionLoc

hbase git commit: HBASE-17806 TestRSGroups#testMoveServersAndTables is flaky in master branch (Guangxu Cheng)

2017-03-20 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 5b4bb8217 -> 4088f822a


HBASE-17806 TestRSGroups#testMoveServersAndTables is flaky in master branch 
(Guangxu Cheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4088f822
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4088f822
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4088f822

Branch: refs/heads/master
Commit: 4088f822a449acc39c2408a287f820ec26acabf4
Parents: 5b4bb82
Author: tedyu 
Authored: Mon Mar 20 09:26:34 2017 -0700
Committer: tedyu 
Committed: Mon Mar 20 09:26:34 2017 -0700

--
 .../org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java |  3 +++
 .../apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java| 11 +--
 2 files changed, 12 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4088f822/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
index 35563c5..9219c23 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
@@ -132,6 +132,9 @@ public class RSGroupInfo {
 sb.append(", ");
 sb.append(" Servers:");
 sb.append(this.servers);
+sb.append(", ");
+sb.append(" Tables:");
+sb.append(this.tables);
 return sb.toString();
 
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4088f822/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
--
diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index e8cdb78..e5c89c3 100644
--- 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -694,6 +694,7 @@ public abstract class TestRSGroupsBase {
 
   @Test
   public void testMoveServersAndTables() throws Exception {
+LOG.info("testMoveServersAndTables");
 final RSGroupInfo newGroup = addGroup(getGroupName(name.getMethodName()), 
1);
 //create table
 final byte[] familyNameBytes = Bytes.toBytes("f");
@@ -718,6 +719,12 @@ public abstract class TestRSGroupsBase {
   }
 }
 
+LOG.debug("Print group info : " + rsGroupAdmin.listRSGroups());
+int oldDefaultGroupServerSize =
+
rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size();
+int oldDefaultGroupTableSize =
+
rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getTables().size();
+
 //test fail bogus server move
 try {
   
rsGroupAdmin.moveServersAndTables(Sets.newHashSet(Address.fromString("foo:")),
@@ -742,9 +749,9 @@ public abstract class TestRSGroupsBase {
 }
 
 //verify default group info
-Assert.assertEquals(3,
+Assert.assertEquals(oldDefaultGroupServerSize,
 
rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size());
-Assert.assertEquals(4,
+Assert.assertEquals(oldDefaultGroupTableSize,
 
rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getTables().size());
 
 //verify new group info



[15/28] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2017-03-20 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java
new file mode 100644
index 000..e90d5c1
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mapreduce;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
+import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.FileSplit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Simple MR input format for HFiles.
+ * This code was borrowed from Apache Crunch project.
+ * Updated to the recent version of HBase.
+ */
+public class HFileInputFormat extends FileInputFormat<NullWritable, Cell> {
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(HFileInputFormat.class);
+
+  /**
+   * File filter that removes all "hidden" files. This might be something 
worth removing from
+   * a more general purpose utility; it accounts for the presence of metadata 
files created
+   * in the way we're doing exports.
+   */
+  static final PathFilter HIDDEN_FILE_FILTER = new PathFilter() {
+@Override
+public boolean accept(Path p) {
+  String name = p.getName();
+  return !name.startsWith("_") && !name.startsWith(".");
+}
+  };
+
+  /**
+   * Record reader for HFiles.
+   */
+  private static class HFileRecordReader extends RecordReader<NullWritable, Cell> {
+
+private Reader in;
+protected Configuration conf;
+private HFileScanner scanner;
+
+/**
+ * A private cache of the key value so it doesn't need to be loaded twice 
from the scanner.
+ */
+private Cell value = null;
+private long count;
+private boolean seeked = false;
+
+@Override
+public void initialize(InputSplit split, TaskAttemptContext context)
+throws IOException, InterruptedException {
+  FileSplit fileSplit = (FileSplit) split;
+  conf = context.getConfiguration();
+  Path path = fileSplit.getPath();
+  FileSystem fs = path.getFileSystem(conf);
+  LOG.info("Initialize HFileRecordReader for {}", path);
+  this.in = HFile.createReader(fs, path, conf);
+
+  // The file info must be loaded before the scanner can be used.
+  // This seems like a bug in HBase, but it's easily worked around.
+  this.in.loadFileInfo();
+  this.scanner = in.getScanner(false, false);
+
+}
+
+
+@Override
+public boolean nextKeyValue() throws IOException, InterruptedException {
+  boolean hasNext;
+  if (!seeked) {
+LOG.info("Seeking to start");
+hasNext = scanner.seekTo();
+seeked = true;
+  } else {
+hasNext = scanner.next();
+  }
+  if (!hasNext) {
+return false;
+  }
+  value = scanner.getCell();
+  count++;
+  return true;
+}
+
+@Override
+public NullWritable getCurrentKey() throws IOException, 
InterruptedException {
+  return NullWritable.get();
+}
+
+@Override
+public Cell getCurrentValue() throws IOException, InterruptedException {
+  return value;
+}
+
+@Override
+public float getProgress() throws IOException, InterruptedException {
+ 

[04/28] hbase git commit: HBASE-17792 Use a shared thread pool for AtomicityWriter, AtomicGetReader, AtomicScanReader's connections in TestAcidGuarantees (Huaxiang Sun)

2017-03-20 Thread syuanjiang
HBASE-17792 Use a shared thread pool for AtomicityWriter, AtomicGetReader, 
AtomicScanReader's connections in TestAcidGuarantees (Huaxiang Sun)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7c19490b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7c19490b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7c19490b

Branch: refs/heads/hbase-12439
Commit: 7c19490bac854c4da6457b1edadb1e244924fa3d
Parents: 6fb44f7
Author: Michael Stack 
Authored: Thu Mar 16 15:15:28 2017 -0700
Committer: Michael Stack 
Committed: Thu Mar 16 15:15:28 2017 -0700

--
 .../apache/hadoop/hbase/TestAcidGuarantees.java | 51 
 1 file changed, 42 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7c19490b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java
index 569ca89..15250ac 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java
@@ -21,6 +21,11 @@ package org.apache.hadoop.hbase;
 import java.io.IOException;
 import java.util.List;
 import java.util.Random;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.logging.Log;
@@ -43,6 +48,7 @@ import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -85,6 +91,7 @@ public class TestAcidGuarantees implements Tool {
 
   // when run as main
   private Configuration conf;
+  private ExecutorService sharedPool = null;
 
   private void createTableIfMissing(boolean useMob)
 throws IOException {
@@ -117,12 +124,38 @@ public class TestAcidGuarantees implements Tool {
   conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 
0.9);
 }
 util = new HBaseTestingUtility(conf);
+sharedPool = createThreadPool();
   }
 
   public void setHBaseTestingUtil(HBaseTestingUtility util) {
 this.util = util;
   }
 
+  private ExecutorService createThreadPool() {
+
+int maxThreads = 256;
+int coreThreads = 128;
+
+long keepAliveTime = 60;
+BlockingQueue<Runnable> workQueue =
+  new LinkedBlockingQueue<>(maxThreads *
+  HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS);
+
+ThreadPoolExecutor tpe = new ThreadPoolExecutor(
+coreThreads,
+maxThreads,
+keepAliveTime,
+TimeUnit.SECONDS,
+workQueue,
+Threads.newDaemonThreadFactory(toString() + "-shared"));
+tpe.allowCoreThreadTimeOut(true);
+return tpe;
+  }
+
+  public ExecutorService getSharedThreadPool() {
+return sharedPool;
+  }
+
   /**
* Thread that does random full-row writes into a table.
*/
@@ -136,11 +169,11 @@ public class TestAcidGuarantees implements Tool {
 AtomicLong numWritten = new AtomicLong();
 
 public AtomicityWriter(TestContext ctx, byte targetRows[][],
-   byte targetFamilies[][]) throws IOException {
+   byte targetFamilies[][], ExecutorService pool) 
throws IOException {
   super(ctx);
   this.targetRows = targetRows;
   this.targetFamilies = targetFamilies;
-  connection = ConnectionFactory.createConnection(ctx.getConf());
+  connection = ConnectionFactory.createConnection(ctx.getConf(), pool);
   table = connection.getTable(TABLE_NAME);
 }
 public void doAnAction() throws Exception {
@@ -182,11 +215,11 @@ public class TestAcidGuarantees implements Tool {
 AtomicLong numRead = new AtomicLong();
 
 public AtomicGetReader(TestContext ctx, byte targetRow[],
-   byte targetFamilies[][]) throws IOException {
+   byte targetFamilies[][], ExecutorService pool) 
throws IOException {
   super(ctx);
   this.targetRow = targetRow;
   this.targetFamilies = targetFamilies;
-  connection = ConnectionFactory.createConnection(ctx.getConf());
+  connection = ConnectionFactory.createConnection(ctx.getConf(), pool);
   table = connecti

[21/28] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2017-03-20 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/BackupProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/BackupProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/BackupProtos.java
new file mode 100644
index 000..4cad101
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/BackupProtos.java
@@ -0,0 +1,7013 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: Backup.proto
+
+package org.apache.hadoop.hbase.shaded.protobuf.generated;
+
+public final class BackupProtos {
+  private BackupProtos() {}
+  public static void registerAllExtensions(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
registry) {
+  }
+
+  public static void registerAllExtensions(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry 
registry) {
+registerAllExtensions(
+
(org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) 
registry);
+  }
+  /**
+   * Protobuf enum {@code hbase.pb.BackupType}
+   */
+  public enum BackupType
+  implements 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum {
+/**
+ * FULL = 0;
+ */
+FULL(0),
+/**
+ * INCREMENTAL = 1;
+ */
+INCREMENTAL(1),
+;
+
+/**
+ * FULL = 0;
+ */
+public static final int FULL_VALUE = 0;
+/**
+ * INCREMENTAL = 1;
+ */
+public static final int INCREMENTAL_VALUE = 1;
+
+
+public final int getNumber() {
+  return value;
+}
+
+/**
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+@java.lang.Deprecated
+public static BackupType valueOf(int value) {
+  return forNumber(value);
+}
+
+public static BackupType forNumber(int value) {
+  switch (value) {
+case 0: return FULL;
+case 1: return INCREMENTAL;
+default: return null;
+  }
+}
+
+public static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<
+BackupType> internalValueMap =
+  new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<BackupType>()
 {
+public BackupType findValueByNumber(int number) {
+  return BackupType.forNumber(number);
+}
+  };
+
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(ordinal());
+}
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+  return getDescriptor();
+}
+public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(0);
+}
+
+private static final BackupType[] VALUES = values();
+
+public static BackupType valueOf(
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
 desc) {
+  if (desc.getType() != getDescriptor()) {
+throw new java.lang.IllegalArgumentException(
+  "EnumValueDescriptor is not for this type.");
+  }
+  return VALUES[desc.getIndex()];
+}
+
+private final int value;
+
+private BackupType(int value) {
+  this.value = value;
+}
+
+// @@protoc_insertion_point(enum_scope:hbase.pb.BackupType)
+  }
+
+  public interface ServerTimestampOrBuilder extends
+  // @@protoc_insertion_point(interface_extends:hbase.pb.ServerTimestamp)
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+/**
+ * optional .hbase.pb.ServerName server_name = 1;
+ */
+boolean hasServerName();
+/**
+ * optional .hbase.pb.ServerName server_name = 1;
+ */
+org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName 
getServerName();
+/**
+ * optional .hbase.pb.ServerName server_name = 1;
+ */
+
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder
 getServerNameOrBuilder();
+
+/**
+ * optional uint64 timestamp = 2;
+ */
+boolean hasTimestamp();
+/**
+ * optional uint64 timestamp = 2;
+ */
+long getTimestamp();
+  }
+  /**
+   * 
+   **
+   * ServerTimestamp keeps last WAL roll time per Region Server
+   * 
+   *
+   * Protobuf type {@code hbase.pb.ServerTimestamp}
+   */
+  public  static final

[06/28] hbase git commit: HBASE-17426 Inconsistent environment variable names for enabling JMX

2017-03-20 Thread syuanjiang
HBASE-17426 Inconsistent environment variable names for enabling JMX


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8ad3add0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8ad3add0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8ad3add0

Branch: refs/heads/hbase-12439
Commit: 8ad3add0d4a469141928ab0048cf7d9d5236c5b0
Parents: e2a070c
Author: CHIA-PING TSAI 
Authored: Fri Mar 17 09:09:36 2017 +0800
Committer: CHIA-PING TSAI 
Committed: Fri Mar 17 09:10:06 2017 +0800

--
 bin/hbase-config.sh | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8ad3add0/bin/hbase-config.sh
--
diff --git a/bin/hbase-config.sh b/bin/hbase-config.sh
index e001e83..2e95ae7 100644
--- a/bin/hbase-config.sh
+++ b/bin/hbase-config.sh
@@ -96,6 +96,9 @@ HBASE_CONF_DIR="${HBASE_CONF_DIR:-$HBASE_HOME/conf}"
 HBASE_REGIONSERVERS="${HBASE_REGIONSERVERS:-$HBASE_CONF_DIR/regionservers}"
 # List of hbase secondary masters.
 HBASE_BACKUP_MASTERS="${HBASE_BACKUP_MASTERS:-$HBASE_CONF_DIR/backup-masters}"
+if [ -n "$HBASE_JMX_BASE" ] && [ -z "$HBASE_JMX_OPTS" ]; then
+  HBASE_JMX_OPTS="$HBASE_JMX_BASE"
+fi
 # Thrift JMX opts
 if [ -n "$HBASE_JMX_OPTS" ] && [ -z "$HBASE_THRIFT_JMX_OPTS" ]; then
   HBASE_THRIFT_JMX_OPTS="$HBASE_JMX_OPTS 
-Dcom.sun.management.jmxremote.port=10103"



[02/28] hbase git commit: Revert "guard against NPE while reading FileTrailer and HFileBlock"

2017-03-20 Thread syuanjiang
Revert "guard against NPE while reading FileTrailer and HFileBlock"

This reverts commit 201c8382508da1266d11e04d3c7cbef42e0a256a.

Reverted because missing JIRA number. Fixing...


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9a4068dc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9a4068dc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9a4068dc

Branch: refs/heads/hbase-12439
Commit: 9a4068dcf8caec644e6703ffa365a8649bbd336e
Parents: edbd0e4
Author: Michael Stack 
Authored: Thu Mar 16 14:53:25 2017 -0700
Committer: Michael Stack 
Committed: Thu Mar 16 14:53:25 2017 -0700

--
 .../hadoop/hbase/io/hfile/FixedFileTrailer.java |  3 +-
 .../hadoop/hbase/io/hfile/HFileBlock.java   |  2 +-
 .../apache/hadoop/hbase/io/hfile/HFileUtil.java | 43 
 3 files changed, 2 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9a4068dc/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
index 1854236..7eac9c6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
@@ -388,8 +388,7 @@ public class FixedFileTrailer {
   bufferSize = (int) fileSize;
 }
 
-HFileUtil.seekOnMultipleSources(istream, seekPoint);
-
+istream.seek(seekPoint);
 ByteBuffer buf = ByteBuffer.allocate(bufferSize);
 istream.readFully(buf.array(), buf.arrayOffset(),
 buf.arrayOffset() + buf.limit());

http://git-wip-us.apache.org/repos/asf/hbase/blob/9a4068dc/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 0b140b6..fba15ba 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -1512,7 +1512,7 @@ public class HFileBlock implements Cacheable {
   if (!pread && streamLock.tryLock()) {
 // Seek + read. Better for scanning.
 try {
-  HFileUtil.seekOnMultipleSources(istream, fileOffset);
+  istream.seek(fileOffset);
 
   long realOffset = istream.getPos();
   if (realOffset != fileOffset) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9a4068dc/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java
deleted file mode 100644
index 835450c..000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.hfile;
-
-import java.io.IOException;
-
-import org.apache.hadoop.fs.FSDataInputStream;
-
-public class HFileUtil {
-
-  /** guards against NullPointer
-   * utility which tries to seek on the DFSIS and will try an alternative 
source
-   * if the FSDataInputStream throws an NPE HBASE-17501
-   * @param istream
-   * @param offset
-   * @throws IOException
-   */
-  static public void seekOnMultipleSources(FSDataInputStream istream, long 
offset) throws IOException {
-try {
-  // attempt to seek inside of current blockReader
-  istream.seek(offset);
-} catch (NullPointerException e) {
-  // retry the seek on an alternate copy of the data
- 

[09/28] hbase git commit: HBASE-17706 TableSkewCostFunction improperly computes max skew - revert due to test failure

2017-03-20 Thread syuanjiang
HBASE-17706 TableSkewCostFunction improperly computes max skew - revert due to 
test failure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a69c23ab
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a69c23ab
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a69c23ab

Branch: refs/heads/hbase-12439
Commit: a69c23abfeed9246f308bf470b72a9a8afa46f5d
Parents: 7f0e6f1
Author: tedyu 
Authored: Thu Mar 16 19:07:59 2017 -0700
Committer: tedyu 
Committed: Thu Mar 16 19:07:59 2017 -0700

--
 .../hbase/master/balancer/BaseLoadBalancer.java | 21 ---
 .../master/balancer/StochasticLoadBalancer.java | 19 ++---
 .../balancer/TestStochasticLoadBalancer.java| 28 
 3 files changed, 20 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a69c23ab/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index c6086f6..b0e088c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -739,15 +739,18 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   }
   numRegionsPerServerPerTable[newServer][tableIndex]++;
 
-  // if old server had max num regions, assume (for now)
-  // max num regions went down since we moved the region
-  if (oldServer >= 0 &&
-  (numRegionsPerServerPerTable[oldServer][tableIndex] + 1) == 
numMaxRegionsPerTable[tableIndex]) {
-numMaxRegionsPerTable[tableIndex]--;
-  }
-  // Now check if new server sets new max
-  numMaxRegionsPerTable[tableIndex] =
-  Math.max(numMaxRegionsPerTable[tableIndex], 
numRegionsPerServerPerTable[newServer][tableIndex]);
+  //check whether this caused maxRegionsPerTable in the new Server to be 
updated
+  if (numRegionsPerServerPerTable[newServer][tableIndex] > 
numMaxRegionsPerTable[tableIndex]) {
+numMaxRegionsPerTable[tableIndex] = 
numRegionsPerServerPerTable[newServer][tableIndex];
+  } else if (oldServer >= 0 && 
(numRegionsPerServerPerTable[oldServer][tableIndex] + 1)
+  == numMaxRegionsPerTable[tableIndex]) {
+//recompute maxRegionsPerTable since the previous value was coming 
from the old server
+for (int serverIndex = 0 ; serverIndex < 
numRegionsPerServerPerTable.length; serverIndex++) {
+  if (numRegionsPerServerPerTable[serverIndex][tableIndex] > 
numMaxRegionsPerTable[tableIndex]) {
+numMaxRegionsPerTable[tableIndex] = 
numRegionsPerServerPerTable[serverIndex][tableIndex];
+  }
+}
+  }
 
   // update for servers
   int primary = regionIndexToPrimaryIndex[region];

http://git-wip-us.apache.org/repos/asf/hbase/blob/a69c23ab/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index 2a3582e..8cbdd1e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -272,6 +272,14 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
 
   @Override
   protected boolean needsBalance(Cluster cluster) {
+ClusterLoadState cs = new ClusterLoadState(cluster.clusterState);
+if (cs.getNumServers() < MIN_SERVER_BALANCE) {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Not running balancer because only " + cs.getNumServers()
++ " active regionserver(s)");
+  }
+  return false;
+}
 if (areSomeRegionReplicasColocated(cluster)) {
   return true;
 }
@@ -298,17 +306,6 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
   + minCostNeedBalance);
   return false;
 }
-
-ClusterLoadState cs = new ClusterLoadState(cluster.clusterState);
-if (cs.getNumServers() < MIN_SERVER_BALANCE) {
-  if (LOG.isDebugEnabled()) {
-LOG.debug("Not running balancer because only " + cs.getNumServers()
-+ " active regionserver(s)");
-  }
-  return false;
-}
-
-
 return true;
   }
 

http://git-wip-us.apache.org/repos/

[05/28] hbase git commit: HBASE-17778 Remove the testing code in the AsyncRequestFutureImpl

2017-03-20 Thread syuanjiang
HBASE-17778 Remove the testing code in the AsyncRequestFutureImpl


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e2a070ca
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e2a070ca
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e2a070ca

Branch: refs/heads/hbase-12439
Commit: e2a070cae0ab785b771a923146116c0e9f3452a5
Parents: 7c19490
Author: CHIA-PING TSAI 
Authored: Mon Mar 13 14:33:36 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Fri Mar 17 07:49:13 2017 +0800

--
 .../hbase/client/AsyncRequestFutureImpl.java| 49 
 .../hadoop/hbase/client/TestAsyncProcess.java   | 33 +++--
 2 files changed, 40 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e2a070ca/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
index 41431bb..e6e4fd1 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
@@ -28,7 +28,6 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
 import java.util.HashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -181,13 +180,14 @@ class AsyncRequestFutureImpl implements 
AsyncRequestFuture {
* Runnable (that can be submitted to thread pool) that submits MultiAction 
to a
* single server. The server call is synchronous, therefore we do it on a 
thread pool.
*/
-  private final class SingleServerRequestRunnable implements Runnable {
+  @VisibleForTesting
+  final class SingleServerRequestRunnable implements Runnable {
 private final MultiAction multiAction;
 private final int numAttempt;
 private final ServerName server;
 private final Set callsInProgress;
-private Long heapSize = null;
-private SingleServerRequestRunnable(
+@VisibleForTesting
+SingleServerRequestRunnable(
 MultiAction multiAction, int numAttempt, ServerName server,
 Set callsInProgress) {
   this.multiAction = multiAction;
@@ -196,24 +196,6 @@ class AsyncRequestFutureImpl implements 
AsyncRequestFuture {
   this.callsInProgress = callsInProgress;
 }
 
-@VisibleForTesting
-long heapSize() {
-  if (heapSize != null) {
-return heapSize;
-  }
-  heapSize = 0L;
-  for (Map.Entry> e: 
this.multiAction.actions.entrySet()) {
-List actions = e.getValue();
-for (Action action: actions) {
-  Row row = action.getAction();
-  if (row instanceof Mutation) {
-heapSize += ((Mutation) row).heapSize();
-  }
-}
-  }
-  return heapSize;
-}
-
 @Override
 public void run() {
   AbstractResponse res = null;
@@ -303,7 +285,6 @@ class AsyncRequestFutureImpl implements 
AsyncRequestFuture {
   private final CancellableRegionServerCallable currentCallable;
   private final int operationTimeout;
   private final int rpcTimeout;
-  private final Map> heapSizesByServer = new 
HashMap<>();
   private final AsyncProcess asyncProcess;
 
   /**
@@ -423,20 +404,11 @@ class AsyncRequestFutureImpl implements 
AsyncRequestFuture {
   }
 
   @VisibleForTesting
-  Map> getRequestHeapSize() {
-return heapSizesByServer;
+  SingleServerRequestRunnable createSingleServerRequest(MultiAction 
multiAction, int numAttempt, ServerName server,
+Set callsInProgress) {
+return new SingleServerRequestRunnable(multiAction, numAttempt, server, 
callsInProgress);
   }
 
-  private SingleServerRequestRunnable 
addSingleServerRequestHeapSize(ServerName server,
-SingleServerRequestRunnable runnable) {
-List heapCount = heapSizesByServer.get(server);
-if (heapCount == null) {
-  heapCount = new LinkedList<>();
-  heapSizesByServer.put(server, heapCount);
-}
-heapCount.add(runnable.heapSize());
-return runnable;
-  }
   /**
* Group a list of actions per region servers, and send them.
*
@@ -608,8 +580,8 @@ class AsyncRequestFutureImpl implements 
AsyncRequestFuture {
 asyncProcess.connection.getConnectionMetrics().incrNormalRunners();
   }
   asyncProcess.incTaskCounters(multiAction.getRegions(), server);
-  SingleServerRequestRunnable runnable = 
addSingleServerRequestHeapSize(server,
-  new SingleServerRequestRunnable(multiAction, numAttempt, server, 
callsInProgress));
+  SingleServerRequestRunnable runnable = createS

[20/28] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2017-03-20 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index 52b0ce5..bb6b40e 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -42223,7 +42223,7 @@ public final class MasterProtos {
* required .hbase.pb.SnapshotDescription snapshot = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>
 
   getSnapshotFieldBuilder() {
 if (snapshotBuilder_ == null) {
   snapshotBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -70510,7 +70510,7 @@ public final class MasterProtos {
 
   /**
* 
-   ** Get a run of the CleanerChore
+   ** Get a run of the CleanerChore 
* 
*
* rpc RunCleanerChore(.hbase.pb.RunCleanerChoreRequest) returns 
(.hbase.pb.RunCleanerChoreResponse);
@@ -72424,7 +72424,7 @@ public final class MasterProtos {
 
 /**
  * 
- ** Get a run of the CleanerChore
+ ** Get a run of the CleanerChore 
  * 
  *
  * rpc RunCleanerChore(.hbase.pb.RunCleanerChoreRequest) returns 
(.hbase.pb.RunCleanerChoreResponse);
@@ -76186,32 +76186,32 @@ public final class MasterProtos {
   
internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_fieldAccessorTable;
   private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
 internal_static_hbase_pb_RunCleanerChoreRequest_descriptor;
-  private static final
+  private static final 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
   internal_static_hbase_pb_RunCleanerChoreRequest_fieldAccessorTable;
   private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
 internal_static_hbase_pb_RunCleanerChoreResponse_descriptor;
-  private static final
+  private static final 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
   internal_static_hbase_pb_RunCleanerChoreResponse_fieldAccessorTable;
   private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
 internal_static_hbase_pb_SetCleanerChoreRunningRequest_descriptor;
-  private static final
+  private static final 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
   
internal_static_hbase_pb_SetCleanerChoreRunningRequest_fieldAccessorTable;
   private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
 internal_static_hbase_pb_SetCleanerChoreRunningResponse_descriptor;
-  private static final
+  private static final 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
   
internal_static_hbase_pb_SetCleanerChoreRunningResponse_fieldAccessorTable;
   private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
 internal_static_hbase_pb_IsCleanerChoreEnabledRequest_descriptor;
-  private static final
+  private static final 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
   internal_static_hbase_pb_IsCleanerChoreEnabledRequest_fieldAccessorTable;
   private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
 internal_static_hbase_pb_IsCleanerChoreEnabledResponse_descriptor;
-  private static final
+  private static final 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
   
internal_static_hbase_pb_IsCleanerChoreEnabledResponse_fieldAccessorTable;
   private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor

http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-protocol-shaded/src/main/protobuf/Backup.proto
--

[13/28] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2017-03-20 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
new file mode 100644
index 000..6b007f9
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestFullBackupSetRestoreSet extends TestBackupBase {
+
+  private static final Log LOG = 
LogFactory.getLog(TestFullBackupSetRestoreSet.class);
+
+  @Test
+  public void testFullRestoreSetToOtherTable() throws Exception {
+
+LOG.info("Test full restore set");
+
+// Create set
+try (BackupSystemTable table = new 
BackupSystemTable(TEST_UTIL.getConnection())) {
+  String name = "name";
+  table.addToBackupSet(name, new String[] { table1.getNameAsString() });
+  List names = table.describeBackupSet(name);
+
+  assertNotNull(names);
+  assertTrue(names.size() == 1);
+  assertTrue(names.get(0).equals(table1));
+
+  String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", 
name };
+  // Run backup
+  int ret = ToolRunner.run(conf1, new BackupDriver(), args);
+  assertTrue(ret == 0);
+  List backups = table.getBackupHistory();
+  assertTrue(backups.size() == 1);
+  String backupId = backups.get(0).getBackupId();
+  assertTrue(checkSucceeded(backupId));
+
+  LOG.info("backup complete");
+
+  // Restore from set into other table
+  args =
+  new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-m",
+  table1_restore.getNameAsString(), "-o" };
+  // Run backup
+  ret = ToolRunner.run(conf1, new RestoreDriver(), args);
+  assertTrue(ret == 0);
+  HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+  assertTrue(hba.tableExists(table1_restore));
+  // Verify number of rows in both tables
+  assertEquals(TEST_UTIL.countRows(table1), 
TEST_UTIL.countRows(table1_restore));
+  TEST_UTIL.deleteTable(table1_restore);
+  LOG.info("restore into other table is complete");
+  hba.close();
+}
+  }
+
+  @Test
+  public void testFullRestoreSetToSameTable() throws Exception {
+
+LOG.info("Test full restore set to same table");
+
+// Create set
+try (BackupSystemTable table = new 
BackupSystemTable(TEST_UTIL.getConnection())) {
+  String name = "name1";
+  table.addToBackupSet(name, new String[] { table1.getNameAsString() });
+  List names = table.describeBackupSet(name);
+
+  assertNotNull(names);
+  assertTrue(names.size() == 1);
+  assertTrue(names.get(0).equals(table1));
+
+  String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", 
name };
+  // Run backup
+  int ret = ToolRunner.run(conf1, new BackupDriver(), args);
+  assertTrue(ret == 0);
+  List backups = table.getBackupHistory();
+  String backupId = backups.get(0).getBackupId();
+  assertTrue(checkSucceeded(backupId));
+
+  LOG.info("backup complete");
+  int count = TEST_UTIL.countRows(table1);
+  TEST_UTIL.deleteTable(table1);
+
+  // Restore from set into other table
+  args = new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-o" };
+  // Run backup
+  ret 

[03/28] hbase git commit: HBASE-17501 Revert "Revert "guard against NPE while reading FileTrailer and HFileBlock""

2017-03-20 Thread syuanjiang
HBASE-17501 Revert "Revert "guard against NPE while reading FileTrailer and 
HFileBlock""

This reverts commit 9a4068dcf8caec644e6703ffa365a8649bbd336e.

This is a revert of a revert -- i.e. a restore -- just so I can add the
JIRA issue to the commit message.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6fb44f7e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6fb44f7e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6fb44f7e

Branch: refs/heads/hbase-12439
Commit: 6fb44f7eb8ded5496d2348af21a9d5ca0dd39ab4
Parents: 9a4068d
Author: Michael Stack 
Authored: Thu Mar 16 14:54:17 2017 -0700
Committer: Michael Stack 
Committed: Thu Mar 16 14:54:17 2017 -0700

--
 .../hadoop/hbase/io/hfile/FixedFileTrailer.java |  3 +-
 .../hadoop/hbase/io/hfile/HFileBlock.java   |  2 +-
 .../apache/hadoop/hbase/io/hfile/HFileUtil.java | 43 
 3 files changed, 46 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6fb44f7e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
index 7eac9c6..1854236 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
@@ -388,7 +388,8 @@ public class FixedFileTrailer {
   bufferSize = (int) fileSize;
 }
 
-istream.seek(seekPoint);
+HFileUtil.seekOnMultipleSources(istream, seekPoint);
+
 ByteBuffer buf = ByteBuffer.allocate(bufferSize);
 istream.readFully(buf.array(), buf.arrayOffset(),
 buf.arrayOffset() + buf.limit());

http://git-wip-us.apache.org/repos/asf/hbase/blob/6fb44f7e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index fba15ba..0b140b6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -1512,7 +1512,7 @@ public class HFileBlock implements Cacheable {
   if (!pread && streamLock.tryLock()) {
 // Seek + read. Better for scanning.
 try {
-  istream.seek(fileOffset);
+  HFileUtil.seekOnMultipleSources(istream, fileOffset);
 
   long realOffset = istream.getPos();
   if (realOffset != fileOffset) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/6fb44f7e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java
new file mode 100644
index 000..835450c
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+
+public class HFileUtil {
+
+  /** guards against NullPointer
+   * utility which tries to seek on the DFSIS and will try an alternative 
source
+   * if the FSDataInputStream throws an NPE HBASE-17501
+   * @param istream
+   * @param offset
+   * @throws IOException
+   */
+  static public void seekOnMultipleSources(FSDataInputStream istream, long 
offset) throws IOException {
+try {
+  // attempt to seek inside of current blockReader
+  istream.seek(offset);
+} catch (NullPointe

[01/28] hbase git commit: HBASE-17706 TableSkewCostFunction improperly computes max skew

2017-03-20 Thread syuanjiang
Repository: hbase
Updated Branches:
  refs/heads/hbase-12439 e67eb6c42 -> 4088f822a


HBASE-17706 TableSkewCostFunction improperly computes max skew

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/edbd0e49
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/edbd0e49
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/edbd0e49

Branch: refs/heads/hbase-12439
Commit: edbd0e494d7abaf50319b7650e350d52b195fcc9
Parents: e67eb6c
Author: Kahlil Oppenheimer 
Authored: Tue Feb 28 00:33:57 2017 -0500
Committer: tedyu 
Committed: Thu Mar 16 11:57:25 2017 -0700

--
 .../hbase/master/balancer/BaseLoadBalancer.java | 21 +++
 .../master/balancer/StochasticLoadBalancer.java | 19 +++--
 .../balancer/TestStochasticLoadBalancer.java| 28 
 3 files changed, 48 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/edbd0e49/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index b0e088c..c6086f6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -739,18 +739,15 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   }
   numRegionsPerServerPerTable[newServer][tableIndex]++;
 
-  //check whether this caused maxRegionsPerTable in the new Server to be 
updated
-  if (numRegionsPerServerPerTable[newServer][tableIndex] > 
numMaxRegionsPerTable[tableIndex]) {
-numMaxRegionsPerTable[tableIndex] = 
numRegionsPerServerPerTable[newServer][tableIndex];
-  } else if (oldServer >= 0 && 
(numRegionsPerServerPerTable[oldServer][tableIndex] + 1)
-  == numMaxRegionsPerTable[tableIndex]) {
-//recompute maxRegionsPerTable since the previous value was coming 
from the old server
-for (int serverIndex = 0 ; serverIndex < 
numRegionsPerServerPerTable.length; serverIndex++) {
-  if (numRegionsPerServerPerTable[serverIndex][tableIndex] > 
numMaxRegionsPerTable[tableIndex]) {
-numMaxRegionsPerTable[tableIndex] = 
numRegionsPerServerPerTable[serverIndex][tableIndex];
-  }
-}
-  }
+  // if old server had max num regions, assume (for now)
+  // max num regions went down since we moved the region
+  if (oldServer >= 0 &&
+  (numRegionsPerServerPerTable[oldServer][tableIndex] + 1) == 
numMaxRegionsPerTable[tableIndex]) {
+numMaxRegionsPerTable[tableIndex]--;
+  }
+  // Now check if new server sets new max
+  numMaxRegionsPerTable[tableIndex] =
+  Math.max(numMaxRegionsPerTable[tableIndex], 
numRegionsPerServerPerTable[newServer][tableIndex]);
 
   // update for servers
   int primary = regionIndexToPrimaryIndex[region];

http://git-wip-us.apache.org/repos/asf/hbase/blob/edbd0e49/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index 8cbdd1e..2a3582e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -272,14 +272,6 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
 
   @Override
   protected boolean needsBalance(Cluster cluster) {
-ClusterLoadState cs = new ClusterLoadState(cluster.clusterState);
-if (cs.getNumServers() < MIN_SERVER_BALANCE) {
-  if (LOG.isDebugEnabled()) {
-LOG.debug("Not running balancer because only " + cs.getNumServers()
-+ " active regionserver(s)");
-  }
-  return false;
-}
 if (areSomeRegionReplicasColocated(cluster)) {
   return true;
 }
@@ -306,6 +298,17 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
   + minCostNeedBalance);
   return false;
 }
+
+ClusterLoadState cs = new ClusterLoadState(cluster.clusterState);
+if (cs.getNumServers() < MIN_SERVER_BALANCE) {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Not running balancer because only " + cs.getNumServers()
++ " active regionserver(s)");
+  }
+

[23/28] hbase git commit: Add Eshcar Hillel to pom file

2017-03-20 Thread syuanjiang
Add Eshcar Hillel to pom file


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b299c138
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b299c138
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b299c138

Branch: refs/heads/hbase-12439
Commit: b299c1388c499b184c06a7b647c649458f6aa1e0
Parents: 75d0f49
Author: eshcar 
Authored: Sun Mar 19 09:58:51 2017 +0200
Committer: eshcar 
Committed: Sun Mar 19 09:58:51 2017 +0200

--
 pom.xml | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b299c138/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 34e3102..86c98a2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -248,6 +248,12 @@
   -8
 
 
+  eshcar
+  Eshcar Hillel
+  esh...@apache.org
+  +2
+
+
   fenghh
   Honghua Feng
   fen...@apache.org



[19/28] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2017-03-20 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
new file mode 100644
index 000..c1d5258
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
@@ -0,0 +1,524 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupAdmin;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.BackupRequest;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.backup.RestoreRequest;
+import org.apache.hadoop.hbase.backup.util.BackupSet;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+import com.google.common.collect.Lists;
+
+@InterfaceAudience.Private
+public class BackupAdminImpl implements BackupAdmin {
+  public final static String CHECK_OK = "Checking backup images: OK";
+  public final static String CHECK_FAILED =
+  "Checking backup images: Failed. Some dependencies are missing for 
restore";
+  private static final Log LOG = LogFactory.getLog(BackupAdminImpl.class);
+
+  private final Connection conn;
+
+  public BackupAdminImpl(Connection conn) {
+this.conn = conn;
+  }
+
+  @Override
+  public void close() throws IOException {
+if (conn != null) {
+  conn.close();
+}
+  }
+
+
+  @Override
+  public BackupInfo getBackupInfo(String backupId) throws IOException {
+BackupInfo backupInfo = null;
+try (final BackupSystemTable table = new BackupSystemTable(conn)) {
+  if (backupId == null) {
+ArrayList recentSessions = 
table.getBackupInfos(BackupState.RUNNING);
+if (recentSessions.isEmpty()) {
+  LOG.warn("No ongoing sessions found.");
+  return null;
+}
+// else show status for ongoing session
+// must be one maximum
+return recentSessions.get(0);
+  } else {
+backupInfo = table.readBackupInfo(backupId);
+return backupInfo;
+  }
+}
+  }
+
+  @Override
+  public int deleteBackups(String[] backupIds) throws IOException {
+// TODO: requires Fault tolerance support, failure will leave system
+// in a non-consistent state
+// see HBASE-15227
+int totalDeleted = 0;
+Map> allTablesMap = new HashMap>();
+
+try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
+  for (int i = 0; i < backupIds.length; i++) {
+BackupInfo info = sysTable.readBackupInfo(backupIds[i]);
+if (info != null) {
+  String rootDir = info.getBackupRootDir();
+  HashSet allTables = allTablesMap.get(rootDir);
+  if (allTables == null) {
+allTables = new HashSet();
+allTablesMap.put(rootDir, allTables);
+  }
+  allTables.addAll(info.getTableNames());
+  totalDeleted += deleteBackup(backupIds[i], sysTable);
+}
+  }
+  finalizeDelete(allTablesMap, sysTable);
+}
+return totalDeleted;
+  }
+
+  /*

[22/28] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2017-03-20 Thread syuanjiang
HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/75d0f49d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/75d0f49d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/75d0f49d

Branch: refs/heads/hbase-12439
Commit: 75d0f49dcd9761d32a8dedfaa169844822a9e7a5
Parents: 8e5eeb4
Author: tedyu 
Authored: Sat Mar 18 03:04:19 2017 -0700
Committer: tedyu 
Committed: Sat Mar 18 03:04:19 2017 -0700

--
 bin/hbase   |6 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |4 +-
 .../hbase/client/RpcRetryingCallerImpl.java |3 +-
 .../apache/hadoop/hbase/backup/BackupType.java  |   25 +
 .../hadoop/hbase/util/AbstractHBaseTool.java|   18 +-
 .../hbase/IntegrationTestBackupRestore.java |  311 +
 .../shaded/protobuf/generated/BackupProtos.java | 7013 ++
 .../shaded/protobuf/generated/MasterProtos.java |   18 +-
 .../src/main/protobuf/Backup.proto  |  117 +
 hbase-server/pom.xml|   10 +
 .../apache/hadoop/hbase/backup/BackupAdmin.java |  128 +
 .../hadoop/hbase/backup/BackupCopyJob.java  |   55 +
 .../hadoop/hbase/backup/BackupDriver.java   |  204 +
 .../apache/hadoop/hbase/backup/BackupInfo.java  |  545 ++
 .../hadoop/hbase/backup/BackupRequest.java  |  139 +
 .../hbase/backup/BackupRestoreConstants.java|  115 +
 .../hbase/backup/BackupRestoreFactory.java  |   66 +
 .../hadoop/hbase/backup/BackupTableInfo.java|   82 +
 .../hadoop/hbase/backup/HBackupFileSystem.java  |  141 +
 .../apache/hadoop/hbase/backup/LogUtils.java|   50 +
 .../hadoop/hbase/backup/RestoreDriver.java  |  265 +
 .../apache/hadoop/hbase/backup/RestoreJob.java  |   46 +
 .../hadoop/hbase/backup/RestoreRequest.java |  135 +
 .../hbase/backup/impl/BackupAdminImpl.java  |  524 ++
 .../hbase/backup/impl/BackupCommands.java   |  780 ++
 .../hbase/backup/impl/BackupException.java  |   84 +
 .../hadoop/hbase/backup/impl/BackupManager.java |  472 ++
 .../hbase/backup/impl/BackupManifest.java   |  666 ++
 .../hbase/backup/impl/BackupSystemTable.java| 1376 
 .../backup/impl/FullTableBackupClient.java  |  189 +
 .../backup/impl/IncrementalBackupManager.java   |  344 +
 .../impl/IncrementalTableBackupClient.java  |  216 +
 .../hbase/backup/impl/RestoreTablesClient.java  |  237 +
 .../hbase/backup/impl/TableBackupClient.java|  387 +
 .../backup/mapreduce/HFileSplitterJob.java  |  181 +
 .../mapreduce/MapReduceBackupCopyJob.java   |  344 +
 .../backup/mapreduce/MapReduceRestoreJob.java   |  182 +
 .../hbase/backup/master/BackupLogCleaner.java   |  142 +
 .../master/LogRollMasterProcedureManager.java   |  155 +
 .../regionserver/LogRollBackupSubprocedure.java |  168 +
 .../LogRollBackupSubprocedurePool.java  |  139 +
 .../LogRollRegionServerProcedureManager.java|  185 +
 .../hadoop/hbase/backup/util/BackupSet.java |   58 +
 .../hadoop/hbase/backup/util/BackupUtils.java   |  702 ++
 .../hadoop/hbase/backup/util/RestoreTool.java   |  610 ++
 .../BaseCoordinatedStateManager.java|   20 +-
 .../coordination/ZkCoordinatedStateManager.java |   23 +-
 .../hbase/mapreduce/HFileInputFormat.java   |  174 +
 .../hbase/mapreduce/LoadIncrementalHFiles.java  |   25 +-
 .../hadoop/hbase/mapreduce/WALInputFormat.java  |   42 +-
 .../hadoop/hbase/mapreduce/WALPlayer.java   |   83 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |  129 +-
 .../hbase/master/snapshot/SnapshotManager.java  |4 +-
 .../hbase/procedure/ZKProcedureCoordinator.java |  328 +
 .../procedure/ZKProcedureCoordinatorRpcs.java   |  327 -
 .../flush/MasterFlushTableProcedureManager.java |4 +-
 .../hbase/regionserver/HRegionServer.java   |   17 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |5 +
 .../hadoop/hbase/HBaseTestingUtility.java   |   41 +-
 .../hadoop/hbase/backup/TestBackupBase.java |  293 +
 .../hbase/backup/TestBackupBoundaryTests.java   |   97 +
 .../hbase/backup/TestBackupCommandLineTool.java |  431 ++
 .../hadoop/hbase/backup/TestBackupDelete.java   |  102 +
 .../hbase/backup/TestBackupDeleteRestore.java   |   70 +
 .../hadoop/hbase/backup/TestBackupDescribe.java |  110 +
 .../hbase/backup/TestBackupMultipleDeletes.java |  159 +
 .../hbase/backup/TestBackupShowHistory.java |  148 +
 .../hbase/backup/TestBackupStatusProgress.java  |   96 +
 .../hbase/backup/TestBackupSystemTable.java |  511 ++
 .../hadoop/hbase/backup/TestFullBackup.java |   59 +
 .../hadoop/hbase/backup/TestFullBackupSet.java  |  103 +
 .../backup/TestFullBackupSetRestoreSet.java |  128 +
 .../hadoop/hbase/backup/TestFullRestore.java|  345 +
 .../hbase/backup/TestIncrementalBackup.java |  200 +
 .../TestIncrementalBackupDeleteTable.java   |  129 +

[26/28] hbase git commit: HBASE-17802 Add note that minor versions can add methods to Interfaces

2017-03-20 Thread syuanjiang
HBASE-17802 Add note that minor versions can add methods to Interfaces


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7c03a213
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7c03a213
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7c03a213

Branch: refs/heads/hbase-12439
Commit: 7c03a213ffc074c941333677065031a5c2c12d41
Parents: 261aa94
Author: Michael Stack 
Authored: Fri Mar 17 16:53:47 2017 -0700
Committer: Michael Stack 
Committed: Sun Mar 19 14:51:11 2017 -0700

--
 src/main/asciidoc/_chapters/upgrading.adoc | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7c03a213/src/main/asciidoc/_chapters/upgrading.adoc
--
diff --git a/src/main/asciidoc/_chapters/upgrading.adoc 
b/src/main/asciidoc/_chapters/upgrading.adoc
index b0a5565..df5bbfe 100644
--- a/src/main/asciidoc/_chapters/upgrading.adoc
+++ b/src/main/asciidoc/_chapters/upgrading.adoc
@@ -74,12 +74,15 @@ In addition to the usual API versioning considerations 
HBase has other compatibi
 * An API needs to be deprecated for a major version before we will 
change/remove it.
 * APIs available in a patch version will be available in all later patch 
versions. However, new APIs may be added which will not be available in earlier 
patch versions.
 * New APIs introduced in a patch version will only be added in a source 
compatible way footnote:[See 'Source Compatibility' 
https://blogs.oracle.com/darcy/entry/kinds_of_compatibility]: i.e. code that 
implements public APIs will continue to compile.
-* Example: A user using a newly deprecated API does not need to modify 
application code with HBase API calls until the next major version.
+** Example: A user using a newly deprecated API does not need to modify 
application code with HBase API calls until the next major version.
+* 
 
 .Client Binary compatibility
 * Client code written to APIs available in a given patch release can run 
unchanged (no recompilation needed) against the new jars of later patch 
versions.
 * Client code written to APIs available in a given patch release might not run 
against the old jars from an earlier patch version.
-* Example: Old compiled client code will work unchanged with the new jars.
+** Example: Old compiled client code will work unchanged with the new jars.
+* If a Client implements an HBase Interface, a recompile MAY be required 
upgrading to a newer minor version (See release notes
+for warning about incompatible changes). All effort will be made to provide a 
default implementation so this case should not arise.
 
 .Server-Side Limited API compatibility (taken from Hadoop)
 * Internal APIs are marked as Stable, Evolving, or Unstable



[24/28] hbase git commit: HBASE-17803 PE always re-creates table when we specify the split policy

2017-03-20 Thread syuanjiang
HBASE-17803 PE always re-creates table when we specify the split policy


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/23abc900
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/23abc900
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/23abc900

Branch: refs/heads/hbase-12439
Commit: 23abc90068f0ea75f09c3eecf6ef758f1aee9219
Parents: b299c13
Author: CHIA-PING TSAI 
Authored: Sat Mar 18 12:20:07 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Sun Mar 19 18:27:54 2017 +0800

--
 .../test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/23abc900/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index f8345b1..3addb1a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -325,7 +325,7 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
 // recreate the table when user has requested presplit or when existing
 // {RegionSplitPolicy,replica count} does not match requested.
 if ((exists && opts.presplitRegions != DEFAULT_OPTS.presplitRegions)
-  || (!isReadCmd && desc != null && desc.getRegionSplitPolicyClassName() 
!= opts.splitPolicy)
+  || (!isReadCmd && desc != null && 
!desc.getRegionSplitPolicyClassName().equals(opts.splitPolicy))
   || (!isReadCmd && desc != null && desc.getRegionReplication() != 
opts.replicas)) {
   needsDelete = true;
   // wait, why did it delete my table?!?



[27/28] hbase git commit: HBASE-17691 Add ScanMetrics support for async scan

2017-03-20 Thread syuanjiang
HBASE-17691 Add ScanMetrics support for async scan


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5b4bb821
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5b4bb821
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5b4bb821

Branch: refs/heads/hbase-12439
Commit: 5b4bb8217dd4327a89fa29c93ac37bc887d96c2c
Parents: 7c03a21
Author: zhangduo 
Authored: Mon Mar 20 17:12:53 2017 +0800
Committer: zhangduo 
Committed: Mon Mar 20 20:54:04 2017 +0800

--
 .../hadoop/hbase/client/AsyncClientScanner.java |  34 +++-
 .../client/AsyncRpcRetryingCallerFactory.java   |  24 ++-
 .../AsyncScanSingleRegionRpcRetryingCaller.java |  35 ++--
 .../hadoop/hbase/client/AsyncTableBase.java |   9 +-
 .../hadoop/hbase/client/AsyncTableImpl.java |   1 +
 .../hbase/client/AsyncTableResultScanner.java   |   9 +-
 .../hadoop/hbase/client/ClientScanner.java  |   8 +-
 .../hadoop/hbase/client/ConnectionUtils.java|  75 +
 .../hbase/client/RawScanResultConsumer.java |  10 ++
 .../hbase/client/ReversedScannerCallable.java   |  10 +-
 .../hadoop/hbase/client/ScanResultConsumer.java |   9 ++
 .../hadoop/hbase/client/ScannerCallable.java|  88 ++
 .../client/SimpleRawScanResultConsumer.java |  84 ++
 .../hbase/client/SimpleScanResultConsumer.java  |  75 +
 .../hadoop/hbase/client/TestAsyncTableScan.java |  42 -
 .../hbase/client/TestAsyncTableScanMetrics.java | 159 +++
 .../hbase/client/TestRawAsyncTableScan.java |  52 --
 17 files changed, 526 insertions(+), 198 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5b4bb821/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
index fa7aa81..2c1693d 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
@@ -19,8 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.HConstants.EMPTY_END_ROW;
 import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
-import static 
org.apache.hadoop.hbase.client.ConnectionUtils.createScanResultCache;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.getLocateType;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.*;
 
 import java.io.IOException;
 import java.util.concurrent.CompletableFuture;
@@ -29,6 +28,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
@@ -51,6 +51,8 @@ class AsyncClientScanner {
   // AsyncScanSingleRegionRpcRetryingCaller will modify this scan object 
directly.
   private final Scan scan;
 
+  private final ScanMetrics scanMetrics;
+
   private final RawScanResultConsumer consumer;
 
   private final TableName tableName;
@@ -88,29 +90,46 @@ class AsyncClientScanner {
 this.rpcTimeoutNs = rpcTimeoutNs;
 this.startLogErrorsCnt = startLogErrorsCnt;
 this.resultCache = createScanResultCache(scan);
+if (scan.isScanMetricsEnabled()) {
+  this.scanMetrics = new ScanMetrics();
+  consumer.onScanMetricsCreated(scanMetrics);
+} else {
+  this.scanMetrics = null;
+}
   }
 
   private static final class OpenScannerResponse {
 
 public final HRegionLocation loc;
 
+public final boolean isRegionServerRemote;
+
 public final ClientService.Interface stub;
 
 public final HBaseRpcController controller;
 
 public final ScanResponse resp;
 
-public OpenScannerResponse(HRegionLocation loc, Interface stub, 
HBaseRpcController controller,
-ScanResponse resp) {
+public OpenScannerResponse(HRegionLocation loc, boolean 
isRegionServerRemote, Interface stub,
+HBaseRpcController controller, ScanResponse resp) {
   this.loc = loc;
+  this.isRegionServerRemote = isRegionServerRemote;
   this.stub = stub;
   this.controller = controller;
   this.resp = resp;
 }
   }
 
+  private int openScannerTries;
+
   private CompletableFuture 
callOpenScanner(HBaseRpcController controller,
   HRegionLocation loc, ClientService.Interface stub) {
+boolean isRegionServerRemote

[17/28] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2017-03-20 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
new file mode 100644
index 000..0f1453e
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -0,0 +1,344 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable.WALItem;
+import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
+
+/**
+ * After a full backup was created, the incremental backup will only store the 
changes made after
+ * the last full or incremental backup. Creating the backup copies the 
logfiles in .logs and
+ * .oldlogs since the last backup timestamp.
+ */
+@InterfaceAudience.Private
+public class IncrementalBackupManager extends BackupManager {
+  public static final Log LOG = 
LogFactory.getLog(IncrementalBackupManager.class);
+
+  public IncrementalBackupManager(Connection conn, Configuration conf) throws 
IOException {
+super(conn, conf);
+  }
+
+  /**
+   * Obtain the list of logs that need to be copied out for this incremental 
backup. The list is set
+   * in BackupInfo.
+   * @param conn the Connection
+   * @param backupInfo backup info
+   * @return The new HashMap of RS log timestamps after the log roll for this 
incremental backup.
+   * @throws IOException exception
+   */
+  public HashMap getIncrBackupLogFileList(Connection conn, 
BackupInfo backupInfo)
+  throws IOException {
+List logList;
+HashMap newTimestamps;
+HashMap previousTimestampMins;
+
+String savedStartCode = readBackupStartCode();
+
+// key: tableName
+// value: 
+HashMap> previousTimestampMap = 
readLogTimestampMap();
+
+previousTimestampMins = 
BackupUtils.getRSLogTimestampMins(previousTimestampMap);
+
+if (LOG.isDebugEnabled()) {
+  LOG.debug("StartCode " + savedStartCode + "for backupID " + 
backupInfo.getBackupId());
+}
+// get all new log files from .logs and .oldlogs after last TS and before 
new timestamp
+if (savedStartCode == null || previousTimestampMins == null
+|| previousTimestampMins.isEmpty()) {
+  throw new IOException(
+  "Cannot read any previous back up timestamps from backup system 
table. "
+  + "In order to create an incremental backup, at least one full 
backup is needed.");
+}
+
+LOG.info("Execute roll log procedure for incremental backup ...");
+HashMap props = new HashMap();
+props.put("backupRoot", backupInfo.getBackupRootDir());
+
+try (Admin admin = conn.getAdmin();) {
+
+  
admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
+LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
+
+}
+newTimestamps = readRegionServerLastLogRollResult();
+
+logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, 
conf, savedStartCode);
+List logFromSystemTable =
+ 

[25/28] hbase git commit: HBASE-17803 Addendum fix NPE

2017-03-20 Thread syuanjiang
HBASE-17803 Addendum fix NPE


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/261aa944
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/261aa944
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/261aa944

Branch: refs/heads/hbase-12439
Commit: 261aa9445c3c52e09c10d06168a77d11d0c9b4b4
Parents: 23abc90
Author: Chia-Ping Tsai 
Authored: Sun Mar 19 19:05:25 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Sun Mar 19 19:05:25 2017 +0800

--
 .../test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/261aa944/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 3addb1a..40e50cf 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -43,6 +43,7 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -325,7 +326,8 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
 // recreate the table when user has requested presplit or when existing
 // {RegionSplitPolicy,replica count} does not match requested.
 if ((exists && opts.presplitRegions != DEFAULT_OPTS.presplitRegions)
-  || (!isReadCmd && desc != null && 
!desc.getRegionSplitPolicyClassName().equals(opts.splitPolicy))
+  || (!isReadCmd && desc != null &&
+  !StringUtils.equals(desc.getRegionSplitPolicyClassName(), 
opts.splitPolicy))
   || (!isReadCmd && desc != null && desc.getRegionReplication() != 
opts.replicas)) {
   needsDelete = true;
   // wait, why did it delete my table?!?



[11/28] hbase git commit: add chia7712 to pom.xml

2017-03-20 Thread syuanjiang
add chia7712 to pom.xml


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/154e58ef
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/154e58ef
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/154e58ef

Branch: refs/heads/hbase-12439
Commit: 154e58ef3255195eeaf35eb7db62e9beb83aab35
Parents: 0ed50f4
Author: Chia-Ping Tsai 
Authored: Sat Mar 18 03:15:17 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Sat Mar 18 03:15:17 2017 +0800

--
 pom.xml | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/154e58ef/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 535573d..34e3102 100644
--- a/pom.xml
+++ b/pom.xml
@@ -206,6 +206,12 @@
   +8
 
 
+  chia7712
+  Chia-Ping Tsai
+  chia7...@apache.org
+  +8
+
+
   ddas
   Devaraj Das
   d...@apache.org



[14/28] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2017-03-20 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
new file mode 100644
index 000..ec88549
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -0,0 +1,293 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+/**
+ * This class is only a base for other integration-level backup tests. Do not 
add tests here.
+ * TestBackupSmallTests is where tests that don't require bring machines 
up/down should go All other
+ * tests should have their own classes and extend this one
+ */
+public class TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestBackupBase.class);
+
+  protected static Configuration conf1;
+  protected static Configuration conf2;
+
+  protected static HBaseTestingUtility TEST_UTIL;
+  protected static HBaseTestingUtility TEST_UTIL2;
+  protected static TableName table1 = TableName.valueOf("table1");
+  protected static HTableDescriptor table1Desc;
+  protected static TableName table2 = TableName.valueOf("table2");
+  protected static TableName table3 = TableName.valueOf("table3");
+  protected static TableName table4 = TableName.valueOf("table4");
+
+  protected static TableName table1_restore = 
TableName.valueOf("ns1:table1_restore");
+  protected static TableName table2_restore = 
TableName.valueOf("ns2:table2_restore");
+  protected static TableName table3_restore = 
TableName.valueOf("ns3:table3_restore");
+  protected static TableName table4_restore = 
TableName.valueOf("ns4:table4_restore");
+
+  protected static final int NB_ROWS_IN_BATCH = 99;
+  protected static final byte[] qualName = Bytes.toBytes("q1");
+  protected static final byte[] famName = Bytes.toBytes("f");
+
+  protected static String BACKUP_ROOT_DIR = "/backupUT";
+  protected static String BACKUP_REMOTE_ROOT_DIR = "/backupUT";
+  protected static String provider = "defaultProvider";
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+TEST_UTIL = new HBaseTestingUtility();
+conf1 = TEST_UTIL.getConfiguration();
+conf1.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
+BackupManager.decorateM

[18/28] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2017-03-20 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
new file mode 100644
index 000..b8adac9
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
@@ -0,0 +1,666 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+
+/**
+ * Backup manifest contains all the meta data of a backup image. The manifest info will be bundled
+ * as a manifest file together with the data, so that each backup image will contain all the info
+ * needed for restore. BackupManifest is a storage container for BackupImage.
+ * It is responsible for storing/reading backup image data and has some 
additional utility methods.
+ *
+ */
+@InterfaceAudience.Private
+public class BackupManifest {
+
+  private static final Log LOG = LogFactory.getLog(BackupManifest.class);
+
+  // manifest file name
+  public static final String MANIFEST_FILE_NAME = ".backup.manifest";
+
+  /**
+   *  Backup image; the dependency graph is made up of a series of backup images.
+   *  BackupImage contains all the relevant information to restore the backup and
+   *  is used during the restore operation.
+   */
+
+  public static class BackupImage implements Comparable {
+
+static class Builder {
+  BackupImage image;
+
+  Builder() {
+image = new BackupImage();
+  }
+
+  Builder withBackupId(String backupId) {
+image.setBackupId(backupId);
+return this;
+  }
+
+  Builder withType(BackupType type) {
+image.setType(type);
+return this;
+  }
+
+  Builder withRootDir(String rootDir) {
+image.setRootDir(rootDir);
+return this;
+  }
+
+  Builder withTableList(List tableList) {
+image.setTableList(tableList);
+return this;
+  }
+
+  Builder withStartTime(long startTime) {
+image.setStartTs(startTime);
+return this;
+  }
+
+  Builder withCompleteTime(long completeTime) {
+image.setCompleteTs(completeTime);
+return this;
+  }
+
+  BackupImage build() {
+return image;
+  }
+
+}
+
+private String backupId;
+private BackupType type;
+private String rootDir;
+private List tableList;
+private long startTs;
+private long completeTs;
+private ArrayList ancestors;
+private HashMap> incrTimeRanges;
+
+static Builder newBuilder() {
+  return new Builder();
+}
+
+public BackupImage() {
+  super();
+}
+
+private BackupImage(String backupId, BackupType type, String rootDir,
+List tableList, long startTs, long completeTs) {
+  this.backupId = backupId;
+  this.type = type;
+  this.rootDir = rootDir;
+  this.tableList = tableList;
+  this.startTs = startTs;
+

[28/28] hbase git commit: HBASE-17806 TestRSGroups#testMoveServersAndTables is flaky in master branch (Guangxu Cheng)

2017-03-20 Thread syuanjiang
HBASE-17806 TestRSGroups#testMoveServersAndTables is flaky in master branch 
(Guangxu Cheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4088f822
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4088f822
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4088f822

Branch: refs/heads/hbase-12439
Commit: 4088f822a449acc39c2408a287f820ec26acabf4
Parents: 5b4bb82
Author: tedyu 
Authored: Mon Mar 20 09:26:34 2017 -0700
Committer: tedyu 
Committed: Mon Mar 20 09:26:34 2017 -0700

--
 .../org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java |  3 +++
 .../apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java| 11 +--
 2 files changed, 12 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4088f822/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
index 35563c5..9219c23 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
@@ -132,6 +132,9 @@ public class RSGroupInfo {
 sb.append(", ");
 sb.append(" Servers:");
 sb.append(this.servers);
+sb.append(", ");
+sb.append(" Tables:");
+sb.append(this.tables);
 return sb.toString();
 
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4088f822/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
--
diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index e8cdb78..e5c89c3 100644
--- 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -694,6 +694,7 @@ public abstract class TestRSGroupsBase {
 
   @Test
   public void testMoveServersAndTables() throws Exception {
+LOG.info("testMoveServersAndTables");
 final RSGroupInfo newGroup = addGroup(getGroupName(name.getMethodName()), 
1);
 //create table
 final byte[] familyNameBytes = Bytes.toBytes("f");
@@ -718,6 +719,12 @@ public abstract class TestRSGroupsBase {
   }
 }
 
+LOG.debug("Print group info : " + rsGroupAdmin.listRSGroups());
+int oldDefaultGroupServerSize =
+
rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size();
+int oldDefaultGroupTableSize =
+
rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getTables().size();
+
 //test fail bogus server move
 try {
   
rsGroupAdmin.moveServersAndTables(Sets.newHashSet(Address.fromString("foo:")),
@@ -742,9 +749,9 @@ public abstract class TestRSGroupsBase {
 }
 
 //verify default group info
-Assert.assertEquals(3,
+Assert.assertEquals(oldDefaultGroupServerSize,
 
rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size());
-Assert.assertEquals(4,
+Assert.assertEquals(oldDefaultGroupTableSize,
 
rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getTables().size());
 
 //verify new group info



[16/28] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2017-03-20 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
new file mode 100644
index 000..47e428c
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.master;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ThreadPoolExecutor;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.CoordinatedStateManagerFactory;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
+import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.MetricsMaster;
+import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
+import org.apache.hadoop.hbase.procedure.Procedure;
+import org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
+import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
+import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * Master procedure manager for the coordinated cluster-wide WAL roll operation, which is run
+ * during a backup operation; see {@link MasterProcedureManager} and {@link RegionServerProcedureManager}
+ */
+@InterfaceAudience.Private
+public class LogRollMasterProcedureManager extends MasterProcedureManager {
+
+  public static final String ROLLLOG_PROCEDURE_SIGNATURE = "rolllog-proc";
+  public static final String ROLLLOG_PROCEDURE_NAME = "rolllog";
+  private static final Log LOG = 
LogFactory.getLog(LogRollMasterProcedureManager.class);
+
+  private MasterServices master;
+  private ProcedureCoordinator coordinator;
+  private boolean done;
+
+  @Override
+  public void stop(String why) {
+LOG.info("stop: " + why);
+  }
+
+  @Override
+  public boolean isStopped() {
+return false;
+  }
+
+  @Override
+  public void initialize(MasterServices master, MetricsMaster metricsMaster)
+  throws KeeperException, IOException, UnsupportedOperationException {
+this.master = master;
+this.done = false;
+
+// setup the default procedure coordinator
+String name = master.getServerName().toString();
+ThreadPoolExecutor tpool = ProcedureCoordinator.defaultPool(name, 1);
+BaseCoordinatedStateManager coordManager =
+(BaseCoordinatedStateManager) CoordinatedStateManagerFactory
+.getCoordinatedStateManager(master.getConfiguration());
+coordManager.initialize(master);
+
+ProcedureCoordinatorRpcs comms =
+coordManager.getProcedureCoordinatorRpcs(getProcedureSignature(), 
name);
+
+this.coordinator = new ProcedureCoordinator(comms, tpool);
+  }
+
+  @Override
+  public String getProcedureSignature() {
+return ROLLLOG_PROCEDURE_SIGNATURE;
+  }
+
+  @Override
+  public void execProcedure(ProcedureDescription desc) throws IOException {
+if (!isBackupEnabled()) {
+  LOG.warn("Backup is not enabled. Check your " + 
BackupRestoreConstants.BACKUP_ENABLE_KEY
+  + " setting");
+  return;
+}
+this.done = false;
+// start the process on the RS
+ForeignExceptionDispatcher monitor = new 

[08/28] hbase git commit: HBASE-17758 [RSGROUP] Add shell command to move servers and tables at the same time (Guangxu Cheng)

2017-03-20 Thread syuanjiang
HBASE-17758 [RSGROUP] Add shell command to move servers and tables at the same 
time (Guangxu Cheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7f0e6f1c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7f0e6f1c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7f0e6f1c

Branch: refs/heads/hbase-12439
Commit: 7f0e6f1c9ec6423aee96dcbdd4240fb76f45d5a9
Parents: 8ad3add
Author: Andrew Purtell 
Authored: Thu Mar 16 18:37:40 2017 -0700
Committer: Andrew Purtell 
Committed: Thu Mar 16 18:37:40 2017 -0700

--
 .../protobuf/generated/RSGroupAdminProtos.java  | 1759 +-
 .../hadoop/hbase/rsgroup/RSGroupAdmin.java  |   10 +
 .../hbase/rsgroup/RSGroupAdminClient.java   |   24 +-
 .../hbase/rsgroup/RSGroupAdminEndpoint.java |   22 +
 .../hbase/rsgroup/RSGroupAdminServer.java   |  172 ++
 .../hbase/rsgroup/RSGroupInfoManager.java   |   10 +
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |   24 +
 .../src/main/protobuf/RSGroupAdmin.proto|   12 +
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  |  111 ++
 .../rsgroup/VerifyingRSGroupAdminClient.java|6 +
 .../hbase/coprocessor/MasterObserver.java   |   18 +
 .../hbase/master/MasterCoprocessorHost.java |   26 +
 .../hbase/security/access/AccessController.java |6 +
 .../hbase/coprocessor/TestMasterObserver.java   |   10 +
 .../src/main/ruby/hbase/rsgroup_admin.rb|   14 +
 hbase-shell/src/main/ruby/shell.rb  |1 +
 16 files changed, 2202 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7f0e6f1c/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
index 3d1f4bd..ca1db1e 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
@@ -10754,6 +10754,1621 @@ public final class RSGroupAdminProtos {
 // 
@@protoc_insertion_point(class_scope:hbase.pb.GetRSGroupInfoOfServerResponse)
   }
 
+  public interface MoveServersAndTablesRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required string target_group = 1;
+/**
+ * required string target_group = 1;
+ */
+boolean hasTargetGroup();
+/**
+ * required string target_group = 1;
+ */
+java.lang.String getTargetGroup();
+/**
+ * required string target_group = 1;
+ */
+com.google.protobuf.ByteString
+getTargetGroupBytes();
+
+// repeated .hbase.pb.ServerName servers = 2;
+/**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+
java.util.List
 
+getServersList();
+/**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName 
getServers(int index);
+/**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+int getServersCount();
+/**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+java.util.List 
+getServersOrBuilderList();
+/**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder 
getServersOrBuilder(
+int index);
+
+// repeated .hbase.pb.TableName table_name = 3;
+/**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+
java.util.List
 
+getTableNameList();
+/**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
getTableName(int index);
+/**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+int getTableNameCount();
+/**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+java.util.List 
+getTableNameOrBuilderList();
+/**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder 
getTableNameOrBuilder(
+int index);
+  }
+  /**
+   * Protobuf type {@code hbase.pb.MoveServersAndTablesRequest}
+   */
+  public static final class MoveServersAndTablesRequest extends
+  com.google.protobuf.GeneratedMessage
+  implements MoveServersAndTablesRequestOrBuilder {
+// Use MoveServersAndTablesRequest.newBuilder() to construct.
+private 
MoveServersAndTablesRequest(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(build

[07/28] hbase git commit: HBASE-17758 [RSGROUP] Add shell command to move servers and tables at the same time (Guangxu Cheng)

2017-03-20 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/7f0e6f1c/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index 811cf71..3c0cccf 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -138,6 +138,139 @@ public class RSGroupAdminServer implements RSGroupAdmin {
 else regions.addFirst(hri);
   }
 
+  /**
+   * Check servers and tables.
+   * Fail on nulls or if the servers and tables do not belong to the same group
+   * @param servers servers to move
+   * @param tables tables to move
+   * @param targetGroupName target group name
+   * @throws IOException
+   */
+  private void checkServersAndTables(Set servers, Set 
tables,
+ String targetGroupName) throws 
IOException {
+// Presume first server's source group. Later ensure all servers are from 
this group.
+Address firstServer = servers.iterator().next();
+RSGroupInfo tmpSrcGrp = rsGroupInfoManager.getRSGroupOfServer(firstServer);
+if (tmpSrcGrp == null) {
+  // Be careful. This exception message is tested for in 
TestRSGroupsBase...
+  throw new ConstraintException("Source RSGroup for server " + firstServer
+  + " does not exist.");
+}
+RSGroupInfo srcGrp = new RSGroupInfo(tmpSrcGrp);
+if (srcGrp.getName().equals(targetGroupName)) {
+  throw new ConstraintException( "Target RSGroup " + targetGroupName +
+  " is same as source " + srcGrp.getName() + " RSGroup.");
+}
+// Only move online servers
+checkOnlineServersOnly(servers);
+
+// Ensure all servers are of same rsgroup.
+for (Address server: servers) {
+  String tmpGroup = 
rsGroupInfoManager.getRSGroupOfServer(server).getName();
+  if (!tmpGroup.equals(srcGrp.getName())) {
+throw new ConstraintException("Move server request should only come 
from one source " +
+"RSGroup. Expecting only " + srcGrp.getName() + " but contains 
" + tmpGroup);
+  }
+}
+
+// Ensure all tables and servers are of same rsgroup.
+for (TableName table : tables) {
+  String tmpGroup = rsGroupInfoManager.getRSGroupOfTable(table);
+  if (!tmpGroup.equals(srcGrp.getName())) {
+throw new ConstraintException("Move table request should only come 
from one source " +
+"RSGroup. Expecting only " + srcGrp.getName() + " but contains 
" + tmpGroup);
+  }
+}
+
+if (srcGrp.getServers().size() <= servers.size()
+&& srcGrp.getTables().size() > tables.size() ) {
+  throw new ConstraintException("Cannot leave a RSGroup " + 
srcGrp.getName() +
+  " that contains tables without servers to host them.");
+}
+  }
+
+  /**
+   * @param servers the servers that will move to new group
+   * @param targetGroupName the target group name
+   * @param tables The regions of tables assigned to these servers will not 
unassign
+   * @throws IOException
+   */
+  private void unassignRegionFromServers(Set servers, String 
targetGroupName,
+ Set tables) throws IOException 
{
+boolean foundRegionsToUnassign;
+RSGroupInfo targetGrp = getRSGroupInfo(targetGroupName);
+Set allSevers = new HashSet<>(servers);
+do {
+  foundRegionsToUnassign = false;
+  for (Iterator iter = allSevers.iterator(); iter.hasNext();) {
+Address rs = iter.next();
+// Get regions that are associated with this server and filter regions 
by tables.
+List regions = new ArrayList<>();
+for (HRegionInfo region : getRegions(rs)) {
+  if (!tables.contains(region.getTable())) {
+regions.add(region);
+  }
+}
+
+LOG.info("Unassigning " + regions.size() +
+" region(s) from " + rs + " for server move to " + 
targetGroupName);
+if (!regions.isEmpty()) {
+  for (HRegionInfo region: regions) {
+// Regions might get assigned from tables of target group so we 
need to filter
+if (!targetGrp.containsTable(region.getTable())) {
+  this.master.getAssignmentManager().unassign(region);
+  if (master.getAssignmentManager().getRegionStates().
+  getRegionState(region).isFailedOpen()) {
+continue;
+  }
+  foundRegionsToUnassign = true;
+}
+  }
+}
+if (!foundRegionsToUnassign) {
+  iter.remove();
+}
+  }
+  try {
+rsGroupInfoManager.wait(1000);
+  } catch (InterruptedException e) {
+LOG.warn("Sleep interrupt

[10/28] hbase git commit: HBASE-17707 New More Accurate Table Skew cost function/generator - revert due to test failure

2017-03-20 Thread syuanjiang
HBASE-17707 New More Accurate Table Skew cost function/generator - revert due 
to test failure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0ed50f42
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0ed50f42
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0ed50f42

Branch: refs/heads/hbase-12439
Commit: 0ed50f42e71599a86738643822e1ec4f2e432026
Parents: a69c23a
Author: tedyu 
Authored: Thu Mar 16 19:08:45 2017 -0700
Committer: tedyu 
Committed: Thu Mar 16 19:08:45 2017 -0700

--
 .../hbase/master/balancer/BaseLoadBalancer.java |  74 
 .../master/balancer/StochasticLoadBalancer.java | 441 +--
 .../balancer/TestStochasticLoadBalancer.java|  35 +-
 .../balancer/TestStochasticLoadBalancer2.java   |   4 -
 4 files changed, 5 insertions(+), 549 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0ed50f42/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index b0e088c..0f1b1a2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.master.RackManager;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
 import org.apache.hadoop.hbase.security.access.AccessControlLists;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -141,7 +140,6 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 int[]   initialRegionIndexToServerIndex;//regionIndex -> serverIndex 
(initial cluster state)
 int[]   regionIndexToTableIndex; //regionIndex -> tableIndex
 int[][] numRegionsPerServerPerTable; //serverIndex -> tableIndex -> # 
regions
-int[]   numRegionsPerTable;  // tableIndex -> number of regions 
that table has
 int[]   numMaxRegionsPerTable;   //tableIndex -> max number of regions 
in a single RS
 int[]   regionIndexToPrimaryIndex;   //regionIndex -> regionIndex of the 
primary
 boolean hasRegionReplicas = false;   //whether there is regions with 
replicas
@@ -332,7 +330,6 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 
   numTables = tables.size();
   numRegionsPerServerPerTable = new int[numServers][numTables];
-  numRegionsPerTable = new int[numTables];
 
   for (int i = 0; i < numServers; i++) {
 for (int j = 0; j < numTables; j++) {
@@ -342,7 +339,6 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 
   for (int i=0; i < regionIndexToServerIndex.length; i++) {
 if (regionIndexToServerIndex[i] >= 0) {
-  numRegionsPerTable[regionIndexToTableIndex[i]]++;
   
numRegionsPerServerPerTable[regionIndexToServerIndex[i]][regionIndexToTableIndex[i]]++;
 }
   }
@@ -474,76 +470,6 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   }
 }
 
-/**
- * Returns the minimum number of regions of a table T each server would 
store if T were
- * perfectly distributed (i.e. round-robin-ed) across the cluster
- */
-public int minRegionsIfEvenlyDistributed(int table) {
-  return numRegionsPerTable[table] / numServers;
-}
-
-/**
- * Returns the maximum number of regions of a table T each server would 
store if T were
- * perfectly distributed (i.e. round-robin-ed) across the cluster
- */
-public int maxRegionsIfEvenlyDistributed(int table) {
-  int min = minRegionsIfEvenlyDistributed(table);
-  return numRegionsPerTable[table] % numServers == 0 ? min : min + 1;
-}
-
-/**
- * Returns the number of servers that should hold 
maxRegionsIfEvenlyDistributed for a given
- * table. A special case here is if maxRegionsIfEvenlyDistributed == 
minRegionsIfEvenlyDistributed,
- * in which case all servers should hold the max
- */
-public int numServersWithMaxRegionsIfEvenlyDistributed(int table) {
-  int numWithMax = numRegionsPerTable[table] % numServers;
-  if (numWithMax == 0) {
-return numServers;
-  } else {
-return numWithMax;
-  }
-}
-
-/**
- * Returns true iff at least one server in the cluster stores either more 
than the min/max load
- * per server when all regions are evenly distributed across the 

[12/28] hbase git commit: HBASE-17758 [RSGROUP] Add shell command to move servers and tables at the same time (Guangxu Cheng) - addendum with move_servers_tables_rsgroup.rb

2017-03-20 Thread syuanjiang
HBASE-17758 [RSGROUP] Add shell command to move servers and tables at the same 
time (Guangxu Cheng) - addendum with move_servers_tables_rsgroup.rb


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8e5eeb4d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8e5eeb4d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8e5eeb4d

Branch: refs/heads/hbase-12439
Commit: 8e5eeb4db3be14371a38d0d54169a6e5c7b5f983
Parents: 154e58e
Author: tedyu 
Authored: Fri Mar 17 13:55:36 2017 -0700
Committer: tedyu 
Committed: Fri Mar 17 13:55:36 2017 -0700

--
 .../commands/move_servers_tables_rsgroup.rb | 37 
 1 file changed, 37 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8e5eeb4d/hbase-shell/src/main/ruby/shell/commands/move_servers_tables_rsgroup.rb
--
diff --git 
a/hbase-shell/src/main/ruby/shell/commands/move_servers_tables_rsgroup.rb 
b/hbase-shell/src/main/ruby/shell/commands/move_servers_tables_rsgroup.rb
new file mode 100644
index 000..5337141
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/move_servers_tables_rsgroup.rb
@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+class MoveServersTablesRsgroup < Command
+  def help
+return <<-EOF
+Reassign RegionServers and Tables from one group to another.
+
+Example:
+
+  hbase> move_servers_tables_rsgroup 
'dest',['server1:port','server2:port'],['table1','table2']
+
+EOF
+  end
+
+  def command(dest, servers, tables)
+rsgroup_admin.move_servers_tables(dest, servers, tables)
+  end
+end
+  end
+end



hbase git commit: HBASE-16084 Cleaned up the stale references in Javadoc

2017-03-20 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 4088f822a -> 55d6dcaf8


HBASE-16084 Cleaned up the stale references in Javadoc

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/55d6dcaf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/55d6dcaf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/55d6dcaf

Branch: refs/heads/master
Commit: 55d6dcaf877cc5223e679736eb613173229c18be
Parents: 4088f82
Author: Jan Hentschel 
Authored: Sun Mar 19 20:49:28 2017 +0100
Committer: tedyu 
Committed: Mon Mar 20 10:55:36 2017 -0700

--
 .../org/apache/hadoop/hbase/HTableDescriptor.java  | 14 +++---
 .../apache/hadoop/hbase/client/AsyncProcess.java   |  6 +++---
 .../hadoop/hbase/client/ConnectionFactory.java |  2 +-
 .../apache/hadoop/hbase/client/MasterCallable.java |  2 +-
 .../java/org/apache/hadoop/hbase/client/Query.java |  2 +-
 .../hbase/client/coprocessor/package-info.java |  8 
 .../FirstKeyValueMatchingQualifiersFilter.java |  2 +-
 .../hadoop/hbase/ipc/ServerRpcController.java  |  4 ++--
 .../hbase/zookeeper/RecoverableZooKeeper.java  |  2 +-
 .../java/org/apache/hadoop/hbase/nio/ByteBuff.java |  2 +-
 .../org/apache/hadoop/hbase/util/OrderedBytes.java |  2 +-
 .../hadoop/hbase/HBaseCommonTestingUtility.java|  3 +--
 .../codec/prefixtree/scanner/CellSearcher.java |  2 +-
 .../store/wal/ProcedureWALFormatReader.java|  3 +--
 .../apache/hadoop/hbase/backup/HFileArchiver.java  |  2 --
 .../hbase/backup/example/HFileArchiveManager.java  |  3 ++-
 .../hadoop/hbase/backup/util/RestoreTool.java  |  4 ++--
 .../apache/hadoop/hbase/constraint/Constraint.java |  8 
 .../hbase/io/hfile/CompoundBloomFilterWriter.java  |  2 +-
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java   |  2 +-
 .../hadoop/hbase/io/hfile/HFileBlockIndex.java |  2 +-
 .../apache/hadoop/hbase/mapreduce/RowCounter.java  |  3 +--
 .../hadoop/hbase/master/TableNamespaceManager.java |  2 +-
 .../master/balancer/StochasticLoadBalancer.java|  4 ++--
 .../regionserver/MiniBatchOperationInProgress.java |  4 ++--
 .../hadoop/hbase/regionserver/StoreFileReader.java |  2 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java  |  8 
 .../regionserver/wal/SequenceIdAccounting.java |  6 +++---
 .../hadoop/hbase/regionserver/wal/SyncFuture.java  |  4 ++--
 .../access/CoprocessorWhitelistMasterObserver.java |  2 +-
 .../hbase/security/access/TableAuthManager.java|  2 +-
 .../apache/hadoop/hbase/HBaseTestingUtility.java   |  6 +++---
 .../apache/hadoop/hbase/TestMetaTableLocator.java  |  8 
 .../hbase/TestPartialResultsFromClientSide.java|  2 +-
 .../org/apache/hadoop/hbase/TestSerialization.java |  4 ++--
 .../hbase/client/TestMultipleTimestamps.java   |  2 +-
 .../TestFirstKeyValueMatchingQualifiersFilter.java |  2 +-
 .../apache/hadoop/hbase/io/hfile/TestSeekTo.java   |  2 +-
 .../hbase/mapreduce/TestHFileOutputFormat2.java| 17 +++--
 .../hadoop/hbase/mapreduce/TestImportExport.java   |  2 +-
 .../hadoop/hbase/master/MockRegionServer.java  |  4 ++--
 .../hadoop/hbase/master/TestWarmupRegion.java  |  2 +-
 .../hbase/procedure/TestProcedureMember.java   |  4 ++--
 .../hbase/regionserver/DataBlockEncodingTool.java  |  2 +-
 .../hbase/regionserver/OOMERegionServer.java   |  3 +--
 .../TestRegionMergeTransactionOnCluster.java   |  6 --
 .../org/apache/hadoop/hbase/util/LoadTestTool.java |  2 +-
 .../hadoop/hbase/util/MultiThreadedAction.java |  4 ++--
 48 files changed, 86 insertions(+), 100 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index a49cf1c..25fd896 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -723,7 +723,7 @@ public class HTableDescriptor implements 
Comparable<HTableDescriptor> {
   /**
* This sets the class associated with the region split policy which
* determines when a region split should occur.  The class used by
-   * default is defined in {@link 
org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
+   * default is defined in 
org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
* @param clazz the class name
*/
   public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
@@ -734,7 +734,7 @@ public class HTableDescriptor implements 
Comparable<HTableDescriptor> {
   /**
* This gets the class associated with th

hbase git commit: HBASE-17582 Fix broken drop page cache hint (broken by HBASE-15236).

2017-03-20 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/master 55d6dcaf8 -> e39e0e634


HBASE-17582 Fix broken drop page cache hint (broken by HBASE-15236).

Change-Id: I2947ab979979f977db7b0c282c4aaf4eb1f26482


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e39e0e63
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e39e0e63
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e39e0e63

Branch: refs/heads/master
Commit: e39e0e634a2252a352ad799bc2957c72e8d2d2e9
Parents: 55d6dca
Author: Apekshit Sharma 
Authored: Wed Feb 1 23:23:46 2017 -0800
Committer: Apekshit Sharma 
Committed: Mon Mar 20 11:19:51 2017 -0700

--
 .../org/apache/hadoop/hbase/regionserver/StoreFileScanner.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e39e0e63/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index ca7dfd4..ab6b0ef 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -129,7 +129,7 @@ public class StoreFileScanner implements KeyValueScanner {
 List sorted_files = new ArrayList<>(files);
 Collections.sort(sorted_files, StoreFile.Comparators.SEQ_ID);
 for (int i = 0; i < sorted_files.size(); i++) {
-  StoreFileReader r = sorted_files.get(i).createReader();
+  StoreFileReader r = sorted_files.get(i).createReader(canUseDrop);
   r.setReplicaStoreFile(isPrimaryReplica);
   StoreFileScanner scanner = r.getStoreFileScanner(cacheBlocks, usePread, 
isCompaction, readPt,
 i, matcher != null ? !matcher.hasNullColumnInQuery() : false);



hbase git commit: Added hbase high performance cookbook to the book resources page on the website

2017-03-20 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master e39e0e634 -> 16900c8c2


Added hbase high performance cookbook to the book resources page on the website


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/16900c8c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/16900c8c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/16900c8c

Branch: refs/heads/master
Commit: 16900c8c25766456aeb624c19d50ee0c203facfa
Parents: e39e0e6
Author: Michael Stack 
Authored: Mon Mar 20 12:01:22 2017 -0700
Committer: Michael Stack 
Committed: Mon Mar 20 12:01:22 2017 -0700

--
 src/main/site/xdoc/resources.xml | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/16900c8c/src/main/site/xdoc/resources.xml
--
diff --git a/src/main/site/xdoc/resources.xml b/src/main/site/xdoc/resources.xml
index d067c1e..078587c 100644
--- a/src/main/site/xdoc/resources.xml
+++ b/src/main/site/xdoc/resources.xml
@@ -37,6 +37,10 @@ under the License.
 http://www.packtpub.com/hbase-administration-for-optimum-database-performance-cookbook/book";>HBase
 Administration Cookbook by Yifeng Jiang.  Publisher: PACKT Publishing, 
Release: Expected August 2012, Pages: 335.
 
 
+
+  https://www.packtpub.com/big-data-and-business-intelligence/hbase-high-performance-cookbook";>HBase
 High Performance Cookbook by Ruchir Choudhry.  Publisher: PACKT 
Publishing, Release: January 2017, Pages: 350.
+
+
 
 
 



[2/2] hbase git commit: HBASE-17794 Swap "violation" for "snapshot" where appropriate

2017-03-20 Thread elserj
HBASE-17794 Swap "violation" for "snapshot" where appropriate

A couple of variables and comments in which violation is incorrectly
used to describe what the code is doing. This was a hold over from early
implementation -- need to scrub these out for clarity.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4f8fe3ce
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4f8fe3ce
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4f8fe3ce

Branch: refs/heads/HBASE-16961
Commit: 4f8fe3ce043d6f51ebe5346e978b6ff0ff734635
Parents: 14a6b6b
Author: Josh Elser 
Authored: Thu Mar 16 19:26:14 2017 -0400
Committer: Josh Elser 
Committed: Mon Mar 20 15:02:42 2017 -0400

--
 .../java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java| 4 ++--
 hbase-protocol-shaded/src/main/protobuf/Quota.proto| 2 +-
 .../org/apache/hadoop/hbase/quotas/QuotaObserverChore.java | 6 +++---
 .../apache/hadoop/hbase/quotas/TableQuotaSnapshotStore.java| 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4f8fe3ce/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
index ed5da95..725f170 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
@@ -227,7 +227,7 @@ public class QuotaTableUtil {
   }
 
   /**
-   * Creates a {@link Scan} which returns only quota violations from the quota 
table.
+   * Creates a {@link Scan} which returns only quota snapshots from the quota 
table.
*/
   public static Scan makeQuotaSnapshotScan() {
 Scan s = new Scan();
@@ -245,7 +245,7 @@ public class QuotaTableUtil {
* will throw an {@link IllegalArgumentException}.
*
* @param result A row from the quota table.
-   * @param snapshots A map of violations to add the result of this method 
into.
+   * @param snapshots A map of snapshots to add the result of this method into.
*/
   public static void extractQuotaSnapshot(
   Result result, Map snapshots) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/4f8fe3ce/hbase-protocol-shaded/src/main/protobuf/Quota.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Quota.proto 
b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
index 1a6d5ed..364c58b 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Quota.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
@@ -98,7 +98,7 @@ message SpaceLimitRequest {
 }
 
 // Represents the state of a quota on a table. Either the quota is not in 
violation
-// or it is in violatino there is a violation policy which should be in effect.
+// or it is in violation and there is a violation policy which should be in effect.
 message SpaceQuotaStatus {
   optional SpaceViolationPolicy policy = 1;
   optional bool in_violation = 2;

http://git-wip-us.apache.org/repos/asf/hbase/blob/4f8fe3ce/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
index 94c5c87..254f2a1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
@@ -532,9 +532,9 @@ public class QuotaObserverChore extends ScheduledChore {
   }
 
   /**
-   * Stores the quota violation state for the given table.
+   * Stores the quota state for the given table.
*/
-  void setTableQuotaViolation(TableName table, SpaceQuotaSnapshot snapshot) {
+  void setTableQuotaSnapshot(TableName table, SpaceQuotaSnapshot snapshot) {
 this.tableQuotaSnapshots.put(table, snapshot);
   }
 
@@ -552,7 +552,7 @@ public class QuotaObserverChore extends ScheduledChore {
   }
 
   /**
-   * Stores the quota violation state for the given namespace.
+   * Stores the quota state for the given namespace.
*/
   void setNamespaceQuotaSnapshot(String namespace, SpaceQuotaSnapshot 
snapshot) {
 this.namespaceQuotaSnapshots.put(namespace, snapshot);

http://git-wip-us.apache.org/repos/asf/hbase/blob/4f8fe3ce/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TableQuotaSnapshotStore.java
---

[1/2] hbase git commit: HBASE-17003 Documentation updates for space quotas

2017-03-20 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/HBASE-16961 e30e82ae9 -> 4f8fe3ce0


HBASE-17003 Documentation updates for space quotas


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/14a6b6b7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/14a6b6b7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/14a6b6b7

Branch: refs/heads/HBASE-16961
Commit: 14a6b6b70c8fdcd8dbaa97d118071905d6f4e67c
Parents: e30e82a
Author: Josh Elser 
Authored: Thu Mar 16 16:21:14 2017 -0400
Committer: Josh Elser 
Committed: Mon Mar 20 15:02:11 2017 -0400

--
 src/main/asciidoc/_chapters/ops_mgt.adoc | 64 ++-
 1 file changed, 63 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/14a6b6b7/src/main/asciidoc/_chapters/ops_mgt.adoc
--
diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc 
b/src/main/asciidoc/_chapters/ops_mgt.adoc
index b156ee5..4cefa03 100644
--- a/src/main/asciidoc/_chapters/ops_mgt.adoc
+++ b/src/main/asciidoc/_chapters/ops_mgt.adoc
@@ -1705,7 +1705,7 @@ handling multiple workloads:
 
 [[quota]]
 === Quotas
-HBASE-11598 introduces quotas, which allow you to throttle requests based on
+HBASE-11598 introduces RPC quotas, which allow you to throttle requests based 
on
 the following limits:
 
 . <>
@@ -1885,6 +1885,68 @@ at the same time and that fewer scans can be executed at 
the same time. A value
 `0.9` will give more queue/handlers to scans, so the number of scans executed 
will
 increase and the number of gets will decrease.
 
+[[space-quotas]]
+=== Space Quotas
+
+link:https://issues.apache.org/jira/browse/HBASE-16961[HBASE-16961] introduces 
a new type of
+quotas for HBase to leverage: filesystem quotas. These "space" quotas limit 
the amount of space
+on the filesystem that HBase namespaces and tables can consume. If a user, 
malicious or ignorant,
+has the ability to write data into HBase, with enough time, that user can 
effectively crash HBase
+(or worse HDFS) by consuming all available space. When there is no filesystem 
space available,
+HBase crashes because it can no longer create/sync data to the write-ahead log.
+
+This feature allows for a limit to be set on the size of a 
namespace. When a space quota is set
+on a namespace, the quota's limit applies to the sum of usage of all tables in 
that namespace.
+When a table with a quota exists in a namespace with a quota, the table quota 
takes priority
+over the namespace quota. This allows for a scenario where a large limit can 
be placed on
+a collection of tables, but a single table in that collection can have a 
fine-grained limit set.
+
+The existing `set_quota` and `list_quota` HBase shell commands can be used to 
interact with
+space quotas. Space quotas are quotas with a `TYPE` of `SPACE` and have 
`LIMIT` and `POLICY`
+attributes. The `LIMIT` is a string that refers to the amount of space on the 
filesystem
+that the quota subject (e.g. the table or namespace) may consume. For example, 
valid values
+of `LIMIT` are `'10G'`, `'2T'`, or `'256M'`. The `POLICY` refers to the action 
that HBase will
+take when the quota subject's usage exceeds the `LIMIT`. The following are 
valid `POLICY` values.
+
+* `NO_INSERTS` - No new data may be written (e.g. `Put`, `Increment`, 
`Append`).
+* `NO_WRITES` - Same as `NO_INSERTS` but `Deletes` are also disallowed.
+* `NO_WRITES_COMPACTIONS` - Same as `NO_WRITES` but compactions are also 
disallowed.
+* `DISABLE` - The table(s) are disabled, preventing all read/write access.
+
+.Setting simple space quotas
+
+# Sets a quota on the table 't1' with a limit of 1GB, disallowing 
Puts/Increments/Appends when the table exceeds 1GB
+hbase> set_quota TYPE => SPACE, TABLE => 't1', LIMIT => '1G', POLICY => 
NO_INSERTS
+
+# Sets a quota on the namespace 'ns1' with a limit of 50TB, disallowing 
Puts/Increments/Appends/Deletes
+hbase> set_quota TYPE => SPACE, NAMESPACE => 'ns1', LIMIT => '50T', POLICY => 
NO_WRITES
+
+# Sets a quota on the table 't3' with a limit of 2TB, disallowing any writes 
and compactions when the table exceeds 2TB.
+hbase> set_quota TYPE => SPACE, TABLE => 't3', LIMIT => '2T', POLICY => 
NO_WRITES_COMPACTIONS
+
+# Sets a quota on the table 't2' with a limit of 50GB, disabling the table 
when it exceeds 50GB
+hbase> set_quota TYPE => SPACE, TABLE => 't2', LIMIT => '50G', POLICY => 
DISABLE
+
+
+Consider the following scenario to set up quotas on a namespace, overriding 
the quota on tables in that namespace
+
+.Table and Namespace space quotas
+
+hbase> create_namespace 'ns1'
+hbase> create 'ns1:t1'
+hbase> create 'ns1:t2'
+hbase> create 'ns1:t3'
+hbase> set_quota TYPE => SPACE, NAMESPACE => 'ns1', LIMIT => '100T', POLICY => 
NO_INSERTS
+hbase

hbase git commit: HBASE-17447 Implement a MasterObserver for automatically deleting space quotas

2017-03-20 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/HBASE-16961 4f8fe3ce0 -> 1393da806


HBASE-17447 Implement a MasterObserver for automatically deleting space quotas

When a table or namespace is deleted, it would be nice to automatically
delete the quota on said table/NS. It's possible that not all people
would want this functionality so we can leave it up to the user to
configure this Observer.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1393da80
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1393da80
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1393da80

Branch: refs/heads/HBASE-16961
Commit: 1393da806123b85ac77ac4b8f4678829ed71b59c
Parents: 4f8fe3c
Author: Josh Elser 
Authored: Thu Mar 16 18:54:01 2017 -0400
Committer: Josh Elser 
Committed: Mon Mar 20 17:04:40 2017 -0400

--
 .../hbase/quotas/MasterSpaceQuotaObserver.java  |  85 ++
 .../quotas/TestMasterSpaceQuotaObserver.java| 169 +++
 src/main/asciidoc/_chapters/ops_mgt.adoc|  17 ++
 3 files changed, 271 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1393da80/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java
new file mode 100644
index 000..1a2fb14
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+
+/**
+ * An observer to automatically delete space quotas when a table/namespace
+ * are deleted.
+ */
+@InterfaceAudience.Private
+public class MasterSpaceQuotaObserver extends BaseMasterObserver {
+  private CoprocessorEnvironment cpEnv;
+  private Configuration conf;
+  private boolean quotasEnabled = false;
+
+  @Override
+  public void start(CoprocessorEnvironment ctx) throws IOException {
+this.cpEnv = ctx;
+this.conf = cpEnv.getConfiguration();
+this.quotasEnabled = QuotaUtil.isQuotaEnabled(conf);
+  }
+
+  @Override
+  public void postDeleteTable(
+  ObserverContext ctx, TableName tableName) 
throws IOException {
+// Do nothing if quotas aren't enabled
+if (!quotasEnabled) {
+  return;
+}
+final MasterServices master = ctx.getEnvironment().getMasterServices();
+final Connection conn = master.getConnection();
+Quotas quotas = QuotaUtil.getTableQuota(master.getConnection(), tableName);
+if (null != quotas && quotas.hasSpace()) {
+  QuotaSettings settings = 
QuotaSettingsFactory.removeTableSpaceLimit(tableName);
+  try (Admin admin = conn.getAdmin()) {
+admin.setQuota(settings);
+  }
+}
+  }
+
+  @Override
+  public void postDeleteNamespace(
+  ObserverContext ctx, String namespace) 
throws IOException {
+// Do nothing if quotas aren't enabled
+if (!quotasEnabled) {
+  return;
+}
+final MasterServices master = ctx.getEnvironment().getMasterServices();
+final Connection conn = master.getConnection();
+Quotas quotas = QuotaUtil.getNamespaceQuota(master.getConnection(), 
namespace);
+if (n

hbase git commit: Fix hanging tag on site resources page

2017-03-20 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 16900c8c2 -> a41b1852d


Fix hanging tag on site resources page


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a41b1852
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a41b1852
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a41b1852

Branch: refs/heads/master
Commit: a41b1852da5d445f711237afaf5a58f26998ed6b
Parents: 16900c8
Author: Michael Stack 
Authored: Mon Mar 20 14:54:56 2017 -0700
Committer: Michael Stack 
Committed: Mon Mar 20 14:54:56 2017 -0700

--
 src/main/site/xdoc/resources.xml | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a41b1852/src/main/site/xdoc/resources.xml
--
diff --git a/src/main/site/xdoc/resources.xml b/src/main/site/xdoc/resources.xml
index 078587c..19548b6 100644
--- a/src/main/site/xdoc/resources.xml
+++ b/src/main/site/xdoc/resources.xml
@@ -36,7 +36,6 @@ under the License.
 
 http://www.packtpub.com/hbase-administration-for-optimum-database-performance-cookbook/book";>HBase
 Administration Cookbook by Yifeng Jiang.  Publisher: PACKT Publishing, 
Release: Expected August 2012, Pages: 335.
 
-
 
   https://www.packtpub.com/big-data-and-business-intelligence/hbase-high-performance-cookbook";>HBase
 High Performance Cookbook by Ruchir Choudhry.  Publisher: PACKT 
Publishing, Release: January 2017, Pages: 350.
 



hbase git commit: HBASE-17175 backport HBASE-17127 (Locate region should fail fast if underlying Connection already closed) to 1.3.1

2017-03-20 Thread liyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 226b6fa44 -> 98b5d2cd4


HBASE-17175 backport HBASE-17127 (Locate region should fail fast if underlying 
Connection already closed) to 1.3.1


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/98b5d2cd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/98b5d2cd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/98b5d2cd

Branch: refs/heads/branch-1.3
Commit: 98b5d2cd4b0badef5f576cb35c8c8f1df1c88899
Parents: 226b6fa
Author: Yu Li 
Authored: Tue Mar 21 06:05:29 2017 +0800
Committer: Yu Li 
Committed: Tue Mar 21 06:05:29 2017 +0800

--
 .../hadoop/hbase/client/ConnectionManager.java  |  2 +-
 .../hbase/client/TestClientNoCluster.java   | 27 
 2 files changed, 28 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/98b5d2cd/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 4e9d208..cba441e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -1171,7 +1171,7 @@ class ConnectionManager {
 public RegionLocations locateRegion(final TableName tableName,
   final byte [] row, boolean useCache, boolean retry, int replicaId)
 throws IOException {
-  if (this.closed) throw new IOException(toString() + " closed");
+  if (this.closed) throw new DoNotRetryIOException(toString() + " closed");
   if (tableName== null || tableName.getName().length == 0) {
 throw new IllegalArgumentException(
 "table name cannot be null or zero length");

http://git-wip-us.apache.org/repos/asf/hbase/blob/98b5d2cd/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
--
diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index 2d50c1b..f6968bc 100644
--- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -39,6 +39,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -286,6 +287,32 @@ public class TestClientNoCluster extends Configured 
implements Tool {
 }
   }
 
+  @Test
+  public void testConnectionClosedOnRegionLocate() throws IOException {
+Configuration testConf = new Configuration(this.conf);
+testConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
+// Go against meta else we will try to find first region for the table on 
construction which
+// means we'll have to do a bunch more mocking. Tests that go against meta 
only should be
+// good for a bit of testing.
+Connection connection = ConnectionFactory.createConnection(testConf);
+Table table = connection.getTable(TableName.META_TABLE_NAME);
+connection.close();
+try {
+  Get get = new Get(Bytes.toBytes("dummyRow"));
+  table.get(get);
+  fail("Should have thrown DoNotRetryException but no exception thrown");
+} catch (Exception e) {
+  if (!(e instanceof DoNotRetryIOException)) {
+String errMsg =
+"Should have thrown DoNotRetryException but actually " + 
e.getClass().getSimpleName();
+LOG.error(errMsg, e);
+fail(errMsg);
+  }
+} finally {
+  table.close();
+}
+  }
+
   /**
* Override to shutdown going to zookeeper for cluster id and meta location.
*/



[29/54] [abbrv] hbase git commit: HBASE-17557 HRegionServer#reportRegionSizesForQuotas() should respond to UnsupportedOperationException

2017-03-20 Thread elserj
HBASE-17557 HRegionServer#reportRegionSizesForQuotas() should respond to 
UnsupportedOperationException


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d9ebc1e0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d9ebc1e0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d9ebc1e0

Branch: refs/heads/HBASE-16961
Commit: d9ebc1e07ca32daabba9f6a7d5e366ba41afa253
Parents: 6e04223
Author: tedyu 
Authored: Mon Jan 30 07:47:40 2017 -0800
Committer: Josh Elser 
Committed: Mon Mar 20 17:46:38 2017 -0400

--
 .../quotas/FileSystemUtilizationChore.java  | 20 +---
 .../hbase/regionserver/HRegionServer.java   | 24 
 2 files changed, 36 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d9ebc1e0/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
index 01540eb..efc17ff 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
@@ -53,6 +53,9 @@ public class FileSystemUtilizationChore extends 
ScheduledChore {
   static final String FS_UTILIZATION_MAX_ITERATION_DURATION_KEY = 
"hbase.regionserver.quotas.fs.utilization.chore.max.iteration.millis";
   static final long FS_UTILIZATION_MAX_ITERATION_DURATION_DEFAULT = 5000L;
 
+  private int numberOfCyclesToSkip = 0, prevNumberOfCyclesToSkip = 0;
+  private static final int CYCLE_UPPER_BOUND = 32;
+
   private final HRegionServer rs;
   private final long maxIterationMillis;
   private Iterator leftoverRegions;
@@ -67,6 +70,10 @@ public class FileSystemUtilizationChore extends 
ScheduledChore {
 
   @Override
   protected void chore() {
+if (numberOfCyclesToSkip > 0) {
+  numberOfCyclesToSkip--;
+  return;
+}
 final Map onlineRegionSizes = new HashMap<>();
 final Set onlineRegions = new HashSet<>(rs.getOnlineRegions());
 // Process the regions from the last run if we have any. If we are somehow 
having difficulty
@@ -126,7 +133,14 @@ public class FileSystemUtilizationChore extends 
ScheduledChore {
   + skippedSplitParents + " regions due to being the parent of a 
split, and"
   + skippedRegionReplicas + " regions due to being region replicas.");
 }
-reportRegionSizesToMaster(onlineRegionSizes);
+if (!reportRegionSizesToMaster(onlineRegionSizes)) {
+  // backoff reporting
+  numberOfCyclesToSkip = prevNumberOfCyclesToSkip > 0 ? 2 * 
prevNumberOfCyclesToSkip : 1;
+  if (numberOfCyclesToSkip > CYCLE_UPPER_BOUND) {
+numberOfCyclesToSkip = CYCLE_UPPER_BOUND;
+  }
+  prevNumberOfCyclesToSkip = numberOfCyclesToSkip;
+}
   }
 
   /**
@@ -166,8 +180,8 @@ public class FileSystemUtilizationChore extends 
ScheduledChore {
*
* @param onlineRegionSizes The computed region sizes to report.
*/
-  void reportRegionSizesToMaster(Map onlineRegionSizes) {
-this.rs.reportRegionSizesForQuotas(onlineRegionSizes);
+  boolean reportRegionSizesToMaster(Map onlineRegionSizes) {
+return this.rs.reportRegionSizesForQuotas(onlineRegionSizes);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9ebc1e0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 9be4131..053e4ac 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.ClockOutOfSyncException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.CoordinatedStateManagerFactory;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
@@ -1248,13 +1249,14 @@ public class HRegionServer extends HasThread implements
* Reports the given map of Regions and their size on the filesystem to the 
active Master.
*
* @param onlineRegionSizes A map of region info to size in bytes
+   * @return false if FileSystem

[01/54] [abbrv] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov) [Forced Update!]

2017-03-20 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/HBASE-16961 1393da806 -> 8271fd53f (forced update)


http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
new file mode 100644
index 000..6b007f9
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestFullBackupSetRestoreSet extends TestBackupBase {
+
+  private static final Log LOG = 
LogFactory.getLog(TestFullBackupSetRestoreSet.class);
+
+  @Test
+  public void testFullRestoreSetToOtherTable() throws Exception {
+
+LOG.info("Test full restore set");
+
+// Create set
+try (BackupSystemTable table = new 
BackupSystemTable(TEST_UTIL.getConnection())) {
+  String name = "name";
+  table.addToBackupSet(name, new String[] { table1.getNameAsString() });
+  List names = table.describeBackupSet(name);
+
+  assertNotNull(names);
+  assertTrue(names.size() == 1);
+  assertTrue(names.get(0).equals(table1));
+
+  String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", 
name };
+  // Run backup
+  int ret = ToolRunner.run(conf1, new BackupDriver(), args);
+  assertTrue(ret == 0);
+  List backups = table.getBackupHistory();
+  assertTrue(backups.size() == 1);
+  String backupId = backups.get(0).getBackupId();
+  assertTrue(checkSucceeded(backupId));
+
+  LOG.info("backup complete");
+
+  // Restore from set into other table
+  args =
+  new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-m",
+  table1_restore.getNameAsString(), "-o" };
+  // Run backup
+  ret = ToolRunner.run(conf1, new RestoreDriver(), args);
+  assertTrue(ret == 0);
+  HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+  assertTrue(hba.tableExists(table1_restore));
+  // Verify number of rows in both tables
+  assertEquals(TEST_UTIL.countRows(table1), 
TEST_UTIL.countRows(table1_restore));
+  TEST_UTIL.deleteTable(table1_restore);
+  LOG.info("restore into other table is complete");
+  hba.close();
+}
+  }
+
+  @Test
+  public void testFullRestoreSetToSameTable() throws Exception {
+
+LOG.info("Test full restore set to same table");
+
+// Create set
+try (BackupSystemTable table = new 
BackupSystemTable(TEST_UTIL.getConnection())) {
+  String name = "name1";
+  table.addToBackupSet(name, new String[] { table1.getNameAsString() });
+  List names = table.describeBackupSet(name);
+
+  assertNotNull(names);
+  assertTrue(names.size() == 1);
+  assertTrue(names.get(0).equals(table1));
+
+  String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", 
name };
+  // Run backup
+  int ret = ToolRunner.run(conf1, new BackupDriver(), args);
+  assertTrue(ret == 0);
+  List backups = table.getBackupHistory();
+  String backupId = backups.get(0).getBackupId();
+  assertTrue(checkSucceeded(backupId));
+
+  LOG.info("backup complete");
+  int count = TEST_UTIL.countRows(table1);
+  TEST_UTIL.deleteTable(table1);
+
+  // Restore from set into other table
+ 

[42/54] [abbrv] hbase git commit: HBASE-17478 Avoid reporting FS use when quotas are disabled

2017-03-20 Thread elserj
HBASE-17478 Avoid reporting FS use when quotas are disabled

Also, gracefully produce responses when quotas are disabled.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cc2517f5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cc2517f5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cc2517f5

Branch: refs/heads/HBASE-16961
Commit: cc2517f5e2bc3953e8a34b65cb2bab295f5b5f0f
Parents: 50effd6
Author: Josh Elser 
Authored: Tue Jan 17 14:41:45 2017 -0500
Committer: Josh Elser 
Committed: Mon Mar 20 18:02:55 2017 -0400

--
 .../hadoop/hbase/master/MasterRpcServices.java  |  4 +++
 .../hadoop/hbase/quotas/MasterQuotaManager.java | 13 +--
 .../hbase/regionserver/HRegionServer.java   |  5 ++-
 .../hbase/quotas/TestMasterQuotaManager.java| 37 
 4 files changed, 56 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cc2517f5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index f10d4bb..a7a2f94 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -59,6 +59,7 @@ import 
org.apache.hadoop.hbase.procedure.MasterProcedureManager;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
+import org.apache.hadoop.hbase.quotas.QuotaUtil;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
@@ -2016,6 +2017,9 @@ public class MasterRpcServices extends RSRpcServices
   RegionSpaceUseReportRequest request) throws ServiceException {
 try {
   master.checkInitialized();
+  if (!QuotaUtil.isQuotaEnabled(master.getConfiguration())) {
+return RegionSpaceUseReportResponse.newBuilder().build();
+  }
   MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
   for (RegionSpaceUse report : request.getSpaceUseList()) {
 quotaManager.addRegionSize(HRegionInfo.convert(report.getRegion()), 
report.getSize());

http://git-wip-us.apache.org/repos/asf/hbase/blob/cc2517f5/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index a5832f9..cb614ea 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.quotas;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -58,6 +59,8 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota;
 @InterfaceStability.Evolving
 public class MasterQuotaManager implements RegionStateListener {
   private static final Log LOG = LogFactory.getLog(MasterQuotaManager.class);
+  private static final Map EMPTY_MAP = 
Collections.unmodifiableMap(
+  new HashMap<>());
 
   private final MasterServices masterServices;
   private NamedLock namespaceLocks;
@@ -529,13 +532,19 @@ public class MasterQuotaManager implements 
RegionStateListener {
   }
 
   public void addRegionSize(HRegionInfo hri, long size) {
-// TODO Make proper API
+if (null == regionSizes) {
+  return;
+}
+// TODO Make proper API?
 // TODO Prevent from growing indefinitely
 regionSizes.put(hri, size);
   }
 
   public Map snapshotRegionSizes() {
-// TODO Make proper API
+if (null == regionSizes) {
+  return EMPTY_MAP;
+}
+// TODO Make proper API?
 return new HashMap<>(regionSizes);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/cc2517f5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 79e2c46..81c58c3 100644
--- 
a/hbase-server/src/mai

[48/54] [abbrv] hbase git commit: HBASE-17568 Better handle stale/missing region size reports

2017-03-20 Thread elserj
HBASE-17568 Better handle stale/missing region size reports

* Expire region reports in the master after a timeout.
* Move regions in violation out of violation when insufficient
region size reports are observed.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/10865b5d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/10865b5d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/10865b5d

Branch: refs/heads/HBASE-16961
Commit: 10865b5dac34d239c7935dda5eb15a3297499bea
Parents: 517eb82
Author: Josh Elser 
Authored: Fri Feb 3 16:33:47 2017 -0500
Committer: Josh Elser 
Committed: Mon Mar 20 18:19:31 2017 -0400

--
 .../hadoop/hbase/master/MasterRpcServices.java  |   4 +-
 .../hadoop/hbase/quotas/MasterQuotaManager.java |  86 ++-
 .../hadoop/hbase/quotas/QuotaObserverChore.java |  53 -
 .../hbase/quotas/TestMasterQuotaManager.java|  48 +++-
 .../TestQuotaObserverChoreRegionReports.java| 233 +++
 5 files changed, 412 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/10865b5d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 53560e9..eb2711c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -251,6 +251,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Updat
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.zookeeper.KeeperException;
@@ -2027,8 +2028,9 @@ public class MasterRpcServices extends RSRpcServices
 return RegionSpaceUseReportResponse.newBuilder().build();
   }
   MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
+  final long now = EnvironmentEdgeManager.currentTime();
   for (RegionSpaceUse report : request.getSpaceUseList()) {
-quotaManager.addRegionSize(HRegionInfo.convert(report.getRegion()), 
report.getSize());
+quotaManager.addRegionSize(HRegionInfo.convert(report.getRegion()), 
report.getSize(), now);
   }
   return RegionSpaceUseReportResponse.newBuilder().build();
 } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/10865b5d/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index cb614ea..0622dba 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -22,9 +22,12 @@ import java.io.IOException;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 
+import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -47,6 +50,8 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Throttle;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.ThrottleRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Master Quota Manager.
  * It is responsible for initialize the quota table on the first-run and
@@ -68,7 +73,7 @@ public class MasterQuotaManager implements 
RegionStateListener {
   private NamedLock userLocks;
   private boolean enabled = false;
   private NamespaceAuditor namespaceQuotaManager;
-  private ConcurrentHashMap regionSizes;
+  private ConcurrentHashMap 
regionSizes;
 
   public MasterQuotaManager(final MasterServices masterServices) {
 this.masterServices = masterServices;
@@ -531,21 +536,88 @@ public class MasterQuotaManager implements 
RegionStateListener {
 }
   }
 
-  public void addRegionSize(HRe

[09/54] [abbrv] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2017-03-20 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/BackupProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/BackupProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/BackupProtos.java
new file mode 100644
index 000..4cad101
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/BackupProtos.java
@@ -0,0 +1,7013 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: Backup.proto
+
+package org.apache.hadoop.hbase.shaded.protobuf.generated;
+
+public final class BackupProtos {
+  private BackupProtos() {}
+  public static void registerAllExtensions(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
registry) {
+  }
+
+  public static void registerAllExtensions(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry 
registry) {
+registerAllExtensions(
+
(org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) 
registry);
+  }
+  /**
+   * Protobuf enum {@code hbase.pb.BackupType}
+   */
+  public enum BackupType
+  implements 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum {
+/**
+ * FULL = 0;
+ */
+FULL(0),
+/**
+ * INCREMENTAL = 1;
+ */
+INCREMENTAL(1),
+;
+
+/**
+ * FULL = 0;
+ */
+public static final int FULL_VALUE = 0;
+/**
+ * INCREMENTAL = 1;
+ */
+public static final int INCREMENTAL_VALUE = 1;
+
+
+public final int getNumber() {
+  return value;
+}
+
+/**
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+@java.lang.Deprecated
+public static BackupType valueOf(int value) {
+  return forNumber(value);
+}
+
+public static BackupType forNumber(int value) {
+  switch (value) {
+case 0: return FULL;
+case 1: return INCREMENTAL;
+default: return null;
+  }
+}
+
+public static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<
+BackupType> internalValueMap =
+  new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap()
 {
+public BackupType findValueByNumber(int number) {
+  return BackupType.forNumber(number);
+}
+  };
+
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(ordinal());
+}
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+  return getDescriptor();
+}
+public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(0);
+}
+
+private static final BackupType[] VALUES = values();
+
+public static BackupType valueOf(
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
 desc) {
+  if (desc.getType() != getDescriptor()) {
+throw new java.lang.IllegalArgumentException(
+  "EnumValueDescriptor is not for this type.");
+  }
+  return VALUES[desc.getIndex()];
+}
+
+private final int value;
+
+private BackupType(int value) {
+  this.value = value;
+}
+
+// @@protoc_insertion_point(enum_scope:hbase.pb.BackupType)
+  }
+
+  public interface ServerTimestampOrBuilder extends
+  // @@protoc_insertion_point(interface_extends:hbase.pb.ServerTimestamp)
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+/**
+ * optional .hbase.pb.ServerName server_name = 1;
+ */
+boolean hasServerName();
+/**
+ * optional .hbase.pb.ServerName server_name = 1;
+ */
+org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName 
getServerName();
+/**
+ * optional .hbase.pb.ServerName server_name = 1;
+ */
+
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder
 getServerNameOrBuilder();
+
+/**
+ * optional uint64 timestamp = 2;
+ */
+boolean hasTimestamp();
+/**
+ * optional uint64 timestamp = 2;
+ */
+long getTimestamp();
+  }
+  /**
+   * 
+   **
+   * ServerTimestamp keeps last WAL roll time per Region Server
+   * 
+   *
+   * Protobuf type {@code hbase.pb.ServerTimestamp}
+   */
+  public  static final

[06/54] [abbrv] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2017-03-20 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
new file mode 100644
index 000..b8adac9
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
@@ -0,0 +1,666 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+
+/**
+ * Backup manifest contains all the meta data of a backup image. The manifest 
info will be bundled
+ * as manifest file together with data. So that each backup image will contain 
all the info needed
+ * for restore. BackupManifest is a storage container for BackupImage.
+ * It is responsible for storing/reading backup image data and has some 
additional utility methods.
+ *
+ */
+@InterfaceAudience.Private
+public class BackupManifest {
+
+  private static final Log LOG = LogFactory.getLog(BackupManifest.class);
+
+  // manifest file name
+  public static final String MANIFEST_FILE_NAME = ".backup.manifest";
+
+  /**
+   *  Backup image, the dependency graph is made up by series of backup images
+   *  BackupImage contains all the relevant information to restore the backup 
and
+   *  is used during restore operation
+   */
+
+  public static class BackupImage implements Comparable {
+
+static class Builder {
+  BackupImage image;
+
+  Builder() {
+image = new BackupImage();
+  }
+
+  Builder withBackupId(String backupId) {
+image.setBackupId(backupId);
+return this;
+  }
+
+  Builder withType(BackupType type) {
+image.setType(type);
+return this;
+  }
+
+  Builder withRootDir(String rootDir) {
+image.setRootDir(rootDir);
+return this;
+  }
+
+  Builder withTableList(List tableList) {
+image.setTableList(tableList);
+return this;
+  }
+
+  Builder withStartTime(long startTime) {
+image.setStartTs(startTime);
+return this;
+  }
+
+  Builder withCompleteTime(long completeTime) {
+image.setCompleteTs(completeTime);
+return this;
+  }
+
+  BackupImage build() {
+return image;
+  }
+
+}
+
+private String backupId;
+private BackupType type;
+private String rootDir;
+private List tableList;
+private long startTs;
+private long completeTs;
+private ArrayList ancestors;
+private HashMap> incrTimeRanges;
+
+static Builder newBuilder() {
+  return new Builder();
+}
+
+public BackupImage() {
+  super();
+}
+
+private BackupImage(String backupId, BackupType type, String rootDir,
+List tableList, long startTs, long completeTs) {
+  this.backupId = backupId;
+  this.type = type;
+  this.rootDir = rootDir;
+  this.tableList = tableList;
+  this.startTs = startTs;
+

[45/54] [abbrv] hbase git commit: HBASE-17428 Implement informational RPCs for space quotas

2017-03-20 Thread elserj
HBASE-17428 Implement informational RPCs for space quotas

Create some RPCs that can expose the in-memory state that the
RegionServers and Master hold to drive the space quota "state machine".
Then, create some hbase shell commands to interact with those.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/341cf7e1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/341cf7e1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/341cf7e1

Branch: refs/heads/HBASE-16961
Commit: 341cf7e12e6eac90666c284d2d3f0b5f8a512fbf
Parents: cc2517f
Author: Josh Elser 
Authored: Tue Feb 21 15:36:39 2017 -0500
Committer: Josh Elser 
Committed: Mon Mar 20 18:19:29 2017 -0400

--
 .../hbase/client/ConnectionImplementation.java  |9 +
 .../hadoop/hbase/client/QuotaStatusCalls.java   |  125 +
 .../client/ShortCircuitMasterConnection.java|7 +
 .../hadoop/hbase/quotas/QuotaTableUtil.java |   77 +
 .../hbase/shaded/protobuf/RequestConverter.java |   33 +
 .../shaded/protobuf/generated/AdminProtos.java  |  394 +-
 .../shaded/protobuf/generated/MasterProtos.java |   92 +-
 .../shaded/protobuf/generated/QuotaProtos.java  | 5686 +-
 .../src/main/protobuf/Admin.proto   |9 +
 .../src/main/protobuf/Master.proto  |4 +
 .../src/main/protobuf/Quota.proto   |   35 +
 .../hadoop/hbase/master/MasterRpcServices.java  |   40 +
 .../hbase/quotas/ActivePolicyEnforcement.java   |8 +
 .../hbase/regionserver/RSRpcServices.java   |   57 +
 .../hadoop/hbase/master/MockRegionServer.java   |   18 +
 .../hbase/quotas/TestQuotaStatusRPCs.java   |  192 +
 hbase-shell/src/main/ruby/hbase/quotas.rb   |   16 +
 hbase-shell/src/main/ruby/shell.rb  |3 +
 .../ruby/shell/commands/list_quota_snapshots.rb |   59 +
 .../shell/commands/list_quota_table_sizes.rb|   47 +
 .../shell/commands/list_quota_violations.rb |   48 +
 hbase-shell/src/test/ruby/hbase/quotas_test.rb  |   24 -
 .../test/ruby/hbase/quotas_test_no_cluster.rb   |   69 +
 23 files changed, 6899 insertions(+), 153 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/341cf7e1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index adf1496..2024a17 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -94,6 +94,8 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCa
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
@@ -1731,6 +1733,13 @@ class ConnectionImplementation implements 
ClusterConnection, Closeable {
   ListReplicationPeersRequest request) throws ServiceException {
 return stub.listReplicationPeers(controller, request);
   }
+
+  @Override
+  public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(
+  RpcController controller, GetSpaceQuotaRegionSizesRequest request)
+  throws ServiceException {
+return stub.getSpaceQuotaRegionSizes(controller, request);
+  }
 };
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/341cf7e1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
new file mode 100644
index 000..f0f385d
--- /dev/null
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundat

[43/54] [abbrv] hbase git commit: HBASE-17428 Implement informational RPCs for space quotas

2017-03-20 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/341cf7e1/hbase-protocol-shaded/src/main/protobuf/Admin.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Admin.proto 
b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
index 338c80b..1a085e6 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Admin.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
@@ -28,6 +28,7 @@ option optimize_for = SPEED;
 import "ClusterStatus.proto";
 import "HBase.proto";
 import "WAL.proto";
+import "Quota.proto";
 
 message GetRegionInfoRequest {
   required RegionSpecifier region = 1;
@@ -314,4 +315,12 @@ service AdminService {
 
   rpc GetRegionLoad(GetRegionLoadRequest)
 returns(GetRegionLoadResponse);
+
+  /** Fetches the RegionServer's view of space quotas */
+  rpc GetSpaceQuotaSnapshots(GetSpaceQuotaSnapshotsRequest)
+returns(GetSpaceQuotaSnapshotsResponse);
+
+  /** Fetches the RegionServer's space quota active enforcements */
+  rpc GetSpaceQuotaEnforcements(GetSpaceQuotaEnforcementsRequest)
+returns(GetSpaceQuotaEnforcementsResponse);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/341cf7e1/hbase-protocol-shaded/src/main/protobuf/Master.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto 
b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index 4e856c8..58e6f77 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -929,4 +929,8 @@ service MasterService {
   /** Unmark a list of ServerNames marked as draining. */
   rpc removeDrainFromRegionServers(RemoveDrainFromRegionServersRequest)
 returns(RemoveDrainFromRegionServersResponse);
+
+  /** Fetches the Master's view of space quotas */
+  rpc GetSpaceQuotaRegionSizes(GetSpaceQuotaRegionSizesRequest)
+returns(GetSpaceQuotaRegionSizesResponse);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/341cf7e1/hbase-protocol-shaded/src/main/protobuf/Quota.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Quota.proto 
b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
index 597b059..2d7e5f5 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Quota.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
@@ -111,3 +111,38 @@ message SpaceQuotaSnapshot {
   optional uint64 usage = 2;
   optional uint64 limit = 3;
 }
+
+message GetSpaceQuotaRegionSizesRequest {
+}
+
+message GetSpaceQuotaRegionSizesResponse {
+  message RegionSizes {
+optional TableName table_name = 1;
+optional uint64 size = 2;
+  }
+  repeated RegionSizes sizes = 1;
+}
+
+message GetSpaceQuotaSnapshotsRequest {
+}
+
+message GetSpaceQuotaSnapshotsResponse {
+  // Cannot use TableName as a map key, do the repeated nested message by hand.
+  message TableQuotaSnapshot {
+optional TableName table_name = 1;
+optional SpaceQuotaSnapshot snapshot = 2;
+  }
+  repeated TableQuotaSnapshot snapshots = 1;
+}
+
+message GetSpaceQuotaEnforcementsRequest {
+}
+
+message GetSpaceQuotaEnforcementsResponse {
+  // Cannot use TableName as a map key, do the repeated nested message by hand.
+  message TableViolationPolicy {
+optional TableName table_name = 1;
+optional SpaceViolationPolicy violation_policy = 2;
+  }
+  repeated TableViolationPolicy violation_policies = 1;
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/341cf7e1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index a7a2f94..53560e9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -21,8 +21,11 @@ package org.apache.hadoop.hbase.master;
 import java.io.IOException;
 import java.net.InetAddress;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 
 import org.apache.commons.logging.Log;
@@ -211,6 +214,9 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTa
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.gene

[33/54] [abbrv] hbase git commit: HBASE-16999 Implement master and regionserver synchronization of quota state

2017-03-20 Thread elserj
HBASE-16999 Implement master and regionserver synchronization of quota state

* Implement the RegionServer reading violations from the quota table
* Implement the Master reporting violations to the quota table
* RegionServers need to track its enforced policies


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c544ed1f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c544ed1f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c544ed1f

Branch: refs/heads/HBASE-16961
Commit: c544ed1f556b3458287bb68cd0fa2a0233f6faa6
Parents: a19a95d
Author: Josh Elser 
Authored: Fri Nov 18 15:38:19 2016 -0500
Committer: Josh Elser 
Committed: Mon Mar 20 18:02:49 2017 -0400

--
 .../hadoop/hbase/quotas/QuotaTableUtil.java |  92 -
 .../org/apache/hadoop/hbase/master/HMaster.java |  35 +++-
 .../hadoop/hbase/quotas/QuotaObserverChore.java |   5 +-
 .../hbase/quotas/RegionServerQuotaManager.java  | 200 ---
 .../quotas/RegionServerRpcQuotaManager.java | 200 +++
 .../quotas/RegionServerSpaceQuotaManager.java   | 169 
 .../quotas/SpaceQuotaViolationNotifier.java |  16 +-
 .../SpaceQuotaViolationNotifierFactory.java |  62 ++
 .../SpaceQuotaViolationNotifierForTest.java |   4 +
 ...SpaceQuotaViolationPolicyRefresherChore.java | 154 ++
 .../TableSpaceQuotaViolationNotifier.java   |  55 +
 .../hbase/regionserver/HRegionServer.java   |  21 +-
 .../hbase/regionserver/RSRpcServices.java   |   7 +-
 .../regionserver/RegionServerServices.java  |  12 +-
 .../hadoop/hbase/MockRegionServerServices.java  |  10 +-
 .../hadoop/hbase/master/MockRegionServer.java   |  10 +-
 .../TestQuotaObserverChoreWithMiniCluster.java  |   2 +
 .../hadoop/hbase/quotas/TestQuotaTableUtil.java |  47 +
 .../hadoop/hbase/quotas/TestQuotaThrottle.java  |   4 +-
 .../TestRegionServerSpaceQuotaManager.java  | 127 
 ...SpaceQuotaViolationPolicyRefresherChore.java | 131 
 .../TestTableSpaceQuotaViolationNotifier.java   | 144 +
 22 files changed, 1281 insertions(+), 226 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c544ed1f/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
index 1640ddc..505e94b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
@@ -23,16 +23,20 @@ import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
@@ -43,7 +47,12 @@ import org.apache.hadoop.hbase.filter.QualifierFilter;
 import org.apache.hadoop.hbase.filter.RegexStringComparator;
 import org.apache.hadoop.hbase.filter.RowFilter;
 import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Strings;
 
@@ -52,9 +61,8 @@ import org.apache.hadoop.hbase.util.Strings;
  * 
  * ROW-KEY  FAM/QUALDATA
  *   n. q:s 
- *   n. u:du
  *   t. q:s 
- *   t.
u:du + * t.
u:v * u. q:s * u

[34/54] [abbrv] hbase git commit: HBASE-17259 API to remove space quotas on a table/namespace

HBASE-17259 API to remove space quotas on a table/namespace


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/187a43f1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/187a43f1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/187a43f1

Branch: refs/heads/HBASE-16961
Commit: 187a43f10a11b2441b89c554cae4508e92c17853
Parents: b99e9cf
Author: Josh Elser 
Authored: Wed Jan 11 12:47:06 2017 -0500
Committer: Josh Elser 
Committed: Mon Mar 20 18:02:54 2017 -0400

--
 .../hbase/quotas/QuotaSettingsFactory.java  |  22 +++
 .../hadoop/hbase/quotas/QuotaTableUtil.java |   6 +-
 .../hadoop/hbase/quotas/SpaceLimitSettings.java |  44 -
 .../hbase/quotas/TestQuotaSettingsFactory.java  |  20 +++
 .../shaded/protobuf/generated/QuotaProtos.java  | 157 +++---
 .../src/main/protobuf/Quota.proto   |   1 +
 .../hbase/protobuf/generated/QuotaProtos.java   | 159 ---
 hbase-protocol/src/main/protobuf/Quota.proto|   1 +
 .../hadoop/hbase/quotas/MasterQuotaManager.java |   9 +-
 .../hadoop/hbase/quotas/TestQuotaAdmin.java |  49 +-
 10 files changed, 423 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/187a43f1/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
index 657554a..25ee782 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
@@ -318,6 +318,17 @@ public class QuotaSettingsFactory {
   }
 
   /**
+   * Creates a {@link QuotaSettings} object to remove the FileSystem space 
quota for the given
+   * table.
+   *
+   * @param tableName The name of the table to remove the quota for.
+   * @return A {@link QuotaSettings} object.
+   */
+  public static QuotaSettings removeTableSpaceLimit(TableName tableName) {
+return new SpaceLimitSettings(tableName, true);
+  }
+
+  /**
* Creates a {@link QuotaSettings} object to limit the FileSystem space 
usage for the given
* namespace to the given size in bytes. When the space usage is exceeded by 
all tables in the
* namespace, the provided {@link SpaceViolationPolicy} is enacted on all 
tables in the namespace.
@@ -331,4 +342,15 @@ public class QuotaSettingsFactory {
   final String namespace, long sizeLimit, final SpaceViolationPolicy 
violationPolicy) {
 return new SpaceLimitSettings(namespace, sizeLimit, violationPolicy);
   }
+
+  /**
+   * Creates a {@link QuotaSettings} object to remove the FileSystem space 
quota for the given
+   * namespace.
+   *
+   * @param namespace The namespace to remove the quota on.
+   * @return A {@link QuotaSettings} object.
+   */
+  public static QuotaSettings removeNamespaceSpaceLimit(String namespace) {
+return new SpaceLimitSettings(namespace, true);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/187a43f1/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
index 42da811..ca525f0 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
@@ -390,7 +390,11 @@ public class QuotaTableUtil {
 boolean hasSettings = false;
 hasSettings |= quotas.hasThrottle();
 hasSettings |= quotas.hasBypassGlobals();
-hasSettings |= quotas.hasSpace();
+// Only when there is a space quota, make sure there's actually both 
fields provided
+// Otherwise, it's a noop.
+if (quotas.hasSpace()) {
+  hasSettings |= (quotas.getSpace().hasSoftLimit() && 
quotas.getSpace().hasViolationPolicy());
+}
 return !hasSettings;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/187a43f1/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
index e54882e..8ff7623 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/h

[51/54] [abbrv] hbase git commit: HBASE-17002 JMX metrics and some UI additions for space quotas

HBASE-17002 JMX metrics and some UI additions for space quotas


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6d1558c8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6d1558c8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6d1558c8

Branch: refs/heads/HBASE-16961
Commit: 6d1558c859ebf2986bec6864158cf53e6f54bce9
Parents: 10865b5
Author: Josh Elser 
Authored: Wed Feb 15 14:24:57 2017 -0500
Committer: Josh Elser 
Committed: Mon Mar 20 18:31:28 2017 -0400

--
 .../hbase/client/ConnectionImplementation.java  |8 +
 .../hadoop/hbase/client/QuotaStatusCalls.java   |   39 +-
 .../client/ShortCircuitMasterConnection.java|8 +
 .../hadoop/hbase/quotas/QuotaTableUtil.java |   41 +
 .../hbase/shaded/protobuf/RequestConverter.java |   11 +
 .../hbase/master/MetricsMasterQuotaSource.java  |   75 +
 .../master/MetricsMasterQuotaSourceFactory.java |   26 +
 .../hbase/master/MetricsMasterWrapper.java  |   13 +
 .../MetricsRegionServerQuotaSource.java |   54 +
 .../MetricsMasterQuotaSourceFactoryImpl.java|   36 +
 .../master/MetricsMasterQuotaSourceImpl.java|  129 +
 ...hadoop.hbase.master.MetricsMasterQuotaSource |   18 +
 ...hbase.master.MetricsMasterQuotaSourceFactory |   18 +
 .../shaded/protobuf/generated/MasterProtos.java |   93 +-
 .../shaded/protobuf/generated/QuotaProtos.java  | 3099 +-
 .../src/main/protobuf/Master.proto  |6 +-
 .../src/main/protobuf/Quota.proto   |   17 +
 .../org/apache/hadoop/hbase/master/HMaster.java |2 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   38 +
 .../hadoop/hbase/master/MetricsMaster.java  |   42 +
 .../hbase/master/MetricsMasterWrapperImpl.java  |   42 +-
 .../hadoop/hbase/quotas/QuotaObserverChore.java |   92 +-
 .../resources/hbase-webapps/master/table.jsp|   59 +
 .../hbase/master/TestMasterMetricsWrapper.java  |   17 +
 .../hbase/quotas/TestQuotaStatusRPCs.java   |   83 +
 25 files changed, 4032 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1558c8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 2024a17..c6adba4 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -94,6 +94,8 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCa
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
@@ -1740,6 +1742,12 @@ class ConnectionImplementation implements 
ClusterConnection, Closeable {
   throws ServiceException {
 return stub.getSpaceQuotaRegionSizes(controller, request);
   }
+
+  @Override
+  public GetQuotaStatesResponse getQuotaStates(
+  RpcController controller, GetQuotaStatesRequest request) throws 
ServiceException {
+return stub.getQuotaStates(controller, request);
+  }
 };
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1558c8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
index f0f385d..af36d1e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.Re

[39/54] [abbrv] hbase git commit: HBASE-17001 Enforce quota violation policies in the RegionServer

http://git-wip-us.apache.org/repos/asf/hbase/blob/b99e9cf9/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
index cc40536..d466e59 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
@@ -5778,6 +5778,1284 @@ public final class QuotaProtos {
 // @@protoc_insertion_point(class_scope:hbase.pb.SpaceLimitRequest)
   }
 
+  public interface SpaceQuotaStatusOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// optional .hbase.pb.SpaceViolationPolicy policy = 1;
+/**
+ * optional .hbase.pb.SpaceViolationPolicy policy = 1;
+ */
+boolean hasPolicy();
+/**
+ * optional .hbase.pb.SpaceViolationPolicy policy = 1;
+ */
+
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy 
getPolicy();
+
+// optional bool in_violation = 2;
+/**
+ * optional bool in_violation = 2;
+ */
+boolean hasInViolation();
+/**
+ * optional bool in_violation = 2;
+ */
+boolean getInViolation();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.SpaceQuotaStatus}
+   *
+   * 
+   * Represents the state of a quota on a table. Either the quota is not in 
violation
+   * or it is in violation and there is a violation policy which should be in 
effect.
+   * 
+   */
+  public static final class SpaceQuotaStatus extends
+  com.google.protobuf.GeneratedMessage
+  implements SpaceQuotaStatusOrBuilder {
+// Use SpaceQuotaStatus.newBuilder() to construct.
+private SpaceQuotaStatus(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private SpaceQuotaStatus(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final SpaceQuotaStatus defaultInstance;
+public static SpaceQuotaStatus getDefaultInstance() {
+  return defaultInstance;
+}
+
+public SpaceQuotaStatus getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private SpaceQuotaStatus(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 8: {
+  int rawValue = input.readEnum();
+  
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy 
value = 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.valueOf(rawValue);
+  if (value == null) {
+unknownFields.mergeVarintField(1, rawValue);
+  } else {
+bitField0_ |= 0x0001;
+policy_ = value;
+  }
+  break;
+}
+case 16: {
+  bitField0_ |= 0x0002;
+  inViolation_ = input.readBool();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuotaStatus_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache

[41/54] [abbrv] hbase git commit: HBASE-17025 Add shell commands for space quotas

HBASE-17025 Add shell commands for space quotas


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/50effd66
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/50effd66
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/50effd66

Branch: refs/heads/HBASE-16961
Commit: 50effd66005a1c5e0dda7af853fda92a7d9a123f
Parents: 187a43f
Author: Josh Elser 
Authored: Wed Jan 11 11:55:29 2017 -0500
Committer: Josh Elser 
Committed: Mon Mar 20 18:02:55 2017 -0400

--
 hbase-shell/src/main/ruby/hbase/quotas.rb   |  62 -
 hbase-shell/src/main/ruby/hbase_constants.rb|   1 +
 .../src/main/ruby/shell/commands/set_quota.rb   |  45 +-
 .../hadoop/hbase/client/AbstractTestShell.java  |   1 +
 hbase-shell/src/test/ruby/hbase/quotas_test.rb  | 137 +++
 hbase-shell/src/test/ruby/tests_runner.rb   |   1 +
 6 files changed, 242 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/50effd66/hbase-shell/src/main/ruby/hbase/quotas.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/quotas.rb 
b/hbase-shell/src/main/ruby/hbase/quotas.rb
index bf2dc63..d99fe72 100644
--- a/hbase-shell/src/main/ruby/hbase/quotas.rb
+++ b/hbase-shell/src/main/ruby/hbase/quotas.rb
@@ -24,14 +24,22 @@ java_import org.apache.hadoop.hbase.quotas.ThrottleType
 java_import org.apache.hadoop.hbase.quotas.QuotaFilter
 java_import org.apache.hadoop.hbase.quotas.QuotaRetriever
 java_import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory
+java_import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy
 
 module HBaseQuotasConstants
+  # RPC Quota constants
   GLOBAL_BYPASS = 'GLOBAL_BYPASS'
   THROTTLE_TYPE = 'THROTTLE_TYPE'
   THROTTLE = 'THROTTLE'
   REQUEST = 'REQUEST'
   WRITE = 'WRITE'
   READ = 'READ'
+  # Space quota constants
+  SPACE = 'SPACE'
+  NO_INSERTS = 'NO_INSERTS'
+  NO_WRITES = 'NO_WRITES'
+  NO_WRITES_COMPACTIONS = 'NO_WRITES_COMPACTIONS'
+  DISABLE = 'DISABLE'
 end
 
 module Hbase
@@ -107,6 +115,54 @@ module Hbase
   @admin.setQuota(settings)
 end
 
+def limit_space(args)
+  raise(ArgumentError, 'Argument should be a Hash') unless (not args.nil? 
and args.kind_of?(Hash))
+  # Let the user provide a raw number
+  if args[LIMIT].is_a?(Numeric)
+limit = args[LIMIT]
+  else
+# Parse a string a 1K, 2G, etc.
+limit = _parse_size(args[LIMIT])
+  end
+  # Extract the policy, failing if something bogus was provided
+  policy = SpaceViolationPolicy.valueOf(args[POLICY])
+  # Create a table or namespace quota
+  if args.key?(TABLE)
+if args.key?(NAMESPACE)
+  raise(ArgumentError, "Only one of TABLE or NAMESPACE can be 
specified.")
+end
+settings = 
QuotaSettingsFactory.limitTableSpace(TableName.valueOf(args.delete(TABLE)), 
limit, policy)
+  elsif args.key?(NAMESPACE)
+if args.key?(TABLE)
+  raise(ArgumentError, "Only one of TABLE or NAMESPACE can be 
specified.")
+end
+settings = 
QuotaSettingsFactory.limitNamespaceSpace(args.delete(NAMESPACE), limit, policy)
+  else
+raise(ArgumentError, 'One of TABLE or NAMESPACE must be specified.')
+  end
+  # Apply the quota
+  @admin.setQuota(settings)
+end
+
+def remove_space_limit(args)
+  raise(ArgumentError, 'Argument should be a Hash') unless (not args.nil? 
and args.kind_of?(Hash))
+  if args.key?(TABLE)
+if args.key?(NAMESPACE)
+  raise(ArgumentError, "Only one of TABLE or NAMESPACE can be 
specified.")
+end
+table = TableName.valueOf(args.delete(TABLE))
+settings = QuotaSettingsFactory.removeTableSpaceLimit(table)
+  elsif args.key?(NAMESPACE)
+if args.key?(TABLE)
+  raise(ArgumentError, "Only one of TABLE or NAMESPACE can be 
specified.")
+end
+settings = 
QuotaSettingsFactory.removeNamespaceSpaceLimit(args.delete(NAMESPACE))
+  else
+raise(ArgumentError, 'One of TABLE or NAMESPACE must be specified.')
+  end
+  @admin.setQuota(settings)
+end
+
 def set_global_bypass(bypass, args)
   raise(ArgumentError, "Arguments should be a Hash") unless 
args.kind_of?(Hash)
 
@@ -171,7 +227,7 @@ module Hbase
   return _size_from_str(match[1].to_i, match[2])
 end
   else
-raise "Invalid size limit syntax"
+raise(ArgumentError, "Invalid size limit syntax")
   end
 end
 
@@ -188,7 +244,7 @@ module Hbase
 end
 
 if limit <= 0
-  raise "Invalid throttle limit, must be greater then 0"
+  raise(ArgumentError, "Invalid throttle limit, must be greater then 
0")
 end
 
 case match[3]
@@ -200,7 +256,7 @@ modul

[50/54] [abbrv] hbase git commit: HBASE-17002 JMX metrics and some UI additions for space quotas

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1558c8/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
index f207472..d88efdc 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
@@ -13024,6 +13024,3031 @@ public final class QuotaProtos {
 
   }
 
+  public interface GetQuotaStatesRequestOrBuilder extends
+  // 
@@protoc_insertion_point(interface_extends:hbase.pb.GetQuotaStatesRequest)
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetQuotaStatesRequest}
+   */
+  public  static final class GetQuotaStatesRequest extends
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+  // 
@@protoc_insertion_point(message_implements:hbase.pb.GetQuotaStatesRequest)
+  GetQuotaStatesRequestOrBuilder {
+// Use GetQuotaStatesRequest.newBuilder() to construct.
+private 
GetQuotaStatesRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
+  super(builder);
+}
+private GetQuotaStatesRequest() {
+}
+
+@java.lang.Override
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private GetQuotaStatesRequest(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+  this();
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder 
unknownFields =
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+  }
+}
+  } catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+e).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_GetQuotaStatesRequest_descriptor;
+}
+
+protected 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_GetQuotaStatesRequest_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest.class,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest.Builder.class);
+}
+
+private byte memoizedIsInitialized = -1;
+public final boolean isInitialized() {
+  byte isInitialized = memoizedIsInitialized;
+  if (isInitialized == 1) return true;
+  if (isInitialized == 0) return false;
+
+  memoizedIsInitialized = 1;
+  return true;
+}
+
+public void 
writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream 
output)
+throws java.io.IOException {
+  unknownFields.writeTo(output);
+}
+
+public int getSerializedSize() {
+  int size = memoizedSize;
+  if (size != -1) return size;
+
+  size = 0;
+  size += unknownFields.getSerializedSize();
+  memoizedSize = size;
+  return size;
+}
+
+private static final long serialVersionUID = 0L;
+@java.lang.Override
+public boolean equals(final java.lang.Object obj) {
+  if (obj == this) {
+   return true;
+ 

[08/54] [abbrv] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index 52b0ce5..bb6b40e 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -42223,7 +42223,7 @@ public final class MasterProtos {
* required .hbase.pb.SnapshotDescription snapshot = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>
 
   getSnapshotFieldBuilder() {
 if (snapshotBuilder_ == null) {
   snapshotBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -70510,7 +70510,7 @@ public final class MasterProtos {
 
   /**
* 
-   ** Get a run of the CleanerChore
+   ** Get a run of the CleanerChore 
* 
*
* rpc RunCleanerChore(.hbase.pb.RunCleanerChoreRequest) returns 
(.hbase.pb.RunCleanerChoreResponse);
@@ -72424,7 +72424,7 @@ public final class MasterProtos {
 
 /**
  * 
- ** Get a run of the CleanerChore
+ ** Get a run of the CleanerChore 
  * 
  *
  * rpc RunCleanerChore(.hbase.pb.RunCleanerChoreRequest) returns 
(.hbase.pb.RunCleanerChoreResponse);
@@ -76186,32 +76186,32 @@ public final class MasterProtos {
   
internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_fieldAccessorTable;
   private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
 internal_static_hbase_pb_RunCleanerChoreRequest_descriptor;
-  private static final
+  private static final 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
   internal_static_hbase_pb_RunCleanerChoreRequest_fieldAccessorTable;
   private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
 internal_static_hbase_pb_RunCleanerChoreResponse_descriptor;
-  private static final
+  private static final 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
   internal_static_hbase_pb_RunCleanerChoreResponse_fieldAccessorTable;
   private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
 internal_static_hbase_pb_SetCleanerChoreRunningRequest_descriptor;
-  private static final
+  private static final 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
   
internal_static_hbase_pb_SetCleanerChoreRunningRequest_fieldAccessorTable;
   private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
 internal_static_hbase_pb_SetCleanerChoreRunningResponse_descriptor;
-  private static final
+  private static final 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
   
internal_static_hbase_pb_SetCleanerChoreRunningResponse_fieldAccessorTable;
   private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
 internal_static_hbase_pb_IsCleanerChoreEnabledRequest_descriptor;
-  private static final
+  private static final 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
   internal_static_hbase_pb_IsCleanerChoreEnabledRequest_fieldAccessorTable;
   private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
 internal_static_hbase_pb_IsCleanerChoreEnabledResponse_descriptor;
-  private static final
+  private static final 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
   
internal_static_hbase_pb_IsCleanerChoreEnabledResponse_fieldAccessorTable;
   private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor

http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-protocol-shaded/src/main/protobuf/Backup.proto
--

[28/54] [abbrv] hbase git commit: HBASE-17000 Implement computation of online region sizes and report to the Master

HBASE-17000 Implement computation of online region sizes and report to the 
Master

Includes only a trivial implementation of the Master-side collection --
just enough to write a test that verifies the RegionServer-side collection.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6e042231
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6e042231
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6e042231

Branch: refs/heads/HBASE-16961
Commit: 6e0422319c570c1d7428d76383e0b54d6f8afc8c
Parents: 7271549
Author: Josh Elser 
Authored: Mon Nov 7 13:46:42 2016 -0500
Committer: Josh Elser 
Committed: Mon Mar 20 17:45:33 2017 -0400

--
 .../generated/RegionServerStatusProtos.java | 2071 +-
 .../src/main/protobuf/RegionServerStatus.proto  |   22 +
 .../hadoop/hbase/master/MasterRpcServices.java  |   19 +
 .../quotas/FileSystemUtilizationChore.java  |  205 ++
 .../hadoop/hbase/quotas/MasterQuotaManager.java |   15 +
 .../hbase/regionserver/HRegionServer.java   |   72 +
 .../quotas/TestFileSystemUtilizationChore.java  |  357 +++
 .../hadoop/hbase/quotas/TestRegionSizeUse.java  |  194 ++
 .../TestRegionServerRegionSpaceUseReport.java   |   99 +
 9 files changed, 3032 insertions(+), 22 deletions(-)
--




[53/54] [abbrv] hbase git commit: HBASE-17794 Swap "violation" for "snapshot" where appropriate

HBASE-17794 Swap "violation" for "snapshot" where appropriate

A couple of variables and comments incorrectly use "violation" to describe
what the code is doing. This was a holdover from the early
implementation -- these need to be scrubbed out for clarity.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6238590a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6238590a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6238590a

Branch: refs/heads/HBASE-16961
Commit: 6238590a653153da8928133bba1a8f4e3c5f7a75
Parents: 201da14
Author: Josh Elser 
Authored: Thu Mar 16 19:26:14 2017 -0400
Committer: Josh Elser 
Committed: Mon Mar 20 18:31:30 2017 -0400

--
 .../java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java| 4 ++--
 hbase-protocol-shaded/src/main/protobuf/Quota.proto| 2 +-
 .../org/apache/hadoop/hbase/quotas/QuotaObserverChore.java | 6 +++---
 .../apache/hadoop/hbase/quotas/TableQuotaSnapshotStore.java| 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6238590a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
index ed5da95..725f170 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
@@ -227,7 +227,7 @@ public class QuotaTableUtil {
   }
 
   /**
-   * Creates a {@link Scan} which returns only quota violations from the quota 
table.
+   * Creates a {@link Scan} which returns only quota snapshots from the quota 
table.
*/
   public static Scan makeQuotaSnapshotScan() {
 Scan s = new Scan();
@@ -245,7 +245,7 @@ public class QuotaTableUtil {
* will throw an {@link IllegalArgumentException}.
*
* @param result A row from the quota table.
-   * @param snapshots A map of violations to add the result of this method 
into.
+   * @param snapshots A map of snapshots to add the result of this method into.
*/
   public static void extractQuotaSnapshot(
   Result result, Map snapshots) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/6238590a/hbase-protocol-shaded/src/main/protobuf/Quota.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Quota.proto 
b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
index 1a6d5ed..364c58b 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Quota.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
@@ -98,7 +98,7 @@ message SpaceLimitRequest {
 }
 
 // Represents the state of a quota on a table. Either the quota is not in 
violation
-// or it is in violatino there is a violation policy which should be in effect.
+// or it is in violation there is a violation policy which should be in effect.
 message SpaceQuotaStatus {
   optional SpaceViolationPolicy policy = 1;
   optional bool in_violation = 2;

http://git-wip-us.apache.org/repos/asf/hbase/blob/6238590a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
index 94c5c87..254f2a1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
@@ -532,9 +532,9 @@ public class QuotaObserverChore extends ScheduledChore {
   }
 
   /**
-   * Stores the quota violation state for the given table.
+   * Stores the quota state for the given table.
*/
-  void setTableQuotaViolation(TableName table, SpaceQuotaSnapshot snapshot) {
+  void setTableQuotaSnapshot(TableName table, SpaceQuotaSnapshot snapshot) {
 this.tableQuotaSnapshots.put(table, snapshot);
   }
 
@@ -552,7 +552,7 @@ public class QuotaObserverChore extends ScheduledChore {
   }
 
   /**
-   * Stores the quota violation state for the given namespace.
+   * Stores the quota state for the given namespace.
*/
   void setNamespaceQuotaSnapshot(String namespace, SpaceQuotaSnapshot 
snapshot) {
 this.namespaceQuotaSnapshots.put(namespace, snapshot);

http://git-wip-us.apache.org/repos/asf/hbase/blob/6238590a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TableQuotaSnapshotStore.java
---

[24/54] [abbrv] hbase git commit: HBASE-16995 Build client Java API and client protobuf messages - addendum fixes line lengths (Josh Elser)

HBASE-16995 Build client Java API and client protobuf messages - addendum fixes 
line lengths (Josh Elser)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9684493c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9684493c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9684493c

Branch: refs/heads/HBASE-16961
Commit: 9684493c6fa9ea302dc7913668ec3d953dd3bfc8
Parents: d688893
Author: tedyu 
Authored: Mon Nov 21 13:00:27 2016 -0800
Committer: Josh Elser 
Committed: Mon Mar 20 17:39:21 2017 -0400

--
 .../hbase/quotas/QuotaSettingsFactory.java  | 20 
 .../hadoop/hbase/quotas/SpaceLimitSettings.java |  8 
 .../hbase/shaded/protobuf/ProtobufUtil.java |  9 +
 3 files changed, 21 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9684493c/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
index a91163f..657554a 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
@@ -130,7 +130,8 @@ public class QuotaSettingsFactory {
 
   static QuotaSettings fromSpace(TableName table, String namespace, SpaceQuota 
protoQuota) {
 if ((null == table && null == namespace) || (null != table && null != 
namespace)) {
-  throw new IllegalArgumentException("Can only construct 
SpaceLimitSettings for a table or namespace.");
+  throw new IllegalArgumentException(
+  "Can only construct SpaceLimitSettings for a table or namespace.");
 }
 if (null != table) {
   return SpaceLimitSettings.fromSpaceQuota(table, protoQuota);
@@ -302,29 +303,32 @@ public class QuotaSettingsFactory {
*/
 
   /**
-   * Creates a {@link QuotaSettings} object to limit the FileSystem space 
usage for the given table to the given size in bytes.
-   * When the space usage is exceeded by the table, the provided {@link 
SpaceViolationPolicy} is enacted on the table.
+   * Creates a {@link QuotaSettings} object to limit the FileSystem space 
usage for the given table
+   * to the given size in bytes. When the space usage is exceeded by the 
table, the provided
+   * {@link SpaceViolationPolicy} is enacted on the table.
*
* @param tableName The name of the table on which the quota should be 
applied.
* @param sizeLimit The limit of a table's size in bytes.
* @param violationPolicy The action to take when the quota is exceeded.
* @return An {@link QuotaSettings} object.
*/
-  public static QuotaSettings limitTableSpace(final TableName tableName, long 
sizeLimit, final SpaceViolationPolicy violationPolicy) {
+  public static QuotaSettings limitTableSpace(
+  final TableName tableName, long sizeLimit, final SpaceViolationPolicy 
violationPolicy) {
 return new SpaceLimitSettings(tableName, sizeLimit, violationPolicy);
   }
 
   /**
-   * Creates a {@link QuotaSettings} object to limit the FileSystem space 
usage for the given namespace to the given size in bytes.
-   * When the space usage is exceeded by all tables in the namespace, the 
provided {@link SpaceViolationPolicy} is enacted on
-   * all tables in the namespace.
+   * Creates a {@link QuotaSettings} object to limit the FileSystem space 
usage for the given
+   * namespace to the given size in bytes. When the space usage is exceeded by 
all tables in the
+   * namespace, the provided {@link SpaceViolationPolicy} is enacted on all 
tables in the namespace.
*
* @param namespace The namespace on which the quota should be applied.
* @param sizeLimit The limit of the namespace's size in bytes.
* @param violationPolicy The action to take when the quota is exceeded.
* @return An {@link QuotaSettings} object.
*/
-  public static QuotaSettings limitNamespaceSpace(final String namespace, long 
sizeLimit, final SpaceViolationPolicy violationPolicy) {
+  public static QuotaSettings limitNamespaceSpace(
+  final String namespace, long sizeLimit, final SpaceViolationPolicy 
violationPolicy) {
 return new SpaceLimitSettings(namespace, sizeLimit, violationPolicy);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9684493c/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
 
b/hbase-client/src/main/java/org/apa

[49/54] [abbrv] hbase git commit: HBASE-17002 JMX metrics and some UI additions for space quotas

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1558c8/hbase-protocol-shaded/src/main/protobuf/Master.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto 
b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index 58e6f77..90a7f07 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -930,7 +930,11 @@ service MasterService {
   rpc removeDrainFromRegionServers(RemoveDrainFromRegionServersRequest)
 returns(RemoveDrainFromRegionServersResponse);
 
-  /** Fetches the Master's view of space quotas */
+  /** Fetches the Master's view of space utilization */
   rpc GetSpaceQuotaRegionSizes(GetSpaceQuotaRegionSizesRequest)
 returns(GetSpaceQuotaRegionSizesResponse);
+
+  /** Fetches the Master's view of quotas */
+  rpc GetQuotaStates(GetQuotaStatesRequest)
+returns(GetQuotaStatesResponse);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1558c8/hbase-protocol-shaded/src/main/protobuf/Quota.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Quota.proto 
b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
index 2d7e5f5..1a6d5ed 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Quota.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
@@ -119,6 +119,7 @@ message GetSpaceQuotaRegionSizesResponse {
   message RegionSizes {
 optional TableName table_name = 1;
 optional uint64 size = 2;
+
   }
   repeated RegionSizes sizes = 1;
 }
@@ -146,3 +147,19 @@ message GetSpaceQuotaEnforcementsResponse {
   }
   repeated TableViolationPolicy violation_policies = 1;
 }
+
+message GetQuotaStatesRequest {
+}
+
+message GetQuotaStatesResponse {
+  message TableQuotaSnapshot {
+optional TableName table_name = 1;
+optional SpaceQuotaSnapshot snapshot = 2;
+  }
+  message NamespaceQuotaSnapshot {
+optional string namespace = 1;
+optional SpaceQuotaSnapshot snapshot = 2;
+  }
+  repeated TableQuotaSnapshot table_snapshots = 1;
+  repeated NamespaceQuotaSnapshot ns_snapshots = 2;
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1558c8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index bc4987a..6cfddaa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -907,7 +907,7 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   // Create the quota snapshot notifier
   spaceQuotaSnapshotNotifier = createQuotaSnapshotNotifier();
   spaceQuotaSnapshotNotifier.initialize(getClusterConnection());
-  this.quotaObserverChore = new QuotaObserverChore(this);
+  this.quotaObserverChore = new QuotaObserverChore(this, 
getMasterMetrics());
   // Start the chore to read the region FS space reports and act on them
   getChoreService().scheduleChore(quotaObserverChore);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1558c8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index eb2711c..1c4abc0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -62,7 +62,9 @@ import 
org.apache.hadoop.hbase.procedure.MasterProcedureManager;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
+import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
 import org.apache.hadoop.hbase.quotas.QuotaUtil;
+import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
@@ -214,8 +216,12 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTa
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesReques

[15/54] [abbrv] hbase git commit: HBASE-17691 Add ScanMetrics support for async scan

HBASE-17691 Add ScanMetrics support for async scan


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5b4bb821
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5b4bb821
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5b4bb821

Branch: refs/heads/HBASE-16961
Commit: 5b4bb8217dd4327a89fa29c93ac37bc887d96c2c
Parents: 7c03a21
Author: zhangduo 
Authored: Mon Mar 20 17:12:53 2017 +0800
Committer: zhangduo 
Committed: Mon Mar 20 20:54:04 2017 +0800

--
 .../hadoop/hbase/client/AsyncClientScanner.java |  34 +++-
 .../client/AsyncRpcRetryingCallerFactory.java   |  24 ++-
 .../AsyncScanSingleRegionRpcRetryingCaller.java |  35 ++--
 .../hadoop/hbase/client/AsyncTableBase.java |   9 +-
 .../hadoop/hbase/client/AsyncTableImpl.java |   1 +
 .../hbase/client/AsyncTableResultScanner.java   |   9 +-
 .../hadoop/hbase/client/ClientScanner.java  |   8 +-
 .../hadoop/hbase/client/ConnectionUtils.java|  75 +
 .../hbase/client/RawScanResultConsumer.java |  10 ++
 .../hbase/client/ReversedScannerCallable.java   |  10 +-
 .../hadoop/hbase/client/ScanResultConsumer.java |   9 ++
 .../hadoop/hbase/client/ScannerCallable.java|  88 ++
 .../client/SimpleRawScanResultConsumer.java |  84 ++
 .../hbase/client/SimpleScanResultConsumer.java  |  75 +
 .../hadoop/hbase/client/TestAsyncTableScan.java |  42 -
 .../hbase/client/TestAsyncTableScanMetrics.java | 159 +++
 .../hbase/client/TestRawAsyncTableScan.java |  52 --
 17 files changed, 526 insertions(+), 198 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5b4bb821/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
index fa7aa81..2c1693d 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
@@ -19,8 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.HConstants.EMPTY_END_ROW;
 import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
-import static 
org.apache.hadoop.hbase.client.ConnectionUtils.createScanResultCache;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.getLocateType;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.*;
 
 import java.io.IOException;
 import java.util.concurrent.CompletableFuture;
@@ -29,6 +28,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
@@ -51,6 +51,8 @@ class AsyncClientScanner {
   // AsyncScanSingleRegionRpcRetryingCaller will modify this scan object 
directly.
   private final Scan scan;
 
+  private final ScanMetrics scanMetrics;
+
   private final RawScanResultConsumer consumer;
 
   private final TableName tableName;
@@ -88,29 +90,46 @@ class AsyncClientScanner {
 this.rpcTimeoutNs = rpcTimeoutNs;
 this.startLogErrorsCnt = startLogErrorsCnt;
 this.resultCache = createScanResultCache(scan);
+if (scan.isScanMetricsEnabled()) {
+  this.scanMetrics = new ScanMetrics();
+  consumer.onScanMetricsCreated(scanMetrics);
+} else {
+  this.scanMetrics = null;
+}
   }
 
   private static final class OpenScannerResponse {
 
 public final HRegionLocation loc;
 
+public final boolean isRegionServerRemote;
+
 public final ClientService.Interface stub;
 
 public final HBaseRpcController controller;
 
 public final ScanResponse resp;
 
-public OpenScannerResponse(HRegionLocation loc, Interface stub, 
HBaseRpcController controller,
-ScanResponse resp) {
+public OpenScannerResponse(HRegionLocation loc, boolean 
isRegionServerRemote, Interface stub,
+HBaseRpcController controller, ScanResponse resp) {
   this.loc = loc;
+  this.isRegionServerRemote = isRegionServerRemote;
   this.stub = stub;
   this.controller = controller;
   this.resp = resp;
 }
   }
 
+  private int openScannerTries;
+
   private CompletableFuture 
callOpenScanner(HBaseRpcController controller,
   HRegionLocation loc, ClientService.Interface stub) {
+boolean isRegionServerRemote

[21/54] [abbrv] hbase git commit: HBASE-16995 Build client Java API and client protobuf messages (Josh Elser)

http://git-wip-us.apache.org/repos/asf/hbase/blob/87fa04cc/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
index d14336a..a715115 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
@@ -239,12 +239,20 @@ public final class QuotaProtos {
  * THROTTLE = 1;
  */
 THROTTLE(1),
+/**
+ * SPACE = 2;
+ */
+SPACE(2),
 ;
 
 /**
  * THROTTLE = 1;
  */
 public static final int THROTTLE_VALUE = 1;
+/**
+ * SPACE = 2;
+ */
+public static final int SPACE_VALUE = 2;
 
 
 public final int getNumber() {
@@ -262,6 +270,7 @@ public final class QuotaProtos {
 public static QuotaType forNumber(int value) {
   switch (value) {
 case 1: return THROTTLE;
+case 2: return SPACE;
 default: return null;
   }
 }
@@ -311,6 +320,150 @@ public final class QuotaProtos {
 // @@protoc_insertion_point(enum_scope:hbase.pb.QuotaType)
   }
 
+  /**
+   * 
+   * Defines what action should be taken when the SpaceQuota is violated
+   * 
+   *
+   * Protobuf enum {@code hbase.pb.SpaceViolationPolicy}
+   */
+  public enum SpaceViolationPolicy
+  implements 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum {
+/**
+ * 
+ * Disable the table(s)
+ * 
+ *
+ * DISABLE = 1;
+ */
+DISABLE(1),
+/**
+ * 
+ * No writes, bulk-loads, or compactions
+ * 
+ *
+ * NO_WRITES_COMPACTIONS = 2;
+ */
+NO_WRITES_COMPACTIONS(2),
+/**
+ * 
+ * No writes or bulk-loads
+ * 
+ *
+ * NO_WRITES = 3;
+ */
+NO_WRITES(3),
+/**
+ * 
+ * No puts or bulk-loads, but deletes are allowed
+ * 
+ *
+ * NO_INSERTS = 4;
+ */
+NO_INSERTS(4),
+;
+
+/**
+ * 
+ * Disable the table(s)
+ * 
+ *
+ * DISABLE = 1;
+ */
+public static final int DISABLE_VALUE = 1;
+/**
+ * 
+ * No writes, bulk-loads, or compactions
+ * 
+ *
+ * NO_WRITES_COMPACTIONS = 2;
+ */
+public static final int NO_WRITES_COMPACTIONS_VALUE = 2;
+/**
+ * 
+ * No writes or bulk-loads
+ * 
+ *
+ * NO_WRITES = 3;
+ */
+public static final int NO_WRITES_VALUE = 3;
+/**
+ * 
+ * No puts or bulk-loads, but deletes are allowed
+ * 
+ *
+ * NO_INSERTS = 4;
+ */
+public static final int NO_INSERTS_VALUE = 4;
+
+
+public final int getNumber() {
+  return value;
+}
+
+/**
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+@java.lang.Deprecated
+public static SpaceViolationPolicy valueOf(int value) {
+  return forNumber(value);
+}
+
+public static SpaceViolationPolicy forNumber(int value) {
+  switch (value) {
+case 1: return DISABLE;
+case 2: return NO_WRITES_COMPACTIONS;
+case 3: return NO_WRITES;
+case 4: return NO_INSERTS;
+default: return null;
+  }
+}
+
+public static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<
+SpaceViolationPolicy> internalValueMap =
+  new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap()
 {
+public SpaceViolationPolicy findValueByNumber(int number) {
+  return SpaceViolationPolicy.forNumber(number);
+}
+  };
+
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(ordinal());
+}
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+  return getDescriptor();
+}
+public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.getDescriptor().getEnumTypes().get(3);
+}
+
+private static final SpaceViolationPolicy[] VALUES = values();
+
+public static SpaceViolationPolicy valueOf(
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
 desc) {
+  if (desc.getType() != getDescriptor()) {
+throw new java.lang.IllegalAr

[32/54] [abbrv] hbase git commit: HBASE-16999 Implement master and regionserver synchronization of quota state

http://git-wip-us.apache.org/repos/asf/hbase/blob/c544ed1f/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableSpaceQuotaViolationNotifier.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableSpaceQuotaViolationNotifier.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableSpaceQuotaViolationNotifier.java
new file mode 100644
index 000..4a7000c
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableSpaceQuotaViolationNotifier.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.Objects;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.ArgumentMatcher;
+
+/**
+ * Test case for {@link TableSpaceQuotaViolationNotifier}.
+ */
+@Category(SmallTests.class)
+public class TestTableSpaceQuotaViolationNotifier {
+
+  private TableSpaceQuotaViolationNotifier notifier;
+  private Connection conn;
+
+  @Before
+  public void setup() throws Exception {
+notifier = new TableSpaceQuotaViolationNotifier();
+conn = mock(Connection.class);
+notifier.initialize(conn);
+  }
+
+  @Test
+  public void testToViolation() throws Exception {
+final TableName tn = TableName.valueOf("inviolation");
+final SpaceViolationPolicy policy = SpaceViolationPolicy.NO_INSERTS;
+final Table quotaTable = mock(Table.class);
+
when(conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)).thenReturn(quotaTable);
+
+final Put expectedPut = new Put(Bytes.toBytes("t." + 
tn.getNameAsString()));
+final SpaceQuota protoQuota = SpaceQuota.newBuilder()
+.setViolationPolicy(ProtobufUtil.toProtoViolationPolicy(policy))
+.build();
+expectedPut.addColumn(Bytes.toBytes("u"), Bytes.toBytes("v"), 
protoQuota.toByteArray());
+
+notifier.transitionTableToViolation(tn, policy);
+
+verify(quotaTable).put(argThat(new SingleCellPutMatcher(expectedPut)));
+  }
+
+  @Test
+  public void testToObservance() throws Exception {
+final TableName tn = TableName.valueOf("notinviolation");
+final Table quotaTable = mock(Table.class);
+
when(conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)).thenReturn(quotaTable);
+
+final Delete expectedDelete = new Delete(Bytes.toBytes("t." + 
tn.getNameAsString()));
+expectedDelete.addColumn(Bytes.toBytes("u"), Bytes.toBytes("v"));
+
+notifier.transitionTableToObservance(tn);
+
+verify(quotaTable).delete(argThat(new 
SingleCellDeleteMatcher(expectedDelete)));
+  }
+
+  /**
+   * Parameterized for Puts.
+   */
+  private static class SingleCellPutMatcher extends 
SingleCellMutationMatcher {
+private SingleCellPutMatcher(Put expected) {
+  super(expected);
+}
+  }
+
+  /**
+   * Parameterized for Deletes.
+   */
+  private static class SingleCellDeleteMatcher extends 
SingleCellMutationMatcher {
+private SingleCellDeleteMatcher(Delete expected) {
+  super(expected);
+}
+  }
+
+  /**
+   * Quick hack to verify a Mutation with one column.
+   */
+  private static class SingleCellMutationMatcher extends ArgumentMatcher 
{
+private final Mutation expected;
+
+private SingleCellMutationMatcher(Mutation expected) {
+  this.

[16/54] [abbrv] hbase git commit: HBASE-17806 TestRSGroups#testMoveServersAndTables is flaky in master branch (Guangxu Cheng)

HBASE-17806 TestRSGroups#testMoveServersAndTables is flaky in master branch 
(Guangxu Cheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4088f822
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4088f822
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4088f822

Branch: refs/heads/HBASE-16961
Commit: 4088f822a449acc39c2408a287f820ec26acabf4
Parents: 5b4bb82
Author: tedyu 
Authored: Mon Mar 20 09:26:34 2017 -0700
Committer: tedyu 
Committed: Mon Mar 20 09:26:34 2017 -0700

--
 .../org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java |  3 +++
 .../apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java| 11 +--
 2 files changed, 12 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4088f822/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
index 35563c5..9219c23 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
@@ -132,6 +132,9 @@ public class RSGroupInfo {
 sb.append(", ");
 sb.append(" Servers:");
 sb.append(this.servers);
+sb.append(", ");
+sb.append(" Tables:");
+sb.append(this.tables);
 return sb.toString();
 
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4088f822/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
--
diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index e8cdb78..e5c89c3 100644
--- 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -694,6 +694,7 @@ public abstract class TestRSGroupsBase {
 
   @Test
   public void testMoveServersAndTables() throws Exception {
+LOG.info("testMoveServersAndTables");
 final RSGroupInfo newGroup = addGroup(getGroupName(name.getMethodName()), 
1);
 //create table
 final byte[] familyNameBytes = Bytes.toBytes("f");
@@ -718,6 +719,12 @@ public abstract class TestRSGroupsBase {
   }
 }
 
+LOG.debug("Print group info : " + rsGroupAdmin.listRSGroups());
+int oldDefaultGroupServerSize =
+
rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size();
+int oldDefaultGroupTableSize =
+
rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getTables().size();
+
 //test fail bogus server move
 try {
   
rsGroupAdmin.moveServersAndTables(Sets.newHashSet(Address.fromString("foo:")),
@@ -742,9 +749,9 @@ public abstract class TestRSGroupsBase {
 }
 
 //verify default group info
-Assert.assertEquals(3,
+Assert.assertEquals(oldDefaultGroupServerSize,
 
rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size());
-Assert.assertEquals(4,
+Assert.assertEquals(oldDefaultGroupTableSize,
 
rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getTables().size());
 
 //verify new group info



[26/54] [abbrv] hbase git commit: HBASE-17000 Implement computation of online region sizes and report to the Master

http://git-wip-us.apache.org/repos/asf/hbase/blob/6e042231/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto 
b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
index 1c373ee..23ddd43 100644
--- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
@@ -141,6 +141,22 @@ message SplitTableRegionResponse {
   optional uint64 proc_id = 1;
 }
 
+message RegionSpaceUse {
+  optional RegionInfo region = 1; // A region identifier
+  optional uint64 size = 2; // The size in bytes of the region
+}
+
+/**
+ * Reports filesystem usage for regions.
+ */
+message RegionSpaceUseReportRequest {
+  repeated RegionSpaceUse space_use = 1;
+}
+
+message RegionSpaceUseReportResponse {
+
+}
+
 service RegionServerStatusService {
   /** Called when a region server first starts. */
   rpc RegionServerStartup(RegionServerStartupRequest)
@@ -182,4 +198,10 @@ service RegionServerStatusService {
*/
   rpc getProcedureResult(GetProcedureResultRequest)
 returns(GetProcedureResultResponse);
+
+  /**
+   * Reports Region filesystem space use
+   */
+  rpc ReportRegionSpaceUse(RegionSpaceUseReportRequest)
+returns(RegionSpaceUseReportResponse);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6e042231/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index f86f800..f10d4bb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
+import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
@@ -216,6 +217,9 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
@@ -2006,4 +2010,19 @@ public class MasterRpcServices extends RSRpcServices
   throw new ServiceException(e);
 }
   }
+
+  @Override
+  public RegionSpaceUseReportResponse reportRegionSpaceUse(RpcController 
controller,
+  RegionSpaceUseReportRequest request) throws ServiceException {
+try {
+  master.checkInitialized();
+  MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
+  for (RegionSpaceUse report : request.getSpaceUseList()) {
+quotaManager.addRegionSize(HRegionInfo.convert(report.getRegion()), 
report.getSize());
+  }
+  return RegionSpaceUseReportResponse.newBuilder().build();
+} catch (Exception e) {
+  throw new ServiceException(e);
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6e042231/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
new file mode 100644
index 000..01540eb
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or mor

[23/54] [abbrv] hbase git commit: HBASE-16996 Implement storage/retrieval of filesystem-use quotas into quota table (Josh Elser)

HBASE-16996 Implement storage/retrieval of filesystem-use quotas into quota 
table (Josh Elser)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/72715494
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/72715494
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/72715494

Branch: refs/heads/HBASE-16961
Commit: 727154949e47d3411f76bc967b0ed687fe672134
Parents: 9684493
Author: tedyu 
Authored: Sat Dec 3 14:30:48 2016 -0800
Committer: Josh Elser 
Committed: Mon Mar 20 17:39:21 2017 -0400

--
 .../hadoop/hbase/quotas/QuotaTableUtil.java |  13 +-
 .../hadoop/hbase/quotas/MasterQuotaManager.java |  30 +
 .../hadoop/hbase/quotas/TestQuotaAdmin.java | 125 ++-
 3 files changed, 165 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/72715494/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
index 116dd0c..1640ddc 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
@@ -52,7 +52,9 @@ import org.apache.hadoop.hbase.util.Strings;
  * 
  * ROW-KEY  FAM/QUALDATA
  *   n. q:s 
+ *   n. u:du
  *   t.
q:s + * t.
u:du * u. q:s * u. q:s.
* u. q:s.: @@ -71,6 +73,7 @@ public class QuotaTableUtil { protected static final byte[] QUOTA_FAMILY_USAGE = Bytes.toBytes("u"); protected static final byte[] QUOTA_QUALIFIER_SETTINGS = Bytes.toBytes("s"); protected static final byte[] QUOTA_QUALIFIER_SETTINGS_PREFIX = Bytes.toBytes("s."); + protected static final byte[] QUOTA_QUALIFIER_DISKUSAGE = Bytes.toBytes("du"); protected static final byte[] QUOTA_USER_ROW_KEY_PREFIX = Bytes.toBytes("u."); protected static final byte[] QUOTA_TABLE_ROW_KEY_PREFIX = Bytes.toBytes("t."); protected static final byte[] QUOTA_NAMESPACE_ROW_KEY_PREFIX = Bytes.toBytes("n."); @@ -298,11 +301,16 @@ public class QuotaTableUtil { * Quotas protobuf helpers */ protected static Quotas quotasFromData(final byte[] data) throws IOException { +return quotasFromData(data, 0, data.length); + } + + protected static Quotas quotasFromData( + final byte[] data, int offset, int length) throws IOException { int magicLen = ProtobufMagic.lengthOfPBMagic(); -if (!ProtobufMagic.isPBMagicPrefix(data, 0, magicLen)) { +if (!ProtobufMagic.isPBMagicPrefix(data, offset, magicLen)) { throw new IOException("Missing pb magic prefix"); } -return Quotas.parseFrom(new ByteArrayInputStream(data, magicLen, data.length - magicLen)); +return Quotas.parseFrom(new ByteArrayInputStream(data, offset + magicLen, length - magicLen)); } protected static byte[] quotasToData(final Quotas data) throws IOException { @@ -316,6 +324,7 @@ public class QuotaTableUtil { boolean hasSettings = false; hasSettings |= quotas.hasThrottle(); hasSettings |= quotas.hasBypassGlobals(); +hasSettings |= quotas.hasSpace(); return !hasSettings; } http://git-wip-us.apache.org/repos/asf/hbase/blob/72715494/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java index 5dab2e3..1469268 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java @@ -37,6 +37,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Throttle; import org.apache.hadoop.hbase.shaded.protobuf.generated.Quota

[22/54] [abbrv] hbase git commit: HBASE-16995 Build client Java API and client protobuf messages (Josh Elser)

HBASE-16995 Build client Java API and client protobuf messages (Josh Elser)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/87fa04cc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/87fa04cc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/87fa04cc

Branch: refs/heads/HBASE-16961
Commit: 87fa04cce2e2701e1a2d425ea89677d97386377c
Parents: 16900c8
Author: tedyu 
Authored: Thu Nov 17 10:19:52 2016 -0800
Committer: Josh Elser 
Committed: Mon Mar 20 17:39:17 2017 -0400

--
 .../hbase/quotas/QuotaSettingsFactory.java  |   47 +
 .../apache/hadoop/hbase/quotas/QuotaType.java   |1 +
 .../hadoop/hbase/quotas/SpaceLimitSettings.java |  166 ++
 .../hbase/quotas/SpaceViolationPolicy.java  |   44 +
 .../hbase/shaded/protobuf/ProtobufUtil.java |   51 +
 .../hbase/quotas/TestQuotaSettingsFactory.java  |  148 ++
 .../hbase/quotas/TestSpaceLimitSettings.java|  119 ++
 .../shaded/protobuf/generated/MasterProtos.java |  584 --
 .../shaded/protobuf/generated/QuotaProtos.java  | 1739 +-
 .../src/main/protobuf/Master.proto  |2 +
 .../src/main/protobuf/Quota.proto   |   21 +
 .../hbase/protobuf/generated/QuotaProtos.java   | 1682 -
 hbase-protocol/src/main/protobuf/Quota.proto|   21 +
 13 files changed, 4291 insertions(+), 334 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/87fa04cc/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
index 1a8b934..a91163f 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
@@ -28,6 +28,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRe
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
 
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
@@ -91,6 +92,9 @@ public class QuotaSettingsFactory {
 if (quotas.getBypassGlobals() == true) {
   settings.add(new QuotaGlobalsSettingsBypass(userName, tableName, 
namespace, true));
 }
+if (quotas.hasSpace()) {
+  settings.add(fromSpace(tableName, namespace, quotas.getSpace()));
+}
 return settings;
   }
 
@@ -124,6 +128,18 @@ public class QuotaSettingsFactory {
 return settings;
   }
 
+  static QuotaSettings fromSpace(TableName table, String namespace, SpaceQuota 
protoQuota) {
+if ((null == table && null == namespace) || (null != table && null != 
namespace)) {
+  throw new IllegalArgumentException("Can only construct 
SpaceLimitSettings for a table or namespace.");
+}
+if (null != table) {
+  return SpaceLimitSettings.fromSpaceQuota(table, protoQuota);
+} else {
+  // namespace must be non-null
+  return SpaceLimitSettings.fromSpaceQuota(namespace, protoQuota);
+}
+  }
+
   /* ==
*  RPC Throttle
*/
@@ -280,4 +296,35 @@ public class QuotaSettingsFactory {
   public static QuotaSettings bypassGlobals(final String userName, final 
boolean bypassGlobals) {
 return new QuotaGlobalsSettingsBypass(userName, null, null, bypassGlobals);
   }
+
+  /* ==
+   *  FileSystem Space Settings
+   */
+
+  /**
+   * Creates a {@link QuotaSettings} object to limit the FileSystem space 
usage for the given table to the given size in bytes.
+   * When the space usage is exceeded by the table, the provided {@link 
SpaceViolationPolicy} is enacted on the table.
+   *
+   * @param tableName The name of the table on which the quota should be 
applied.
+   * @param sizeLimit The limit of a table's size in bytes.
+   * @param violationPolicy The action to take when the quota is exceeded.
+   * @return An {@link QuotaSettings} object.
+   */
+  public static QuotaSettings limitTableSpace(final TableName tableName, long 
sizeLimit, final SpaceViolationPolicy violationPolicy) {
+return new SpaceLimitSettings(tableName, sizeLimit, violationPolicy);
+  }
+
+  /**
+   * Creates a {@link QuotaSettings} object to limit the FileSystem space 
usage for the given namespace to the given size in bytes.
+   * When the space usage is exceeded by all tabl

[13/54] [abbrv] hbase git commit: HBASE-17803 Addendum fix NPE

HBASE-17803 Addendum fix NPE


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/261aa944
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/261aa944
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/261aa944

Branch: refs/heads/HBASE-16961
Commit: 261aa9445c3c52e09c10d06168a77d11d0c9b4b4
Parents: 23abc90
Author: Chia-Ping Tsai 
Authored: Sun Mar 19 19:05:25 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Sun Mar 19 19:05:25 2017 +0800

--
 .../test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/261aa944/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 3addb1a..40e50cf 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -43,6 +43,7 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -325,7 +326,8 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
 // recreate the table when user has requested presplit or when existing
 // {RegionSplitPolicy,replica count} does not match requested.
 if ((exists && opts.presplitRegions != DEFAULT_OPTS.presplitRegions)
-  || (!isReadCmd && desc != null && 
!desc.getRegionSplitPolicyClassName().equals(opts.splitPolicy))
+  || (!isReadCmd && desc != null &&
+  !StringUtils.equals(desc.getRegionSplitPolicyClassName(), 
opts.splitPolicy))
   || (!isReadCmd && desc != null && desc.getRegionReplication() != 
opts.replicas)) {
   needsDelete = true;
   // wait, why did it delete my table?!?



[11/54] [abbrv] hbase git commit: Add Eshcar Hillel to pom file

Add Eshcar Hillel to pom file


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b299c138
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b299c138
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b299c138

Branch: refs/heads/HBASE-16961
Commit: b299c1388c499b184c06a7b647c649458f6aa1e0
Parents: 75d0f49
Author: eshcar 
Authored: Sun Mar 19 09:58:51 2017 +0200
Committer: eshcar 
Committed: Sun Mar 19 09:58:51 2017 +0200

--
 pom.xml | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b299c138/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 34e3102..86c98a2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -248,6 +248,12 @@
   -8
 
 
+  eshcar
+  Eshcar Hillel
+  esh...@apache.org
+  +2
+
+
   fenghh
   Honghua Feng
   fen...@apache.org



[18/54] [abbrv] hbase git commit: HBASE-17582 Fix broken drop page cache hint (broken by HBASE-15236).

HBASE-17582 Fix broken drop page cache hint (broken by HBASE-15236).

Change-Id: I2947ab979979f977db7b0c282c4aaf4eb1f26482


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e39e0e63
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e39e0e63
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e39e0e63

Branch: refs/heads/HBASE-16961
Commit: e39e0e634a2252a352ad799bc2957c72e8d2d2e9
Parents: 55d6dca
Author: Apekshit Sharma 
Authored: Wed Feb 1 23:23:46 2017 -0800
Committer: Apekshit Sharma 
Committed: Mon Mar 20 11:19:51 2017 -0700

--
 .../org/apache/hadoop/hbase/regionserver/StoreFileScanner.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e39e0e63/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index ca7dfd4..ab6b0ef 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -129,7 +129,7 @@ public class StoreFileScanner implements KeyValueScanner {
 List sorted_files = new ArrayList<>(files);
 Collections.sort(sorted_files, StoreFile.Comparators.SEQ_ID);
 for (int i = 0; i < sorted_files.size(); i++) {
-  StoreFileReader r = sorted_files.get(i).createReader();
+  StoreFileReader r = sorted_files.get(i).createReader(canUseDrop);
   r.setReplicaStoreFile(isPrimaryReplica);
   StoreFileScanner scanner = r.getStoreFileScanner(cacheBlocks, usePread, 
isCompaction, readPt,
 i, matcher != null ? !matcher.hasNullColumnInQuery() : false);



[10/54] [abbrv] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/75d0f49d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/75d0f49d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/75d0f49d

Branch: refs/heads/HBASE-16961
Commit: 75d0f49dcd9761d32a8dedfaa169844822a9e7a5
Parents: 8e5eeb4
Author: tedyu 
Authored: Sat Mar 18 03:04:19 2017 -0700
Committer: tedyu 
Committed: Sat Mar 18 03:04:19 2017 -0700

--
 bin/hbase   |6 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |4 +-
 .../hbase/client/RpcRetryingCallerImpl.java |3 +-
 .../apache/hadoop/hbase/backup/BackupType.java  |   25 +
 .../hadoop/hbase/util/AbstractHBaseTool.java|   18 +-
 .../hbase/IntegrationTestBackupRestore.java |  311 +
 .../shaded/protobuf/generated/BackupProtos.java | 7013 ++
 .../shaded/protobuf/generated/MasterProtos.java |   18 +-
 .../src/main/protobuf/Backup.proto  |  117 +
 hbase-server/pom.xml|   10 +
 .../apache/hadoop/hbase/backup/BackupAdmin.java |  128 +
 .../hadoop/hbase/backup/BackupCopyJob.java  |   55 +
 .../hadoop/hbase/backup/BackupDriver.java   |  204 +
 .../apache/hadoop/hbase/backup/BackupInfo.java  |  545 ++
 .../hadoop/hbase/backup/BackupRequest.java  |  139 +
 .../hbase/backup/BackupRestoreConstants.java|  115 +
 .../hbase/backup/BackupRestoreFactory.java  |   66 +
 .../hadoop/hbase/backup/BackupTableInfo.java|   82 +
 .../hadoop/hbase/backup/HBackupFileSystem.java  |  141 +
 .../apache/hadoop/hbase/backup/LogUtils.java|   50 +
 .../hadoop/hbase/backup/RestoreDriver.java  |  265 +
 .../apache/hadoop/hbase/backup/RestoreJob.java  |   46 +
 .../hadoop/hbase/backup/RestoreRequest.java |  135 +
 .../hbase/backup/impl/BackupAdminImpl.java  |  524 ++
 .../hbase/backup/impl/BackupCommands.java   |  780 ++
 .../hbase/backup/impl/BackupException.java  |   84 +
 .../hadoop/hbase/backup/impl/BackupManager.java |  472 ++
 .../hbase/backup/impl/BackupManifest.java   |  666 ++
 .../hbase/backup/impl/BackupSystemTable.java| 1376 
 .../backup/impl/FullTableBackupClient.java  |  189 +
 .../backup/impl/IncrementalBackupManager.java   |  344 +
 .../impl/IncrementalTableBackupClient.java  |  216 +
 .../hbase/backup/impl/RestoreTablesClient.java  |  237 +
 .../hbase/backup/impl/TableBackupClient.java|  387 +
 .../backup/mapreduce/HFileSplitterJob.java  |  181 +
 .../mapreduce/MapReduceBackupCopyJob.java   |  344 +
 .../backup/mapreduce/MapReduceRestoreJob.java   |  182 +
 .../hbase/backup/master/BackupLogCleaner.java   |  142 +
 .../master/LogRollMasterProcedureManager.java   |  155 +
 .../regionserver/LogRollBackupSubprocedure.java |  168 +
 .../LogRollBackupSubprocedurePool.java  |  139 +
 .../LogRollRegionServerProcedureManager.java|  185 +
 .../hadoop/hbase/backup/util/BackupSet.java |   58 +
 .../hadoop/hbase/backup/util/BackupUtils.java   |  702 ++
 .../hadoop/hbase/backup/util/RestoreTool.java   |  610 ++
 .../BaseCoordinatedStateManager.java|   20 +-
 .../coordination/ZkCoordinatedStateManager.java |   23 +-
 .../hbase/mapreduce/HFileInputFormat.java   |  174 +
 .../hbase/mapreduce/LoadIncrementalHFiles.java  |   25 +-
 .../hadoop/hbase/mapreduce/WALInputFormat.java  |   42 +-
 .../hadoop/hbase/mapreduce/WALPlayer.java   |   83 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |  129 +-
 .../hbase/master/snapshot/SnapshotManager.java  |4 +-
 .../hbase/procedure/ZKProcedureCoordinator.java |  328 +
 .../procedure/ZKProcedureCoordinatorRpcs.java   |  327 -
 .../flush/MasterFlushTableProcedureManager.java |4 +-
 .../hbase/regionserver/HRegionServer.java   |   17 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |5 +
 .../hadoop/hbase/HBaseTestingUtility.java   |   41 +-
 .../hadoop/hbase/backup/TestBackupBase.java |  293 +
 .../hbase/backup/TestBackupBoundaryTests.java   |   97 +
 .../hbase/backup/TestBackupCommandLineTool.java |  431 ++
 .../hadoop/hbase/backup/TestBackupDelete.java   |  102 +
 .../hbase/backup/TestBackupDeleteRestore.java   |   70 +
 .../hadoop/hbase/backup/TestBackupDescribe.java |  110 +
 .../hbase/backup/TestBackupMultipleDeletes.java |  159 +
 .../hbase/backup/TestBackupShowHistory.java |  148 +
 .../hbase/backup/TestBackupStatusProgress.java  |   96 +
 .../hbase/backup/TestBackupSystemTable.java |  511 ++
 .../hadoop/hbase/backup/TestFullBackup.java |   59 +
 .../hadoop/hbase/backup/TestFullBackupSet.java  |  103 +
 .../backup/TestFullBackupSetRestoreSet.java |  128 +
 .../hadoop/hbase/backup/TestFullRestore.java|  345 +
 .../hbase/backup/TestIncrementalBackup.java |  200 +
 .../TestIncrementalBackupDeleteTable.java   |  129 +

[36/54] [abbrv] hbase git commit: HBASE-17001 Enforce quota violation policies in the RegionServer

http://git-wip-us.apache.org/repos/asf/hbase/blob/b99e9cf9/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
index c493b25..943c898 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
@@ -22,16 +22,12 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
@@ -40,20 +36,15 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.quotas.QuotaObserverChore.TablesWithQuotas;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -62,7 +53,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
-import com.google.common.collect.HashMultimap;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Multimap;
 
@@ -72,11 +62,8 @@ import com.google.common.collect.Multimap;
 @Category(LargeTests.class)
 public class TestQuotaObserverChoreWithMiniCluster {
   private static final Log LOG = 
LogFactory.getLog(TestQuotaObserverChoreWithMiniCluster.class);
-  private static final int SIZE_PER_VALUE = 256;
-  private static final String F1 = "f1";
   private static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
   private static final AtomicLong COUNTER = new AtomicLong(0);
-  private static final long ONE_MEGABYTE = 1024L * 1024L;
   private static final long DEFAULT_WAIT_MILLIS = 500;
 
   @Rule
@@ -84,18 +71,19 @@ public class TestQuotaObserverChoreWithMiniCluster {
 
   private HMaster master;
   private QuotaObserverChore chore;
-  private SpaceQuotaViolationNotifierForTest violationNotifier;
+  private SpaceQuotaSnapshotNotifierForTest snapshotNotifier;
+  private SpaceQuotaHelperForTests helper;
 
   @BeforeClass
   public static void setUp() throws Exception {
 Configuration conf = TEST_UTIL.getConfiguration();
 conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_KEY, 
1000);
 conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_KEY, 
1000);
-conf.setInt(QuotaObserverChore.VIOLATION_OBSERVER_CHORE_DELAY_KEY, 1000);
-conf.setInt(QuotaObserverChore.VIOLATION_OBSERVER_CHORE_PERIOD_KEY, 1000);
+conf.setInt(QuotaObserverChore.QUOTA_OBSERVER_CHORE_DELAY_KEY, 1000);
+conf.setInt(QuotaObserverChore.QUOTA_OBSERVER_CHORE_PERIOD_KEY, 1000);
 conf.setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
-conf.setClass(SpaceQuotaViolationNotifierFactory.VIOLATION_NOTIFIER_KEY,
-SpaceQuotaViolationNotifierForTest.class, 
SpaceQuotaViolationNotifier.class);
+conf.setClass(SpaceQuotaSnapshotNotifierFactory.SNAPSHOT_NOTIFIER_KEY,
+SpaceQuotaSnapshotNotifierForTest.class, 
SpaceQuotaSnapshotNotifier.class);
 TEST_UTIL.startMiniCluster(1);
   }
 
@@ -131,40 +119,55 @@ public class TestQuotaObserverChoreWithMiniCluster {
 }
 
 master = TEST_UTIL.getMiniHBaseCluster().getMaster();
-violationNotifier =
-(SpaceQuotaViolationNotifierForTest) 
master.getSpaceQuotaViolationNotifier();
-violationNotifier.clearTableViolations();
+snapshotNotifier =
+(SpaceQuotaSnapshotNotifierForTest) 
master.getSpaceQuotaSnapshotNotifier();
+snapshotNotifier.clearSnapshots();
 chore = master.getQuotaObserverChore();
+helpe

[46/54] [abbrv] hbase git commit: HBASE-17516 Correctly handle case where table and NS quotas both apply

HBASE-17516 Correctly handle case where table and NS quotas both apply

The logic surrounding when a table and namespace quota both apply
to a table was incorrect, leading to a case where a table quota
violation which should have fired did not because of the less-strict
namespace quota.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a44e50f5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a44e50f5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a44e50f5

Branch: refs/heads/HBASE-16961
Commit: a44e50f59f444acd6a87d83c1c3e9b16dc6575f2
Parents: 341cf7e
Author: Josh Elser 
Authored: Wed Feb 22 18:32:55 2017 -0500
Committer: Josh Elser 
Committed: Mon Mar 20 18:19:31 2017 -0400

--
 .../hadoop/hbase/quotas/QuotaObserverChore.java | 10 ++-
 .../TestQuotaObserverChoreWithMiniCluster.java  | 66 
 .../hbase/quotas/TestQuotaStatusRPCs.java   | 21 ++-
 .../hadoop/hbase/quotas/TestSpaceQuotas.java| 32 +-
 4 files changed, 97 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a44e50f5/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
index 973ac8c..b9f4592 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
@@ -287,7 +287,8 @@ public class QuotaObserverChore extends ScheduledChore {
   // We want to have a policy of "NONE", moving out of violation
   if (!targetStatus.isInViolation()) {
 for (TableName tableInNS : tablesByNamespace.get(namespace)) {
-  if 
(!tableSnapshotStore.getCurrentState(tableInNS).getQuotaStatus().isInViolation())
 {
+  // If there is a quota on this table in violation
+  if 
(tableSnapshotStore.getCurrentState(tableInNS).getQuotaStatus().isInViolation())
 {
 // Table-level quota violation policy is being applied here.
 if (LOG.isTraceEnabled()) {
   LOG.trace("Not activating Namespace violation policy because a 
Table violation"
@@ -298,16 +299,21 @@ public class QuotaObserverChore extends ScheduledChore {
 this.snapshotNotifier.transitionTable(tableInNS, targetSnapshot);
   }
 }
+  // We want to move into violation at the NS level
   } else {
 // Moving tables in the namespace into violation or to a different 
violation policy
 for (TableName tableInNS : tablesByNamespace.get(namespace)) {
-  if 
(tableSnapshotStore.getCurrentState(tableInNS).getQuotaStatus().isInViolation())
 {
+  final SpaceQuotaSnapshot tableQuotaSnapshot =
+tableSnapshotStore.getCurrentState(tableInNS);
+  final boolean hasTableQuota = QuotaSnapshotStore.NO_QUOTA != 
tableQuotaSnapshot;
+  if (hasTableQuota && 
tableQuotaSnapshot.getQuotaStatus().isInViolation()) {
 // Table-level quota violation policy is being applied here.
 if (LOG.isTraceEnabled()) {
   LOG.trace("Not activating Namespace violation policy because a 
Table violation"
   + " policy is already in effect for " + tableInNS);
 }
   } else {
+// No table quota present or a table quota present that is not in 
violation
 LOG.info(tableInNS + " moving into violation of namespace space 
quota with policy " + targetStatus.getPolicy());
 this.snapshotNotifier.transitionTable(tableInNS, targetSnapshot);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a44e50f5/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
index 943c898..63198a8 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
@@ -193,40 +193,42 @@ public class TestQuotaObserverChoreWithMiniCluster {
 
 helper.writeData(tn1, 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE);
 admin.flush(tn1);
-Map violatedQuotas = 
snapshotNotifier.copySnapshots();
+Map snapshots = 
snapshotNotifier.copySnapshots();

[30/54] [abbrv] hbase git commit: HBASE-16998 Implement Master-side analysis of region space reports

http://git-wip-us.apache.org/repos/asf/hbase/blob/a19a95df/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
new file mode 100644
index 000..98236c2
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
@@ -0,0 +1,596 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.quotas.QuotaObserverChore.TablesWithQuotas;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Multimap;
+
+/**
+ * Test class for {@link QuotaObserverChore} that uses a live HBase cluster.
+ */
+@Category(LargeTests.class)
+public class TestQuotaObserverChoreWithMiniCluster {
+  private static final Log LOG = 
LogFactory.getLog(TestQuotaObserverChoreWithMiniCluster.class);
+  private static final int SIZE_PER_VALUE = 256;
+  private static final String F1 = "f1";
+  private static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
+  private static final AtomicLong COUNTER = new AtomicLong(0);
+  private static final long ONE_MEGABYTE = 1024L * 1024L;
+  private static final long DEFAULT_WAIT_MILLIS = 500;
+
+  @Rule
+  public TestName testName = new TestName();
+
+  private HMaster master;
+  private QuotaObserverChore chore;
+  private SpaceQuotaViolationNotifierForTest violationNotifier;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+Configuration conf = TEST_UTIL.getConfiguration();
+conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_KEY, 
1000);
+conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_KEY, 
1000);
+conf.setInt(QuotaObserverChore.VIOLATION_OBSERVER_CHORE_DELAY_KEY, 1000);
+conf.setInt(QuotaObserverChore.VIOLATION_OBSERVER_CHORE_PERIOD_KEY, 1000);
+conf.setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
+TEST_UTIL.startMiniCluster(1);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Before
+  public void removeAllQuotas() throws Exception {
+final Connection conn = TEST_UTIL.getConnection();
+

[19/54] [abbrv] hbase git commit: Added hbase high performance cookbook to the book resources page on the website

Added hbase high performance cookbook to the book resources page on the website


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/16900c8c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/16900c8c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/16900c8c

Branch: refs/heads/HBASE-16961
Commit: 16900c8c25766456aeb624c19d50ee0c203facfa
Parents: e39e0e6
Author: Michael Stack 
Authored: Mon Mar 20 12:01:22 2017 -0700
Committer: Michael Stack 
Committed: Mon Mar 20 12:01:22 2017 -0700

--
 src/main/site/xdoc/resources.xml | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/16900c8c/src/main/site/xdoc/resources.xml
--
diff --git a/src/main/site/xdoc/resources.xml b/src/main/site/xdoc/resources.xml
index d067c1e..078587c 100644
--- a/src/main/site/xdoc/resources.xml
+++ b/src/main/site/xdoc/resources.xml
@@ -37,6 +37,10 @@ under the License.
 http://www.packtpub.com/hbase-administration-for-optimum-database-performance-cookbook/book";>HBase
 Administration Cookbook by Yifeng Jiang.  Publisher: PACKT Publishing, 
Release: Expected August 2012, Pages: 335.
 
 
+
+  https://www.packtpub.com/big-data-and-business-intelligence/hbase-high-performance-cookbook";>HBase
 High Performance Cookbook by Ruchir Choudhry.  Publisher: PACKT 
Publishing, Release: January 2017, Pages: 350.
+
+
 
 
 



[40/54] [abbrv] hbase git commit: HBASE-17001 Enforce quota violation policies in the RegionServer

HBASE-17001 Enforce quota violation policies in the RegionServer

The nuts-and-bolts of filesystem quotas. The Master must inform
RegionServers of the violation of a quota by a table. The RegionServer
must apply the violation policy as configured. Need to ensure
that the proper interfaces exist to satisfy all necessary policies.

This required a massive rewrite of the internal tracking by
the general space quota feature. Instead of tracking "violations",
we need to start tracking "usage". This allows us to make the decision
at the RegionServer level as to when the files in a bulk load request
should be accepted or rejected, which ultimately lets us avoid bulk loads
dramatically exceeding a configured space quota.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b99e9cf9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b99e9cf9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b99e9cf9

Branch: refs/heads/HBASE-16961
Commit: b99e9cf94d5af3af20f8bedfe9a9a681132f8550
Parents: c544ed1
Author: Josh Elser 
Authored: Thu Dec 15 13:27:56 2016 -0500
Committer: Josh Elser 
Committed: Mon Mar 20 18:02:54 2017 -0400

--
 .../hbase/quotas/QuotaExceededException.java|4 +
 .../hadoop/hbase/quotas/QuotaTableUtil.java |   47 +-
 .../hadoop/hbase/quotas/SpaceQuotaSnapshot.java |  192 +++
 .../shaded/protobuf/generated/QuotaProtos.java  | 1384 +-
 .../src/main/protobuf/Quota.proto   |   15 +
 .../hbase/protobuf/generated/QuotaProtos.java   | 1324 -
 hbase-protocol/src/main/protobuf/Quota.proto|   15 +
 .../org/apache/hadoop/hbase/master/HMaster.java |   29 +-
 .../hbase/quotas/ActivePolicyEnforcement.java   |   86 ++
 .../quotas/NamespaceQuotaSnapshotStore.java |  127 ++
 .../quotas/NamespaceQuotaViolationStore.java|  127 --
 .../hadoop/hbase/quotas/QuotaObserverChore.java |  344 +++--
 .../hadoop/hbase/quotas/QuotaSnapshotStore.java |   96 ++
 .../hbase/quotas/QuotaViolationStore.java   |   89 --
 .../quotas/RegionServerSpaceQuotaManager.java   |  179 ++-
 .../hbase/quotas/SpaceLimitingException.java|   95 ++
 .../hbase/quotas/SpaceQuotaRefresherChore.java  |  225 +++
 .../quotas/SpaceQuotaSnapshotNotifier.java  |   45 +
 .../SpaceQuotaSnapshotNotifierFactory.java  |   62 +
 .../quotas/SpaceQuotaViolationNotifier.java |   54 -
 .../SpaceQuotaViolationNotifierFactory.java |   62 -
 .../SpaceQuotaViolationNotifierForTest.java |   54 -
 ...SpaceQuotaViolationPolicyRefresherChore.java |  154 --
 .../quotas/SpaceViolationPolicyEnforcement.java |   91 ++
 .../SpaceViolationPolicyEnforcementFactory.java |   95 ++
 .../hbase/quotas/TableQuotaSnapshotStore.java   |  127 ++
 .../hbase/quotas/TableQuotaViolationStore.java  |  127 --
 .../quotas/TableSpaceQuotaSnapshotNotifier.java |   52 +
 .../TableSpaceQuotaViolationNotifier.java   |   55 -
 .../AbstractViolationPolicyEnforcement.java |  118 ++
 ...LoadVerifyingViolationPolicyEnforcement.java |   50 +
 .../DisableTableViolationPolicyEnforcement.java |   80 +
 .../NoInsertsViolationPolicyEnforcement.java|   55 +
 ...esCompactionsViolationPolicyEnforcement.java |   64 +
 .../NoWritesViolationPolicyEnforcement.java |   54 +
 .../hbase/regionserver/CompactSplitThread.java  |   12 +
 .../hbase/regionserver/RSRpcServices.java   |   92 +-
 .../hbase/quotas/SpaceQuotaHelperForTests.java  |  228 +++
 .../SpaceQuotaSnapshotNotifierForTest.java  |   55 +
 .../quotas/TestActivePolicyEnforcement.java |   74 +
 .../quotas/TestFileSystemUtilizationChore.java  |3 +-
 .../TestNamespaceQuotaViolationStore.java   |   16 +-
 .../hbase/quotas/TestQuotaObserverChore.java|   30 +-
 .../TestQuotaObserverChoreWithMiniCluster.java  |  351 ++---
 .../hadoop/hbase/quotas/TestQuotaTableUtil.java |   34 +-
 .../TestRegionServerSpaceQuotaManager.java  |  174 ++-
 ...SpaceQuotaViolationPolicyRefresherChore.java |  193 ++-
 .../hadoop/hbase/quotas/TestSpaceQuotas.java|  452 ++
 .../quotas/TestTableQuotaViolationStore.java|   22 +-
 .../TestTableSpaceQuotaViolationNotifier.java   |   48 +-
 .../hbase/quotas/TestTablesWithQuotas.java  |8 +-
 .../BaseViolationPolicyEnforcement.java |   31 +
 ...kLoadCheckingViolationPolicyEnforcement.java |  142 ++
 ...tDisableTableViolationPolicyEnforcement.java |   59 +
 ...TestNoInsertsViolationPolicyEnforcement.java |   57 +
 ...esCompactionsViolationPolicyEnforcement.java |   58 +
 .../TestNoWritesViolationPolicyEnforcement.java |   57 +
 57 files changed, 6492 insertions(+), 1481 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b99e9cf9/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java
-

[38/54] [abbrv] hbase git commit: HBASE-17001 Enforce quota violation policies in the RegionServer

http://git-wip-us.apache.org/repos/asf/hbase/blob/b99e9cf9/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
index 8b127d9..973ac8c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
@@ -37,9 +37,8 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.quotas.QuotaViolationStore.ViolationState;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
+import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -54,51 +53,51 @@ import com.google.common.collect.Multimap;
 @InterfaceAudience.Private
 public class QuotaObserverChore extends ScheduledChore {
   private static final Log LOG = LogFactory.getLog(QuotaObserverChore.class);
-  static final String VIOLATION_OBSERVER_CHORE_PERIOD_KEY =
-  "hbase.master.quotas.violation.observer.chore.period";
-  static final int VIOLATION_OBSERVER_CHORE_PERIOD_DEFAULT = 1000 * 60 * 5; // 
5 minutes in millis
+  static final String QUOTA_OBSERVER_CHORE_PERIOD_KEY =
+  "hbase.master.quotas.observer.chore.period";
+  static final int QUOTA_OBSERVER_CHORE_PERIOD_DEFAULT = 1000 * 60 * 5; // 5 
minutes in millis
 
-  static final String VIOLATION_OBSERVER_CHORE_DELAY_KEY =
-  "hbase.master.quotas.violation.observer.chore.delay";
-  static final long VIOLATION_OBSERVER_CHORE_DELAY_DEFAULT = 1000L * 60L; // 1 
minute
+  static final String QUOTA_OBSERVER_CHORE_DELAY_KEY =
+  "hbase.master.quotas.observer.chore.delay";
+  static final long QUOTA_OBSERVER_CHORE_DELAY_DEFAULT = 1000L * 60L; // 1 
minute
 
-  static final String VIOLATION_OBSERVER_CHORE_TIMEUNIT_KEY =
-  "hbase.master.quotas.violation.observer.chore.timeunit";
-  static final String VIOLATION_OBSERVER_CHORE_TIMEUNIT_DEFAULT = 
TimeUnit.MILLISECONDS.name();
+  static final String QUOTA_OBSERVER_CHORE_TIMEUNIT_KEY =
+  "hbase.master.quotas.observer.chore.timeunit";
+  static final String QUOTA_OBSERVER_CHORE_TIMEUNIT_DEFAULT = 
TimeUnit.MILLISECONDS.name();
 
-  static final String VIOLATION_OBSERVER_CHORE_REPORT_PERCENT_KEY =
-  "hbase.master.quotas.violation.observer.report.percent";
-  static final double VIOLATION_OBSERVER_CHORE_REPORT_PERCENT_DEFAULT= 0.95;
+  static final String QUOTA_OBSERVER_CHORE_REPORT_PERCENT_KEY =
+  "hbase.master.quotas.observer.report.percent";
+  static final double QUOTA_OBSERVER_CHORE_REPORT_PERCENT_DEFAULT= 0.95;
 
   private final Connection conn;
   private final Configuration conf;
   private final MasterQuotaManager quotaManager;
   /*
-   * Callback that changes in quota violation are passed to.
+   * Callback that changes in quota snapshots are passed to.
*/
-  private final SpaceQuotaViolationNotifier violationNotifier;
+  private final SpaceQuotaSnapshotNotifier snapshotNotifier;
 
   /*
-   * Preserves the state of quota violations for tables and namespaces
+   * Preserves the state of quota snapshots for tables and namespaces
*/
-  private final Map tableQuotaViolationStates;
-  private final Map namespaceQuotaViolationStates;
+  private final Map tableQuotaSnapshots;
+  private final Map namespaceQuotaSnapshots;
 
   /*
-   * Encapsulates logic for moving tables/namespaces into or out of quota 
violation
+   * Encapsulates logic for tracking the state of a table/namespace WRT space 
quotas
*/
-  private QuotaViolationStore tableViolationStore;
-  private QuotaViolationStore namespaceViolationStore;
+  private QuotaSnapshotStore tableSnapshotStore;
+  private QuotaSnapshotStore namespaceSnapshotStore;
 
   public QuotaObserverChore(HMaster master) {
 this(
 master.getConnection(), master.getConfiguration(),
-master.getSpaceQuotaViolationNotifier(), 
master.getMasterQuotaManager(),
+master.getSpaceQuotaSnapshotNotifier(), master.getMasterQuotaManager(),
 master);
   }
 
   QuotaObserverChore(
-  Connection conn, Configuration conf, SpaceQuotaViolationNotifier 
violationNotifier,
+  Connection conn, Configuration conf, SpaceQuotaSnapshotNotifier 
snapshotNotifier,
   MasterQuotaManager quotaManager, Stoppable stopper) {
 super(
 QuotaObserverChore.class.getSimpleName(), stopper

[35/54] [abbrv] hbase git commit: HBASE-17001 Enforce quota violation policies in the RegionServer

http://git-wip-us.apache.org/repos/asf/hbase/blob/b99e9cf9/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/BaseViolationPolicyEnforcement.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/BaseViolationPolicyEnforcement.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/BaseViolationPolicyEnforcement.java
new file mode 100644
index 000..ec8f1bf
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/BaseViolationPolicyEnforcement.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas.policies;
+
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class BaseViolationPolicyEnforcement {
+
+  static final Append APPEND = new Append(Bytes.toBytes("foo"));
+  static final Delete DELETE = new Delete(Bytes.toBytes("foo"));
+  static final Increment INCREMENT = new Increment(Bytes.toBytes("foo"));
+  static final Put PUT = new Put(Bytes.toBytes("foo"));
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/b99e9cf9/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java
new file mode 100644
index 000..abe1b9d
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas.policies;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.quotas.SpaceLimitingException;
+import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
+import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus;
+import org.apache.hadoop.hbase.quotas.SpaceViolationPolicyEnforcement;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestBulkLoadCheckingViolationPolicyEnforcement {
+
+  FileSystem fs;
+  RegionServerServices rss;
+  TableName tableName;
+  SpaceViolationPolicyEnforcement policy;
+
+  @Before
+  public void setup() {
+fs = mock(FileSystem.class);
+rss = mock(RegionServerServices.class);
+tableName = TableName.valueOf("foo");
+policy = new BulkLoadVerifyingViolationPolicyEnforcement();
+  }
+
+  @Test
+  public void testFilesUnderLimit() throws Exception {
+final List paths = new ArrayList<>();
+final List 

[52/54] [abbrv] hbase git commit: HBASE-17003 Documentation updates for space quotas

HBASE-17003 Documentation updates for space quotas


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/201da141
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/201da141
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/201da141

Branch: refs/heads/HBASE-16961
Commit: 201da1411c96735cdf6a8f0aa1e8c84894acd417
Parents: 6d1558c
Author: Josh Elser 
Authored: Thu Mar 16 16:21:14 2017 -0400
Committer: Josh Elser 
Committed: Mon Mar 20 18:31:30 2017 -0400

--
 src/main/asciidoc/_chapters/ops_mgt.adoc | 64 ++-
 1 file changed, 63 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/201da141/src/main/asciidoc/_chapters/ops_mgt.adoc
--
diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc 
b/src/main/asciidoc/_chapters/ops_mgt.adoc
index e4c077f..f9009f3 100644
--- a/src/main/asciidoc/_chapters/ops_mgt.adoc
+++ b/src/main/asciidoc/_chapters/ops_mgt.adoc
@@ -1705,7 +1705,7 @@ handling multiple workloads:
 
 [[quota]]
 === Quotas
-HBASE-11598 introduces quotas, which allow you to throttle requests based on
+HBASE-11598 introduces RPC quotas, which allow you to throttle requests based 
on
 the following limits:
 
 . <>
@@ -1885,6 +1885,68 @@ at the same time and that fewer scans can be executed at 
the same time. A value
 `0.9` will give more queue/handlers to scans, so the number of scans executed 
will
 increase and the number of gets will decrease.
 
+[[space-quotas]]
+=== Space Quotas
+
+link:https://issues.apache.org/jira/browse/HBASE-16961[HBASE-16961] introduces 
a new type of
+quotas for HBase to leverage: filesystem quotas. These "space" quotas limit 
the amount of space
+on the filesystem that HBase namespaces and tables can consume. If a user, 
malicious or ignorant,
+has the ability to write data into HBase, with enough time, that user can 
effectively crash HBase
+(or worse HDFS) by consuming all available space. When there is no filesystem 
space available,
+HBase crashes because it can no longer create/sync data to the write-ahead log.
+
+This feature allows for a 
namespace. When a space quota is set
+on a namespace, the quota's limit applies to the sum of usage of all tables in 
that namespace.
+When a table with a quota exists in a namespace with a quota, the table quota 
takes priority
+over the namespace quota. This allows for a scenario where a large limit can 
be placed on
+a collection of tables, but a single table in that collection can have a 
fine-grained limit set.
+
+The existing `set_quota` and `list_quota` HBase shell commands can be used to 
interact with
+space quotas. Space quotas are quotas with a `TYPE` of `SPACE` and have 
`LIMIT` and `POLICY`
+attributes. The `LIMIT` is a string that refers to the amount of space on the 
filesystem
+that the quota subject (e.g. the table or namespace) may consume. For example, 
valid values
+of `LIMIT` are `'10G'`, `'2T'`, or `'256M'`. The `POLICY` refers to the action 
that HBase will
+take when the quota subject's usage exceeds the `LIMIT`. The following are 
valid `POLICY` values.
+
+* `NO_INSERTS` - No new data may be written (e.g. `Put`, `Increment`, 
`Append`).
+* `NO_WRITES` - Same as `NO_INSERTS` but `Deletes` are also disallowed.
+* `NO_WRITES_COMPACTIONS` - Same as `NO_WRITES` but compactions are also 
disallowed.
+* `DISABLE` - The table(s) are disabled, preventing all read/write access.
+
+.Setting simple space quotas
+
+# Sets a quota on the table 't1' with a limit of 1GB, disallowing 
Puts/Increments/Appends when the table exceeds 1GB
+hbase> set_quota TYPE => SPACE, TABLE => 't1', LIMIT => '1G', POLICY => 
NO_INSERTS
+
+# Sets a quota on the namespace 'ns1' with a limit of 50TB, disallowing 
Puts/Increments/Appends/Deletes
+hbase> set_quota TYPE => SPACE, NAMESPACE => 'ns1', LIMIT => '50T', POLICY => 
NO_WRITES
+
+# Sets a quota on the table 't3' with a limit of 2TB, disallowing any writes 
and compactions when the table exceeds 2TB.
+hbase> set_quota TYPE => SPACE, TABLE => 't3', LIMIT => '2T', POLICY => 
NO_WRITES_COMPACTIONS
+
+# Sets a quota on the table 't2' with a limit of 50GB, disabling the table 
when it exceeds 50GB
+hbase> set_quota TYPE => SPACE, TABLE => 't2', LIMIT => '50G', POLICY => 
DISABLE
+
+
+Consider the following scenario to set up quotas on a namespace, overriding 
the quota on tables in that namespace
+
+.Table and Namespace space quotas
+
+hbase> create_namespace 'ns1'
+hbase> create 'ns1:t1'
+hbase> create 'ns1:t2'
+hbase> create 'ns1:t3'
+hbase> set_quota TYPE => SPACE, NAMESPACE => 'ns1', LIMIT => '100T', POLICY => 
NO_INSERTS
+hbase> set_quota TYPE => SPACE, TABLE => 'ns1:t2', LIMIT => '200G', POLICY => 
NO_WRITES
+h

[44/54] [abbrv] hbase git commit: HBASE-17428 Implement informational RPCs for space quotas

http://git-wip-us.apache.org/repos/asf/hbase/blob/341cf7e1/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
index d8cd701..f207472 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
@@ -7476,6 +7476,5554 @@ public final class QuotaProtos {
 
   }
 
+  public interface GetSpaceQuotaRegionSizesRequestOrBuilder extends
+  // 
@@protoc_insertion_point(interface_extends:hbase.pb.GetSpaceQuotaRegionSizesRequest)
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetSpaceQuotaRegionSizesRequest}
+   */
+  public  static final class GetSpaceQuotaRegionSizesRequest extends
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+  // 
@@protoc_insertion_point(message_implements:hbase.pb.GetSpaceQuotaRegionSizesRequest)
+  GetSpaceQuotaRegionSizesRequestOrBuilder {
+// Use GetSpaceQuotaRegionSizesRequest.newBuilder() to construct.
+private 
GetSpaceQuotaRegionSizesRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
+  super(builder);
+}
+private GetSpaceQuotaRegionSizesRequest() {
+}
+
+@java.lang.Override
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private GetSpaceQuotaRegionSizesRequest(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+  this();
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder 
unknownFields =
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+  }
+}
+  } catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+e).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_GetSpaceQuotaRegionSizesRequest_descriptor;
+}
+
+protected 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_GetSpaceQuotaRegionSizesRequest_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest.class,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest.Builder.class);
+}
+
+private byte memoizedIsInitialized = -1;
+public final boolean isInitialized() {
+  byte isInitialized = memoizedIsInitialized;
+  if (isInitialized == 1) return true;
+  if (isInitialized == 0) return false;
+
+  memoizedIsInitialized = 1;
+  return true;
+}
+
+public void 
writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream 
output)
+throws java.io.IOException {
+  unknownFields.writeTo(output);
+}
+
+public int getSerializedSize() {
+  int size = memoizedSize;
+  if (size != -1) return size;
+
+  size = 0;
+  size += unknownFields.getSerializedSize();
+  memoizedSize = size;
+  return size;
+}
+
+private static final long serialVersionUID = 0

[02/54] [abbrv] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
new file mode 100644
index 000..ec88549
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -0,0 +1,293 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+/**
+ * This class is only a base for other integration-level backup tests. Do not
+ * add tests here. TestBackupSmallTests is where tests that don't require
+ * bringing machines up/down should go. All other tests should have their own
+ * classes and extend this one.
+ */
+public class TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestBackupBase.class);
+
+  protected static Configuration conf1;
+  protected static Configuration conf2;
+
+  protected static HBaseTestingUtility TEST_UTIL;
+  protected static HBaseTestingUtility TEST_UTIL2;
+  protected static TableName table1 = TableName.valueOf("table1");
+  protected static HTableDescriptor table1Desc;
+  protected static TableName table2 = TableName.valueOf("table2");
+  protected static TableName table3 = TableName.valueOf("table3");
+  protected static TableName table4 = TableName.valueOf("table4");
+
+  protected static TableName table1_restore = 
TableName.valueOf("ns1:table1_restore");
+  protected static TableName table2_restore = 
TableName.valueOf("ns2:table2_restore");
+  protected static TableName table3_restore = 
TableName.valueOf("ns3:table3_restore");
+  protected static TableName table4_restore = 
TableName.valueOf("ns4:table4_restore");
+
+  protected static final int NB_ROWS_IN_BATCH = 99;
+  protected static final byte[] qualName = Bytes.toBytes("q1");
+  protected static final byte[] famName = Bytes.toBytes("f");
+
+  protected static String BACKUP_ROOT_DIR = "/backupUT";
+  protected static String BACKUP_REMOTE_ROOT_DIR = "/backupUT";
+  protected static String provider = "defaultProvider";
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+TEST_UTIL = new HBaseTestingUtility();
+conf1 = TEST_UTIL.getConfiguration();
+conf1.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
+BackupManager.decorateM

[03/54] [abbrv] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java
new file mode 100644
index 000..e90d5c1
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mapreduce;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
+import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.FileSplit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Simple MR input format for HFiles.
+ * This code was borrowed from Apache Crunch project.
+ * Updated to the recent version of HBase.
+ */
+public class HFileInputFormat extends FileInputFormat {
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(HFileInputFormat.class);
+
+  /**
+   * File filter that removes all "hidden" files. This might be something 
worth removing from
+   * a more general purpose utility; it accounts for the presence of metadata 
files created
+   * in the way we're doing exports.
+   */
+  static final PathFilter HIDDEN_FILE_FILTER = new PathFilter() {
+@Override
+public boolean accept(Path p) {
+  String name = p.getName();
+  // Reject names starting with '_' or '.' — the conventional prefixes for
+  // hidden/metadata files (presumably e.g. _SUCCESS, .crc side files
+  // produced during exports — confirm for this export layout).
+  return !name.startsWith("_") && !name.startsWith(".");
+}
+  };
+
+  /**
+   * Record reader for HFiles.
+   */
+  private static class HFileRecordReader extends RecordReader {
+
+private Reader in;
+protected Configuration conf;
+private HFileScanner scanner;
+
+/**
+ * A private cache of the key value so it doesn't need to be loaded twice 
from the scanner.
+ */
+private Cell value = null;
+private long count;
+private boolean seeked = false;
+
+@Override
+public void initialize(InputSplit split, TaskAttemptContext context)
+throws IOException, InterruptedException {
+  // Splits handed out by FileInputFormat are FileSplits; cast to recover
+  // the path of the HFile this reader will scan.
+  FileSplit fileSplit = (FileSplit) split;
+  conf = context.getConfiguration();
+  Path path = fileSplit.getPath();
+  FileSystem fs = path.getFileSystem(conf);
+  LOG.info("Initialize HFileRecordReader for {}", path);
+  this.in = HFile.createReader(fs, path, conf);
+
+  // The file info must be loaded before the scanner can be used.
+  // This seems like a bug in HBase, but it's easily worked around.
+  this.in.loadFileInfo();
+  // getScanner(false, false): presumably cacheBlocks=false, pread=false —
+  // confirm against the HFile.Reader#getScanner javadoc.
+  this.scanner = in.getScanner(false, false);
+
+}
+
+
+@Override
+public boolean nextKeyValue() throws IOException, InterruptedException {
+  boolean hasNext;
+  if (!seeked) {
+// First invocation: position the scanner at the first cell of the file.
+LOG.info("Seeking to start");
+hasNext = scanner.seekTo();
+seeked = true;
+  } else {
+// Subsequent invocations advance one cell at a time.
+hasNext = scanner.next();
+  }
+  if (!hasNext) {
+// End of file; 'value' is left untouched.
+return false;
+  }
+  // Cache the current cell so getCurrentValue() need not touch the scanner.
+  value = scanner.getCell();
+  count++;
+  return true;
+}
+
+// This input format produces value-only records: the key is always
+// NullWritable.
+@Override
+public NullWritable getCurrentKey() throws IOException, 
InterruptedException {
+  return NullWritable.get();
+}
+
+// Returns the cell cached by the most recent successful nextKeyValue() call
+// (null before the first call).
+@Override
+public Cell getCurrentValue() throws IOException, InterruptedException {
+  return value;
+}
+
+@Override
+public float getProgress() throws IOException, InterruptedException {
+ 

[37/54] [abbrv] hbase git commit: HBASE-17001 Enforce quota violation policies in the RegionServer

http://git-wip-us.apache.org/repos/asf/hbase/blob/b99e9cf9/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicyEnforcementFactory.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicyEnforcementFactory.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicyEnforcementFactory.java
new file mode 100644
index 000..6b754b9
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicyEnforcementFactory.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus;
+import 
org.apache.hadoop.hbase.quotas.policies.BulkLoadVerifyingViolationPolicyEnforcement;
+import 
org.apache.hadoop.hbase.quotas.policies.DisableTableViolationPolicyEnforcement;
+import 
org.apache.hadoop.hbase.quotas.policies.NoInsertsViolationPolicyEnforcement;
+import 
org.apache.hadoop.hbase.quotas.policies.NoWritesCompactionsViolationPolicyEnforcement;
+import 
org.apache.hadoop.hbase.quotas.policies.NoWritesViolationPolicyEnforcement;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+
+/**
+ * A factory class for instantiating {@link SpaceViolationPolicyEnforcement}
+ * instances. Stateless singleton; obtain it via {@link #getInstance()}.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class SpaceViolationPolicyEnforcementFactory {
+
+  private static final SpaceViolationPolicyEnforcementFactory INSTANCE =
+  new SpaceViolationPolicyEnforcementFactory();
+
+  // Singleton: private constructor keeps callers on getInstance().
+  private SpaceViolationPolicyEnforcementFactory() {}
+
+  /**
+   * Returns the shared instance of this factory.
+   */
+  public static SpaceViolationPolicyEnforcementFactory getInstance() {
+return INSTANCE;
+  }
+
+  /**
+   * Constructs and initializes the {@link SpaceViolationPolicyEnforcement}
+   * matching the violation policy recorded in {@code snapshot}, for a table
+   * that is in violation of its space quota.
+   *
+   * @param rss services of the hosting region server, handed to the
+   *     enforcement's initialize()
+   * @param tableName the table the quota applies to
+   * @param snapshot current space-quota snapshot; must report a violation
+   * @return an initialized enforcement implementing the snapshot's policy
+   * @throws IllegalArgumentException if the snapshot is not in violation, or
+   *     if its policy is not one of the handled SpaceViolationPolicy values
+   */
+  public SpaceViolationPolicyEnforcement create(
+  RegionServerServices rss, TableName tableName, SpaceQuotaSnapshot 
snapshot) {
+SpaceViolationPolicyEnforcement enforcement;
+SpaceQuotaStatus status = snapshot.getQuotaStatus();
+if (!status.isInViolation()) {
+  throw new IllegalArgumentException(tableName + " is not in violation. 
Snapshot=" + snapshot);
+}
+// Map each declared policy to its concrete enforcement implementation.
+switch (status.getPolicy()) {
+  case DISABLE:
+enforcement = new DisableTableViolationPolicyEnforcement();
+break;
+  case NO_WRITES_COMPACTIONS:
+enforcement = new NoWritesCompactionsViolationPolicyEnforcement();
+break;
+  case NO_WRITES:
+enforcement = new NoWritesViolationPolicyEnforcement();
+break;
+  case NO_INSERTS:
+enforcement = new NoInsertsViolationPolicyEnforcement();
+break;
+  default:
+throw new IllegalArgumentException("Unhandled SpaceViolationPolicy: " 
+ status.getPolicy());
+}
+enforcement.initialize(rss, tableName, snapshot);
+return enforcement;
+  }
+
+  /**
+   * Creates the "default" {@link SpaceViolationPolicyEnforcement} for a table
+   * that isn't in violation. This gives uniform policy checking for tables
+   * both in and not in violation of their quotas.
+   *
+   * @param rss services of the hosting region server
+   * @param tableName the table the quota applies to
+   * @param snapshot current space-quota snapshot; must NOT report a violation
+   * @return an initialized bulk-load-verifying enforcement
+   * @throws IllegalArgumentException if the snapshot reports a violation
+   */
+  public SpaceViolationPolicyEnforcement createWithoutViolation(
+  RegionServerServices rss, TableName tableName, SpaceQuotaSnapshot 
snapshot) {
+SpaceQuotaStatus status = snapshot.getQuotaStatus();
+if (status.isInViolation()) {
+  throw new IllegalArgumentException(
+  tableName + " is in violation. Logic error. Snapshot=" + snapshot);
+}
+BulkLoadVerifyingViolationPolicyEnforcement enforcement = new 
BulkLoadVerifyingViolationPolicyEnforcement();
+enforcement.initialize(rss, tableName, snapshot);
+return enforcement;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/b99e9cf9/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TableQuotaSnapshotStore.java
---

[14/54] [abbrv] hbase git commit: HBASE-17802 Add note that minor versions can add methods to Interfaces

HBASE-17802 Add note that minor versions can add methods to Interfaces


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7c03a213
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7c03a213
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7c03a213

Branch: refs/heads/HBASE-16961
Commit: 7c03a213ffc074c941333677065031a5c2c12d41
Parents: 261aa94
Author: Michael Stack 
Authored: Fri Mar 17 16:53:47 2017 -0700
Committer: Michael Stack 
Committed: Sun Mar 19 14:51:11 2017 -0700

--
 src/main/asciidoc/_chapters/upgrading.adoc | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7c03a213/src/main/asciidoc/_chapters/upgrading.adoc
--
diff --git a/src/main/asciidoc/_chapters/upgrading.adoc 
b/src/main/asciidoc/_chapters/upgrading.adoc
index b0a5565..df5bbfe 100644
--- a/src/main/asciidoc/_chapters/upgrading.adoc
+++ b/src/main/asciidoc/_chapters/upgrading.adoc
@@ -74,12 +74,15 @@ In addition to the usual API versioning considerations 
HBase has other compatibi
 * An API needs to be deprecated for a major version before we will 
change/remove it.
 * APIs available in a patch version will be available in all later patch 
versions. However, new APIs may be added which will not be available in earlier 
patch versions.
 * New APIs introduced in a patch version will only be added in a source 
compatible way footnote:[See 'Source Compatibility' 
https://blogs.oracle.com/darcy/entry/kinds_of_compatibility]: i.e. code that 
implements public APIs will continue to compile.
-* Example: A user using a newly deprecated API does not need to modify 
application code with HBase API calls until the next major version.
+** Example: A user using a newly deprecated API does not need to modify 
application code with HBase API calls until the next major version.
+* 
 
 .Client Binary compatibility
 * Client code written to APIs available in a given patch release can run 
unchanged (no recompilation needed) against the new jars of later patch 
versions.
 * Client code written to APIs available in a given patch release might not run 
against the old jars from an earlier patch version.
-* Example: Old compiled client code will work unchanged with the new jars.
+** Example: Old compiled client code will work unchanged with the new jars.
+* If a Client implements an HBase Interface, a recompile MAY be required when
+upgrading to a newer minor version (see the release notes for warnings about
+incompatible changes). All effort will be made to provide a default
+implementation so this case should not arise.
 
 .Server-Side Limited API compatibility (taken from Hadoop)
 * Internal APIs are marked as Stable, Evolving, or Unstable



[25/54] [abbrv] hbase git commit: HBASE-16995 Build client Java API and client protobuf messages - addendum fixes white spaces (Josh Elser)

HBASE-16995 Build client Java API and client protobuf messages - addendum fixes 
white spaces (Josh Elser)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d688893c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d688893c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d688893c

Branch: refs/heads/HBASE-16961
Commit: d688893c99cf23d826d0832f0ae8bee0c041a660
Parents: 87fa04c
Author: tedyu 
Authored: Thu Nov 17 10:42:18 2016 -0800
Committer: Josh Elser 
Committed: Mon Mar 20 17:39:21 2017 -0400

--
 .../hbase/quotas/TestQuotaSettingsFactory.java|  2 +-
 .../shaded/protobuf/generated/MasterProtos.java   |  2 +-
 .../shaded/protobuf/generated/QuotaProtos.java| 18 +-
 .../hbase/protobuf/generated/QuotaProtos.java |  4 ++--
 4 files changed, 13 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d688893c/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java
--
diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java
 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java
index 17015d6..e0012a7 100644
--- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java
+++ 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java
@@ -44,7 +44,7 @@ import org.junit.experimental.categories.Category;
  */
 @Category(SmallTests.class)
 public class TestQuotaSettingsFactory {
-  
+
   @Test
   public void testAllQuotasAddedToList() {
 final SpaceQuota spaceQuota = SpaceQuota.newBuilder()

http://git-wip-us.apache.org/repos/asf/hbase/blob/d688893c/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index cfa1bf3..668b4f6 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -63752,7 +63752,7 @@ public final class MasterProtos {
* optional .hbase.pb.SpaceLimitRequest space_limit = 8;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder>
   getSpaceLimitFieldBuilder() {
 if (spaceLimitBuilder_ == null) {
   spaceLimitBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<

http://git-wip-us.apache.org/repos/asf/hbase/blob/d688893c/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
index a715115..673fb2c 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
@@ -4362,7 +4362,7 @@ public final class QuotaProtos {
* optional .hbase.pb.SpaceQuota space = 3;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.gene

[17/54] [abbrv] hbase git commit: HBASE-16084 Cleaned up the stale references in Javadoc

HBASE-16084 Cleaned up the stale references in Javadoc

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/55d6dcaf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/55d6dcaf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/55d6dcaf

Branch: refs/heads/HBASE-16961
Commit: 55d6dcaf877cc5223e679736eb613173229c18be
Parents: 4088f82
Author: Jan Hentschel 
Authored: Sun Mar 19 20:49:28 2017 +0100
Committer: tedyu 
Committed: Mon Mar 20 10:55:36 2017 -0700

--
 .../org/apache/hadoop/hbase/HTableDescriptor.java  | 14 +++---
 .../apache/hadoop/hbase/client/AsyncProcess.java   |  6 +++---
 .../hadoop/hbase/client/ConnectionFactory.java |  2 +-
 .../apache/hadoop/hbase/client/MasterCallable.java |  2 +-
 .../java/org/apache/hadoop/hbase/client/Query.java |  2 +-
 .../hbase/client/coprocessor/package-info.java |  8 
 .../FirstKeyValueMatchingQualifiersFilter.java |  2 +-
 .../hadoop/hbase/ipc/ServerRpcController.java  |  4 ++--
 .../hbase/zookeeper/RecoverableZooKeeper.java  |  2 +-
 .../java/org/apache/hadoop/hbase/nio/ByteBuff.java |  2 +-
 .../org/apache/hadoop/hbase/util/OrderedBytes.java |  2 +-
 .../hadoop/hbase/HBaseCommonTestingUtility.java|  3 +--
 .../codec/prefixtree/scanner/CellSearcher.java |  2 +-
 .../store/wal/ProcedureWALFormatReader.java|  3 +--
 .../apache/hadoop/hbase/backup/HFileArchiver.java  |  2 --
 .../hbase/backup/example/HFileArchiveManager.java  |  3 ++-
 .../hadoop/hbase/backup/util/RestoreTool.java  |  4 ++--
 .../apache/hadoop/hbase/constraint/Constraint.java |  8 
 .../hbase/io/hfile/CompoundBloomFilterWriter.java  |  2 +-
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java   |  2 +-
 .../hadoop/hbase/io/hfile/HFileBlockIndex.java |  2 +-
 .../apache/hadoop/hbase/mapreduce/RowCounter.java  |  3 +--
 .../hadoop/hbase/master/TableNamespaceManager.java |  2 +-
 .../master/balancer/StochasticLoadBalancer.java|  4 ++--
 .../regionserver/MiniBatchOperationInProgress.java |  4 ++--
 .../hadoop/hbase/regionserver/StoreFileReader.java |  2 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java  |  8 
 .../regionserver/wal/SequenceIdAccounting.java |  6 +++---
 .../hadoop/hbase/regionserver/wal/SyncFuture.java  |  4 ++--
 .../access/CoprocessorWhitelistMasterObserver.java |  2 +-
 .../hbase/security/access/TableAuthManager.java|  2 +-
 .../apache/hadoop/hbase/HBaseTestingUtility.java   |  6 +++---
 .../apache/hadoop/hbase/TestMetaTableLocator.java  |  8 
 .../hbase/TestPartialResultsFromClientSide.java|  2 +-
 .../org/apache/hadoop/hbase/TestSerialization.java |  4 ++--
 .../hbase/client/TestMultipleTimestamps.java   |  2 +-
 .../TestFirstKeyValueMatchingQualifiersFilter.java |  2 +-
 .../apache/hadoop/hbase/io/hfile/TestSeekTo.java   |  2 +-
 .../hbase/mapreduce/TestHFileOutputFormat2.java| 17 +++--
 .../hadoop/hbase/mapreduce/TestImportExport.java   |  2 +-
 .../hadoop/hbase/master/MockRegionServer.java  |  4 ++--
 .../hadoop/hbase/master/TestWarmupRegion.java  |  2 +-
 .../hbase/procedure/TestProcedureMember.java   |  4 ++--
 .../hbase/regionserver/DataBlockEncodingTool.java  |  2 +-
 .../hbase/regionserver/OOMERegionServer.java   |  3 +--
 .../TestRegionMergeTransactionOnCluster.java   |  6 --
 .../org/apache/hadoop/hbase/util/LoadTestTool.java |  2 +-
 .../hadoop/hbase/util/MultiThreadedAction.java |  4 ++--
 48 files changed, 86 insertions(+), 100 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index a49cf1c..25fd896 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -723,7 +723,7 @@ public class HTableDescriptor implements 
Comparable {
   /**
* This sets the class associated with the region split policy which
* determines when a region split should occur.  The class used by
-   * default is defined in {@link 
org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
+   * default is defined in 
org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
* @param clazz the class name
*/
   public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
@@ -734,7 +734,7 @@ public class HTableDescriptor implements 
Comparable {
   /**
* This gets the class associated with the region split policy which
* determines when a region split should occu

[05/54] [abbrv] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
new file mode 100644
index 000..0f1453e
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -0,0 +1,344 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable.WALItem;
+import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
+
+/**
+ * After a full backup was created, the incremental backup will only store the 
changes made after
+ * the last full or incremental backup. Creating the backup copies the 
logfiles in .logs and
+ * .oldlogs since the last backup timestamp.
+ */
+@InterfaceAudience.Private
+public class IncrementalBackupManager extends BackupManager {
+  public static final Log LOG = 
LogFactory.getLog(IncrementalBackupManager.class);
+
+  // Simple pass-through to the BackupManager constructor; no
+  // incremental-specific setup happens here.
+  public IncrementalBackupManager(Connection conn, Configuration conf) throws 
IOException {
+super(conn, conf);
+  }
+
+  /**
+   * Obtain the list of logs that need to be copied out for this incremental 
backup. The list is set
+   * in BackupInfo.
+   * @param conn the Connection
+   * @param backupInfo backup info
+   * @return The new HashMap of RS log timestamps after the log roll for this 
incremental backup.
+   * @throws IOException exception
+   */
+  public HashMap getIncrBackupLogFileList(Connection conn, 
BackupInfo backupInfo)
+  throws IOException {
+List logList;
+HashMap newTimestamps;
+HashMap previousTimestampMins;
+
+String savedStartCode = readBackupStartCode();
+
+// key: tableName
+// value: 
+HashMap> previousTimestampMap = 
readLogTimestampMap();
+
+previousTimestampMins = 
BackupUtils.getRSLogTimestampMins(previousTimestampMap);
+
+if (LOG.isDebugEnabled()) {
+  LOG.debug("StartCode " + savedStartCode + "for backupID " + 
backupInfo.getBackupId());
+}
+// get all new log files from .logs and .oldlogs after last TS and before 
new timestamp
+if (savedStartCode == null || previousTimestampMins == null
+|| previousTimestampMins.isEmpty()) {
+  throw new IOException(
+  "Cannot read any previous back up timestamps from backup system 
table. "
+  + "In order to create an incremental backup, at least one full 
backup is needed.");
+}
+
+LOG.info("Execute roll log procedure for incremental backup ...");
+HashMap props = new HashMap();
+props.put("backupRoot", backupInfo.getBackupRootDir());
+
+try (Admin admin = conn.getAdmin();) {
+
+  
admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
+LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
+
+}
+newTimestamps = readRegionServerLastLogRollResult();
+
+logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, 
conf, savedStartCode);
+List logFromSystemTable =
+ 

[20/54] [abbrv] hbase git commit: HBASE-16995 Build client Java API and client protobuf messages (Josh Elser)

http://git-wip-us.apache.org/repos/asf/hbase/blob/87fa04cc/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
index 05894b9..1925828 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
@@ -217,12 +217,20 @@ public final class QuotaProtos {
  * THROTTLE = 1;
  */
 THROTTLE(0, 1),
+/**
+ * SPACE = 2;
+ */
+SPACE(1, 2),
 ;
 
 /**
  * THROTTLE = 1;
  */
 public static final int THROTTLE_VALUE = 1;
+/**
+ * SPACE = 2;
+ */
+public static final int SPACE_VALUE = 2;
 
 
 public final int getNumber() { return value; }
@@ -230,6 +238,7 @@ public final class QuotaProtos {
 public static QuotaType valueOf(int value) {
   switch (value) {
 case 1: return THROTTLE;
+case 2: return SPACE;
 default: return null;
   }
 }
@@ -281,6 +290,142 @@ public final class QuotaProtos {
 // @@protoc_insertion_point(enum_scope:hbase.pb.QuotaType)
   }
 
+  /**
+   * Protobuf enum {@code hbase.pb.SpaceViolationPolicy}
+   *
+   * 
+   * Defines what action should be taken when the SpaceQuota is violated
+   * 
+   */
+  public enum SpaceViolationPolicy
+  implements com.google.protobuf.ProtocolMessageEnum {
+/**
+ * DISABLE = 1;
+ *
+ * 
+ * Disable the table(s)
+ * 
+ */
+DISABLE(0, 1),
+/**
+ * NO_WRITES_COMPACTIONS = 2;
+ *
+ * 
+ * No writes, bulk-loads, or compactions
+ * 
+ */
+NO_WRITES_COMPACTIONS(1, 2),
+/**
+ * NO_WRITES = 3;
+ *
+ * 
+ * No writes or bulk-loads
+ * 
+ */
+NO_WRITES(2, 3),
+/**
+ * NO_INSERTS = 4;
+ *
+ * 
+ * No puts or bulk-loads, but deletes are allowed
+ * 
+ */
+NO_INSERTS(3, 4),
+;
+
+/**
+ * DISABLE = 1;
+ *
+ * 
+ * Disable the table(s)
+ * 
+ */
+public static final int DISABLE_VALUE = 1;
+/**
+ * NO_WRITES_COMPACTIONS = 2;
+ *
+ * 
+ * No writes, bulk-loads, or compactions
+ * 
+ */
+public static final int NO_WRITES_COMPACTIONS_VALUE = 2;
+/**
+ * NO_WRITES = 3;
+ *
+ * 
+ * No writes or bulk-loads
+ * 
+ */
+public static final int NO_WRITES_VALUE = 3;
+/**
+ * NO_INSERTS = 4;
+ *
+ * 
+ * No puts or bulk-loads, but deletes are allowed
+ * 
+ */
+public static final int NO_INSERTS_VALUE = 4;
+
+
+public final int getNumber() { return value; }
+
+public static SpaceViolationPolicy valueOf(int value) {
+  switch (value) {
+case 1: return DISABLE;
+case 2: return NO_WRITES_COMPACTIONS;
+case 3: return NO_WRITES;
+case 4: return NO_INSERTS;
+default: return null;
+  }
+}
+
+public static 
com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static 
com.google.protobuf.Internal.EnumLiteMap
+internalValueMap =
+  new com.google.protobuf.Internal.EnumLiteMap() 
{
+public SpaceViolationPolicy findValueByNumber(int number) {
+  return SpaceViolationPolicy.valueOf(number);
+}
+  };
+
+public final com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(index);
+}
+public final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+  return getDescriptor();
+}
+public static final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.getDescriptor().getEnumTypes().get(3);
+}
+
+private static final SpaceViolationPolicy[] VALUES = values();
+
+public static SpaceViolationPolicy valueOf(
+com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+  if (desc.getType() != getDescriptor()) {
+throw new java.lang.IllegalArgumentException(
+  "EnumValueDescriptor is not for this type.");
+  }
+  return VALUES[desc.getIndex()];
+}
+
+private final int index;
+private final int value;
+
+private SpaceViolationPolicy(int index, int value) {
+  this.index = index;
+  this.value = value;
+}
+
+// @@protoc_insertion_point(enum_scope:hbase.pb.SpaceViolationPolicy)
+  }
+
   public interface TimedQuotaOrBuilder
   extends com.google.protobuf.MessageOrBuilder {
 
@@ -3315,6 +3460,20 @@ public final class QuotaProtos {
 

[12/54] [abbrv] hbase git commit: HBASE-17803 PE always re-creates table when we specify the split policy

HBASE-17803 PE always re-creates table when we specify the split policy


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/23abc900
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/23abc900
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/23abc900

Branch: refs/heads/HBASE-16961
Commit: 23abc90068f0ea75f09c3eecf6ef758f1aee9219
Parents: b299c13
Author: CHIA-PING TSAI 
Authored: Sat Mar 18 12:20:07 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Sun Mar 19 18:27:54 2017 +0800

--
 .../test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/23abc900/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index f8345b1..3addb1a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -325,7 +325,7 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
 // recreate the table when user has requested presplit or when existing
 // {RegionSplitPolicy,replica count} does not match requested.
 if ((exists && opts.presplitRegions != DEFAULT_OPTS.presplitRegions)
-  || (!isReadCmd && desc != null && desc.getRegionSplitPolicyClassName() 
!= opts.splitPolicy)
+  || (!isReadCmd && desc != null && 
!desc.getRegionSplitPolicyClassName().equals(opts.splitPolicy))
   || (!isReadCmd && desc != null && desc.getRegionReplication() != 
opts.replicas)) {
   needsDelete = true;
   // wait, why did it delete my table?!?



[54/54] [abbrv] hbase git commit: HBASE-17447 Implement a MasterObserver for automatically deleting space quotas

HBASE-17447 Implement a MasterObserver for automatically deleting space quotas

When a table or namespace is deleted, it would be nice to automatically
delete the quota on said table/NS. It's possible that not all people
would want this functionality so we can leave it up to the user to
configure this Observer.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8271fd53
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8271fd53
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8271fd53

Branch: refs/heads/HBASE-16961
Commit: 8271fd53f81ebba9be9e0f7e8f2cdae6db2a3d71
Parents: 6238590
Author: Josh Elser 
Authored: Thu Mar 16 18:54:01 2017 -0400
Committer: Josh Elser 
Committed: Mon Mar 20 18:48:51 2017 -0400

--
 .../hbase/quotas/MasterSpaceQuotaObserver.java  |  85 ++
 .../quotas/TestMasterSpaceQuotaObserver.java| 169 +++
 src/main/asciidoc/_chapters/ops_mgt.adoc|  17 ++
 3 files changed, 271 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8271fd53/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java
new file mode 100644
index 000..a3abf32
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+
+/**
+ * An observer to automatically delete space quotas when a table/namespace
+ * are deleted.
+ */
+@InterfaceAudience.Private
+public class MasterSpaceQuotaObserver implements MasterObserver {
+  private CoprocessorEnvironment cpEnv;
+  private Configuration conf;
+  private boolean quotasEnabled = false;
+
+  @Override
+  public void start(CoprocessorEnvironment ctx) throws IOException {
+this.cpEnv = ctx;
+this.conf = cpEnv.getConfiguration();
+this.quotasEnabled = QuotaUtil.isQuotaEnabled(conf);
+  }
+
+  @Override
+  public void postDeleteTable(
+  ObserverContext ctx, TableName tableName) 
throws IOException {
+// Do nothing if quotas aren't enabled
+if (!quotasEnabled) {
+  return;
+}
+final MasterServices master = ctx.getEnvironment().getMasterServices();
+final Connection conn = master.getConnection();
+Quotas quotas = QuotaUtil.getTableQuota(master.getConnection(), tableName);
+if (null != quotas && quotas.hasSpace()) {
+  QuotaSettings settings = 
QuotaSettingsFactory.removeTableSpaceLimit(tableName);
+  try (Admin admin = conn.getAdmin()) {
+admin.setQuota(settings);
+  }
+}
+  }
+
+  @Override
+  public void postDeleteNamespace(
+  ObserverContext ctx, String namespace) 
throws IOException {
+// Do nothing if quotas aren't enabled
+if (!quotasEnabled) {
+  return;
+}
+final MasterServices master = ctx.getEnvironment().getMasterServices();
+final Connection conn = master.getConnection();
+Quotas quotas = QuotaUtil.getNamespaceQuota(master.getConnection(), 
namespace);
+if (null != quotas && quotas.hasSpace()) {
+  QuotaSettings settings = 
QuotaSettingsFactory

[27/54] [abbrv] hbase git commit: HBASE-17000 Implement computation of online region sizes and report to the Master

http://git-wip-us.apache.org/repos/asf/hbase/blob/6e042231/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
index 8f368e9..899ae9b 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
@@ -10164,6 +10164,1912 @@ public final class RegionServerStatusProtos {
 
   }
 
+  public interface RegionSpaceUseOrBuilder extends
+  // @@protoc_insertion_point(interface_extends:hbase.pb.RegionSpaceUse)
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+/**
+ * 
+ * A region identifier
+ * 
+ *
+ * optional .hbase.pb.RegionInfo region = 1;
+ */
+boolean hasRegion();
+/**
+ * 
+ * A region identifier
+ * 
+ *
+ * optional .hbase.pb.RegionInfo region = 1;
+ */
+org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo 
getRegion();
+/**
+ * 
+ * A region identifier
+ * 
+ *
+ * optional .hbase.pb.RegionInfo region = 1;
+ */
+
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder
 getRegionOrBuilder();
+
+/**
+ * 
+ * The size in bytes of the region
+ * 
+ *
+ * optional uint64 size = 2;
+ */
+boolean hasSize();
+/**
+ * 
+ * The size in bytes of the region
+ * 
+ *
+ * optional uint64 size = 2;
+ */
+long getSize();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.RegionSpaceUse}
+   */
+  public  static final class RegionSpaceUse extends
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+  // @@protoc_insertion_point(message_implements:hbase.pb.RegionSpaceUse)
+  RegionSpaceUseOrBuilder {
+// Use RegionSpaceUse.newBuilder() to construct.
+private 
RegionSpaceUse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
+  super(builder);
+}
+private RegionSpaceUse() {
+  size_ = 0L;
+}
+
+@java.lang.Override
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private RegionSpaceUse(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+  this();
+  int mutable_bitField0_ = 0;
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder 
unknownFields =
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder
 subBuilder = null;
+  if (((bitField0_ & 0x0001) == 0x0001)) {
+subBuilder = region_.toBuilder();
+  }
+  region_ = 
input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER,
 extensionRegistry);
+  if (subBuilder != null) {
+subBuilder.mergeFrom(region_);
+region_ = subBuilder.buildPartial();
+  }
+  bitField0_ |= 0x0001;
+  break;
+}
+case 16: {
+  bitField0_ |= 0x0002;
+  size_ = input.readUInt64();
+  break;
+}
+  }
+}
+  } catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+e).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+

[31/54] [abbrv] hbase git commit: HBASE-16998 Implement Master-side analysis of region space reports

HBASE-16998 Implement Master-side analysis of region space reports

Adds a new Chore to the Master that analyzes the reports that are
sent by RegionServers. The Master must then, for all tables with
quotas, determine the tables that are violating quotas and move
those tables into violation. Similarly, tables no longer violating
the quota can be moved out of violation.

The Chore is the "stateful" bit, managing which tables are and
are not in violation. Everything else is just performing
computation and informing the Chore on the updated state.

Added InterfaceAudience annotations and clean up the QuotaObserverChore
constructor. Cleaned up some javadoc and QuotaObserverChore. Reuse
the QuotaViolationStore impl objects.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a19a95df
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a19a95df
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a19a95df

Branch: refs/heads/HBASE-16961
Commit: a19a95dff66fc205efb032185944d910c8ea79b2
Parents: d9ebc1e
Author: Josh Elser 
Authored: Tue Nov 8 18:55:12 2016 -0500
Committer: Josh Elser 
Committed: Mon Mar 20 17:46:40 2017 -0400

--
 .../hadoop/hbase/quotas/QuotaRetriever.java |  29 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  20 +
 .../hadoop/hbase/quotas/MasterQuotaManager.java |   1 +
 .../quotas/NamespaceQuotaViolationStore.java| 127 
 .../hadoop/hbase/quotas/QuotaObserverChore.java | 618 +++
 .../hbase/quotas/QuotaViolationStore.java   |  89 +++
 .../quotas/SpaceQuotaViolationNotifier.java |  44 ++
 .../SpaceQuotaViolationNotifierForTest.java |  50 ++
 .../hbase/quotas/TableQuotaViolationStore.java  | 127 
 .../TestNamespaceQuotaViolationStore.java   | 156 +
 .../hbase/quotas/TestQuotaObserverChore.java| 106 
 .../TestQuotaObserverChoreWithMiniCluster.java  | 596 ++
 .../hadoop/hbase/quotas/TestQuotaTableUtil.java |   4 -
 .../quotas/TestTableQuotaViolationStore.java| 151 +
 .../hbase/quotas/TestTablesWithQuotas.java  | 198 ++
 15 files changed, 2306 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a19a95df/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
index fecd2d1..8cd5cf0 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
@@ -22,6 +22,7 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.Objects;
 import java.util.Queue;
 
 import org.apache.commons.logging.Log;
@@ -56,11 +57,23 @@ public class QuotaRetriever implements Closeable, 
Iterable {
   private Connection connection;
   private Table table;
 
-  private QuotaRetriever() {
+  /**
+   * Should QutoaRetriever manage the state of the connection, or leave it be.
+   */
+  private boolean isManagedConnection = false;
+
+  QuotaRetriever() {
   }
 
   void init(final Configuration conf, final Scan scan) throws IOException {
-this.connection = ConnectionFactory.createConnection(conf);
+// Set this before creating the connection and passing it down to make sure
+// it's cleaned up if we fail to construct the Scanner.
+this.isManagedConnection = true;
+init(ConnectionFactory.createConnection(conf), scan);
+  }
+
+  void init(final Connection conn, final Scan scan) throws IOException {
+this.connection = Objects.requireNonNull(conn);
 this.table = this.connection.getTable(QuotaTableUtil.QUOTA_TABLE_NAME);
 try {
   scanner = table.getScanner(scan);
@@ -79,10 +92,14 @@ public class QuotaRetriever implements Closeable, 
Iterable {
   this.table.close();
   this.table = null;
 }
-if (this.connection != null) {
-  this.connection.close();
-  this.connection = null;
+// Null out the connection on close() even if we didn't explicitly close it
+// to maintain typical semantics.
+if (isManagedConnection) {
+  if (this.connection != null) {
+this.connection.close();
+  }
 }
+this.connection = null;
   }
 
   public QuotaSettings next() throws IOException {
@@ -182,4 +199,4 @@ public class QuotaRetriever implements Closeable, 
Iterable {
 scanner.init(conf, scan);
 return scanner;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/a19a95df/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
---

[04/54] [abbrv] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
new file mode 100644
index 000..47e428c
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.master;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ThreadPoolExecutor;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.CoordinatedStateManagerFactory;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
+import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.MetricsMaster;
+import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
+import org.apache.hadoop.hbase.procedure.Procedure;
+import org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
+import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
+import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * Master procedure manager for coordinated cluster-wide WAL roll operation, 
which is run during
+ * backup operation, see {@link MasterProcedureManager} and and {@link 
RegionServerProcedureManager}
+ */
+@InterfaceAudience.Private
+public class LogRollMasterProcedureManager extends MasterProcedureManager {
+
+  public static final String ROLLLOG_PROCEDURE_SIGNATURE = "rolllog-proc";
+  public static final String ROLLLOG_PROCEDURE_NAME = "rolllog";
+  private static final Log LOG = 
LogFactory.getLog(LogRollMasterProcedureManager.class);
+
+  private MasterServices master;
+  private ProcedureCoordinator coordinator;
+  private boolean done;
+
+  @Override
+  public void stop(String why) {
+LOG.info("stop: " + why);
+  }
+
+  @Override
+  public boolean isStopped() {
+return false;
+  }
+
+  @Override
+  public void initialize(MasterServices master, MetricsMaster metricsMaster)
+  throws KeeperException, IOException, UnsupportedOperationException {
+this.master = master;
+this.done = false;
+
+// setup the default procedure coordinator
+String name = master.getServerName().toString();
+ThreadPoolExecutor tpool = ProcedureCoordinator.defaultPool(name, 1);
+BaseCoordinatedStateManager coordManager =
+(BaseCoordinatedStateManager) CoordinatedStateManagerFactory
+.getCoordinatedStateManager(master.getConfiguration());
+coordManager.initialize(master);
+
+ProcedureCoordinatorRpcs comms =
+coordManager.getProcedureCoordinatorRpcs(getProcedureSignature(), 
name);
+
+this.coordinator = new ProcedureCoordinator(comms, tpool);
+  }
+
+  @Override
+  public String getProcedureSignature() {
+return ROLLLOG_PROCEDURE_SIGNATURE;
+  }
+
+  @Override
+  public void execProcedure(ProcedureDescription desc) throws IOException {
+if (!isBackupEnabled()) {
+  LOG.warn("Backup is not enabled. Check your " + 
BackupRestoreConstants.BACKUP_ENABLE_KEY
+  + " setting");
+  return;
+}
+this.done = false;
+// start the process on the RS
+ForeignExceptionDispatcher monitor = new 

[47/54] [abbrv] hbase git commit: HBASE-17602 Reduce some quota chore periods/delays

HBASE-17602 Reduce some quota chore periods/delays


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/517eb828
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/517eb828
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/517eb828

Branch: refs/heads/HBASE-16961
Commit: 517eb828507c1ae07f7521fdf95b3e7fc454027a
Parents: a44e50f
Author: Josh Elser 
Authored: Tue Feb 7 11:21:08 2017 -0500
Committer: Josh Elser 
Committed: Mon Mar 20 18:19:31 2017 -0400

--
 .../java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java  | 4 ++--
 .../org/apache/hadoop/hbase/quotas/SpaceQuotaRefresherChore.java | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/517eb828/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
index b9f4592..7f894e4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
@@ -55,11 +55,11 @@ public class QuotaObserverChore extends ScheduledChore {
   private static final Log LOG = LogFactory.getLog(QuotaObserverChore.class);
   static final String QUOTA_OBSERVER_CHORE_PERIOD_KEY =
   "hbase.master.quotas.observer.chore.period";
-  static final int QUOTA_OBSERVER_CHORE_PERIOD_DEFAULT = 1000 * 60 * 5; // 5 
minutes in millis
+  static final int QUOTA_OBSERVER_CHORE_PERIOD_DEFAULT = 1000 * 60 * 1; // 1 
minutes in millis
 
   static final String QUOTA_OBSERVER_CHORE_DELAY_KEY =
   "hbase.master.quotas.observer.chore.delay";
-  static final long QUOTA_OBSERVER_CHORE_DELAY_DEFAULT = 1000L * 60L; // 1 
minute
+  static final long QUOTA_OBSERVER_CHORE_DELAY_DEFAULT = 1000L * 15L; // 15 
seconds in millis
 
   static final String QUOTA_OBSERVER_CHORE_TIMEUNIT_KEY =
   "hbase.master.quotas.observer.chore.timeunit";

http://git-wip-us.apache.org/repos/asf/hbase/blob/517eb828/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaRefresherChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaRefresherChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaRefresherChore.java
index e1a2693..8587e79 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaRefresherChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaRefresherChore.java
@@ -44,11 +44,11 @@ public class SpaceQuotaRefresherChore extends 
ScheduledChore {
 
   static final String POLICY_REFRESHER_CHORE_PERIOD_KEY =
   "hbase.regionserver.quotas.policy.refresher.chore.period";
-  static final int POLICY_REFRESHER_CHORE_PERIOD_DEFAULT = 1000 * 60 * 5; // 5 
minutes in millis
+  static final int POLICY_REFRESHER_CHORE_PERIOD_DEFAULT = 1000 * 60 * 1; // 1 
minute in millis
 
   static final String POLICY_REFRESHER_CHORE_DELAY_KEY =
   "hbase.regionserver.quotas.policy.refresher.chore.delay";
-  static final long POLICY_REFRESHER_CHORE_DELAY_DEFAULT = 1000L * 60L; // 1 
minute
+  static final long POLICY_REFRESHER_CHORE_DELAY_DEFAULT = 1000L * 15L; // 15 
seconds in millis
 
   static final String POLICY_REFRESHER_CHORE_TIMEUNIT_KEY =
   "hbase.regionserver.quotas.policy.refresher.chore.timeunit";



[07/54] [abbrv] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

http://git-wip-us.apache.org/repos/asf/hbase/blob/75d0f49d/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
new file mode 100644
index 000..c1d5258
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
@@ -0,0 +1,524 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupAdmin;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.BackupRequest;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.backup.RestoreRequest;
+import org.apache.hadoop.hbase.backup.util.BackupSet;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+import com.google.common.collect.Lists;
+
+@InterfaceAudience.Private
+public class BackupAdminImpl implements BackupAdmin {
+  public final static String CHECK_OK = "Checking backup images: OK";
+  public final static String CHECK_FAILED =
+  "Checking backup images: Failed. Some dependencies are missing for 
restore";
+  private static final Log LOG = LogFactory.getLog(BackupAdminImpl.class);
+
+  private final Connection conn;
+
+  public BackupAdminImpl(Connection conn) {
+this.conn = conn;
+  }
+
+  @Override
+  public void close() throws IOException {
+if (conn != null) {
+  conn.close();
+}
+  }
+
+
+  @Override
+  public BackupInfo getBackupInfo(String backupId) throws IOException {
+BackupInfo backupInfo = null;
+try (final BackupSystemTable table = new BackupSystemTable(conn)) {
+  if (backupId == null) {
+ArrayList recentSessions = 
table.getBackupInfos(BackupState.RUNNING);
+if (recentSessions.isEmpty()) {
+  LOG.warn("No ongoing sessions found.");
+  return null;
+}
+// else show status for ongoing session
+// must be one maximum
+return recentSessions.get(0);
+  } else {
+backupInfo = table.readBackupInfo(backupId);
+return backupInfo;
+  }
+}
+  }
+
+  @Override
+  public int deleteBackups(String[] backupIds) throws IOException {
+// TODO: requires Fault tolerance support, failure will leave system
+// in a non-consistent state
+// see HBASE-15227
+int totalDeleted = 0;
+Map> allTablesMap = new HashMap>();
+
+try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
+  for (int i = 0; i < backupIds.length; i++) {
+BackupInfo info = sysTable.readBackupInfo(backupIds[i]);
+if (info != null) {
+  String rootDir = info.getBackupRootDir();
+  HashSet allTables = allTablesMap.get(rootDir);
+  if (allTables == null) {
+allTables = new HashSet();
+allTablesMap.put(rootDir, allTables);
+  }
+  allTables.addAll(info.getTableNames());
+  totalDeleted += deleteBackup(backupIds[i], sysTable);
+}
+  }
+  finalizeDelete(allTablesMap, sysTable);
+}
+return totalDeleted;
+  }
+
+  /*

hbase git commit: HBASE-16014 Get and Put constructor argument lists are divergent

Repository: hbase
Updated Branches:
  refs/heads/master a41b1852d -> 9c8f02e4e


HBASE-16014 Get and Put constructor argument lists are divergent

Signed-off-by: CHIA-PING TSAI 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9c8f02e4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9c8f02e4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9c8f02e4

Branch: refs/heads/master
Commit: 9c8f02e4ef3037e8eaf649360ce83a898c3b20e1
Parents: a41b185
Author: brandboat 
Authored: Sat Mar 18 18:02:42 2017 +0800
Committer: CHIA-PING TSAI 
Committed: Tue Mar 21 09:33:24 2017 +0800

--
 .../org/apache/hadoop/hbase/client/Get.java | 22 
 .../org/apache/hadoop/hbase/client/TestGet.java | 12 +++
 2 files changed, 34 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9c8f02e4/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index a581ed5..3771aff 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client;
 
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -130,6 +131,27 @@ public class Get extends Query
 }
   }
 
+  /**
+   * Create a Get operation for the specified row, taken from a slice of the
+   * given array. The slice is validated via {@code Mutation.checkRow} and then
+   * defensively copied with {@code Bytes.copy}, so callers may reuse the array.
+   * @param row array containing the row key
+   * @param rowOffset offset of the first byte of the row key within {@code row}
+   * @param rowLength number of bytes of the row key
+   */
+  public Get(byte[] row, int rowOffset, int rowLength) {
+Mutation.checkRow(row, rowOffset, rowLength);
+this.row = Bytes.copy(row, rowOffset, rowLength);
+  }
+
+  /**
+   * Create a Get operation for the specified row. The buffer's remaining bytes
+   * (position to limit) become the row key; they are copied out, and as a side
+   * effect the relative {@code ByteBuffer.get} advances the buffer's position
+   * to its limit.
+   * @param row buffer whose remaining bytes hold the row key
+   */
+  public Get(ByteBuffer row) {
+Mutation.checkRow(row);
+this.row = new byte[row.remaining()];
+row.get(this.row);
+  }
+
   public boolean isCheckExistenceOnly() {
 return checkExistenceOnly;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9c8f02e4/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
--
diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
index 810f6bc..6a2bb39 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
@@ -27,6 +27,7 @@ import static org.junit.Assert.fail;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.lang.reflect.InvocationTargetException;
 import java.util.Arrays;
 import java.util.List;
@@ -241,4 +242,15 @@ public class TestGet {
 assertEquals("my.MockFilter", filters.get(1).getClass().getName());
 assertTrue(filters.get(2) instanceof KeyOnlyFilter);
   }
+
+  @Test
+  public void testGetRowConstructor() {
+byte[] row1 = Bytes.toBytes("testRow");
+byte[] row2 = Bytes.toBytes("testtestRow");
+// Both constructors should yield the same row key: row1 wrapped in a
+// ByteBuffer, and the 7-byte slice of row2 at offset 4 (also "testRow").
+// The original ByteBuffer.allocate(16) was a dead store - the reference was
+// immediately overwritten by wrap(row1) - so it has been removed.
+ByteBuffer rowBuffer = ByteBuffer.wrap(row1);
+Get get1 = new Get(rowBuffer);
+Get get2 = new Get(row2, 4, 7);
+Assert.assertArrayEquals(get1.getRow(), get2.getRow());
+  }
 }



hbase git commit: HBASE-17805 We should remove BoundedByteBufferPool because it is replaced by ByteBufferPool

Repository: hbase
Updated Branches:
  refs/heads/master 9c8f02e4e -> 7bb0624ba


HBASE-17805 We should remove BoundedByteBufferPool because it is replaced by 
ByteBufferPool


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7bb0624b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7bb0624b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7bb0624b

Branch: refs/heads/master
Commit: 7bb0624bab68d7dd136d0cd54a8f0c74790aca31
Parents: 9c8f02e
Author: CHIA-PING TSAI 
Authored: Mon Mar 20 09:11:53 2017 +0800
Committer: CHIA-PING TSAI 
Committed: Tue Mar 21 09:38:02 2017 +0800

--
 .../hadoop/hbase/io/BoundedByteBufferPool.java  | 194 ---
 .../hbase/io/TestBoundedByteBufferPool.java | 167 
 2 files changed, 361 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7bb0624b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java
 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java
deleted file mode 100644
index 7bce0e5..000
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io;
-
-import java.nio.ByteBuffer;
-import java.util.Queue;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * Like Hadoops' ByteBufferPool only you do not specify desired size when 
getting a ByteBuffer.
- * This pool keeps an upper bound on the count of ByteBuffers in the pool and 
on the maximum size
- * of ByteBuffer that it will retain (Hence the pool is 'bounded' as opposed 
to, say,
- * Hadoop's ElasticByteBuffferPool).
- * If a ByteBuffer is bigger than the configured threshold, we will just let 
the ByteBuffer go
- * rather than add it to the pool. If more ByteBuffers than the configured 
maximum instances,
- * we will not add the passed ByteBuffer to the pool; we will just drop it
- * (we will log a WARN in this case that we are at capacity).
- *
- * The intended use case is a reservoir of bytebuffers that an RPC can 
reuse; buffers tend to
- * achieve a particular 'run' size over time give or take a few extremes. Set 
TRACE level on this
- * class for a couple of seconds to get reporting on how it is running when 
deployed.
- *
- * This pool returns off heap ByteBuffers.
- *
- * This class is thread safe.
- */
-@InterfaceAudience.Private
-public class BoundedByteBufferPool {
-  private static final Log LOG = 
LogFactory.getLog(BoundedByteBufferPool.class);
-
-  private final Queue buffers = new ConcurrentLinkedQueue<>();
-
-  @VisibleForTesting
-  int getQueueSize() {
-return buffers.size();
-  }
-
-  private final int maxToCache;
-
-  // Maximum size of a ByteBuffer to retain in pool
-  private final int maxByteBufferSizeToCache;
-
-  // A running average only it only rises, it never recedes
-  private final AtomicInteger runningAverageRef;
-
-  @VisibleForTesting
-  int getRunningAverage() {
-return runningAverageRef.get();
-  }
-
-  // Count (lower 32bit) and total capacity (upper 32bit) of pooled 
bytebuffers.
-  // Both are non-negative. They are equal to or larger than those of the 
actual
-  // queued buffers in any transition.
-  private final AtomicLong stateRef = new AtomicLong();
-
-  @VisibleForTesting
-  static int toCountOfBuffers(long state) {
-return (int)state;
-  }
-
-  @VisibleForTesting
-  static int toTotalCapacity(long state) {
-return (int)(state >>> 32);
-  }
-
-  @VisibleForTesting

hbase git commit: HBASE-17812 Remove RpcConnection from pool in AbstractRpcClient.cancelConnections

Repository: hbase
Updated Branches:
  refs/heads/master 7bb0624ba -> cc59fe4e9


HBASE-17812 Remove RpcConnection from pool in 
AbstractRpcClient.cancelConnections


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cc59fe4e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cc59fe4e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cc59fe4e

Branch: refs/heads/master
Commit: cc59fe4e91ab0099f65566bc90e77e37f8147119
Parents: 7bb0624
Author: zhangduo 
Authored: Mon Mar 20 21:06:06 2017 +0800
Committer: zhangduo 
Committed: Tue Mar 21 10:07:16 2017 +0800

--
 .../main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java| 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cc59fe4e/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
index 4df6786..930f37a 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
@@ -444,6 +444,7 @@ public abstract class AbstractRpcClient implements RpcC
 && remoteId.address.getHostName().equals(sn.getHostname())) {
   LOG.info("The server on " + sn.toString() + " is dead - stopping the 
connection "
   + connection.remoteId);
+  connections.removeValue(remoteId, connection);
   connection.shutdown();
 }
   }



hbase git commit: HBASE-17059 backport HBASE-17039 (SimpleLoadBalancer schedules large amount of invalid region moves) to 1.3.1

Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 98b5d2cd4 -> 446a21fed


HBASE-17059 backport HBASE-17039 (SimpleLoadBalancer schedules large amount of 
invalid region moves) to 1.3.1


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/446a21fe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/446a21fe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/446a21fe

Branch: refs/heads/branch-1.3
Commit: 446a21fedd1282c15939eb4c46d13c859beedd7a
Parents: 98b5d2c
Author: Yu Li 
Authored: Tue Mar 21 14:25:58 2017 +0800
Committer: Yu Li 
Committed: Tue Mar 21 14:27:05 2017 +0800

--
 .../hadoop/hbase/master/balancer/SimpleLoadBalancer.java   | 6 +-
 1 file changed, 1 insertion(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/446a21fe/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
index 4325585..a354e40 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
@@ -273,14 +273,10 @@ public class SimpleLoadBalancer extends BaseLoadBalancer {
 serversByLoad.entrySet()) {
   if (maxToTake == 0) break; // no more to take
   int load = server.getKey().getLoad();
-  if (load >= min && load > 0) {
+  if (load >= min) {
 continue; // look for other servers which haven't reached min
   }
   int regionsToPut = min - load;
-  if (regionsToPut == 0)
-  {
-regionsToPut = 1;
-  }
   maxToTake -= regionsToPut;
   underloadedServers.put(server.getKey().getServerName(), regionsToPut);
 }