hbase git commit: HBASE-14269 FuzzyRowFilter omits certain rows when multiple fuzzy keys exist (hongbin ma)

2015-08-26 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 506726ed2 -> 6661f2d02


HBASE-14269 FuzzyRowFilter omits certain rows when multiple fuzzy keys exist 
(hongbin ma)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6661f2d0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6661f2d0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6661f2d0

Branch: refs/heads/master
Commit: 6661f2d0254f1da9d8cbbd717274421a2ddcb95f
Parents: 506726e
Author: tedyu 
Authored: Wed Aug 26 07:23:43 2015 -0700
Committer: tedyu 
Committed: Wed Aug 26 07:23:43 2015 -0700

--
 .../hadoop/hbase/filter/FuzzyRowFilter.java | 116 +--
 .../filter/TestFuzzyRowFilterEndToEnd.java  |  94 ++-
 2 files changed, 125 insertions(+), 85 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6661f2d0/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
index 661400b..a9dd596 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
@@ -19,9 +19,9 @@ package org.apache.hadoop.hbase.filter;
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
+import java.util.PriorityQueue;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
@@ -160,82 +160,82 @@ public class FuzzyRowFilter extends FilterBase {
 
   @Override
   public Cell getNextCellHint(Cell currentCell) {
-boolean result = true;
-if (tracker.needsUpdate()) {
-  result = tracker.updateTracker(currentCell);
-}
+boolean result = tracker.updateTracker(currentCell);
 if (result == false) {
   done = true;
   return null;
 }
 byte[] nextRowKey = tracker.nextRow();
-// We need to compare nextRowKey with currentCell
-int compareResult = CellComparator.COMPARATOR.compareRows(currentCell, 
nextRowKey, 0,
-nextRowKey.length);
-if ((reversed && compareResult < 0) || (!reversed && compareResult > 0)) {
-  // This can happen when we have multiple filters and some other filter
-  // returns next row with hint which is larger (smaller for reverse)
-  // than the current (really?)
-  result = tracker.updateTracker(currentCell);
-  if (result == false) {
-done = true;
-return null;
-  } else {
-nextRowKey = tracker.nextRow();
-  }
-}
 return KeyValueUtil.createFirstOnRow(nextRowKey);
   }
 
   /**
-   * If we have multiple fuzzy keys, row tracker should improve overall 
performance It calculates
-   * all next rows (one per every fuzzy key), sort them accordingly (ascending 
for regular and
-   * descending for reverse). Next time getNextCellHint is called we check row 
tracker first and
-   * return next row from the tracker if it exists, if there are no rows in 
the tracker we update
-   * tracker with a current cell and return first row.
+   * If we have multiple fuzzy keys, row tracker should improve overall 
performance. It calculates
+   * all next rows (one per every fuzzy key) and puts them (the fuzzy key is 
bundled) into a priority
+   * queue so that the smallest row key always appears at the queue head, which 
helps to decide the
+   * "Next Cell Hint". As scanning goes on, the number of candidate rows in 
the RowTracker will
+   * remain the size of fuzzy keys until some of the fuzzy keys cannot possibly 
have matches any
+   * more.
*/
   private class RowTracker {
-private final List nextRows;
-private int next = -1;
+private final PriorityQueue>> nextRows;
+private boolean initialized = false;
 
 RowTracker() {
-  nextRows = new ArrayList();
-}
-
-boolean needsUpdate() {
-  return next == -1 || next == nextRows.size();
+  nextRows =
+  new PriorityQueue>>(fuzzyKeysData.size(),
+  new Comparator>>() {
+@Override
+public int compare(Pair> o1,
+Pair> o2) {
+  int compare = Bytes.compareTo(o1.getFirst(), o2.getFirst());
+  if (!isReversed()) {
+return compare;
+  } else {
+return -compare;
+  }
+}
+  });
 }
 
 byte[] nextRow() {
-  if (next < 0 || next == nextRows.size()) return null;
-  return nextRows.get(next++);
+  if (nextRows.isEmpty()) {
+   

[2/4] hbase git commit: HBASE-13212: Procedure V2 - master Create/Modify/Delete namespace (Stephen Yuan Jiang)

2015-08-26 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/dc79b3c5/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index eb98b42..412d792 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -16424,6 +16424,26 @@ public final class MasterProtos {
  * required .hbase.pb.NamespaceDescriptor namespaceDescriptor = 
1;
  */
 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder
 getNamespaceDescriptorOrBuilder();
+
+// optional uint64 nonce_group = 2 [default = 0];
+/**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+boolean hasNonceGroup();
+/**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+long getNonceGroup();
+
+// optional uint64 nonce = 3 [default = 0];
+/**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+boolean hasNonce();
+/**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+long getNonce();
   }
   /**
* Protobuf type {@code hbase.pb.CreateNamespaceRequest}
@@ -16489,6 +16509,16 @@ public final class MasterProtos {
   bitField0_ |= 0x0001;
   break;
 }
+case 16: {
+  bitField0_ |= 0x0002;
+  nonceGroup_ = input.readUInt64();
+  break;
+}
+case 24: {
+  bitField0_ |= 0x0004;
+  nonce_ = input.readUInt64();
+  break;
+}
   }
 }
   } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -16551,8 +16581,42 @@ public final class MasterProtos {
   return namespaceDescriptor_;
 }
 
+// optional uint64 nonce_group = 2 [default = 0];
+public static final int NONCE_GROUP_FIELD_NUMBER = 2;
+private long nonceGroup_;
+/**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+public boolean hasNonceGroup() {
+  return ((bitField0_ & 0x0002) == 0x0002);
+}
+/**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+public long getNonceGroup() {
+  return nonceGroup_;
+}
+
+// optional uint64 nonce = 3 [default = 0];
+public static final int NONCE_FIELD_NUMBER = 3;
+private long nonce_;
+/**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+public boolean hasNonce() {
+  return ((bitField0_ & 0x0004) == 0x0004);
+}
+/**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+public long getNonce() {
+  return nonce_;
+}
+
 private void initFields() {
   namespaceDescriptor_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+  nonceGroup_ = 0L;
+  nonce_ = 0L;
 }
 private byte memoizedIsInitialized = -1;
 public final boolean isInitialized() {
@@ -16577,6 +16641,12 @@ public final class MasterProtos {
   if (((bitField0_ & 0x0001) == 0x0001)) {
 output.writeMessage(1, namespaceDescriptor_);
   }
+  if (((bitField0_ & 0x0002) == 0x0002)) {
+output.writeUInt64(2, nonceGroup_);
+  }
+  if (((bitField0_ & 0x0004) == 0x0004)) {
+output.writeUInt64(3, nonce_);
+  }
   getUnknownFields().writeTo(output);
 }
 
@@ -16590,6 +16660,14 @@ public final class MasterProtos {
 size += com.google.protobuf.CodedOutputStream
   .computeMessageSize(1, namespaceDescriptor_);
   }
+  if (((bitField0_ & 0x0002) == 0x0002)) {
+size += com.google.protobuf.CodedOutputStream
+  .computeUInt64Size(2, nonceGroup_);
+  }
+  if (((bitField0_ & 0x0004) == 0x0004)) {
+size += com.google.protobuf.CodedOutputStream
+  .computeUInt64Size(3, nonce_);
+  }
   size += getUnknownFields().getSerializedSize();
   memoizedSerializedSize = size;
   return size;
@@ -16618,6 +16696,16 @@ public final class MasterProtos {
 result = result && getNamespaceDescriptor()
 .equals(other.getNamespaceDescriptor());
   }
+  result = result && (hasNonceGroup() == other.hasNonceGroup());
+  if (hasNonceGroup()) {
+result = result && (getNonceGroup()
+== other.getNonceGroup());
+  }
+  result = result && (hasNonce() == other.hasNonce());
+  if (hasNonce()) {
+result = result && (getNonce()
+== other.getNonce());
+  }
   result = result &&
   getUnknownFields().equals(other.getUnknownFields());
   re

[1/4] hbase git commit: HBASE-13212: Procedure V2 - master Create/Modify/Delete namespace (Stephen Yuan Jiang)

2015-08-26 Thread syuanjiang
Repository: hbase
Updated Branches:
  refs/heads/master 6661f2d02 -> dc79b3c5c


http://git-wip-us.apache.org/repos/asf/hbase/blob/dc79b3c5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
new file mode 100644
index 000..c91092a
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
@@ -0,0 +1,364 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceExistException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.TableNamespaceManager;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceState;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+/**
+ * The procedure to create a new namespace.
+ */
+@InterfaceAudience.Private
+public class CreateNamespaceProcedure
+extends StateMachineProcedure
+implements TableProcedureInterface {
+  private static final Log LOG = 
LogFactory.getLog(CreateNamespaceProcedure.class);
+
+  private final AtomicBoolean aborted = new AtomicBoolean(false);
+
+  private NamespaceDescriptor nsDescriptor;
+  private Boolean traceEnabled;
+
+  public CreateNamespaceProcedure() {
+this.traceEnabled = null;
+  }
+
+  public CreateNamespaceProcedure(
+  final MasterProcedureEnv env,
+  final NamespaceDescriptor nsDescriptor) throws IOException {
+this.nsDescriptor = nsDescriptor;
+this.traceEnabled = null;
+  }
+
+  @Override
+  protected Flow executeFromState(final MasterProcedureEnv env, final 
CreateNamespaceState state)
+  throws InterruptedException {
+if (isTraceEnabled()) {
+  LOG.trace(this + " execute state=" + state);
+}
+
+try {
+  switch (state) {
+  case CREATE_NAMESPACE_PREPARE:
+prepareCreate(env);
+setNextState(CreateNamespaceState.CREATE_NAMESPACE_CREATE_DIRECTORY);
+break;
+  case CREATE_NAMESPACE_CREATE_DIRECTORY:
+createDirectory(env, nsDescriptor);
+
setNextState(CreateNamespaceState.CREATE_NAMESPACE_INSERT_INTO_NS_TABLE);
+break;
+  case CREATE_NAMESPACE_INSERT_INTO_NS_TABLE:
+insertIntoNSTable(env, nsDescriptor);
+setNextState(CreateNamespaceState.CREATE_NAMESPACE_UPDATE_ZK);
+break;
+  case CREATE_NAMESPACE_UPDATE_ZK:
+updateZKNamespaceManager(env, nsDescriptor);
+
setNextState(CreateNamespaceState.CREATE_NAMESPACE_SET_NAMESPACE_QUOTA);
+break;
+  case CREATE_NAMESPACE_SET_NAMESPACE_QUOTA:
+setNamespaceQuota(env, nsDescriptor);
+return Flow.NO_MORE_STATE;
+  default:
+throw new UnsupportedOperationException(this + " unhandled state=" + 
state);
+  }
+} catch (IOException e) {
+  LOG.warn("Error trying to create the namespace " + nsDescriptor.getName()
++ " (in state=" + state + ")", e);
+
+  setFailure("master-create-namespace", e);
+}
+return Flow.HAS_MORE_STATE;
+  }
+
+  @Override
+  protected void rollbackState(final MasterProcedureEnv env, final 
CreateNamespaceState state)
+  throws IOException {
+if (isTraceEnabled()) {
+  LOG.trace(this + " rollback state=" + state);
+}
+try {
+  switch (state) {
+  case 

[4/4] hbase git commit: HBASE-13212: Procedure V2 - master Create/Modify/Delete namespace (Stephen Yuan Jiang)

2015-08-26 Thread syuanjiang
HBASE-13212: Procedure V2 - master Create/Modify/Delete namespace (Stephen Yuan 
Jiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dc79b3c5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dc79b3c5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dc79b3c5

Branch: refs/heads/master
Commit: dc79b3c5c91b7bc0c230199fe60eb51324770084
Parents: 6661f2d
Author: Stephen Yuan Jiang 
Authored: Wed Aug 26 08:18:03 2015 -0700
Committer: Stephen Yuan Jiang 
Committed: Wed Aug 26 08:18:03 2015 -0700

--
 .../generated/MasterProcedureProtos.java| 2910 --
 .../hbase/protobuf/generated/MasterProtos.java  |  999 --
 hbase-protocol/src/main/protobuf/Master.proto   |6 +
 .../src/main/protobuf/MasterProcedure.proto |   36 +
 .../apache/hadoop/hbase/ZKNamespaceManager.java |9 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   56 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   14 +-
 .../hadoop/hbase/master/MasterServices.java |   38 +-
 .../hbase/master/TableNamespaceManager.java |  216 +-
 .../procedure/CreateNamespaceProcedure.java |  364 +++
 .../procedure/DeleteNamespaceProcedure.java |  398 +++
 .../procedure/ModifyNamespaceProcedure.java |  281 ++
 .../hadoop/hbase/master/TestCatalogJanitor.java |   28 +-
 .../MasterProcedureTestingUtility.java  |2 -
 .../procedure/TestCreateNamespaceProcedure.java |  292 ++
 .../procedure/TestDeleteNamespaceProcedure.java |  282 ++
 .../procedure/TestModifyNamespaceProcedure.java |  295 ++
 17 files changed, 5657 insertions(+), 569 deletions(-)
--




[3/4] hbase git commit: HBASE-13212: Procedure V2 - master Create/Modify/Delete namespace (Stephen Yuan Jiang)

2015-08-26 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/dc79b3c5/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
index 9bf4c98..d40c1f7 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
@@ -499,6 +499,315 @@ public final class MasterProcedureProtos {
   }
 
   /**
+   * Protobuf enum {@code hbase.pb.CreateNamespaceState}
+   */
+  public enum CreateNamespaceState
+  implements com.google.protobuf.ProtocolMessageEnum {
+/**
+ * CREATE_NAMESPACE_PREPARE = 1;
+ */
+CREATE_NAMESPACE_PREPARE(0, 1),
+/**
+ * CREATE_NAMESPACE_CREATE_DIRECTORY = 2;
+ */
+CREATE_NAMESPACE_CREATE_DIRECTORY(1, 2),
+/**
+ * CREATE_NAMESPACE_INSERT_INTO_NS_TABLE = 3;
+ */
+CREATE_NAMESPACE_INSERT_INTO_NS_TABLE(2, 3),
+/**
+ * CREATE_NAMESPACE_UPDATE_ZK = 4;
+ */
+CREATE_NAMESPACE_UPDATE_ZK(3, 4),
+/**
+ * CREATE_NAMESPACE_SET_NAMESPACE_QUOTA = 5;
+ */
+CREATE_NAMESPACE_SET_NAMESPACE_QUOTA(4, 5),
+;
+
+/**
+ * CREATE_NAMESPACE_PREPARE = 1;
+ */
+public static final int CREATE_NAMESPACE_PREPARE_VALUE = 1;
+/**
+ * CREATE_NAMESPACE_CREATE_DIRECTORY = 2;
+ */
+public static final int CREATE_NAMESPACE_CREATE_DIRECTORY_VALUE = 2;
+/**
+ * CREATE_NAMESPACE_INSERT_INTO_NS_TABLE = 3;
+ */
+public static final int CREATE_NAMESPACE_INSERT_INTO_NS_TABLE_VALUE = 3;
+/**
+ * CREATE_NAMESPACE_UPDATE_ZK = 4;
+ */
+public static final int CREATE_NAMESPACE_UPDATE_ZK_VALUE = 4;
+/**
+ * CREATE_NAMESPACE_SET_NAMESPACE_QUOTA = 5;
+ */
+public static final int CREATE_NAMESPACE_SET_NAMESPACE_QUOTA_VALUE = 5;
+
+
+public final int getNumber() { return value; }
+
+public static CreateNamespaceState valueOf(int value) {
+  switch (value) {
+case 1: return CREATE_NAMESPACE_PREPARE;
+case 2: return CREATE_NAMESPACE_CREATE_DIRECTORY;
+case 3: return CREATE_NAMESPACE_INSERT_INTO_NS_TABLE;
+case 4: return CREATE_NAMESPACE_UPDATE_ZK;
+case 5: return CREATE_NAMESPACE_SET_NAMESPACE_QUOTA;
+default: return null;
+  }
+}
+
+public static 
com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static 
com.google.protobuf.Internal.EnumLiteMap
+internalValueMap =
+  new com.google.protobuf.Internal.EnumLiteMap() 
{
+public CreateNamespaceState findValueByNumber(int number) {
+  return CreateNamespaceState.valueOf(number);
+}
+  };
+
+public final com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(index);
+}
+public final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+  return getDescriptor();
+}
+public static final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(4);
+}
+
+private static final CreateNamespaceState[] VALUES = values();
+
+public static CreateNamespaceState valueOf(
+com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+  if (desc.getType() != getDescriptor()) {
+throw new java.lang.IllegalArgumentException(
+  "EnumValueDescriptor is not for this type.");
+  }
+  return VALUES[desc.getIndex()];
+}
+
+private final int index;
+private final int value;
+
+private CreateNamespaceState(int index, int value) {
+  this.index = index;
+  this.value = value;
+}
+
+// @@protoc_insertion_point(enum_scope:hbase.pb.CreateNamespaceState)
+  }
+
+  /**
+   * Protobuf enum {@code hbase.pb.ModifyNamespaceState}
+   */
+  public enum ModifyNamespaceState
+  implements com.google.protobuf.ProtocolMessageEnum {
+/**
+ * MODIFY_NAMESPACE_PREPARE = 1;
+ */
+MODIFY_NAMESPACE_PREPARE(0, 1),
+/**
+ * MODIFY_NAMESPACE_UPDATE_NS_TABLE = 2;
+ */
+MODIFY_NAMESPACE_UPDATE_NS_TABLE(1, 2),
+/**
+ * MODIFY_NAMESPACE_UPDATE_ZK = 3;
+ */
+MODIFY_NAMESPACE_UPDATE_ZK(2, 3),
+;
+
+/**
+ * MODIFY_NAMESPACE_PREPARE = 1;
+ */
+public static final int MODIFY_NAMESPACE_PREPARE_VALUE = 1;
+/**
+ * MODIFY_NAMESPACE_UPDATE_NS_TABLE = 2;
+ */
+public static final int MODIFY_NAMESPACE_UPDATE_NS_TABLE_VAL

hbase git commit: HBASE-14078 improve error message when HMaster can't bind to port

2015-08-26 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master dc79b3c5c -> ff86749ca


HBASE-14078 improve error message when HMaster can't bind to port


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ff86749c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ff86749c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ff86749c

Branch: refs/heads/master
Commit: ff86749caeb63eafcf10cbfba45334757a791384
Parents: dc79b3c
Author: stack 
Authored: Wed Aug 26 09:17:33 2015 -0700
Committer: stack 
Committed: Wed Aug 26 09:17:46 2015 -0700

--
 .../org/apache/hadoop/hbase/master/HMaster.java   | 18 --
 .../hadoop/hbase/regionserver/RSRpcServices.java  | 16 
 2 files changed, 20 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ff86749c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 34776d5..7834f25 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2306,16 +2306,14 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
   Constructor c =
 masterClass.getConstructor(Configuration.class, 
CoordinatedStateManager.class);
   return c.newInstance(conf, cp);
-} catch (InvocationTargetException ite) {
-  Throwable target = ite.getTargetException() != null?
-ite.getTargetException(): ite;
-  if (target.getCause() != null) target = target.getCause();
-  throw new RuntimeException("Failed construction of Master: " +
-masterClass.toString(), target);
-} catch (Exception e) {
-  throw new RuntimeException("Failed construction of Master: " +
-masterClass.toString() + ((e.getCause() != null)?
-  e.getCause().getMessage(): ""), e);
+} catch(Exception e) {
+  Throwable error = e;
+  if (e instanceof InvocationTargetException &&
+  ((InvocationTargetException)e).getTargetException() != null) {
+error = ((InvocationTargetException)e).getTargetException();
+  }
+  throw new RuntimeException("Failed construction of Master: " + 
masterClass.toString() + ". "
+, error);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ff86749c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 65cedee..70ac7a6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
+import java.net.BindException;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
@@ -946,10 +947,17 @@ public class RSRpcServices implements 
HBaseRPCErrorHandler,
 String name = rs.getProcessName() + "/" + initialIsa.toString();
 // Set how many times to retry talking to another server over HConnection.
 ConnectionUtils.setServerSideHConnectionRetriesConfig(rs.conf, name, LOG);
-rpcServer = new RpcServer(rs, name, getServices(),
-  bindAddress, // use final bindAddress for this server.
-  rs.conf,
-  rpcSchedulerFactory.create(rs.conf, this, rs));
+try {
+  rpcServer = new RpcServer(rs, name, getServices(),
+bindAddress, // use final bindAddress for this server.
+rs.conf,
+rpcSchedulerFactory.create(rs.conf, this, rs));
+} catch(BindException be) {
+  String configName = (this instanceof MasterRpcServices) ? 
HConstants.MASTER_PORT :
+HConstants.REGIONSERVER_PORT;
+throw new IOException(be.getMessage() + ". To switch ports use the '" 
+ configName +
+  "' configuration property.", be.getCause() != null ? be.getCause() : 
be);
+}
 
 scannerLeaseTimeoutPeriod = rs.conf.getInt(
   HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,



hbase git commit: HBASE-14078 improve error message when HMaster can't bind to port

2015-08-26 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 268a69f17 -> 180e8b8fd


HBASE-14078 improve error message when HMaster can't bind to port


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/180e8b8f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/180e8b8f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/180e8b8f

Branch: refs/heads/branch-1.2
Commit: 180e8b8fd68f7d6181bbca17183f55bed2fd844f
Parents: 268a69f
Author: stack 
Authored: Wed Aug 26 09:17:33 2015 -0700
Committer: stack 
Committed: Wed Aug 26 09:18:31 2015 -0700

--
 .../org/apache/hadoop/hbase/master/HMaster.java   | 18 --
 .../hadoop/hbase/regionserver/RSRpcServices.java  | 16 
 2 files changed, 20 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/180e8b8f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 423deaf..8dbc321 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2290,16 +2290,14 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
   Constructor c =
 masterClass.getConstructor(Configuration.class, 
CoordinatedStateManager.class);
   return c.newInstance(conf, cp);
-} catch (InvocationTargetException ite) {
-  Throwable target = ite.getTargetException() != null?
-ite.getTargetException(): ite;
-  if (target.getCause() != null) target = target.getCause();
-  throw new RuntimeException("Failed construction of Master: " +
-masterClass.toString(), target);
-} catch (Exception e) {
-  throw new RuntimeException("Failed construction of Master: " +
-masterClass.toString() + ((e.getCause() != null)?
-  e.getCause().getMessage(): ""), e);
+} catch(Exception e) {
+  Throwable error = e;
+  if (e instanceof InvocationTargetException &&
+  ((InvocationTargetException)e).getTargetException() != null) {
+error = ((InvocationTargetException)e).getTargetException();
+  }
+  throw new RuntimeException("Failed construction of Master: " + 
masterClass.toString() + ". "
+, error);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/180e8b8f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 69d2a89..0b939d5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
+import java.net.BindException;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
@@ -864,10 +865,17 @@ public class RSRpcServices implements 
HBaseRPCErrorHandler,
 String name = rs.getProcessName() + "/" + initialIsa.toString();
 // Set how many times to retry talking to another server over HConnection.
 ConnectionUtils.setServerSideHConnectionRetriesConfig(rs.conf, name, LOG);
-rpcServer = new RpcServer(rs, name, getServices(),
-  bindAddress, // use final bindAddress for this server.
-  rs.conf,
-  rpcSchedulerFactory.create(rs.conf, this, rs));
+try {
+  rpcServer = new RpcServer(rs, name, getServices(),
+bindAddress, // use final bindAddress for this server.
+rs.conf,
+rpcSchedulerFactory.create(rs.conf, this, rs));
+} catch(BindException be) {
+  String configName = (this instanceof MasterRpcServices) ? 
HConstants.MASTER_PORT :
+HConstants.REGIONSERVER_PORT;
+throw new IOException(be.getMessage() + ". To switch ports use the '" 
+ configName +
+  "' configuration property.", be.getCause() != null ? be.getCause() : 
be);
+}
 
 scannerLeaseTimeoutPeriod = rs.conf.getInt(
   HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,



hbase git commit: HBASE-14310 test-patch.sh should handle spurious non-zero exit code from maven

2015-08-26 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master ff86749ca -> aca8c3b74


HBASE-14310 test-patch.sh should handle spurious non-zero exit code from maven


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/aca8c3b7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/aca8c3b7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/aca8c3b7

Branch: refs/heads/master
Commit: aca8c3b74b09646c72c4e0fe26a4b2103da0d288
Parents: ff86749
Author: tedyu 
Authored: Wed Aug 26 09:37:41 2015 -0700
Committer: tedyu 
Committed: Wed Aug 26 09:37:41 2015 -0700

--
 dev-support/test-patch.sh | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/aca8c3b7/dev-support/test-patch.sh
--
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 0b534034..33f84c5 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -335,10 +335,13 @@ setup () {
   $MVN clean package checkstyle:checkstyle-aggregate findbugs:findbugs 
-DskipTests \
 -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavacWarnings.txt 2>&1
   if [[ $? != 0 ]] ; then
+echo "mvn exit code was $?"
 ERR=`$GREP -A 5 'Compilation failure' $PATCH_DIR/trunkJavacWarnings.txt`
-echo "Trunk compilation is broken?
-{code}$ERR{code}"
-cleanupAndExit 1
+if [[ ${#ERR} -ge 1 ]] ; then
+  echo "Trunk compilation is broken?
+  {code}$ERR{code}"
+  cleanupAndExit 1
+fi
   fi
   mv target/checkstyle-result.xml $PATCH_DIR/trunkCheckstyle.xml
   collectFindbugsReports trunk $BASEDIR $PATCH_DIR



[2/2] hbase git commit: HBASE-14312 Forward port some fixes from hbase-6721-0.98 to hbase-6721 (Francis Liu)

2015-08-26 Thread apurtell
HBASE-14312 Forward port some fixes from hbase-6721-0.98 to hbase-6721 (Francis 
Liu)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fade887a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fade887a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fade887a

Branch: refs/heads/hbase-6721
Commit: fade887a309e6b12a4e580c5207794b3b05e9b4e
Parents: 16f65ba
Author: Andrew Purtell 
Authored: Wed Aug 26 13:04:29 2015 -0700
Committer: Andrew Purtell 
Committed: Wed Aug 26 13:04:29 2015 -0700

--
 .../hbase/group/IntegrationTestGroup.java   |5 -
 .../hbase/protobuf/generated/MasterProtos.java  | 1348 ++
 hbase-protocol/src/main/protobuf/Master.proto   |7 -
 .../hadoop/hbase/group/GroupAdminServer.java|   94 +-
 .../hbase/group/GroupBasedLoadBalancer.java |   37 +-
 .../hbase/group/GroupInfoManagerImpl.java   |   32 +-
 .../org/apache/hadoop/hbase/group/MXBean.java   |8 +-
 .../apache/hadoop/hbase/group/MXBeanImpl.java   |   19 +-
 .../hadoop/hbase/master/AssignmentManager.java  |8 +-
 .../hbase/master/MasterCoprocessorHost.java |   43 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |8 +
 .../apache/hadoop/hbase/group/TestGroups.java   |   42 +-
 .../hadoop/hbase/group/TestGroupsBase.java  |8 +-
 .../hbase/group/TestGroupsOfflineMode.java  |  181 +++
 .../security/access/TestAccessController.java   |   50 -
 hbase-shell/src/main/ruby/hbase/admin.rb|   20 -
 hbase-shell/src/main/ruby/hbase/group_admin.rb  |4 +-
 hbase-shell/src/main/ruby/shell.rb  |2 -
 .../commands/list_group_server_transitions.rb   |   44 -
 .../ruby/shell/commands/list_group_tables.rb|   45 -
 20 files changed, 498 insertions(+), 1507 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
index 22cddd7..62f4f8a 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
@@ -74,11 +74,6 @@ public class IntegrationTestGroup extends TestGroupsBase {
 ((IntegrationTestingUtility)TEST_UTIL).restoreCluster();
 LOG.info("Done restoring the cluster");
 
-groupAdmin.addGroup("master");
-groupAdmin.moveServers(
-
Sets.newHashSet(cluster.getInitialClusterStatus().getMaster().getHostPort()),
-"master");
-
 TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() {
   @Override
   public boolean evaluate() throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index beee110..71cc5d4 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -61124,1065 +61124,6 @@ public final class MasterProtos {
 // 
@@protoc_insertion_point(class_scope:hbase.pb.GetGroupInfoOfServerResponse)
   }
 
-  public interface ListServersInTransitionRequestOrBuilder
-  extends com.google.protobuf.MessageOrBuilder {
-  }
-  /**
-   * Protobuf type {@code hbase.pb.ListServersInTransitionRequest}
-   */
-  public static final class ListServersInTransitionRequest extends
-  com.google.protobuf.GeneratedMessage
-  implements ListServersInTransitionRequestOrBuilder {
-// Use ListServersInTransitionRequest.newBuilder() to construct.
-private 
ListServersInTransitionRequest(com.google.protobuf.GeneratedMessage.Builder 
builder) {
-  super(builder);
-  this.unknownFields = builder.getUnknownFields();
-}
-private ListServersInTransitionRequest(boolean noInit) { 
this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-private static final ListServersInTransitionRequest defaultInstance;
-public static ListServersInTransitionRequest getDefaultInstance() {
-  return defaultInstance;
-}
-
-public ListServersInTransitionRequest getDefaultInstanceForType() {
-  return defaultInstance;
-}
-
-private final com.google.protobuf.UnknownFieldSet unknownFields;
-@jav

[1/2] hbase git commit: HBASE-14312 Forward port some fixes from hbase-6721-0.98 to hbase-6721 (Francis Liu)

2015-08-26 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/hbase-6721 16f65badc -> fade887a3


http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 7a3b01f..05d2fb3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -62,12 +62,15 @@ public class MasterCoprocessorHost
   static class MasterEnvironment extends CoprocessorHost.Environment
   implements MasterCoprocessorEnvironment {
 private MasterServices masterServices;
+final boolean supportGroupCPs;
 
 public MasterEnvironment(final Class implClass, final Coprocessor impl,
 final int priority, final int seq, final Configuration conf,
 final MasterServices services) {
   super(impl, priority, seq, conf);
   this.masterServices = services;
+  supportGroupCPs = !useLegacyMethod(impl.getClass(),
+  "preBalanceGroup", ObserverContext.class, String.class);
 }
 
 public MasterServices getMasterServices() {
@@ -1110,7 +1113,9 @@ public class MasterCoprocessorHost
   @Override
   public void call(MasterObserver oserver,
   ObserverContext ctx) throws 
IOException {
-oserver.preMoveServers(ctx, servers, targetGroup);
+if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+  oserver.preMoveServers(ctx, servers, targetGroup);
+}
   }
 });
   }
@@ -1121,7 +1126,9 @@ public class MasterCoprocessorHost
   @Override
   public void call(MasterObserver oserver,
   ObserverContext ctx) throws 
IOException {
-oserver.postMoveServers(ctx, servers, targetGroup);
+if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+  oserver.postMoveServers(ctx, servers, targetGroup);
+}
   }
 });
   }
@@ -1132,7 +1139,9 @@ public class MasterCoprocessorHost
   @Override
   public void call(MasterObserver oserver,
   ObserverContext ctx) throws 
IOException {
-oserver.preMoveTables(ctx, tables, targetGroup);
+if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+  oserver.preMoveTables(ctx, tables, targetGroup);
+}
   }
 });
   }
@@ -1143,7 +1152,9 @@ public class MasterCoprocessorHost
   @Override
   public void call(MasterObserver oserver,
   ObserverContext ctx) throws 
IOException {
-oserver.postMoveTables(ctx, tables, targetGroup);
+if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+  oserver.postMoveTables(ctx, tables, targetGroup);
+}
   }
 });
   }
@@ -1154,7 +1165,9 @@ public class MasterCoprocessorHost
   @Override
   public void call(MasterObserver oserver,
   ObserverContext ctx) throws 
IOException {
-oserver.preAddGroup(ctx, name);
+if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+  oserver.preAddGroup(ctx, name);
+}
   }
 });
   }
@@ -1165,7 +1178,9 @@ public class MasterCoprocessorHost
   @Override
   public void call(MasterObserver oserver,
   ObserverContext ctx) throws 
IOException {
-oserver.postAddGroup(ctx, name);
+if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) {
+  oserver.postAddGroup(ctx, name);
+}
   }
 });
   }
@@ -1176,7 +1191,9 @@ public class MasterCoprocessorHost
   @Override
   public void call(MasterObserver oserver,
   ObserverContext ctx) throws 
IOException {
-oserver.preRemoveGroup(ctx, name);
+if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+  oserver.preRemoveGroup(ctx, name);
+}
   }
 });
   }
@@ -1187,7 +1204,9 @@ public class MasterCoprocessorHost
   @Override
   public void call(MasterObserver oserver,
   ObserverContext ctx) throws 
IOException {
-oserver.postRemoveGroup(ctx, name);
+if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+  oserver.postRemoveGroup(ctx, name);
+}
   }
 });
   }
@@ -1198,7 +1217,9 @@ public class MasterCoprocessorHost
   @Override
   public void call(MasterObserver oserver,
   ObserverContext ctx) throws 
IOException {
-oserver.preBalanceGroup(ctx, name);
+if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+  oserver.preBalanceGroup(ctx, name);
+}
   }
 });
   }
@@ -1209,7 +1230,9 @@ public class MasterCoprocessorHost
   @Override
   public void call(MasterObserv

[2/3] hbase git commit: HBASE-14312 Forward port some fixes from hbase-6721-0.98 to hbase-6721 (Francis Liu)

2015-08-26 Thread apurtell
HBASE-14312 Forward port some fixes from hbase-6721-0.98 to hbase-6721 (Francis 
Liu)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f1e6b202
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f1e6b202
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f1e6b202

Branch: refs/heads/hbase-6721
Commit: f1e6b202e969821d974952af2226bfec56f046bd
Parents: 16f65ba
Author: Andrew Purtell 
Authored: Wed Aug 26 13:04:29 2015 -0700
Committer: Andrew Purtell 
Committed: Wed Aug 26 13:28:22 2015 -0700

--
 .../hbase/group/IntegrationTestGroup.java   |5 -
 .../hbase/protobuf/generated/MasterProtos.java  | 1348 ++
 hbase-protocol/src/main/protobuf/Master.proto   |7 -
 .../hadoop/hbase/group/GroupAdminServer.java|   94 +-
 .../hbase/group/GroupBasedLoadBalancer.java |   37 +-
 .../hbase/group/GroupInfoManagerImpl.java   |   32 +-
 .../org/apache/hadoop/hbase/group/MXBean.java   |8 +-
 .../apache/hadoop/hbase/group/MXBeanImpl.java   |   19 +-
 .../hadoop/hbase/master/AssignmentManager.java  |8 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |8 +
 .../apache/hadoop/hbase/group/TestGroups.java   |   42 +-
 .../hadoop/hbase/group/TestGroupsBase.java  |8 +-
 .../hbase/group/TestGroupsOfflineMode.java  |  181 +++
 .../security/access/TestAccessController.java   |   50 -
 hbase-shell/src/main/ruby/hbase/admin.rb|   20 -
 hbase-shell/src/main/ruby/hbase/group_admin.rb  |4 +-
 hbase-shell/src/main/ruby/shell.rb  |2 -
 .../commands/list_group_server_transitions.rb   |   44 -
 .../ruby/shell/commands/list_group_tables.rb|   45 -
 19 files changed, 465 insertions(+), 1497 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f1e6b202/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
index 22cddd7..62f4f8a 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
@@ -74,11 +74,6 @@ public class IntegrationTestGroup extends TestGroupsBase {
 ((IntegrationTestingUtility)TEST_UTIL).restoreCluster();
 LOG.info("Done restoring the cluster");
 
-groupAdmin.addGroup("master");
-groupAdmin.moveServers(
-
Sets.newHashSet(cluster.getInitialClusterStatus().getMaster().getHostPort()),
-"master");
-
 TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() {
   @Override
   public boolean evaluate() throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1e6b202/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index beee110..71cc5d4 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -61124,1065 +61124,6 @@ public final class MasterProtos {
 // 
@@protoc_insertion_point(class_scope:hbase.pb.GetGroupInfoOfServerResponse)
   }
 
-  public interface ListServersInTransitionRequestOrBuilder
-  extends com.google.protobuf.MessageOrBuilder {
-  }
-  /**
-   * Protobuf type {@code hbase.pb.ListServersInTransitionRequest}
-   */
-  public static final class ListServersInTransitionRequest extends
-  com.google.protobuf.GeneratedMessage
-  implements ListServersInTransitionRequestOrBuilder {
-// Use ListServersInTransitionRequest.newBuilder() to construct.
-private 
ListServersInTransitionRequest(com.google.protobuf.GeneratedMessage.Builder 
builder) {
-  super(builder);
-  this.unknownFields = builder.getUnknownFields();
-}
-private ListServersInTransitionRequest(boolean noInit) { 
this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-private static final ListServersInTransitionRequest defaultInstance;
-public static ListServersInTransitionRequest getDefaultInstance() {
-  return defaultInstance;
-}
-
-public ListServersInTransitionRequest getDefaultInstanceForType() {
-  return defaultInstance;
-}
-
-private final com.google.protobuf.UnknownFieldSet unknownFields;
-@java.lang.Override
-public final com.google.protobuf.Unkno

[1/3] hbase git commit: HBASE-14312 Forward port some fixes from hbase-6721-0.98 to hbase-6721 (Francis Liu)

2015-08-26 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/hbase-6721 fade887a3 -> a4821da1f (forced update)


http://git-wip-us.apache.org/repos/asf/hbase/blob/f1e6b202/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java
index aa7ef1f..673c48e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java
@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase.group;
 
+import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 
 import org.apache.commons.logging.Log;
@@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
@@ -46,15 +48,20 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -62,8 +69,6 @@ import static org.junit.Assert.assertNotSame;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.verify;
 
 @Category({MediumTests.class})
 public class TestGroups extends TestGroupsBase {
@@ -139,12 +144,12 @@ public class TestGroups extends TestGroupsBase {
 TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() {
   @Override
   public boolean evaluate() throws Exception {
-LOG.info("Waiting for cleanup to finish "+groupAdmin.listGroups());
+LOG.info("Waiting for cleanup to finish " + groupAdmin.listGroups());
 //Might be greater since moving servers back to default
 //is after starting a server
 
 return 
groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP).getServers().size()
-   == NUM_SLAVES_BASE;
+== NUM_SLAVES_BASE;
   }
 });
   }
@@ -156,12 +161,37 @@ public class TestGroups extends TestGroupsBase {
 //verify it was loaded properly
 assertEquals("hadoop:name=Group,service=Group", 
it.next().getCanonicalName());
 
-final MXBeanImpl info = MXBeanImpl.init(groupAdmin, master);
+final AtomicReference deadServer = new 
AtomicReference(null);
+
+//We use mocks to simulate offline servers to avoid
+//the complexity and overhead of killing servers
+MasterServices mockMaster = Mockito.mock(MasterServices.class);
+final ServerManager mockServerManager = Mockito.mock(ServerManager.class);
+Mockito.when(mockMaster.getServerManager()).thenReturn(mockServerManager);
+Mockito.when(mockServerManager.getOnlineServersList()).then(new 
Answer>() {
+  @Override
+  public List answer(InvocationOnMock invocation) throws 
Throwable {
+GroupInfo groupInfo = groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP);
+List finalList = Lists.newArrayList();
+HostPort lastServer = groupInfo.getServers().last();
+for (ServerName server: 
master.getServerManager().getOnlineServersList()) {
+  if (!server.getHostPort().equals(lastServer)) {
+finalList.add(server);
+  }
+}
+deadServer.set(lastServer);
+return finalList;
+  }
+});
+MXBean info = new MXBeanImpl(groupAdmin, mockMaster);
+
 GroupInfo defaultGroup = groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP);
 assertEquals(2, info.getGroups().size());
 assertEquals(defaultGroup.getName(), info.getGroups().get(0).getName());
 assertEquals(defaultGroup.getServers(), 
Sets.newTreeSet(info.getGroups().get(0).getServers()));
-assertEquals(defaultGroup.getServers(), 
Sets.newTreeSet(info.getServersByGroup().get(GroupInfo.DEFAULT_GROUP)));
+assertEquals(defaultGroup.getServers().headSet(deadServer.get()),
+
Sets.newTreeSet(info.getServersByGroup().get(GroupInfo.DEFAULT_GROUP)));
+
 
 GroupInfo barGroup = a

[3/3] hbase git commit: HBASE-14232 Backwards compatibility support for new MasterObserver APIs (Francis Liu)

2015-08-26 Thread apurtell
HBASE-14232 Backwards compatibility support for new MasterObserver APIs (Francis 
Liu)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a4821da1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a4821da1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a4821da1

Branch: refs/heads/hbase-6721
Commit: a4821da1f2e50660a9dfee30583772dbe70da3c6
Parents: f1e6b20
Author: Andrew Purtell 
Authored: Wed Aug 26 13:28:44 2015 -0700
Committer: Andrew Purtell 
Committed: Wed Aug 26 13:28:44 2015 -0700

--
 .../hbase/master/MasterCoprocessorHost.java | 43 +++-
 1 file changed, 33 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a4821da1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 7a3b01f..05d2fb3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -62,12 +62,15 @@ public class MasterCoprocessorHost
   static class MasterEnvironment extends CoprocessorHost.Environment
   implements MasterCoprocessorEnvironment {
 private MasterServices masterServices;
+final boolean supportGroupCPs;
 
 public MasterEnvironment(final Class implClass, final Coprocessor impl,
 final int priority, final int seq, final Configuration conf,
 final MasterServices services) {
   super(impl, priority, seq, conf);
   this.masterServices = services;
+  supportGroupCPs = !useLegacyMethod(impl.getClass(),
+  "preBalanceGroup", ObserverContext.class, String.class);
 }
 
 public MasterServices getMasterServices() {
@@ -1110,7 +1113,9 @@ public class MasterCoprocessorHost
   @Override
   public void call(MasterObserver oserver,
   ObserverContext ctx) throws 
IOException {
-oserver.preMoveServers(ctx, servers, targetGroup);
+if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+  oserver.preMoveServers(ctx, servers, targetGroup);
+}
   }
 });
   }
@@ -1121,7 +1126,9 @@ public class MasterCoprocessorHost
   @Override
   public void call(MasterObserver oserver,
   ObserverContext ctx) throws 
IOException {
-oserver.postMoveServers(ctx, servers, targetGroup);
+if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+  oserver.postMoveServers(ctx, servers, targetGroup);
+}
   }
 });
   }
@@ -1132,7 +1139,9 @@ public class MasterCoprocessorHost
   @Override
   public void call(MasterObserver oserver,
   ObserverContext ctx) throws 
IOException {
-oserver.preMoveTables(ctx, tables, targetGroup);
+if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+  oserver.preMoveTables(ctx, tables, targetGroup);
+}
   }
 });
   }
@@ -1143,7 +1152,9 @@ public class MasterCoprocessorHost
   @Override
   public void call(MasterObserver oserver,
   ObserverContext ctx) throws 
IOException {
-oserver.postMoveTables(ctx, tables, targetGroup);
+if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+  oserver.postMoveTables(ctx, tables, targetGroup);
+}
   }
 });
   }
@@ -1154,7 +1165,9 @@ public class MasterCoprocessorHost
   @Override
   public void call(MasterObserver oserver,
   ObserverContext ctx) throws 
IOException {
-oserver.preAddGroup(ctx, name);
+if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+  oserver.preAddGroup(ctx, name);
+}
   }
 });
   }
@@ -1165,7 +1178,9 @@ public class MasterCoprocessorHost
   @Override
   public void call(MasterObserver oserver,
   ObserverContext ctx) throws 
IOException {
-oserver.postAddGroup(ctx, name);
+if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) {
+  oserver.postAddGroup(ctx, name);
+}
   }
 });
   }
@@ -1176,7 +1191,9 @@ public class MasterCoprocessorHost
   @Override
   public void call(MasterObserver oserver,
   ObserverContext ctx) throws 
IOException {
-oserver.preRemoveGroup(ctx, name);
+if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+  oserver.preRemoveGroup(ctx, name);
+}
   }
 });
   }
@@ -1187,7 +1204,9 @@ public class MasterCoprocessorHost
   @Override
   public void call

hbase git commit: HBASE-14232 Backwards compatibility support for new MasterObserver APIs (Francis Liu)

2015-08-26 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/hbase-6721-0.98 66e16163f -> 8726a60a5


HBASE-14232 Backwards compatibility support for new MasterObserver APIs (Francis 
Liu)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8726a60a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8726a60a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8726a60a

Branch: refs/heads/hbase-6721-0.98
Commit: 8726a60a52728730e46c46f4a6b9992530de2216
Parents: 66e1616
Author: Andrew Purtell 
Authored: Wed Aug 26 13:28:44 2015 -0700
Committer: Andrew Purtell 
Committed: Wed Aug 26 13:41:06 2015 -0700

--
 .../hbase/coprocessor/CoprocessorHost.java  | 74 
 .../hbase/master/MasterCoprocessorHost.java | 47 +
 2 files changed, 109 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8726a60a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
index 1c55738..16f6a86 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
@@ -30,6 +30,7 @@ import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.UUID;
+import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.atomic.AtomicInteger;
 
@@ -870,4 +871,77 @@ public abstract class CoprocessorHost {
   "coprocessor set.", e);
 }
   }
+
+  /**
+   * Used to gracefully handle fallback to deprecated methods when we
+   * evolve coprocessor APIs.
+   *
+   * When a particular Coprocessor API is updated to change methods, hosts can 
support fallback
+   * to the deprecated API by using this method to determine if an instance 
implements the new API.
+   * In the event that said support is partial, then in the face of a runtime 
issue that prevents
+   * proper operation {@link #legacyWarning(Class, String)} should be used to 
let operators know.
+   *
+   * For examples of this in action, see the implementation of
+   * 
+   *   {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost}
+   *   {@link org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost}
+   * 
+   *
+   * @param clazz Coprocessor you wish to evaluate
+   * @param methodName the name of the non-deprecated method version
+   * @param parameterTypes the Class of the non-deprecated method's arguments 
in the order they are
+   * declared.
+   */
+  @InterfaceAudience.Private
+  protected static boolean useLegacyMethod(final Class 
clazz,
+  final String methodName, final Class... parameterTypes) {
+boolean useLegacy;
+// Use reflection to see if they implement the non-deprecated version
+try {
+  clazz.getDeclaredMethod(methodName, parameterTypes);
+  LOG.debug("Found an implementation of '" + methodName + "' that uses 
updated method " +
+  "signature. Skipping legacy support for invocations in '" + clazz 
+"'.");
+  useLegacy = false;
+} catch (NoSuchMethodException exception) {
+  useLegacy = true;
+} catch (SecurityException exception) {
+  LOG.warn("The Security Manager denied our attempt to detect if the 
coprocessor '" + clazz +
+  "' requires legacy support; assuming it does. If you get later 
errors about legacy " +
+  "coprocessor use, consider updating your security policy to allow 
access to the package" +
+  " and declared members of your implementation.");
+  LOG.debug("Details of Security Manager rejection.", exception);
+  useLegacy = true;
+}
+return useLegacy;
+  }
+
+  /**
+   * Used to limit legacy handling to once per Coprocessor class per 
classloader.
+   */
+  private static final Set> legacyWarning =
+  new ConcurrentSkipListSet>(
+  new Comparator>() {
+@Override
+public int compare(Class c1, Class c2) {
+  if (c1.equals(c2)) {
+return 0;
+  }
+  return c1.getName().compareTo(c2.getName());
+}
+  });
+
+  /**
+   * limits the amount of logging to once per coprocessor class.
+   * Used in concert with {@link #useLegacyMethod(Class, String, Class[])} 
when a runtime issue
+   * prevents properly supporting the legacy version of a coprocessor API.
+   * Since coprocessors can be in tight loops this serves to limit the amount 
of log spam we create.
+   */
+  @InterfaceAudien

hbase git commit: HBASE-14313 After a Connection sees ConnectionClosingException on a connection it never recovers

2015-08-26 Thread eclark
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 ae5439051 -> 0e4aabf99


HBASE-14313 After a Connection sees ConnectionClosingException on a connection 
it never recovers


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0e4aabf9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0e4aabf9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0e4aabf9

Branch: refs/heads/branch-1.1
Commit: 0e4aabf99b341e8bfa6cd55ca4182e3e1dbd42cc
Parents: ae54390
Author: Elliott Clark 
Authored: Tue Aug 25 18:39:31 2015 -0700
Committer: Elliott Clark 
Committed: Wed Aug 26 13:44:49 2015 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0e4aabf9/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
index 4b96e31..a2d7172 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
@@ -935,7 +935,7 @@ public class RpcClientImpl extends AbstractRpcClient {
 } catch (IOException e) {
   // We set the value inside the synchronized block, this way the next 
in line
   //  won't even try to write
-  shouldCloseConnection.set(true);
+  markClosed(e);
   writeException = e;
   interrupt();
 }



hbase git commit: HBASE-14313 After a Connection sees ConnectionClosingException on a connection it never recovers

2015-08-26 Thread eclark
Repository: hbase
Updated Branches:
  refs/heads/branch-1.0 72b7f475c -> ea018af2e


HBASE-14313 After a Connection sees ConnectionClosingException on a connection 
it never recovers


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ea018af2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ea018af2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ea018af2

Branch: refs/heads/branch-1.0
Commit: ea018af2ea1737291916240d054c5c7871bb57c0
Parents: 72b7f47
Author: Elliott Clark 
Authored: Tue Aug 25 18:39:31 2015 -0700
Committer: Elliott Clark 
Committed: Wed Aug 26 13:46:11 2015 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ea018af2/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
index f4fd844..d399341 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
@@ -933,7 +933,7 @@ public class RpcClientImpl extends AbstractRpcClient {
 } catch (IOException e) {
   // We set the value inside the synchronized block, this way the next 
in line
   //  won't even try to write
-  shouldCloseConnection.set(true);
+  markClosed(e);
   writeException = e;
   interrupt();
 }



hbase git commit: HBASE-14313 After a Connection sees ConnectionClosingException on a connection it never recovers

2015-08-26 Thread eclark
Repository: hbase
Updated Branches:
  refs/heads/master aca8c3b74 -> 56890d9fe


HBASE-14313 After a Connection sees ConnectionClosingException on a connection 
it never recovers


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/56890d9f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/56890d9f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/56890d9f

Branch: refs/heads/master
Commit: 56890d9fe148dd192520fab349a66aa3f688e232
Parents: aca8c3b
Author: Elliott Clark 
Authored: Tue Aug 25 18:39:31 2015 -0700
Committer: Elliott Clark 
Committed: Wed Aug 26 13:44:12 2015 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/56890d9f/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
index 5ece8ae..6647615 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
@@ -939,7 +939,7 @@ public class RpcClientImpl extends AbstractRpcClient {
 } catch (IOException e) {
   // We set the value inside the synchronized block, this way the next 
in line
   //  won't even try to write
-  shouldCloseConnection.set(true);
+  markClosed(e);
   writeException = e;
   interrupt();
 }



hbase git commit: HBASE-14313 After a Connection sees ConnectionClosingException on a connection it never recovers

2015-08-26 Thread eclark
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 180e8b8fd -> 0a1f0cd66


HBASE-14313 After a Connection sees ConnectionClosingException on a connection 
it never recovers


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0a1f0cd6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0a1f0cd6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0a1f0cd6

Branch: refs/heads/branch-1.2
Commit: 0a1f0cd66a0f090782726246d44d7e9c611abc68
Parents: 180e8b8
Author: Elliott Clark 
Authored: Tue Aug 25 18:39:31 2015 -0700
Committer: Elliott Clark 
Committed: Wed Aug 26 13:44:38 2015 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0a1f0cd6/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
index 5ece8ae..6647615 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
@@ -939,7 +939,7 @@ public class RpcClientImpl extends AbstractRpcClient {
 } catch (IOException e) {
   // We set the value inside the synchronized block, this way the next 
in line
   //  won't even try to write
-  shouldCloseConnection.set(true);
+  markClosed(e);
   writeException = e;
   interrupt();
 }



[3/4] hbase git commit: HBASE-13212 Procedure V2 - master Create/Modify/Delete namespace (Stephen Yuan Jiang)

2015-08-26 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/e4b96cf7/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
index 9bf4c98..d40c1f7 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
@@ -499,6 +499,315 @@ public final class MasterProcedureProtos {
   }
 
   /**
+   * Protobuf enum {@code hbase.pb.CreateNamespaceState}
+   */
+  public enum CreateNamespaceState
+  implements com.google.protobuf.ProtocolMessageEnum {
+/**
+ * CREATE_NAMESPACE_PREPARE = 1;
+ */
+CREATE_NAMESPACE_PREPARE(0, 1),
+/**
+ * CREATE_NAMESPACE_CREATE_DIRECTORY = 2;
+ */
+CREATE_NAMESPACE_CREATE_DIRECTORY(1, 2),
+/**
+ * CREATE_NAMESPACE_INSERT_INTO_NS_TABLE = 3;
+ */
+CREATE_NAMESPACE_INSERT_INTO_NS_TABLE(2, 3),
+/**
+ * CREATE_NAMESPACE_UPDATE_ZK = 4;
+ */
+CREATE_NAMESPACE_UPDATE_ZK(3, 4),
+/**
+ * CREATE_NAMESPACE_SET_NAMESPACE_QUOTA = 5;
+ */
+CREATE_NAMESPACE_SET_NAMESPACE_QUOTA(4, 5),
+;
+
+/**
+ * CREATE_NAMESPACE_PREPARE = 1;
+ */
+public static final int CREATE_NAMESPACE_PREPARE_VALUE = 1;
+/**
+ * CREATE_NAMESPACE_CREATE_DIRECTORY = 2;
+ */
+public static final int CREATE_NAMESPACE_CREATE_DIRECTORY_VALUE = 2;
+/**
+ * CREATE_NAMESPACE_INSERT_INTO_NS_TABLE = 3;
+ */
+public static final int CREATE_NAMESPACE_INSERT_INTO_NS_TABLE_VALUE = 3;
+/**
+ * CREATE_NAMESPACE_UPDATE_ZK = 4;
+ */
+public static final int CREATE_NAMESPACE_UPDATE_ZK_VALUE = 4;
+/**
+ * CREATE_NAMESPACE_SET_NAMESPACE_QUOTA = 5;
+ */
+public static final int CREATE_NAMESPACE_SET_NAMESPACE_QUOTA_VALUE = 5;
+
+
+public final int getNumber() { return value; }
+
+public static CreateNamespaceState valueOf(int value) {
+  switch (value) {
+case 1: return CREATE_NAMESPACE_PREPARE;
+case 2: return CREATE_NAMESPACE_CREATE_DIRECTORY;
+case 3: return CREATE_NAMESPACE_INSERT_INTO_NS_TABLE;
+case 4: return CREATE_NAMESPACE_UPDATE_ZK;
+case 5: return CREATE_NAMESPACE_SET_NAMESPACE_QUOTA;
+default: return null;
+  }
+}
+
+public static 
com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static 
com.google.protobuf.Internal.EnumLiteMap
+internalValueMap =
+  new com.google.protobuf.Internal.EnumLiteMap() 
{
+public CreateNamespaceState findValueByNumber(int number) {
+  return CreateNamespaceState.valueOf(number);
+}
+  };
+
+public final com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(index);
+}
+public final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+  return getDescriptor();
+}
+public static final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(4);
+}
+
+private static final CreateNamespaceState[] VALUES = values();
+
+public static CreateNamespaceState valueOf(
+com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+  if (desc.getType() != getDescriptor()) {
+throw new java.lang.IllegalArgumentException(
+  "EnumValueDescriptor is not for this type.");
+  }
+  return VALUES[desc.getIndex()];
+}
+
+private final int index;
+private final int value;
+
+private CreateNamespaceState(int index, int value) {
+  this.index = index;
+  this.value = value;
+}
+
+// @@protoc_insertion_point(enum_scope:hbase.pb.CreateNamespaceState)
+  }
+
+  /**
+   * Protobuf enum {@code hbase.pb.ModifyNamespaceState}
+   */
+  public enum ModifyNamespaceState
+  implements com.google.protobuf.ProtocolMessageEnum {
+/**
+ * MODIFY_NAMESPACE_PREPARE = 1;
+ */
+MODIFY_NAMESPACE_PREPARE(0, 1),
+/**
+ * MODIFY_NAMESPACE_UPDATE_NS_TABLE = 2;
+ */
+MODIFY_NAMESPACE_UPDATE_NS_TABLE(1, 2),
+/**
+ * MODIFY_NAMESPACE_UPDATE_ZK = 3;
+ */
+MODIFY_NAMESPACE_UPDATE_ZK(2, 3),
+;
+
+/**
+ * MODIFY_NAMESPACE_PREPARE = 1;
+ */
+public static final int MODIFY_NAMESPACE_PREPARE_VALUE = 1;
+/**
+ * MODIFY_NAMESPACE_UPDATE_NS_TABLE = 2;
+ */
+public static final int MODIFY_NAMESPACE_UPDATE_NS_TABLE_VAL

[2/4] hbase git commit: HBASE-13212 Procedure V2 - master Create/Modify/Delete namespace (Stephen Yuan Jiang)

2015-08-26 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/e4b96cf7/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index f1420b0..4fbded7 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -16232,6 +16232,26 @@ public final class MasterProtos {
  * required .hbase.pb.NamespaceDescriptor namespaceDescriptor = 
1;
  */
 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder
 getNamespaceDescriptorOrBuilder();
+
+// optional uint64 nonce_group = 2 [default = 0];
+/**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+boolean hasNonceGroup();
+/**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+long getNonceGroup();
+
+// optional uint64 nonce = 3 [default = 0];
+/**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+boolean hasNonce();
+/**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+long getNonce();
   }
   /**
* Protobuf type {@code hbase.pb.CreateNamespaceRequest}
@@ -16297,6 +16317,16 @@ public final class MasterProtos {
   bitField0_ |= 0x0001;
   break;
 }
+case 16: {
+  bitField0_ |= 0x0002;
+  nonceGroup_ = input.readUInt64();
+  break;
+}
+case 24: {
+  bitField0_ |= 0x0004;
+  nonce_ = input.readUInt64();
+  break;
+}
   }
 }
   } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -16359,8 +16389,42 @@ public final class MasterProtos {
   return namespaceDescriptor_;
 }
 
+// optional uint64 nonce_group = 2 [default = 0];
+public static final int NONCE_GROUP_FIELD_NUMBER = 2;
+private long nonceGroup_;
+/**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+public boolean hasNonceGroup() {
+  return ((bitField0_ & 0x0002) == 0x0002);
+}
+/**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+public long getNonceGroup() {
+  return nonceGroup_;
+}
+
+// optional uint64 nonce = 3 [default = 0];
+public static final int NONCE_FIELD_NUMBER = 3;
+private long nonce_;
+/**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+public boolean hasNonce() {
+  return ((bitField0_ & 0x0004) == 0x0004);
+}
+/**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+public long getNonce() {
+  return nonce_;
+}
+
 private void initFields() {
   namespaceDescriptor_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+  nonceGroup_ = 0L;
+  nonce_ = 0L;
 }
 private byte memoizedIsInitialized = -1;
 public final boolean isInitialized() {
@@ -16385,6 +16449,12 @@ public final class MasterProtos {
   if (((bitField0_ & 0x0001) == 0x0001)) {
 output.writeMessage(1, namespaceDescriptor_);
   }
+  if (((bitField0_ & 0x0002) == 0x0002)) {
+output.writeUInt64(2, nonceGroup_);
+  }
+  if (((bitField0_ & 0x0004) == 0x0004)) {
+output.writeUInt64(3, nonce_);
+  }
   getUnknownFields().writeTo(output);
 }
 
@@ -16398,6 +16468,14 @@ public final class MasterProtos {
 size += com.google.protobuf.CodedOutputStream
   .computeMessageSize(1, namespaceDescriptor_);
   }
+  if (((bitField0_ & 0x0002) == 0x0002)) {
+size += com.google.protobuf.CodedOutputStream
+  .computeUInt64Size(2, nonceGroup_);
+  }
+  if (((bitField0_ & 0x0004) == 0x0004)) {
+size += com.google.protobuf.CodedOutputStream
+  .computeUInt64Size(3, nonce_);
+  }
   size += getUnknownFields().getSerializedSize();
   memoizedSerializedSize = size;
   return size;
@@ -16426,6 +16504,16 @@ public final class MasterProtos {
 result = result && getNamespaceDescriptor()
 .equals(other.getNamespaceDescriptor());
   }
+  result = result && (hasNonceGroup() == other.hasNonceGroup());
+  if (hasNonceGroup()) {
+result = result && (getNonceGroup()
+== other.getNonceGroup());
+  }
+  result = result && (hasNonce() == other.hasNonce());
+  if (hasNonce()) {
+result = result && (getNonce()
+== other.getNonce());
+  }
   result = result &&
   getUnknownFields().equals(other.getUnknownFields());
   re

[4/4] hbase git commit: HBASE-13212 Procedure V2 - master Create/Modify/Delete namespace (Stephen Yuan Jiang)

2015-08-26 Thread syuanjiang
HBASE-13212 Procedure V2 - master Create/Modify/Delete namespace (Stephen Yuan 
Jiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e4b96cf7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e4b96cf7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e4b96cf7

Branch: refs/heads/branch-1
Commit: e4b96cf78a4e7b15a1bc46a4f060f65065732786
Parents: 9b55f1c
Author: Stephen Yuan Jiang 
Authored: Wed Aug 26 13:54:50 2015 -0700
Committer: Stephen Yuan Jiang 
Committed: Wed Aug 26 13:54:50 2015 -0700

--
 .../generated/MasterProcedureProtos.java| 2910 --
 .../hbase/protobuf/generated/MasterProtos.java  |  989 --
 hbase-protocol/src/main/protobuf/Master.proto   |6 +
 .../src/main/protobuf/MasterProcedure.proto |   36 +
 .../apache/hadoop/hbase/ZKNamespaceManager.java |9 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   56 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   14 +-
 .../hadoop/hbase/master/MasterServices.java |   38 +-
 .../hbase/master/TableNamespaceManager.java |  232 +-
 .../procedure/CreateNamespaceProcedure.java |  364 +++
 .../procedure/DeleteNamespaceProcedure.java |  398 +++
 .../procedure/ModifyNamespaceProcedure.java |  281 ++
 .../hadoop/hbase/master/TestCatalogJanitor.java |   28 +-
 .../MasterProcedureTestingUtility.java  |2 -
 .../procedure/TestCreateNamespaceProcedure.java |  291 ++
 .../procedure/TestDeleteNamespaceProcedure.java |  281 ++
 .../procedure/TestModifyNamespaceProcedure.java |  294 ++
 17 files changed, 5655 insertions(+), 574 deletions(-)
--




[1/4] hbase git commit: HBASE-13212 Procedure V2 - master Create/Modify/Delete namespace (Stephen Yuan Jiang)

2015-08-26 Thread syuanjiang
Repository: hbase
Updated Branches:
  refs/heads/branch-1 9b55f1cd2 -> e4b96cf78


http://git-wip-us.apache.org/repos/asf/hbase/blob/e4b96cf7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
new file mode 100644
index 000..c91092a
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
@@ -0,0 +1,364 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceExistException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.TableNamespaceManager;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceState;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+/**
+ * The procedure to create a new namespace.
+ */
+@InterfaceAudience.Private
+public class CreateNamespaceProcedure
+extends StateMachineProcedure
+implements TableProcedureInterface {
+  private static final Log LOG = 
LogFactory.getLog(CreateNamespaceProcedure.class);
+
+  private final AtomicBoolean aborted = new AtomicBoolean(false);
+
+  private NamespaceDescriptor nsDescriptor;
+  private Boolean traceEnabled;
+
+  public CreateNamespaceProcedure() {
+this.traceEnabled = null;
+  }
+
+  public CreateNamespaceProcedure(
+  final MasterProcedureEnv env,
+  final NamespaceDescriptor nsDescriptor) throws IOException {
+this.nsDescriptor = nsDescriptor;
+this.traceEnabled = null;
+  }
+
+  @Override
+  protected Flow executeFromState(final MasterProcedureEnv env, final 
CreateNamespaceState state)
+  throws InterruptedException {
+if (isTraceEnabled()) {
+  LOG.trace(this + " execute state=" + state);
+}
+
+try {
+  switch (state) {
+  case CREATE_NAMESPACE_PREPARE:
+prepareCreate(env);
+setNextState(CreateNamespaceState.CREATE_NAMESPACE_CREATE_DIRECTORY);
+break;
+  case CREATE_NAMESPACE_CREATE_DIRECTORY:
+createDirectory(env, nsDescriptor);
+
setNextState(CreateNamespaceState.CREATE_NAMESPACE_INSERT_INTO_NS_TABLE);
+break;
+  case CREATE_NAMESPACE_INSERT_INTO_NS_TABLE:
+insertIntoNSTable(env, nsDescriptor);
+setNextState(CreateNamespaceState.CREATE_NAMESPACE_UPDATE_ZK);
+break;
+  case CREATE_NAMESPACE_UPDATE_ZK:
+updateZKNamespaceManager(env, nsDescriptor);
+
setNextState(CreateNamespaceState.CREATE_NAMESPACE_SET_NAMESPACE_QUOTA);
+break;
+  case CREATE_NAMESPACE_SET_NAMESPACE_QUOTA:
+setNamespaceQuota(env, nsDescriptor);
+return Flow.NO_MORE_STATE;
+  default:
+throw new UnsupportedOperationException(this + " unhandled state=" + 
state);
+  }
+} catch (IOException e) {
+  LOG.warn("Error trying to create the namespace" + nsDescriptor.getName()
++ " (in state=" + state + ")", e);
+
+  setFailure("master-create-namespace", e);
+}
+return Flow.HAS_MORE_STATE;
+  }
+
+  @Override
+  protected void rollbackState(final MasterProcedureEnv env, final 
CreateNamespaceState state)
+  throws IOException {
+if (isTraceEnabled()) {
+  LOG.trace(this + " rollback state=" + state);
+}
+try {
+  switch (state) {
+  cas

hbase git commit: HBASE-14269 FuzzyRowFilter omits certain rows when multiple fuzzy keys exist (hongbin ma)

2015-08-26 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 e4b96cf78 -> b8c7a08a9


HBASE-14269 FuzzyRowFilter omits certain rows when multiple fuzzy keys exist 
(hongbin ma)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b8c7a08a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b8c7a08a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b8c7a08a

Branch: refs/heads/branch-1
Commit: b8c7a08a9ed9a8c99e0de31512966065c4050e99
Parents: e4b96cf
Author: tedyu 
Authored: Wed Aug 26 20:49:43 2015 -0700
Committer: tedyu 
Committed: Wed Aug 26 20:49:43 2015 -0700

--
 .../hadoop/hbase/filter/FuzzyRowFilter.java | 120 +--
 .../filter/TestFuzzyRowFilterEndToEnd.java  |  97 ++-
 2 files changed, 129 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b8c7a08a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
index f112b2e..1f125e5 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
@@ -19,9 +19,9 @@ package org.apache.hadoop.hbase.filter;
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
+import java.util.PriorityQueue;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValueUtil;
@@ -94,7 +94,7 @@ public class FuzzyRowFilter extends FilterBase {
 
   private void preprocessSearchKey(Pair p) {
 if (UnsafeAccess.isAvailable() == false) {
-   return;
+  return;
 }
 byte[] key = p.getFirst();
 byte[] mask = p.getSecond();
@@ -157,83 +157,83 @@ public class FuzzyRowFilter extends FilterBase {
 
   @Override
   public Cell getNextCellHint(Cell currentCell) {
-boolean result = true;
-if (tracker.needsUpdate()) {
-  result = tracker.updateTracker(currentCell);
-}
+boolean result = tracker.updateTracker(currentCell);
 if (result == false) {
   done = true;
   return null;
 }
 byte[] nextRowKey = tracker.nextRow();
-// We need to compare nextRowKey with currentCell
-int compareResult =
-Bytes.compareTo(nextRowKey, 0, nextRowKey.length, 
currentCell.getRowArray(),
-  currentCell.getRowOffset(), currentCell.getRowLength());
-if ((reversed && compareResult > 0) || (!reversed && compareResult < 0)) {
-  // This can happen when we have multilpe filters and some other filter
-  // returns next row with hint which is larger (smaller for reverse)
-  // than the current (really?)
-  result = tracker.updateTracker(currentCell);
-  if (result == false) {
-done = true;
-return null;
-  } else {
-nextRowKey = tracker.nextRow();
-  }
-}
 return KeyValueUtil.createFirstOnRow(nextRowKey);
   }
 
   /**
-   * If we have multiple fuzzy keys, row tracker should improve overall 
performance It calculates
-   * all next rows (one per every fuzzy key), sort them accordingly (ascending 
for regular and
-   * descending for reverse). Next time getNextCellHint is called we check row 
tracker first and
-   * return next row from the tracker if it exists, if there are no rows in 
the tracker we update
-   * tracker with a current cell and return first row.
+   * If we have multiple fuzzy keys, row tracker should improve overall 
performance. It calculates
+   * all next rows (one per every fuzzy key) and put them (the fuzzy key is 
bundled) into a priority
+   * queue so that the smallest row key always appears at queue head, which 
helps to decide the
+   * "Next Cell Hint". As scanning going on, the number of candidate rows in 
the RowTracker will
+   * remain the size of fuzzy keys until some of the fuzzy keys won't possibly 
have matches any
+   * more.
*/
   private class RowTracker {
-private final List nextRows;
-private int next = -1;
+private final PriorityQueue>> nextRows;
+private boolean initialized = false;
 
 RowTracker() {
-  nextRows = new ArrayList();
-}
-
-boolean needsUpdate() {
-  return next == -1 || next == nextRows.size();
+  nextRows =
+  new PriorityQueue>>(fuzzyKeysData.size(),
+  new Comparator>>() {
+@Override
+public int compare(Pair> o1,
+Pair> o2) {
+  int compare = Bytes.compareTo(o1.getFirst(), o2.getFirst());
+  if (!isReversed()) {
+

hbase git commit: HBASE-14269 FuzzyRowFilter omits certain rows when multiple fuzzy keys exist (hongbin ma)

2015-08-26 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 0a1f0cd66 -> 430f31ff4


HBASE-14269 FuzzyRowFilter omits certain rows when multiple fuzzy keys exist 
(hongbin ma)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/430f31ff
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/430f31ff
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/430f31ff

Branch: refs/heads/branch-1.2
Commit: 430f31ff4e6a45562a91e997adbfe2c95f8df4a7
Parents: 0a1f0cd
Author: tedyu 
Authored: Wed Aug 26 20:53:24 2015 -0700
Committer: tedyu 
Committed: Wed Aug 26 20:53:24 2015 -0700

--
 .../hadoop/hbase/filter/FuzzyRowFilter.java | 120 +--
 .../filter/TestFuzzyRowFilterEndToEnd.java  |  97 ++-
 2 files changed, 129 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/430f31ff/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
index f112b2e..1f125e5 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
@@ -19,9 +19,9 @@ package org.apache.hadoop.hbase.filter;
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
+import java.util.PriorityQueue;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValueUtil;
@@ -94,7 +94,7 @@ public class FuzzyRowFilter extends FilterBase {
 
   private void preprocessSearchKey(Pair p) {
 if (UnsafeAccess.isAvailable() == false) {
-   return;
+  return;
 }
 byte[] key = p.getFirst();
 byte[] mask = p.getSecond();
@@ -157,83 +157,83 @@ public class FuzzyRowFilter extends FilterBase {
 
   @Override
   public Cell getNextCellHint(Cell currentCell) {
-boolean result = true;
-if (tracker.needsUpdate()) {
-  result = tracker.updateTracker(currentCell);
-}
+boolean result = tracker.updateTracker(currentCell);
 if (result == false) {
   done = true;
   return null;
 }
 byte[] nextRowKey = tracker.nextRow();
-// We need to compare nextRowKey with currentCell
-int compareResult =
-Bytes.compareTo(nextRowKey, 0, nextRowKey.length, 
currentCell.getRowArray(),
-  currentCell.getRowOffset(), currentCell.getRowLength());
-if ((reversed && compareResult > 0) || (!reversed && compareResult < 0)) {
-  // This can happen when we have multilpe filters and some other filter
-  // returns next row with hint which is larger (smaller for reverse)
-  // than the current (really?)
-  result = tracker.updateTracker(currentCell);
-  if (result == false) {
-done = true;
-return null;
-  } else {
-nextRowKey = tracker.nextRow();
-  }
-}
 return KeyValueUtil.createFirstOnRow(nextRowKey);
   }
 
   /**
-   * If we have multiple fuzzy keys, row tracker should improve overall 
performance It calculates
-   * all next rows (one per every fuzzy key), sort them accordingly (ascending 
for regular and
-   * descending for reverse). Next time getNextCellHint is called we check row 
tracker first and
-   * return next row from the tracker if it exists, if there are no rows in 
the tracker we update
-   * tracker with a current cell and return first row.
+   * If we have multiple fuzzy keys, row tracker should improve overall 
performance. It calculates
+   * all next rows (one per every fuzzy key) and put them (the fuzzy key is 
bundled) into a priority
+   * queue so that the smallest row key always appears at queue head, which 
helps to decide the
+   * "Next Cell Hint". As scanning going on, the number of candidate rows in 
the RowTracker will
+   * remain the size of fuzzy keys until some of the fuzzy keys won't possibly 
have matches any
+   * more.
*/
   private class RowTracker {
-private final List nextRows;
-private int next = -1;
+private final PriorityQueue>> nextRows;
+private boolean initialized = false;
 
 RowTracker() {
-  nextRows = new ArrayList();
-}
-
-boolean needsUpdate() {
-  return next == -1 || next == nextRows.size();
+  nextRows =
+  new PriorityQueue>>(fuzzyKeysData.size(),
+  new Comparator>>() {
+@Override
+public int compare(Pair> o1,
+Pair> o2) {
+  int compare = Bytes.compareTo(o1.getFirst(), o2.getFirst());
+  if (!isReversed()) {
+

hbase git commit: HBASE-14269 FuzzyRowFilter omits certain rows when multiple fuzzy keys exist (hongbin ma)

2015-08-26 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 0e4aabf99 -> 4b5c4e44b


HBASE-14269 FuzzyRowFilter omits certain rows when multiple fuzzy keys exist 
(hongbin ma)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4b5c4e44
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4b5c4e44
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4b5c4e44

Branch: refs/heads/branch-1.1
Commit: 4b5c4e44b2367d49a71f872828bfc0bf0e8c43db
Parents: 0e4aabf
Author: tedyu 
Authored: Wed Aug 26 20:54:26 2015 -0700
Committer: tedyu 
Committed: Wed Aug 26 20:54:26 2015 -0700

--
 .../hadoop/hbase/filter/FuzzyRowFilter.java | 120 +--
 .../filter/TestFuzzyRowFilterEndToEnd.java  |  97 ++-
 2 files changed, 129 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4b5c4e44/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
index f112b2e..1f125e5 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
@@ -19,9 +19,9 @@ package org.apache.hadoop.hbase.filter;
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
+import java.util.PriorityQueue;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValueUtil;
@@ -94,7 +94,7 @@ public class FuzzyRowFilter extends FilterBase {
 
   private void preprocessSearchKey(Pair p) {
 if (UnsafeAccess.isAvailable() == false) {
-   return;
+  return;
 }
 byte[] key = p.getFirst();
 byte[] mask = p.getSecond();
@@ -157,83 +157,83 @@ public class FuzzyRowFilter extends FilterBase {
 
   @Override
   public Cell getNextCellHint(Cell currentCell) {
-boolean result = true;
-if (tracker.needsUpdate()) {
-  result = tracker.updateTracker(currentCell);
-}
+boolean result = tracker.updateTracker(currentCell);
 if (result == false) {
   done = true;
   return null;
 }
 byte[] nextRowKey = tracker.nextRow();
-// We need to compare nextRowKey with currentCell
-int compareResult =
-Bytes.compareTo(nextRowKey, 0, nextRowKey.length, 
currentCell.getRowArray(),
-  currentCell.getRowOffset(), currentCell.getRowLength());
-if ((reversed && compareResult > 0) || (!reversed && compareResult < 0)) {
-  // This can happen when we have multilpe filters and some other filter
-  // returns next row with hint which is larger (smaller for reverse)
-  // than the current (really?)
-  result = tracker.updateTracker(currentCell);
-  if (result == false) {
-done = true;
-return null;
-  } else {
-nextRowKey = tracker.nextRow();
-  }
-}
 return KeyValueUtil.createFirstOnRow(nextRowKey);
   }
 
   /**
-   * If we have multiple fuzzy keys, row tracker should improve overall 
performance It calculates
-   * all next rows (one per every fuzzy key), sort them accordingly (ascending 
for regular and
-   * descending for reverse). Next time getNextCellHint is called we check row 
tracker first and
-   * return next row from the tracker if it exists, if there are no rows in 
the tracker we update
-   * tracker with a current cell and return first row.
+   * If we have multiple fuzzy keys, row tracker should improve overall 
performance. It calculates
+   * all next rows (one per every fuzzy key) and put them (the fuzzy key is 
bundled) into a priority
+   * queue so that the smallest row key always appears at queue head, which 
helps to decide the
+   * "Next Cell Hint". As scanning going on, the number of candidate rows in 
the RowTracker will
+   * remain the size of fuzzy keys until some of the fuzzy keys won't possibly 
have matches any
+   * more.
*/
   private class RowTracker {
-private final List nextRows;
-private int next = -1;
+private final PriorityQueue>> nextRows;
+private boolean initialized = false;
 
 RowTracker() {
-  nextRows = new ArrayList();
-}
-
-boolean needsUpdate() {
-  return next == -1 || next == nextRows.size();
+  nextRows =
+  new PriorityQueue>>(fuzzyKeysData.size(),
+  new Comparator>>() {
+@Override
+public int compare(Pair> o1,
+Pair> o2) {
+  int compare = Bytes.compareTo(o1.getFirst(), o2.getFirst());
+  if (!isReversed()) {
+

hbase git commit: HBASE-14269 FuzzyRowFilter omits certain rows when multiple fuzzy keys exist (hongbin ma)

2015-08-26 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.0 ea018af2e -> e7c7570d5


HBASE-14269 FuzzyRowFilter omits certain rows when multiple fuzzy keys exist 
(hongbin ma)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e7c7570d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e7c7570d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e7c7570d

Branch: refs/heads/branch-1.0
Commit: e7c7570d5adf1b094d7fc7339e30291f75366b54
Parents: ea018af
Author: tedyu 
Authored: Wed Aug 26 20:55:14 2015 -0700
Committer: tedyu 
Committed: Wed Aug 26 20:55:14 2015 -0700

--
 .../hadoop/hbase/filter/FuzzyRowFilter.java | 120 +--
 .../filter/TestFuzzyRowFilterEndToEnd.java  |  97 ++-
 2 files changed, 129 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e7c7570d/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
index f112b2e..1f125e5 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
@@ -19,9 +19,9 @@ package org.apache.hadoop.hbase.filter;
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
+import java.util.PriorityQueue;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValueUtil;
@@ -94,7 +94,7 @@ public class FuzzyRowFilter extends FilterBase {
 
   private void preprocessSearchKey(Pair p) {
 if (UnsafeAccess.isAvailable() == false) {
-   return;
+  return;
 }
 byte[] key = p.getFirst();
 byte[] mask = p.getSecond();
@@ -157,83 +157,83 @@ public class FuzzyRowFilter extends FilterBase {
 
   @Override
   public Cell getNextCellHint(Cell currentCell) {
-boolean result = true;
-if (tracker.needsUpdate()) {
-  result = tracker.updateTracker(currentCell);
-}
+boolean result = tracker.updateTracker(currentCell);
 if (result == false) {
   done = true;
   return null;
 }
 byte[] nextRowKey = tracker.nextRow();
-// We need to compare nextRowKey with currentCell
-int compareResult =
-Bytes.compareTo(nextRowKey, 0, nextRowKey.length, 
currentCell.getRowArray(),
-  currentCell.getRowOffset(), currentCell.getRowLength());
-if ((reversed && compareResult > 0) || (!reversed && compareResult < 0)) {
-  // This can happen when we have multilpe filters and some other filter
-  // returns next row with hint which is larger (smaller for reverse)
-  // than the current (really?)
-  result = tracker.updateTracker(currentCell);
-  if (result == false) {
-done = true;
-return null;
-  } else {
-nextRowKey = tracker.nextRow();
-  }
-}
 return KeyValueUtil.createFirstOnRow(nextRowKey);
   }
 
   /**
-   * If we have multiple fuzzy keys, row tracker should improve overall 
performance It calculates
-   * all next rows (one per every fuzzy key), sort them accordingly (ascending 
for regular and
-   * descending for reverse). Next time getNextCellHint is called we check row 
tracker first and
-   * return next row from the tracker if it exists, if there are no rows in 
the tracker we update
-   * tracker with a current cell and return first row.
+   * If we have multiple fuzzy keys, row tracker should improve overall 
performance. It calculates
+   * all next rows (one per every fuzzy key) and put them (the fuzzy key is 
bundled) into a priority
+   * queue so that the smallest row key always appears at queue head, which 
helps to decide the
+   * "Next Cell Hint". As scanning going on, the number of candidate rows in 
the RowTracker will
+   * remain the size of fuzzy keys until some of the fuzzy keys won't possibly 
have matches any
+   * more.
*/
   private class RowTracker {
-private final List nextRows;
-private int next = -1;
+private final PriorityQueue>> nextRows;
+private boolean initialized = false;
 
 RowTracker() {
-  nextRows = new ArrayList();
-}
-
-boolean needsUpdate() {
-  return next == -1 || next == nextRows.size();
+  nextRows =
+  new PriorityQueue>>(fuzzyKeysData.size(),
+  new Comparator>>() {
+@Override
+public int compare(Pair> o1,
+Pair> o2) {
+  int compare = Bytes.compareTo(o1.getFirst(), o2.getFirst());
+  if (!isReversed()) {
+

svn commit: r10313 - in /dev/hbase/hbase-1.1.2RC2: ./ hbase-1.1.2-bin.tar.gz hbase-1.1.2-bin.tar.gz.asc hbase-1.1.2-bin.tar.gz.mds hbase-1.1.2-src.tar.gz hbase-1.1.2-src.tar.gz.asc hbase-1.1.2-src.tar

2015-08-26 Thread ndimiduk
Author: ndimiduk
Date: Thu Aug 27 03:57:20 2015
New Revision: 10313

Log:
HBase-1.1.2 RC2 artifacts

Added:
dev/hbase/hbase-1.1.2RC2/
dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-bin.tar.gz   (with props)
dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-bin.tar.gz.asc
dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-bin.tar.gz.mds
dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-src.tar.gz   (with props)
dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-src.tar.gz.asc
dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-src.tar.gz.mds

Added: dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-bin.tar.gz
==
Binary file - no diff available.

Propchange: dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-bin.tar.gz.asc
==
--- dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-bin.tar.gz.asc (added)
+++ dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-bin.tar.gz.asc Thu Aug 27 03:57:20 2015
@@ -0,0 +1,16 @@
+-BEGIN PGP SIGNATURE-
+
+iQIcBAABCgAGBQJV3ogmAAoJEK2QOQccNIm9EdcP/A8FaTKrVhOF6hua4jxXbKww
+G8oObYexDttnm18G9JAcGRgxtR7UWvztyMr8nMwhM9igJur2MWWegG5GU+rtRjMu
+9qONtvUR+9w6bqb0niqdIgMcB1LRa9Sa1fN7OVodvJYHlrGEKWYyy9igKgvIGYT1
+fMmO8hRjwelKKISzpYtOKYPeBaZhu60JRzcv3Tc0NGJfgLe5TSWCFflw6kNH7pWP
++KF7vAT55E+buEfyRC8R4pJwYk67HWb4nN5UKHhJkbUm8p7OXF2JtDrUugCu915B
+Kh67K96+aOk6no8LK2qSN7HG7ViAU9ReQxtxiD32kiF2dehlUoDXgzC4kvoABg5y
+TF06rB/ePQEZw2TWseztCFJlf0gWjvLXEJ47qZppjlqGoSH1GKM3A9PIh9hxfkct
+4yEfI/YnGohl1MF0dINWnk5TcZHLghKGEJZtspotNcYx7ZNctB/9Np7ZYIBlYLWt
+1HC+BcPWo9dL8plnA4+03ij5CwQsukbmwV1yiv4jNSAmr1WviXq+lN33AGGenrGZ
+6oCVsqzXCYEdP1C9rsAVSOA4notMBhnlNQxjcs4PazwZzCwpAJNd4DjumF+yG20T
+Ayp3j36m1iAMfJK3iMParrdjjVfqpPvhuJQ8lG2gCmRkHj1EKbkozyCMr+4xoLAM
+gaDxiZHJWCBjU64CnkCk
+=O4X/
+-END PGP SIGNATURE-

Added: dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-bin.tar.gz.mds
==
--- dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-bin.tar.gz.mds (added)
+++ dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-bin.tar.gz.mds Thu Aug 27 03:57:20 2015
@@ -0,0 +1,17 @@
+hbase-1.1.2-bin.tar.gz:MD5 = 9F 8C A7 EC 80 6F CB BA  33 AC F1 C5 61 5B 44
+ 15
+hbase-1.1.2-bin.tar.gz:   SHA1 = DC62 A7BB 102C B5C7 096E  74FE 8826 99A2 DDDE
+ A8A4
+hbase-1.1.2-bin.tar.gz: RMD160 = 1DB0 167A E9C3 10F9 98EB  C165 E6C2 522C 1C12
+ 1A3D
+hbase-1.1.2-bin.tar.gz: SHA224 = 4E553467 50D54477 1A08CF9C E4FB8D60 255A8997
+ 7A60CDC0 E778B337
+hbase-1.1.2-bin.tar.gz: SHA256 = 8CA5BF02 03CEF86B 4A0ACBBA 89AFCD59 77488EBC
+ 73EEC097 E93C592B 16F8BEDE
+hbase-1.1.2-bin.tar.gz: SHA384 = 5B0C4354 68AA74CC 57286F6F 13093E86 E304FB5E
+ 9D3D21DE A6C66525 BA6F825D 060F338C D6255F72
+ 8E719CC1 B1FA366B
+hbase-1.1.2-bin.tar.gz: SHA512 = 1348B258 A19EFA6B 0D3832F4 D2457750 CD70B5DF
+ F17DC472 20B46CDE BE720B79 34F4C2EA 1B75B701
+ 2ED90F55 2D4D56F7 0EA125B0 A1CADC9B CD2C17E1
+ D528A6D4

Added: dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-src.tar.gz
==
Binary file - no diff available.

Propchange: dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-src.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-src.tar.gz.asc
==
--- dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-src.tar.gz.asc (added)
+++ dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-src.tar.gz.asc Thu Aug 27 03:57:20 2015
@@ -0,0 +1,16 @@
+-BEGIN PGP SIGNATURE-
+
+iQIcBAABCgAGBQJV3ogrAAoJEK2QOQccNIm9dVgP/3jItpvuhg39ZBTlR9cQoa4y
+7rgqfFXOKAzoHgsbyXhVi6PiWLEiejzfkJf3lbYbudBaqd45/DJz4jFtEfWBuUxL
+3qxBGFn0ybIESCWlWJM2CKbWtp/Ge+3abmodsWhkkqnc1xVCFC9iOVQ5p5NLwMTc
+Zi0vPR7oi1ubkHcqtG4jh6lqlNw3vL0k5LUsaOl3G6mlUSHXG+qZzb+ahRqNZd/8
+72OjlzfUko8bldfEX6cugGdVkwKz+gB9jeWFQYcg30EafXjmMzpzuoPBeIA5WQg4
+mT2oHkmyUS2HlDDxDUaOlieR3kvLW3xV4ksofyDNv6Rap83/N3Quv6P3ZRFC4FrO
+FrZzqNc9iN3LceIEI0GJvGUV6lrv7fDnMNAB8mYbw/fv+kc7itkNZjXkQpa+z1Vf
+cXywULlUOJoVp8surW++C7TRIJ7+qkzjmg4ee6g4euJnkDOUoBddBUBIIZneI+24
+uOlRda06X9nIn3l6JtgTJM2fAEVEVX04zwoapyvgIjhq2/dj76J+krUMZsb7Dn09
+tJKqtgRvud9W6A1vqBUkZOPcEWMnAYCNImxxmxU//i+h+ZdgP1TIvNOWzRwvjDJ6
+R6SfWZNqRN+psMnPqG2+1kk2Gma3QPG+7rpTfO6f2AeGoRFa81nPfdN4MAgZAm8C
+opKRJKhZBy02B7IhVIzz
+=Jepv
+-END PGP SIGNATURE-

Added: dev/hbase/hbase-1.1.2RC2/hbase-1.1.2-src.tar.gz.mds
==
--- d

Git Push Summary

2015-08-26 Thread ndimiduk
Repository: hbase
Updated Tags:  refs/tags/1.1.2RC2 [created] b54078ddc


hbase git commit: HBASE-14269 FuzzyRowFilter omits certain rows when multiple fuzzy keys exist (hongbin ma)

2015-08-26 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/0.98 4e4aabb93 -> 3fe903cb7


HBASE-14269 FuzzyRowFilter omits certain rows when multiple fuzzy keys exist 
(hongbin ma)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3fe903cb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3fe903cb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3fe903cb

Branch: refs/heads/0.98
Commit: 3fe903cb77c09eb6905dc235b7e7109e3135bbc6
Parents: 4e4aabb
Author: tedyu 
Authored: Wed Aug 26 21:02:51 2015 -0700
Committer: tedyu 
Committed: Wed Aug 26 21:02:51 2015 -0700

--
 .../hadoop/hbase/filter/FuzzyRowFilter.java | 118 +--
 .../hadoop/hbase/filter/TestFuzzyRowFilter.java |   1 -
 .../filter/TestFuzzyRowFilterEndToEnd.java  |  94 ++-
 3 files changed, 126 insertions(+), 87 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3fe903cb/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
index 7a3f1c7..d6ad3a0 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
@@ -19,9 +19,9 @@ package org.apache.hadoop.hbase.filter;
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
+import java.util.PriorityQueue;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
@@ -157,83 +157,83 @@ public class FuzzyRowFilter extends FilterBase {
 
   @Override
   public Cell getNextCellHint(Cell currentCell) {
-boolean result = true;
-if (tracker.needsUpdate()) {
-  result = tracker.updateTracker(currentCell);
-}
+boolean result = tracker.updateTracker(currentCell);
 if (result == false) {
   done = true;
   return null;
 }
 byte[] nextRowKey = tracker.nextRow();
-// We need to compare nextRowKey with currentCell
-int compareResult =
-Bytes.compareTo(nextRowKey, 0, nextRowKey.length, 
currentCell.getRowArray(),
-  currentCell.getRowOffset(), currentCell.getRowLength());
-if ((reversed && compareResult > 0) || (!reversed && compareResult < 0)) {
-  // This can happen when we have multilpe filters and some other filter
-  // returns next row with hint which is larger (smaller for reverse)
-  // than the current (really?)
-  result = tracker.updateTracker(currentCell);
-  if (result == false) {
-done = true;
-return null;
-  } else {
-nextRowKey = tracker.nextRow();
-  }
-}
 return KeyValue.createFirstOnRow(nextRowKey);
   }
 
   /**
-   * If we have multiple fuzzy keys, row tracker should improve overall 
performance It calculates
-   * all next rows (one per every fuzzy key), sort them accordingly (ascending 
for regular and
-   * descending for reverse). Next time getNextCellHint is called we check row 
tracker first and
-   * return next row from the tracker if it exists, if there are no rows in 
the tracker we update
-   * tracker with a current cell and return first row.
+   * If we have multiple fuzzy keys, row tracker should improve overall 
performance. It calculates
+   * all next rows (one per every fuzzy key) and put them (the fuzzy key is 
bundled) into a priority
+   * queue so that the smallest row key always appears at queue head, which 
helps to decide the
+   * "Next Cell Hint". As scanning going on, the number of candidate rows in 
the RowTracker will
+   * remain the size of fuzzy keys until some of the fuzzy keys won't possibly 
have matches any
+   * more.
*/
   private class RowTracker {
-private final List nextRows;
-private int next = -1;
+private final PriorityQueue>> nextRows;
+private boolean initialized = false;
 
 RowTracker() {
-  nextRows = new ArrayList();
-}
-
-boolean needsUpdate() {
-  return next == -1 || next == nextRows.size();
+  nextRows =
+  new PriorityQueue>>(fuzzyKeysData.size(),
+  new Comparator>>() {
+@Override
+public int compare(Pair> o1,
+Pair> o2) {
+  int compare = Bytes.compareTo(o1.getFirst(), o2.getFirst());
+  if (!isReversed()) {
+return compare;
+  } else {
+return -compare;
+  }
+}
+  });
 }
 
 byte[] nextRow() {
-  if (next < 0 || next == nextR