This is an automated email from the ASF dual-hosted git repository.

ndimiduk pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
     new 8a9e083  HBASE-26622 Update error-prone to 2.10
8a9e083 is described below

commit 8a9e083994c1ab5211e70bb22b38399a41a72763
Author: Mike Drob <[email protected]>
AuthorDate: Thu Dec 23 11:00:54 2021 -0800

    HBASE-26622 Update error-prone to 2.10
    
    Author:    Mike Drob <[email protected]>
    Co-authored-by: Nick Dimiduk <[email protected]>
    Signed-off-by: Andrew Purtell <[email protected]>
---
 .../hadoop/hbase/io/asyncfs/ProtobufDecoder.java   |  8 ++------
 .../java/org/apache/hadoop/hbase/KeyValue.java     | 17 ++++++++-------
 .../hbase/coprocessor/AggregateImplementation.java | 10 ++++-----
 .../hadoop/hbase/mapreduce/HFileOutputFormat2.java |  2 +-
 .../hbase/regionserver/RegionCoprocessorHost.java  |  1 +
 .../hadoop/hbase/security/token/TokenProvider.java |  2 +-
 .../client/TestPutDeleteEtcCellIteration.java      | 12 +++++------
 .../hadoop/hbase/codec/CodecPerformance.java       | 11 ++--------
 .../regionserver/TestMergesSplitsAddToTracker.java | 24 ++++++++--------------
 .../hbase/thrift/ThriftHBaseServiceHandler.java    |  1 -
 pom.xml                                            |  2 +-
 11 files changed, 35 insertions(+), 55 deletions(-)

diff --git 
a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java
 
b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java
index d6e68f3..3be9a2e 100644
--- 
a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java
+++ 
b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java
@@ -132,17 +132,13 @@ public class ProtobufDecoder extends 
MessageToMessageDecoder<ByteBuf> {
     try {
       getParserForTypeMethod = 
protobufMessageLiteClass.getDeclaredMethod("getParserForType");
       newBuilderForTypeMethod = 
protobufMessageLiteClass.getDeclaredMethod("newBuilderForType");
+      // TODO: If this is false then the class will fail to load? Can refactor 
it out?
+      hasParser = true;
     } catch (NoSuchMethodException e) {
       // If the method is not found, we are in trouble. Abort.
       throw new RuntimeException(e);
     }
 
-    try {
-      protobufMessageLiteClass.getDeclaredMethod("getParserForType");
-      hasParser = true;
-    } catch (Throwable var2) {
-    }
-
     HAS_PARSER = hasParser;
   }
 }
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index f4046e4..c05d0be 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -1102,10 +1102,10 @@ public class KeyValue implements ExtendedCell, 
Cloneable {
    */
   @Override
   public KeyValue clone() throws CloneNotSupportedException {
-    super.clone();
-    byte [] b = new byte[this.length];
-    System.arraycopy(this.bytes, this.offset, b, 0, this.length);
-    KeyValue ret = new KeyValue(b, 0, b.length);
+    KeyValue ret = (KeyValue) super.clone();
+    ret.bytes = Arrays.copyOf(this.bytes, this.bytes.length);
+    ret.offset = 0;
+    ret.length = ret.bytes.length;
     // Important to clone the memstoreTS as well - otherwise memstore's
     // update-in-place methods (eg increment) will end up creating
     // new entries
@@ -1720,8 +1720,8 @@ public class KeyValue implements ExtendedCell, Cloneable {
     }
 
     @Override
-    protected Object clone() throws CloneNotSupportedException {
-      return new MetaComparator();
+    protected MetaComparator clone() throws CloneNotSupportedException {
+      return (MetaComparator) super.clone();
     }
 
     /**
@@ -2248,9 +2248,8 @@ public class KeyValue implements ExtendedCell, Cloneable {
     }
 
     @Override
-    protected Object clone() throws CloneNotSupportedException {
-      super.clone();
-      return new KVComparator();
+    protected KVComparator clone() throws CloneNotSupportedException {
+      return (KVComparator) super.clone();
     }
 
   }
diff --git 
a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java
 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java
index a7181f9..5571e1b 100644
--- 
a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java
+++ 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java
@@ -53,11 +53,11 @@ import org.slf4j.LoggerFactory;
  * {@link ColumnInterpreter} is used to interpret column value. This class is
  * parameterized with the following (these are the types with which the {@link 
ColumnInterpreter}
  * is parameterized, and for more description on these, refer to {@link 
ColumnInterpreter}):
- * @param T Cell value data type
- * @param S Promoted data type
- * @param P PB message that is used to transport initializer specific bytes
- * @param Q PB message that is used to transport Cell (&lt;T&gt;) instance
- * @param R PB message that is used to transport Promoted (&lt;S&gt;) instance
+ * @param <T> Cell value data type
+ * @param <S> Promoted data type
+ * @param <P> PB message that is used to transport initializer specific bytes
+ * @param <Q> PB message that is used to transport Cell (&lt;T&gt;) instance
+ * @param <R> PB message that is used to transport Promoted (&lt;S&gt;) 
instance
  */
 @InterfaceAudience.Private
 public class AggregateImplementation<T, S, P extends Message, Q extends 
Message, R extends Message>
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index ca7c9a3..a3c3f11 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -95,7 +95,7 @@ import org.slf4j.LoggerFactory;
 /**
  * Writes HFiles. Passed Cells must arrive in order.
  * Writes current time as the sequence id for the file. Sets the major 
compacted
- * attribute on created @{link {@link HFile}s. Calling write(null,null) will 
forcibly roll
+ * attribute on created {@link HFile}s. Calling write(null,null) will forcibly 
roll
  * all HFiles being written.
  * <p>
  * Using this class as part of a MapReduce job is best done
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index 6961bfd..78565c1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -260,6 +260,7 @@ public class RegionCoprocessorHost
    * @param rsServices interface to available region server functionality
    * @param conf the configuration
    */
+  @SuppressWarnings("ReturnValueIgnored") // Checking method exists as CPU 
optimization
   public RegionCoprocessorHost(final HRegion region,
       final RegionServerServices rsServices, final Configuration conf) {
     super(rsServices);
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java
index 92bd0db..28fef37 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java
@@ -129,7 +129,7 @@ public class TokenProvider implements 
AuthenticationProtos.AuthenticationService
 
       Token<AuthenticationTokenIdentifier> token =
           secretManager.generateToken(currentUser.getName());
-      response.setToken(ClientTokenUtil.toToken(token)).build();
+      response.setToken(ClientTokenUtil.toToken(token));
     } catch (IOException ioe) {
       CoprocessorRpcUtils.setControllerException(controller, ioe);
     }
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java
index 9f2cc01..b5e1178 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java
@@ -61,7 +61,7 @@ public class TestPutDeleteEtcCellIteration {
     for (CellScanner cellScanner = p.cellScanner(); cellScanner.advance();) {
       Cell cell = cellScanner.current();
       byte [] bytes = Bytes.toBytes(index++);
-      cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes));
+      assertEquals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes), cell);
     }
     assertEquals(COUNT, index);
   }
@@ -74,15 +74,13 @@ public class TestPutDeleteEtcCellIteration {
       p.addColumn(bytes, bytes, TIMESTAMP, bytes);
     }
     int index = 0;
-    int trigger = 3;
     for (CellScanner cellScanner = p.cellScanner(); cellScanner.advance();) {
       Cell cell = cellScanner.current();
       byte [] bytes = Bytes.toBytes(index++);
       // When we hit the trigger, try inserting a new KV; should trigger 
exception
-      if (trigger == 3) p.addColumn(bytes, bytes, TIMESTAMP, bytes);
-      cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes));
+      p.addColumn(bytes, bytes, TIMESTAMP, bytes);
+      assertEquals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes), cell);
     }
-    assertEquals(COUNT, index);
   }
 
   @Test
@@ -96,7 +94,7 @@ public class TestPutDeleteEtcCellIteration {
     for (CellScanner cellScanner = d.cellScanner(); cellScanner.advance();) {
       Cell cell = cellScanner.current();
       byte [] bytes = Bytes.toBytes(index++);
-      cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, 
KeyValue.Type.DeleteColumn));
+      assertEquals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, 
KeyValue.Type.Delete), cell);
     }
     assertEquals(COUNT, index);
   }
@@ -151,7 +149,7 @@ public class TestPutDeleteEtcCellIteration {
     for (CellScanner cellScanner = r.cellScanner(); cellScanner.advance();) {
       Cell cell = cellScanner.current();
       byte [] bytes = Bytes.toBytes(index++);
-      cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes));
+      assertEquals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes), cell);
     }
     assertEquals(COUNT, index);
   }
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java
index 73f5ca0..e801b5b 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hbase.codec;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayInputStream;
@@ -30,10 +30,6 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.codec.CellCodec;
-import org.apache.hadoop.hbase.codec.Codec;
-import org.apache.hadoop.hbase.codec.KeyValueCodec;
-import org.apache.hadoop.hbase.codec.MessageCodec;
 import org.apache.hadoop.hbase.io.CellOutputStream;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -96,10 +92,7 @@ public class CodecPerformance {
   }
 
   static void verifyCells(final Cell [] input, final Cell [] output) {
-    assertEquals(input.length, output.length);
-    for (int i = 0; i < input.length; i ++) {
-      input[i].equals(output[i]);
-    }
+    assertArrayEquals(input, output);
   }
 
   static void doCodec(final Codec codec, final Cell [] cells, final int 
cycles, final int count,
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMergesSplitsAddToTracker.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMergesSplitsAddToTracker.java
index 703d619..85e7380 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMergesSplitsAddToTracker.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMergesSplitsAddToTracker.java
@@ -18,15 +18,18 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import static 
org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.everyItem;
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.hasProperty;
+import static org.hamcrest.Matchers.not;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
-import org.apache.commons.lang3.mutable.MutableBoolean;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -228,21 +231,12 @@ public class TestMergesSplitsAddToTracker {
     return new Pair<>(fileInfo, copyName);
   }
 
-  private void validateDaughterRegionsFiles(HRegion region, String 
orignalFileName,
+  private void validateDaughterRegionsFiles(HRegion region, String 
originalFileName,
       String untrackedFile) throws IOException {
     //verify there's no link for the untracked, copied file in first region
     List<StoreFileInfo> infos = 
region.getRegionFileSystem().getStoreFiles("info");
-    final MutableBoolean foundLink = new MutableBoolean(false);
-    infos.stream().forEach(i -> {
-      i.getActiveFileName().contains(orignalFileName);
-      if(i.getActiveFileName().contains(untrackedFile)){
-        fail();
-      }
-      if(i.getActiveFileName().contains(orignalFileName)){
-        foundLink.setTrue();
-      }
-    });
-    assertTrue(foundLink.booleanValue());
+    assertThat(infos, everyItem(hasProperty("activeFileName", 
not(containsString(untrackedFile)))));
+    assertThat(infos, hasItem(hasProperty("activeFileName", 
containsString(originalFileName))));
   }
 
   private void verifyFilesAreTracked(Path regionDir, FileSystem fs) throws 
Exception {
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java
index c942977..369b2be 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java
@@ -154,7 +154,6 @@ public class ThriftHBaseServiceHandler extends 
HBaseServiceHandler implements Hb
    * id-&gt;scanner hash-map.
    *
    * @param id the ID of the scanner to remove
-   * @return a Scanner, or null if ID was invalid.
    */
   private synchronized void removeScanner(int id) {
     scannerMap.invalidate(id);
diff --git a/pom.xml b/pom.xml
index 896dc99..f3f956e 100755
--- a/pom.xml
+++ b/pom.xml
@@ -1547,7 +1547,7 @@
     -->
     <checkstyle.version>8.28</checkstyle.version>
     <exec.maven.version>1.6.0</exec.maven.version>
-    <error-prone.version>2.4.0</error-prone.version>
+    <error-prone.version>2.10.0</error-prone.version>
     <jamon.plugin.version>2.4.2</jamon.plugin.version>
     <lifecycle.mapping.version>1.0.0</lifecycle.mapping.version>
     <maven.antrun.version>1.8</maven.antrun.version>

Reply via email to the mailing list.