This is an automated email from the ASF dual-hosted git repository.

kturner pushed a commit to branch 2.1
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/2.1 by this push:
     new 1a40aee541 fixes intermittent failure in ScanConsistencyIT (#4292)
1a40aee541 is described below

commit 1a40aee54116416181576f3f10198033fe3235f3
Author: Keith Turner <ktur...@apache.org>
AuthorDate: Thu Feb 22 16:10:29 2024 -0500

    fixes intermittent failure in ScanConsistencyIT (#4292)
    
    The following failure was observed when running ScanConsistencyIT in the
    elasticity branch.
    
    ```
    java.util.concurrent.ExecutionException: java.util.NoSuchElementException
            at java.base/java.util.concurrent.FutureTask.report(FutureTask.java:122)
            at java.base/java.util.concurrent.FutureTask.get(FutureTask.java:191)
            at org.apache.accumulo.test.ScanConsistencyIT.testConcurrentScanConsistency(ScanConsistencyIT.java:186)
            at java.base/java.lang.reflect.Method.invoke(Method.java:566)
            at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
            at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
            at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
            at java.base/java.lang.Thread.run(Thread.java:829)
    Caused by: java.util.NoSuchElementException
            at com.google.common.collect.MoreCollectors$ToOptionalState.getElement(MoreCollectors.java:163)
            at com.google.common.collect.MoreCollectors.lambda$static$1(MoreCollectors.java:75)
            at java.base/java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:582)
            at org.apache.accumulo.test.ScanConsistencyIT$TableOpsTask.call(ScanConsistencyIT.java:685)
            at org.apache.accumulo.test.ScanConsistencyIT$TableOpsTask.call(ScanConsistencyIT.java:622)
            ... 4 more
    ```
    
    This was caused by the test attempting to do a filter compaction when
    there was no data to delete. This commit adds a check for that case.
---
 .../apache/accumulo/test/ScanConsistencyIT.java    | 33 ++++++++++++----------
 1 file changed, 18 insertions(+), 15 deletions(-)

diff --git a/test/src/main/java/org/apache/accumulo/test/ScanConsistencyIT.java b/test/src/main/java/org/apache/accumulo/test/ScanConsistencyIT.java
index a882547a05..0c03de99b0 100644
--- a/test/src/main/java/org/apache/accumulo/test/ScanConsistencyIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ScanConsistencyIT.java
@@ -659,21 +659,24 @@ public class ScanConsistencyIT extends AccumuloClusterHarness {
           // 1 in 20 chance of doing a filter compaction. This compaction will delete a data set.
           var deletes = tctx.dataTracker.getDeletes();
 
-          // The row has the format <random long>:<generation>, the following gets the generations
-          // from the rows. Expect the generation to be the same for a set of data to delete.
-          String gen = deletes.stream().map(m -> new String(m.getRow(), UTF_8))
-              .map(row -> row.split(":")[1]).distinct().collect(MoreCollectors.onlyElement());
-
-          IteratorSetting iterSetting =
-              new IteratorSetting(100, "genfilter", GenerationFilter.class);
-          iterSetting.addOptions(Map.of("generation", gen));
-
-          // run a compaction that deletes every key with the specified generation. Must wait on the
-          // compaction because at the end of the test it will try to verify deleted data is not
-          // present. Must flush the table in case data to delete is still in memory.
-          tctx.client.tableOperations().compact(tctx.table, new CompactionConfig().setFlush(true)
-              .setWait(true).setIterators(List.of(iterSetting)));
-          numFilters++;
+          if (!deletes.isEmpty()) {
+            // The row has the format <random long>:<generation>, the following gets the generations
+            // from the rows. Expect the generation to be the same for a set of data to delete.
+            String gen = deletes.stream().map(m -> new String(m.getRow(), UTF_8))
+                .map(row -> row.split(":")[1]).distinct().collect(MoreCollectors.onlyElement());
+
+            IteratorSetting iterSetting =
+                new IteratorSetting(100, "genfilter", GenerationFilter.class);
+            iterSetting.addOptions(Map.of("generation", gen));
+
+            // run a compaction that deletes every key with the specified generation. Must wait on
+            // the
+            // compaction because at the end of the test it will try to verify deleted data is not
+            // present. Must flush the table in case data to delete is still in memory.
+            tctx.client.tableOperations().compact(tctx.table, new CompactionConfig().setFlush(true)
+                .setWait(true).setIterators(List.of(iterSetting)));
+            numFilters++;
+          }
         }
       }
 

Reply via email to