This is an automated email from the ASF dual-hosted git repository.

ddanielr pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git

commit dc2800691e3b4e269d8f86d6b9f99861c4d9257d
Merge: 3792630c86 319ec30b7f
Author: Daniel Roberts <ddani...@gmail.com>
AuthorDate: Thu Jan 4 01:53:51 2024 +0000

    Merge branch '2.1'

 .../apache/accumulo/test/functional/MergeIT.java   | 60 ++++++++++++++++++++++
 1 file changed, 60 insertions(+)

diff --cc test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
index 4d265a1120,43590e8030..af02e42f51
--- a/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
@@@ -18,52 -18,46 +18,63 @@@
   */
  package org.apache.accumulo.test.functional;
  
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +import static org.apache.accumulo.test.util.FileMetadataUtil.printAndVerifyFileMetadata;
 +import static org.apache.accumulo.test.util.FileMetadataUtil.verifyMergedMarkerCleared;
  import static org.junit.jupiter.api.Assertions.assertArrayEquals;
  import static org.junit.jupiter.api.Assertions.assertEquals;
+ import static org.junit.jupiter.api.Assertions.assertFalse;
  import static org.junit.jupiter.api.Assertions.assertTrue;
 +import static org.junit.jupiter.api.Assertions.fail;
  
  import java.time.Duration;
  import java.util.Arrays;
  import java.util.Collection;
  import java.util.HashSet;
  import java.util.List;
 +import java.util.Map;
  import java.util.Map.Entry;
+ import java.util.Set;
  import java.util.SortedSet;
  import java.util.TreeSet;
+ import java.util.UUID;
  
  import org.apache.accumulo.core.client.Accumulo;
  import org.apache.accumulo.core.client.AccumuloClient;
 +import org.apache.accumulo.core.client.AccumuloException;
  import org.apache.accumulo.core.client.BatchWriter;
  import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.admin.CompactionConfig;
  import org.apache.accumulo.core.client.admin.NewTableConfiguration;
  import org.apache.accumulo.core.client.admin.TimeType;
 +import org.apache.accumulo.core.conf.Property;
  import org.apache.accumulo.core.data.Key;
  import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Range;
  import org.apache.accumulo.core.data.TableId;
  import org.apache.accumulo.core.data.Value;
+ import org.apache.accumulo.core.dataImpl.KeyExtent;
++import org.apache.accumulo.core.metadata.ReferencedTabletFile;
  import org.apache.accumulo.core.metadata.StoredTabletFile;
 -import org.apache.accumulo.core.metadata.TabletFile;
 +import org.apache.accumulo.core.metadata.schema.DataFileValue;
+ import org.apache.accumulo.core.metadata.schema.ExternalCompactionId;
+ import org.apache.accumulo.core.metadata.schema.ExternalCompactionMetadata;
  import org.apache.accumulo.core.security.Authorizations;
+ import org.apache.accumulo.core.spi.compaction.CompactionExecutorId;
+ import org.apache.accumulo.core.spi.compaction.CompactionKind;
 +import org.apache.accumulo.core.util.FastFormat;
  import org.apache.accumulo.core.util.Merge;
+ import org.apache.accumulo.core.util.compaction.CompactionExecutorIdImpl;
  import org.apache.accumulo.harness.AccumuloClusterHarness;
 +import org.apache.accumulo.test.TestIngest;
 +import org.apache.accumulo.test.TestIngest.IngestParams;
 +import org.apache.accumulo.test.VerifyIngest;
 +import org.apache.accumulo.test.VerifyIngest.VerifyParams;
+ import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.Text;
  import org.junit.jupiter.api.Test;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
  
  public class MergeIT extends AccumuloClusterHarness {
  
@@@ -568,4 -230,52 +579,53 @@@
        }
      }
    }
+ 
+   // Test that merge handles metadata from compactions
+   @Test
+   public void testCompactionMetadata() throws Exception {
+     try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
+       String tableName = getUniqueNames(1)[0];
+       c.tableOperations().create(tableName);
+ 
+       var split = new Text("m");
+       c.tableOperations().addSplits(tableName, new TreeSet<>(List.of(split)));
+ 
+       TableId tableId = getServerContext().getTableId(tableName);
+ 
+       // add metadata from compactions to tablets prior to merge
+       try (var tabletsMutator = getServerContext().getAmple().mutateTablets()) {
+         for (var extent : List.of(new KeyExtent(tableId, split, null),
+             new KeyExtent(tableId, null, split))) {
+           var tablet = tabletsMutator.mutateTablet(extent);
+           ExternalCompactionId ecid = ExternalCompactionId.generate(UUID.randomUUID());
+ 
 -          TabletFile tmpFile = new TabletFile(new Path("file:///accumulo/tables/t-0/b-0/c1.rf"));
++          ReferencedTabletFile tmpFile =
++              ReferencedTabletFile.of(new Path("file:///accumulo/tables/t-0/b-0/c1.rf"));
+           CompactionExecutorId ceid = CompactionExecutorIdImpl.externalId("G1");
+           Set<StoredTabletFile> jobFiles =
+               Set.of(new StoredTabletFile("file:///accumulo/tables/t-0/b-0/b2.rf"));
+           ExternalCompactionMetadata ecMeta = new ExternalCompactionMetadata(jobFiles, jobFiles,
+               tmpFile, "localhost:4444", CompactionKind.SYSTEM, (short) 2, ceid, false, false, 44L);
+           tablet.putExternalCompaction(ecid, ecMeta);
+           tablet.mutate();
+         }
+       }
+ 
+       // ensure the data is in the metadata table as expected
+       try (var tablets = getServerContext().getAmple().readTablets().forTable(tableId).build()) {
+         for (var tablet : tablets) {
+           assertFalse(tablet.getExternalCompactions().isEmpty());
+         }
+       }
+ 
+       c.tableOperations().merge(tableName, null, null);
+ 
+       // ensure the merge operation removed the compaction entries
+       try (var tablets = getServerContext().getAmple().readTablets().forTable(tableId).build()) {
+         for (var tablet : tablets) {
+           assertTrue(tablet.getExternalCompactions().isEmpty());
+         }
+       }
+     }
+   }
  }
