This is an automated email from the ASF dual-hosted git repository.
hemant pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new f33f2fbb9e HDDS-10295. Provide an "ozone repair" subcommand to update the snapshot info in transactionInfoTable (#6533)
f33f2fbb9e is described below
commit f33f2fbb9e319d9c2c6116e4b4f0a0e1dbe32b97
Author: DaveTeng0 <[email protected]>
AuthorDate: Tue Jun 11 19:26:50 2024 -0700
HDDS-10295. Provide an "ozone repair" subcommand to update the snapshot info in transactionInfoTable (#6533)
---
.../apache/hadoop/hdds/utils/TransactionInfo.java | 2 +-
.../hadoop/ozone/shell/TestOzoneRepairShell.java | 133 +++++++++++++++++++
.../apache/hadoop/ozone/debug/RocksDBUtils.java | 31 +++++
.../hadoop/ozone/repair/TransactionInfoRepair.java | 135 +++++++++++++++++++
.../hadoop/ozone/repair/om/SnapshotRepair.java | 34 ++---
.../ozone/repair/TestTransactionInfoRepair.java | 146 +++++++++++++++++++++
.../org.mockito.plugins.MockMaker | 16 +++
7 files changed, 472 insertions(+), 25 deletions(-)
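
For context, a minimal sketch of how the new subcommand is driven, mirroring the TestOzoneRepairShell added below rather than the full "ozone repair" CLI wiring (which is not part of this diff); dbPath is a placeholder for the om.db location and the OM must be stopped first:

    import org.apache.hadoop.ozone.repair.RDBRepair;
    import org.apache.hadoop.ozone.repair.TransactionInfoRepair;
    import picocli.CommandLine;

    // Register the new subcommand under the RocksDB repair parent command.
    CommandLine cmd = new CommandLine(new RDBRepair())
        .addSubcommand(new TransactionInfoRepair());
    // Overwrite the highest Ratis term/index stored in transactionInfoTable.
    int exitCode = cmd.execute("--db=" + dbPath, "update-transaction",
        "--term", "1111", "--index", "1111");
    // exitCode == 0 on success; the old and the new term/index are printed to stdout.
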
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
index 75329c0c17..e7c4ec4ce3 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
@@ -50,7 +50,7 @@ public final class TransactionInfo implements Comparable<TransactionInfo> {
return CODEC;
}
- private static TransactionInfo valueOf(String transactionInfo) {
+ public static TransactionInfo valueOf(String transactionInfo) {
final String[] tInfo = transactionInfo.split(TRANSACTION_INFO_SPLIT_KEY);
Preconditions.checkArgument(tInfo.length == 2,
"Unexpected split length: %s in \"%s\"", tInfo.length,
transactionInfo);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java
new file mode 100644
index 0000000000..328fc1ddd8
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.shell;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.debug.DBScanner;
+import org.apache.hadoop.ozone.debug.RDBParser;
+import org.apache.hadoop.ozone.om.OMStorage;
+import org.apache.hadoop.ozone.repair.RDBRepair;
+import org.apache.hadoop.ozone.repair.TransactionInfoRepair;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import picocli.CommandLine;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+/**
+ * Test Ozone Repair shell.
+ */
+public class TestOzoneRepairShell {
+
+ private final ByteArrayOutputStream out = new ByteArrayOutputStream();
+ private final ByteArrayOutputStream err = new ByteArrayOutputStream();
+ private static final PrintStream OLD_OUT = System.out;
+ private static final PrintStream OLD_ERR = System.err;
+ private static final String DEFAULT_ENCODING = UTF_8.name();
+ private static MiniOzoneCluster cluster = null;
+ private static OzoneConfiguration conf = null;
+
+ private static final String TRANSACTION_INFO_TABLE_TERM_INDEX_PATTERN = "([0-9]+#[0-9]+)";
+
+ @BeforeAll
+ public static void init() throws Exception {
+ conf = new OzoneConfiguration();
+ cluster = MiniOzoneCluster.newBuilder(conf).build();
+ cluster.waitForClusterToBeReady();
+ }
+
+ @BeforeEach
+ public void setup() throws Exception {
+ System.setOut(new PrintStream(out, false, DEFAULT_ENCODING));
+ System.setErr(new PrintStream(err, false, DEFAULT_ENCODING));
+ }
+
+ @AfterEach
+ public void reset() {
+ // reset stream after each unit test
+ out.reset();
+ err.reset();
+
+ // restore system streams
+ System.setOut(OLD_OUT);
+ System.setErr(OLD_ERR);
+ }
+
+ @Test
+ public void testUpdateTransactionInfoTable() throws Exception {
+ CommandLine cmd = new CommandLine(new RDBRepair()).addSubcommand(new TransactionInfoRepair());
+ String dbPath = new File(OMStorage.getOmDbDir(conf) + "/" + OM_DB_NAME).getPath();
+
+ cluster.getOzoneManager().stop();
+
+ String cmdOut = scanTransactionInfoTable(dbPath);
+ String[] originalHighestTermIndex = parseScanOutput(cmdOut);
+
+ String testTerm = "1111";
+ String testIndex = "1111";
+ String[] args =
+ new String[] {"--db=" + dbPath, "update-transaction", "--term", testTerm, "--index", testIndex};
+ int exitCode = cmd.execute(args);
+ assertEquals(0, exitCode);
+ assertThat(out.toString(DEFAULT_ENCODING)).contains(
+ "The original highest transaction Info was " +
+ String.format("(t:%s, i:%s)", originalHighestTermIndex[0],
originalHighestTermIndex[1]));
+ assertThat(out.toString(DEFAULT_ENCODING)).contains(
+ String.format("The highest transaction info has been updated to:
(t:%s, i:%s)",
+ testTerm, testIndex));
+
+ String cmdOut2 = scanTransactionInfoTable(dbPath);
+ assertThat(cmdOut2).contains(testTerm + "#" + testIndex);
+
+ cmd.execute("--db=" + dbPath, "update-transaction", "--term",
+ originalHighestTermIndex[0], "--index", originalHighestTermIndex[1]);
+ cluster.getOzoneManager().restart();
+ cluster.newClient().getObjectStore().createVolume("vol1");
+ }
+
+ private String scanTransactionInfoTable(String dbPath) throws Exception {
+ CommandLine cmdDBScanner = new CommandLine(new RDBParser()).addSubcommand(new DBScanner());
+ String[] argsDBScanner =
+ new String[] {"--db=" + dbPath, "scan", "--column_family", "transactionInfoTable"};
+ cmdDBScanner.execute(argsDBScanner);
+ return out.toString(DEFAULT_ENCODING);
+ }
+
+ private String[] parseScanOutput(String output) throws IOException {
+ Pattern pattern = Pattern.compile(TRANSACTION_INFO_TABLE_TERM_INDEX_PATTERN);
+ Matcher matcher = pattern.matcher(output);
+ if (matcher.find()) {
+ return matcher.group(1).split("#");
+ }
+ throw new IllegalStateException("Failed to scan and find raft's highest term and index from TransactionInfo table");
+ }
+
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RocksDBUtils.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RocksDBUtils.java
index cb780da450..5f1603b414 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RocksDBUtils.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RocksDBUtils.java
@@ -18,11 +18,18 @@
package org.apache.hadoop.ozone.debug;
+import org.apache.hadoop.hdds.utils.db.Codec;
import org.apache.hadoop.hdds.utils.db.RocksDatabase;
+import org.apache.hadoop.hdds.utils.db.StringCodec;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
import org.rocksdb.ColumnFamilyDescriptor;
+import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDBException;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
/**
@@ -45,4 +52,28 @@ public final class RocksDBUtils {
}
return cfs;
}
+
+ public static ColumnFamilyHandle getColumnFamilyHandle(String columnFamilyName, List<ColumnFamilyHandle> cfHandleList)
+ throws RocksDBException {
+ byte[] nameBytes = columnFamilyName.getBytes(StandardCharsets.UTF_8);
+
+ for (ColumnFamilyHandle cf : cfHandleList) {
+ if (Arrays.equals(cf.getName(), nameBytes)) {
+ return cf;
+ }
+ }
+
+ return null;
+ }
+
+ public static <T> T getValue(ManagedRocksDB db,
+ ColumnFamilyHandle columnFamilyHandle, String key,
+ Codec<T> codec)
+ throws IOException, RocksDBException {
+ byte[] bytes = db.get().get(columnFamilyHandle,
+ StringCodec.get().toPersistedFormat(key));
+ return bytes != null ? codec.fromPersistedFormat(bytes) : null;
+ }
+
+
}
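
The two helpers added above are meant to be used together against an offline RocksDB instance; a minimal read-only sketch, assuming an om.db path and the TRANSACTION_INFO_TABLE / TRANSACTION_INFO_KEY constants that TransactionInfoRepair below imports (checked exceptions elided; the calling method would declare them):

    List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
    List<ColumnFamilyDescriptor> cfDescs = RocksDBUtils.getColumnFamilyDescriptors(dbPath);
    try (ManagedRocksDB db = ManagedRocksDB.open(dbPath, cfDescs, cfHandles)) {
      // Resolve the column family by name, then decode its value with the table's codec.
      ColumnFamilyHandle cfh = RocksDBUtils.getColumnFamilyHandle(TRANSACTION_INFO_TABLE, cfHandles);
      if (cfh != null) {
        TransactionInfo info = RocksDBUtils.getValue(db, cfh, TRANSACTION_INFO_KEY, TransactionInfo.getCodec());
        System.out.println(info == null ? "no transaction info" : info.getTermIndex());
      }
    } finally {
      IOUtils.closeQuietly(cfHandles);
    }
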
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java
new file mode 100644
index 0000000000..f2a6331737
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
+ * information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache
+ * License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a
+ * copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.repair;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.cli.SubcommandWithParent;
+import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
+import org.apache.hadoop.hdds.utils.db.StringCodec;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
+import org.apache.hadoop.ozone.debug.RocksDBUtils;
+import org.kohsuke.MetaInfServices;
+import org.rocksdb.ColumnFamilyDescriptor;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDBException;
+import picocli.CommandLine;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.TRANSACTION_INFO_TABLE;
+
+
+/**
+ * Tool to update the highest term-index in transactionInfoTable.
+ */
[email protected](
+ name = "update-transaction",
+ description = "CLI to update the highest index in transactionInfoTable.
Currently it is only supported for OM.",
+ mixinStandardHelpOptions = true,
+ versionProvider = HddsVersionProvider.class
+)
+@MetaInfServices(SubcommandWithParent.class)
+public class TransactionInfoRepair
+ implements Callable<Void>, SubcommandWithParent {
+
+ @CommandLine.Spec
+ private static CommandLine.Model.CommandSpec spec;
+
+ @CommandLine.ParentCommand
+ private RDBRepair parent;
+
+ @CommandLine.Option(names = {"--term"},
+ required = true,
+ description = "Highest term of transactionInfoTable. The input should be
non-zero long integer.")
+ private long highestTransactionTerm;
+
+ @CommandLine.Option(names = {"--index"},
+ required = true,
+ description = "Highest index of transactionInfoTable. The input should
be non-zero long integer.")
+ private long highestTransactionIndex;
+
+
+ protected void setHighestTransactionTerm(
+ long highestTransactionTerm) {
+ this.highestTransactionTerm = highestTransactionTerm;
+ }
+
+ protected void setHighestTransactionIndex(
+ long highestTransactionIndex) {
+ this.highestTransactionIndex = highestTransactionIndex;
+ }
+
+
+ @Override
+ public Void call() throws Exception {
+ List<ColumnFamilyHandle> cfHandleList = new ArrayList<>();
+ String dbPath = getParent().getDbPath();
+ List<ColumnFamilyDescriptor> cfDescList = RocksDBUtils.getColumnFamilyDescriptors(
+ dbPath);
+
+ try (ManagedRocksDB db = ManagedRocksDB.open(dbPath, cfDescList, cfHandleList)) {
+ ColumnFamilyHandle transactionInfoCfh = RocksDBUtils.getColumnFamilyHandle(TRANSACTION_INFO_TABLE, cfHandleList);
+ if (transactionInfoCfh == null) {
+ throw new IllegalArgumentException(TRANSACTION_INFO_TABLE +
+ " is not in a column family in DB for the given path.");
+ }
+ TransactionInfo originalTransactionInfo =
+ RocksDBUtils.getValue(db, transactionInfoCfh, TRANSACTION_INFO_KEY, TransactionInfo.getCodec());
+
+ System.out.println("The original highest transaction Info was " +
originalTransactionInfo.getTermIndex());
+
+ TransactionInfo transactionInfo = TransactionInfo.valueOf(highestTransactionTerm, highestTransactionIndex);
+
+ byte[] transactionInfoBytes = TransactionInfo.getCodec().toPersistedFormat(transactionInfo);
+ db.get()
+ .put(transactionInfoCfh, StringCodec.get().toPersistedFormat(TRANSACTION_INFO_KEY), transactionInfoBytes);
+
+ System.out.println("The highest transaction info has been updated to: " +
+ RocksDBUtils.getValue(db, transactionInfoCfh, TRANSACTION_INFO_KEY,
+ TransactionInfo.getCodec()).getTermIndex());
+ } catch (RocksDBException exception) {
+ System.err.println("Failed to update the RocksDB for the given path: " +
dbPath);
+ System.err.println(
+ "Make sure that Ozone entity (OM) is not running for the give
database path and current host.");
+ throw new IOException("Failed to update RocksDB.", exception);
+ } finally {
+ IOUtils.closeQuietly(cfHandleList);
+ }
+
+ return null;
+ }
+
+ protected RDBRepair getParent() {
+ return parent;
+ }
+
+ @Override
+ public Class<?> getParentType() {
+ return RDBRepair.class;
+ }
+
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java
index ec5e2f8f93..d07fc13be8 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java
@@ -31,13 +31,13 @@ import org.kohsuke.MetaInfServices;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDBException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import picocli.CommandLine;
import picocli.CommandLine.Model.CommandSpec;
import java.io.IOException;
-import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
@@ -58,6 +58,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.SNAPSHOT_INFO_TABLE;
@MetaInfServices(SubcommandWithParent.class)
public class SnapshotRepair implements Callable<Void>, SubcommandWithParent {
+ protected static final Logger LOG = LoggerFactory.getLogger(SnapshotRepair.class);
+
@CommandLine.Spec
private static CommandSpec spec;
@@ -91,7 +93,7 @@ public class SnapshotRepair implements Callable<Void>, SubcommandWithParent {
List<ColumnFamilyDescriptor> cfDescList = RocksDBUtils.getColumnFamilyDescriptors(parent.getDbPath());
try (ManagedRocksDB db = ManagedRocksDB.open(parent.getDbPath(), cfDescList, cfHandleList)) {
- ColumnFamilyHandle snapshotInfoCfh = getSnapshotInfoCfh(cfHandleList);
+ ColumnFamilyHandle snapshotInfoCfh = RocksDBUtils.getColumnFamilyHandle(SNAPSHOT_INFO_TABLE, cfHandleList);
if (snapshotInfoCfh == null) {
System.err.println(SNAPSHOT_INFO_TABLE + " is not in a column family in DB for the given path.");
return null;
@@ -100,7 +102,9 @@ public class SnapshotRepair implements Callable<Void>, SubcommandWithParent {
String snapshotInfoTableKey =
SnapshotInfo.getTableKey(bucketUri.getValue().getVolumeName(),
bucketUri.getValue().getBucketName(), snapshotName);
- SnapshotInfo snapshotInfo = getSnapshotInfo(db, snapshotInfoCfh, snapshotInfoTableKey);
+ SnapshotInfo snapshotInfo = RocksDBUtils.getValue(db, snapshotInfoCfh, snapshotInfoTableKey,
+ SnapshotInfo.getCodec());
+
if (snapshotInfo == null) {
System.err.println(snapshotName + " does not exist for given bucketUri: " + OM_KEY_PREFIX +
bucketUri.getValue().getVolumeName() + OM_KEY_PREFIX + bucketUri.getValue().getBucketName());
@@ -146,13 +150,13 @@ public class SnapshotRepair implements Callable<Void>, SubcommandWithParent {
.put(snapshotInfoCfh, StringCodec.get().toPersistedFormat(snapshotInfoTableKey), snapshotInfoBytes);
System.out.println("Snapshot Info is updated to : " +
- getSnapshotInfo(db, snapshotInfoCfh, snapshotInfoTableKey));
+ RocksDBUtils.getValue(db, snapshotInfoCfh, snapshotInfoTableKey, SnapshotInfo.getCodec()));
}
} catch (RocksDBException exception) {
System.err.println("Failed to update the RocksDB for the given path: " +
parent.getDbPath());
System.err.println(
"Make sure that Ozone entity (OM, SCM or DN) is not running for the
give dbPath and current host.");
- System.err.println(exception);
+ LOG.error(exception.toString());
} finally {
IOUtils.closeQuietly(cfHandleList);
}
@@ -175,24 +179,6 @@ public class SnapshotRepair implements Callable<Void>, SubcommandWithParent {
return snapshotIdSet;
}
- private ColumnFamilyHandle getSnapshotInfoCfh(List<ColumnFamilyHandle> cfHandleList) throws RocksDBException {
- byte[] nameBytes = SNAPSHOT_INFO_TABLE.getBytes(StandardCharsets.UTF_8);
-
- for (ColumnFamilyHandle cf : cfHandleList) {
- if (Arrays.equals(cf.getName(), nameBytes)) {
- return cf;
- }
- }
-
- return null;
- }
-
- private SnapshotInfo getSnapshotInfo(ManagedRocksDB db, ColumnFamilyHandle snapshotInfoCfh, String snapshotInfoLKey)
- throws IOException, RocksDBException {
- byte[] bytes = db.get().get(snapshotInfoCfh, StringCodec.get().toPersistedFormat(snapshotInfoLKey));
- return bytes != null ? SnapshotInfo.getCodec().fromPersistedFormat(bytes) : null;
- }
-
@Override
public Class<?> getParentType() {
return RDBRepair.class;
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestTransactionInfoRepair.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestTransactionInfoRepair.java
new file mode 100644
index 0000000000..a581e1d29d
--- /dev/null
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestTransactionInfoRepair.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.repair;
+
+import org.apache.hadoop.hdds.utils.TransactionInfo;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
+import org.apache.hadoop.ozone.debug.RocksDBUtils;
+import org.apache.ozone.test.GenericTestUtils;
+import org.apache.ratis.server.protocol.TermIndex;
+import org.junit.jupiter.api.Test;
+import org.mockito.MockedStatic;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDB;
+import org.rocksdb.RocksDBException;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.util.function.Supplier;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.TRANSACTION_INFO_TABLE;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyList;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.mockStatic;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests TransactionInfoRepair.
+ */
+public class TestTransactionInfoRepair {
+
+
+ private static final String DB_PATH = "testDBPath";
+ private static final long TEST_TERM = 1;
+ private static final long TEST_INDEX = 1;
+
+ @Test
+ public void testUpdateTransactionInfoTableSuccessful() throws Exception {
+ ManagedRocksDB mdb = mockRockDB();
+ try (GenericTestUtils.SystemOutCapturer outCapturer = new GenericTestUtils.SystemOutCapturer()) {
+ testCommand(mdb, mock(ColumnFamilyHandle.class), () -> {
+ try {
+ return outCapturer.getOutput();
+ } catch (UnsupportedEncodingException e) {
+ throw new RuntimeException(e);
+ }
+ }, new String[]{String.format("The original highest transaction Info was (t:%s, i:%s)",
+ TEST_TERM, TEST_INDEX),
+ String.format("The highest transaction info has been updated to:
(t:%s, i:%s)",
+ TEST_TERM, TEST_INDEX)});
+ }
+ }
+
+ @Test
+ public void testCommandWhenTableNotInDBForGivenPath() throws Exception {
+ ManagedRocksDB mdb = mockRockDB();
+ IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
+ () -> testCommand(mdb, null, null, new String[]{""}));
+ assertThat(exception.getMessage()).contains(TRANSACTION_INFO_TABLE +
+ " is not in a column family in DB for the given path");
+ }
+
+ @Test
+ public void testCommandWhenFailToUpdateRocksDBForGivenPath() throws Exception {
+ ManagedRocksDB mdb = mockRockDB();
+ RocksDB rdb = mdb.get();
+
+ doThrow(RocksDBException.class).when(rdb)
+ .put(any(ColumnFamilyHandle.class), any(byte[].class), any(byte[].class));
+
+ IOException exception = assertThrows(IOException.class,
+ () -> testCommand(mdb, mock(ColumnFamilyHandle.class), null, new String[]{""}));
+ assertThat(exception.getMessage()).contains("Failed to update RocksDB.");
+ assertThat(exception.getCause()).isInstanceOf(RocksDBException.class);
+ }
+
+
+ private void testCommand(ManagedRocksDB mdb, ColumnFamilyHandle columnFamilyHandle, Supplier<String> capturer,
+ String[] messages) throws Exception {
+ try (MockedStatic<ManagedRocksDB> mocked = mockStatic(ManagedRocksDB.class);
+ MockedStatic<RocksDBUtils> mockUtil = mockStatic(RocksDBUtils.class)) {
+ mocked.when(() -> ManagedRocksDB.open(anyString(), anyList(), anyList())).thenReturn(mdb);
+ mockUtil.when(() -> RocksDBUtils.getColumnFamilyHandle(anyString(), anyList()))
+ .thenReturn(columnFamilyHandle);
+ mockUtil.when(() ->
+ RocksDBUtils.getValue(any(ManagedRocksDB.class), any(ColumnFamilyHandle.class), anyString(),
+ any(Codec.class))).thenReturn(mock(TransactionInfo.class));
+
+ mockTransactionInfo(mockUtil);
+
+ TransactionInfoRepair cmd = spy(TransactionInfoRepair.class);
+ RDBRepair rdbRepair = mock(RDBRepair.class);
+ when(rdbRepair.getDbPath()).thenReturn(DB_PATH);
+ when(cmd.getParent()).thenReturn(rdbRepair);
+ cmd.setHighestTransactionTerm(TEST_TERM);
+ cmd.setHighestTransactionIndex(TEST_INDEX);
+
+ cmd.call();
+ for (String message : messages) {
+ assertThat(capturer.get()).contains(message);
+ }
+ }
+ }
+
+ private void mockTransactionInfo(MockedStatic<RocksDBUtils> mockUtil) {
+ mockUtil.when(() ->
+ RocksDBUtils.getValue(any(ManagedRocksDB.class), any(ColumnFamilyHandle.class), anyString(),
+ any(Codec.class))).thenReturn(mock(TransactionInfo.class));
+
+ TransactionInfo transactionInfo2 = mock(TransactionInfo.class);
+ doReturn(TermIndex.valueOf(TEST_TERM, TEST_INDEX)).when(transactionInfo2).getTermIndex();
+ mockUtil.when(() ->
+ RocksDBUtils.getValue(any(ManagedRocksDB.class), any(ColumnFamilyHandle.class), anyString(),
+ any(Codec.class))).thenReturn(transactionInfo2);
+ }
+
+ private ManagedRocksDB mockRockDB() {
+ ManagedRocksDB db = mock(ManagedRocksDB.class);
+ RocksDB rocksDB = mock(RocksDB.class);
+ doReturn(rocksDB).when(db).get();
+ return db;
+ }
+
+}
diff --git a/hadoop-ozone/tools/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker b/hadoop-ozone/tools/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker
new file mode 100644
index 0000000000..3c9e1c8a69
--- /dev/null
+++ b/hadoop-ozone/tools/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+mock-maker-inline
\ No newline at end of file
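
The MockMaker resource added above is what lets the new unit test stub static methods: Mockito's mockStatic(...) only works when the inline mock maker is enabled. A short sketch of the pattern it unlocks, using names from TestTransactionInfoRepair (the stubbed return value here is arbitrary):

    import org.mockito.MockedStatic;
    import static org.mockito.Mockito.anyList;
    import static org.mockito.Mockito.anyString;
    import static org.mockito.Mockito.mockStatic;

    // Static calls are stubbed only inside the try-with-resources scope.
    try (MockedStatic<RocksDBUtils> mocked = mockStatic(RocksDBUtils.class)) {
      mocked.when(() -> RocksDBUtils.getColumnFamilyHandle(anyString(), anyList()))
          .thenReturn(null);
      // Code under test that calls RocksDBUtils.getColumnFamilyHandle(...) now receives null.
    }
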
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]