This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new cd0c55e4ff HDDS-9828. Do not use Files.createTempFile in tests (#5824)
cd0c55e4ff is described below
commit cd0c55e4ffa879b3e7cffe182237f0e1c824a7c0
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Wed Dec 20 08:47:59 2023 +0100
HDDS-9828. Do not use Files.createTempFile in tests (#5824)
---
.../container/keyvalue/helpers/TestChunkUtils.java | 188 ++++++++++-----------
.../hdds/security/symmetric/TestLocalKeyStore.java | 18 +-
.../hadoop/hdds/utils/TestNativeLibraryLoader.java | 12 +-
.../db/managed/TestManagedSSTDumpIterator.java | 11 +-
.../rocksdb/util/TestManagedSstFileReader.java | 15 +-
.../scm/ha/TestInterSCMGrpcProtocolService.java | 8 +-
.../hadoop/fs/ozone/TestOzoneFsSnapshot.java | 29 ++--
.../hdds/scm/TestSCMDbCheckpointServlet.java | 120 ++++++-------
.../hadoop/ozone/om/TestOMDbCheckpointServlet.java | 35 ++--
.../hadoop/ozone/om/TestOMRatisSnapshots.java | 8 +-
.../om/ratis/TestOzoneManagerRatisRequest.java | 5 +-
.../ozone/om/service/TestRangerBGSyncService.java | 3 +-
.../ozone/om/snapshot/TestSnapshotDiffManager.java | 11 +-
13 files changed, 234 insertions(+), 229 deletions(-)
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
index 037de863c0..bda8b7d5a9 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
@@ -28,7 +28,6 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Arrays;
-import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.ExecutorService;
@@ -55,22 +54,25 @@ import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Tests for {@link ChunkUtils}.
*/
-public class TestChunkUtils {
+class TestChunkUtils {
private static final Logger LOG =
LoggerFactory.getLogger(TestChunkUtils.class);
- private static final String PREFIX = TestChunkUtils.class.getSimpleName();
private static final int BUFFER_CAPACITY = 1 << 20;
private static final int MAPPED_BUFFER_THRESHOLD = 32 << 10;
private static final Random RANDOM = new Random();
+ @TempDir
+ private Path tempDir;
+
static ChunkBuffer readData(File file, long off, long len)
throws StorageContainerException {
LOG.info("off={}, len={}", off, len);
@@ -79,130 +81,112 @@ public class TestChunkUtils {
}
@Test
- public void concurrentReadOfSameFile() throws Exception {
+ void concurrentReadOfSameFile() throws Exception {
String s = "Hello World";
byte[] array = s.getBytes(UTF_8);
ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(array));
- Path tempFile = Files.createTempFile(PREFIX, "concurrent");
- try {
- int len = data.limit();
- int offset = 0;
- File file = tempFile.toFile();
- ChunkUtils.writeData(file, data, offset, len, null, true);
- int threads = 10;
- ExecutorService executor = new ThreadPoolExecutor(threads, threads,
- 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
- AtomicInteger processed = new AtomicInteger();
- AtomicBoolean failed = new AtomicBoolean();
- for (int i = 0; i < threads; i++) {
- final int threadNumber = i;
- executor.execute(() -> {
- try {
- final ChunkBuffer chunk = readData(file, offset, len);
- // There should be only one element in readBuffers
- final List<ByteBuffer> buffers = chunk.asByteBufferList();
- Assertions.assertEquals(1, buffers.size());
- final ByteBuffer readBuffer = buffers.get(0);
-
- LOG.info("Read data ({}): {}", threadNumber,
- new String(readBuffer.array(), UTF_8));
- if (!Arrays.equals(array, readBuffer.array())) {
- failed.set(true);
- }
- assertEquals(len, readBuffer.remaining());
- } catch (Exception e) {
- LOG.error("Failed to read data ({})", threadNumber, e);
+ Path tempFile = tempDir.resolve("concurrent");
+ int len = data.limit();
+ int offset = 0;
+ File file = tempFile.toFile();
+ ChunkUtils.writeData(file, data, offset, len, null, true);
+ int threads = 10;
+ ExecutorService executor = new ThreadPoolExecutor(threads, threads,
+ 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
+ AtomicInteger processed = new AtomicInteger();
+ AtomicBoolean failed = new AtomicBoolean();
+ for (int i = 0; i < threads; i++) {
+ final int threadNumber = i;
+ executor.execute(() -> {
+ try {
+ final ChunkBuffer chunk = readData(file, offset, len);
+ // There should be only one element in readBuffers
+ final List<ByteBuffer> buffers = chunk.asByteBufferList();
+ Assertions.assertEquals(1, buffers.size());
+ final ByteBuffer readBuffer = buffers.get(0);
+
+ LOG.info("Read data ({}): {}", threadNumber,
+ new String(readBuffer.array(), UTF_8));
+ if (!Arrays.equals(array, readBuffer.array())) {
failed.set(true);
}
- processed.incrementAndGet();
- });
- }
- try {
- GenericTestUtils.waitFor(() -> processed.get() == threads,
- 100, (int) TimeUnit.SECONDS.toMillis(5));
- } finally {
- executor.shutdownNow();
- }
- assertFalse(failed.get());
+ assertEquals(len, readBuffer.remaining());
+ } catch (Exception e) {
+ LOG.error("Failed to read data ({})", threadNumber, e);
+ failed.set(true);
+ }
+ processed.incrementAndGet();
+ });
+ }
+ try {
+ GenericTestUtils.waitFor(() -> processed.get() == threads,
+ 100, (int) TimeUnit.SECONDS.toMillis(5));
} finally {
- Files.deleteIfExists(tempFile);
+ executor.shutdownNow();
}
+ assertFalse(failed.get());
}
@Test
- public void concurrentProcessing() throws Exception {
+ void concurrentProcessing() throws Exception {
final int perThreadWait = 1000;
final int maxTotalWait = 5000;
int threads = 20;
- List<Path> paths = new LinkedList<>();
+ ExecutorService executor = new ThreadPoolExecutor(threads, threads,
+ 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
+ AtomicInteger processed = new AtomicInteger();
+ for (int i = 0; i < threads; i++) {
+ Path path = tempDir.resolve(String.valueOf(i));
+ executor.execute(() -> {
+ try {
+ ChunkUtils.processFileExclusively(path, () -> {
+ try {
+ Thread.sleep(perThreadWait);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ processed.incrementAndGet();
+ return null;
+ });
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ });
+ }
try {
- ExecutorService executor = new ThreadPoolExecutor(threads, threads,
- 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
- AtomicInteger processed = new AtomicInteger();
- for (int i = 0; i < threads; i++) {
- Path path = Files.createTempFile(PREFIX, String.valueOf(i));
- paths.add(path);
- executor.execute(() -> {
- try {
- ChunkUtils.processFileExclusively(path, () -> {
- try {
- Thread.sleep(perThreadWait);
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- processed.incrementAndGet();
- return null;
- });
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- });
- }
- try {
- GenericTestUtils.waitFor(() -> processed.get() == threads,
- 100, maxTotalWait);
- } finally {
- executor.shutdownNow();
- }
+ GenericTestUtils.waitFor(() -> processed.get() == threads,
+ 100, maxTotalWait);
} finally {
- for (Path path : paths) {
- FileUtils.deleteQuietly(path.toFile());
- }
+ executor.shutdownNow();
}
}
@Test
- public void serialRead() throws Exception {
+ void serialRead() throws IOException {
String s = "Hello World";
byte[] array = s.getBytes(UTF_8);
ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(array));
- Path tempFile = Files.createTempFile(PREFIX, "serial");
- try {
- File file = tempFile.toFile();
- int len = data.limit();
- int offset = 0;
- ChunkUtils.writeData(file, data, offset, len, null, true);
-
- final ChunkBuffer chunk = readData(file, offset, len);
- // There should be only one element in readBuffers
- final List<ByteBuffer> buffers = chunk.asByteBufferList();
- Assertions.assertEquals(1, buffers.size());
- final ByteBuffer readBuffer = buffers.get(0);
-
- assertArrayEquals(array, readBuffer.array());
- assertEquals(len, readBuffer.remaining());
- } catch (Exception e) {
- LOG.error("Failed to read data", e);
- } finally {
- Files.deleteIfExists(tempFile);
- }
+ Path tempFile = tempDir.resolve("serial");
+ File file = tempFile.toFile();
+ int len = data.limit();
+ int offset = 0;
+ ChunkUtils.writeData(file, data, offset, len, null, true);
+
+ final ChunkBuffer chunk = readData(file, offset, len);
+ // There should be only one element in readBuffers
+ final List<ByteBuffer> buffers = chunk.asByteBufferList();
+ Assertions.assertEquals(1, buffers.size());
+ final ByteBuffer readBuffer = buffers.get(0);
+
+ assertArrayEquals(array, readBuffer.array());
+ assertEquals(len, readBuffer.remaining());
}
@Test
- public void validateChunkForOverwrite() throws IOException {
+ void validateChunkForOverwrite() throws IOException {
- Path tempFile = Files.createTempFile(PREFIX, "overwrite");
+ Path tempFile = tempDir.resolve("overwrite");
FileUtils.write(tempFile.toFile(), "test", UTF_8);
Assertions.assertTrue(
@@ -226,7 +210,7 @@ public class TestChunkUtils {
}
@Test
- public void readMissingFile() {
+ void readMissingFile() {
// given
int len = 123;
int offset = 0;
@@ -242,7 +226,7 @@ public class TestChunkUtils {
}
@Test
- public void testReadData() throws Exception {
+ void testReadData() throws Exception {
final File dir = GenericTestUtils.getTestDir("testReadData");
try {
Assertions.assertTrue(dir.mkdirs());
diff --git
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestLocalKeyStore.java
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestLocalKeyStore.java
index b5c717399d..393a0c5f01 100644
---
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestLocalKeyStore.java
+++
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestLocalKeyStore.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdds.security.symmetric;
import com.google.common.collect.ImmutableList;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
@@ -53,17 +54,20 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Test cases for {@link LocalSecretKeyStore}.
*/
-public class TestLocalKeyStore {
+class TestLocalKeyStore {
private SecretKeyStore secretKeyStore;
private Path testSecretFile;
+ @TempDir
+ private Path tempDir;
+
@BeforeEach
- private void setup() throws Exception {
- testSecretFile = Files.createTempFile("key-strore-test", ".json");
+ void setup() throws IOException {
+ testSecretFile = Files.createFile(tempDir.resolve("key-store-test.json"));
secretKeyStore = new LocalSecretKeyStore(testSecretFile);
}
- public static Stream<Arguments> saveAndLoadTestCases() throws Exception {
+ static Stream<Arguments> saveAndLoadTestCases() throws Exception {
return Stream.of(
// empty
Arguments.of(ImmutableList.of()),
@@ -81,7 +85,7 @@ public class TestLocalKeyStore {
@ParameterizedTest
@MethodSource("saveAndLoadTestCases")
- public void testSaveAndLoad(List<ManagedSecretKey> keys) throws IOException {
+ void testSaveAndLoad(List<ManagedSecretKey> keys) throws IOException {
secretKeyStore.save(keys);
// Ensure the intended file exists and is readable and writeable to
@@ -100,7 +104,7 @@ public class TestLocalKeyStore {
* Verifies that secret keys are overwritten by subsequent writes.
*/
@Test
- public void testOverwrite() throws Exception {
+ void testOverwrite() throws Exception {
List<ManagedSecretKey> initialKeys =
newArrayList(generateKey("HmacSHA256"));
secretKeyStore.save(initialKeys);
@@ -123,7 +127,7 @@ public class TestLocalKeyStore {
* test fails, instead, analyse the backward-compatibility of the change.
*/
@Test
- public void testLoadExistingFile() throws Exception {
+ void testLoadExistingFile() throws Exception {
// copy test file content to the backing file.
String testJson = "[\n" +
" {\n" +
diff --git
a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java
b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java
index 472954f2bd..24218c5687 100644
---
a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java
+++
b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdds.utils;
import org.apache.ozone.test.tag.Native;
import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;
import org.mockito.MockedStatic;
@@ -27,7 +28,7 @@ import org.mockito.Mockito;
import java.io.ByteArrayInputStream;
import java.io.File;
-import java.io.IOException;
+import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Stream;
@@ -42,10 +43,11 @@ import static org.mockito.ArgumentMatchers.same;
*/
public class TestNativeLibraryLoader {
- private static Stream<String> nativeLibraryDirectoryLocations()
- throws IOException {
- return Stream.of("", File.createTempFile("prefix", "suffix")
- .getParentFile().getAbsolutePath(), null);
+ @TempDir
+ private static Path tempDir;
+
+ private static Stream<String> nativeLibraryDirectoryLocations() {
+ return Stream.of("", tempDir.toAbsolutePath().toString(), null);
}
@Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)
diff --git
a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java
b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java
index 99d2a6ced5..505d68d941 100644
---
a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java
+++
b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java
@@ -28,6 +28,7 @@ import org.apache.ozone.test.tag.Unhealthy;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.Named;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
@@ -40,6 +41,8 @@ import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -61,10 +64,12 @@ import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME;
*/
class TestManagedSSTDumpIterator {
+ @TempDir
+ private Path tempDir;
+
private File createSSTFileWithKeys(
TreeMap<Pair<String, Integer>, String> keys) throws Exception {
- File file = File.createTempFile("tmp_sst_file", ".sst");
- file.deleteOnExit();
+ File file = Files.createFile(tempDir.resolve("tmp_sst_file.sst")).toFile();
try (ManagedEnvOptions envOptions = new ManagedEnvOptions();
ManagedOptions managedOptions = new ManagedOptions();
ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter(
@@ -252,7 +257,7 @@ class TestManagedSSTDumpIterator {
ByteArrayInputStream byteArrayInputStream =
new ByteArrayInputStream(inputBytes);
ManagedSSTDumpTool tool = Mockito.mock(ManagedSSTDumpTool.class);
- File file = File.createTempFile("tmp", ".sst");
+ File file = Files.createFile(tempDir.resolve("tmp_file.sst")).toFile();
Future future = Mockito.mock(Future.class);
Mockito.when(future.isDone()).thenReturn(false);
Mockito.when(future.get()).thenReturn(0);
diff --git
a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java
b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java
index 8c897b01d2..588e54ad8b 100644
---
a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java
+++
b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java
@@ -31,6 +31,7 @@ import org.apache.ozone.test.tag.Native;
import org.apache.ozone.test.tag.Unhealthy;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import org.rocksdb.RocksDBException;
@@ -47,6 +48,7 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
@@ -58,6 +60,11 @@ import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME;
*/
class TestManagedSstFileReader {
+ @TempDir
+ private File tempDir;
+
+ private final AtomicInteger fileCounter = new AtomicInteger();
+
// Key prefix containing all characters, to check if all characters can be
// written & read from rocksdb through SSTDumptool
private static final String KEY_PREFIX = IntStream.range(0, 256).boxed()
@@ -65,9 +72,8 @@ class TestManagedSstFileReader {
.collect(Collectors.joining(""));
private String createRandomSSTFile(TreeMap<String, Integer> keys)
- throws IOException, RocksDBException {
- File file = File.createTempFile("tmp_sst_file", ".sst");
- file.deleteOnExit();
+ throws RocksDBException {
+ File file = new File(tempDir, "tmp_sst_file" +
fileCounter.incrementAndGet() + ".sst");
try (ManagedOptions managedOptions = new ManagedOptions();
ManagedEnvOptions managedEnvOptions = new ManagedEnvOptions();
@@ -84,6 +90,7 @@ class TestManagedSstFileReader {
}
sstFileWriter.finish();
}
+ Assertions.assertTrue(file.exists());
return file.getAbsolutePath();
}
@@ -142,7 +149,7 @@ class TestManagedSstFileReader {
new ManagedSstFileReader(files).getKeyStream(
lowerBound.orElse(null), upperBound.orElse(null))) {
keyStream.forEach(key -> {
- Assertions.assertEquals(keysInBoundary.get(key), 1);
+ Assertions.assertEquals(1, keysInBoundary.get(key));
Assertions.assertNotNull(keysInBoundary.remove(key));
});
keysInBoundary.values()
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java
index f966f1b65b..95b6abc04a 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java
@@ -72,7 +72,7 @@ import static org.mockito.Mockito.when;
*
* @see <a href="https://issues.apache.org/jira/browse/HDDS-8901">HDDS-8901</a>
*/
-public class TestInterSCMGrpcProtocolService {
+class TestInterSCMGrpcProtocolService {
private static final String CP_FILE_NAME = "cpFile";
private static final String CP_CONTENTS = "Hello world!";
@@ -89,7 +89,7 @@ public class TestInterSCMGrpcProtocolService {
private Path temp;
@Test
- public void testMTLSOnInterScmGrpcProtocolServiceAccess() throws Exception {
+ void testMTLSOnInterScmGrpcProtocolServiceAccess() throws Exception {
int port = new Random().nextInt(1000) + 45000;
OzoneConfiguration conf = setupConfiguration(port);
SCMCertificateClient
@@ -100,7 +100,7 @@ public class TestInterSCMGrpcProtocolService {
InterSCMGrpcClient client =
new InterSCMGrpcClient("localhost", port, conf, scmCertClient);
- Path tempFile = Files.createTempFile(temp, CP_FILE_NAME, "");
+ Path tempFile = temp.resolve(CP_FILE_NAME);
CompletableFuture<Path> res = client.download(tempFile);
Path downloaded = res.get();
@@ -182,7 +182,7 @@ public class TestInterSCMGrpcProtocolService {
}
private DBCheckpoint checkPoint() throws IOException {
- Path checkPointLocation = Files.createTempDirectory(temp, "cpDir");
+ Path checkPointLocation = Files.createDirectory(temp.resolve("cpDir"));
Path cpFile = Paths.get(checkPointLocation.toString(), CP_FILE_NAME);
Files.write(cpFile, CP_CONTENTS.getBytes(UTF_8));
DBCheckpoint checkpoint = mock(DBCheckpoint.class);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java
index 8b1b2adfdf..90b5daabad 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
@@ -43,6 +43,7 @@ import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
@@ -60,14 +61,14 @@ import static
org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath;
* Setting a timeout for every test method to 300 seconds.
*/
@Timeout(value = 300)
-public class TestOzoneFsSnapshot {
+class TestOzoneFsSnapshot {
private static MiniOzoneCluster cluster;
private static final String OM_SERVICE_ID = "om-service-test1";
private static OzoneManager ozoneManager;
private static OzoneFsShell shell;
private static final String VOLUME =
- "vol-" + RandomStringUtils.randomNumeric(5);;
+ "vol-" + RandomStringUtils.randomNumeric(5);
private static final String BUCKET =
"buck-" + RandomStringUtils.randomNumeric(5);
private static final String KEY =
@@ -80,7 +81,7 @@ public class TestOzoneFsSnapshot {
BUCKET_PATH + OM_KEY_PREFIX + KEY;
@BeforeAll
- public static void initClass() throws Exception {
+ static void initClass() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
// Enable filesystem snapshot feature for the test regardless of the
default
conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true);
@@ -106,7 +107,7 @@ public class TestOzoneFsSnapshot {
}
@AfterAll
- public static void shutdown() throws IOException {
+ static void shutdown() throws IOException {
shell.close();
if (cluster != null) {
cluster.shutdown();
@@ -129,7 +130,7 @@ public class TestOzoneFsSnapshot {
}
@Test
- public void testCreateSnapshotDuplicateName() throws Exception {
+ void testCreateSnapshotDuplicateName() throws Exception {
String snapshotName = "snap-" + RandomStringUtils.randomNumeric(5);
int res = ToolRunner.run(shell,
@@ -144,7 +145,7 @@ public class TestOzoneFsSnapshot {
}
@Test
- public void testCreateSnapshotWithSubDirInput() throws Exception {
+ void testCreateSnapshotWithSubDirInput() throws Exception {
// Test that:
// $ ozone fs -createSnapshot ofs://om/vol1/buck2/dir3/ snap1
//
@@ -185,7 +186,7 @@ public class TestOzoneFsSnapshot {
@ValueSource(strings = {"snap-1",
"snap75795657617173401188448010125899089001363595171500499231286",
"sn1"})
- public void testCreateSnapshotSuccess(String snapshotName)
+ void testCreateSnapshotSuccess(String snapshotName)
throws Exception {
int res = ToolRunner.run(shell,
new String[]{"-createSnapshot", BUCKET_PATH, snapshotName});
@@ -241,7 +242,7 @@ public class TestOzoneFsSnapshot {
@ParameterizedTest(name = "{0}")
@MethodSource("createSnapshotFailureScenarios")
- public void testCreateSnapshotFailure(String description,
+ void testCreateSnapshotFailure(String description,
String paramBucketPath,
String snapshotName,
String expectedMessage,
@@ -258,12 +259,12 @@ public class TestOzoneFsSnapshot {
* Test list snapshot and snapshot keys with "ozone fs -ls".
*/
@Test
- public void testFsLsSnapshot() throws Exception {
+ void testFsLsSnapshot(@TempDir Path tempDir) throws Exception {
String newKey = "key-" + RandomStringUtils.randomNumeric(5);
String newKeyPath = BUCKET_PATH + OM_KEY_PREFIX + newKey;
// Write a non-zero byte key.
- Path tempFile = Files.createTempFile("testFsLsSnapshot-", "any-suffix");
+ Path tempFile = tempDir.resolve("testFsLsSnapshot-any-suffix");
FileUtils.write(tempFile.toFile(), "random data", UTF_8);
execShellCommandAndGetOutput(0,
new String[]{"-put", tempFile.toString(), newKeyPath});
@@ -294,7 +295,7 @@ public class TestOzoneFsSnapshot {
}
@Test
- public void testDeleteBucketWithSnapshot() throws Exception {
+ void testDeleteBucketWithSnapshot() throws Exception {
String snapshotName = createSnapshot();
String snapshotPath = BUCKET_WITH_SNAPSHOT_INDICATOR_PATH
@@ -326,7 +327,7 @@ public class TestOzoneFsSnapshot {
}
@Test
- public void testSnapshotDeleteSuccess() throws Exception {
+ void testSnapshotDeleteSuccess() throws Exception {
String snapshotName = createSnapshot();
// Delete the created snapshot
int res = ToolRunner.run(shell,
@@ -372,7 +373,7 @@ public class TestOzoneFsSnapshot {
@ParameterizedTest(name = "{0}")
@MethodSource("deleteSnapshotFailureScenarios")
- public void testSnapshotDeleteFailure(String description,
+ void testSnapshotDeleteFailure(String description,
String paramBucketPath,
String snapshotName,
String expectedMessage,
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
index b180b22475..a8a8fba852 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
@@ -25,11 +25,12 @@ import javax.servlet.WriteListener;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.ByteArrayInputStream;
-import java.io.File;
-import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
+import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@@ -44,7 +45,6 @@ import org.apache.hadoop.hdds.utils.DBCheckpointServlet;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.commons.io.FileUtils;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static org.apache.hadoop.ozone.OzoneConsts.MULTIPART_FORM_DATA_BOUNDARY;
import static
org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
@@ -54,6 +54,7 @@ import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
@@ -154,67 +155,66 @@ public class TestSCMDbCheckpointServlet {
@ParameterizedTest
@MethodSource("getHttpMethods")
- public void testEndpoint(String httpMethod)
+ void testEndpoint(String httpMethod, @TempDir Path tempDir)
throws ServletException, IOException, InterruptedException {
this.method = httpMethod;
- File tempFile = null;
- try {
- List<String> toExcludeList = new ArrayList<>();
- toExcludeList.add("sstFile1.sst");
- toExcludeList.add("sstFile2.sst");
-
- setupHttpMethod(toExcludeList);
-
- doNothing().when(responseMock).setContentType("application/x-tgz");
- doNothing().when(responseMock).setHeader(Mockito.anyString(),
- Mockito.anyString());
-
- tempFile = File.createTempFile("testEndpoint_" + System
- .currentTimeMillis(), ".tar");
-
- FileOutputStream fileOutputStream = new FileOutputStream(tempFile);
- when(responseMock.getOutputStream()).thenReturn(
- new ServletOutputStream() {
- @Override
- public boolean isReady() {
- return true;
- }
-
- @Override
- public void setWriteListener(WriteListener writeListener) {
- }
-
- @Override
- public void write(int b) throws IOException {
- fileOutputStream.write(b);
- }
- });
-
- when(scmDbCheckpointServletMock.getBootstrapStateLock()).thenReturn(
- new DBCheckpointServlet.Lock());
- scmDbCheckpointServletMock.init();
- long initialCheckpointCount =
- scmMetrics.getDBCheckpointMetrics().getNumCheckpoints();
-
- doEndpoint();
-
- Assertions.assertTrue(tempFile.length() > 0);
- Assertions.assertTrue(
- scmMetrics.getDBCheckpointMetrics().
- getLastCheckpointCreationTimeTaken() > 0);
- Assertions.assertTrue(
- scmMetrics.getDBCheckpointMetrics().
- getLastCheckpointStreamingTimeTaken() > 0);
- Assertions.assertTrue(scmMetrics.getDBCheckpointMetrics().
- getNumCheckpoints() > initialCheckpointCount);
-
- Mockito.verify(scmDbCheckpointServletMock).writeDbDataToStream(any(),
- any(), any(), eq(toExcludeList), any(), any());
- } finally {
- FileUtils.deleteQuietly(tempFile);
- }
+ List<String> toExcludeList = new ArrayList<>();
+ toExcludeList.add("sstFile1.sst");
+ toExcludeList.add("sstFile2.sst");
+
+ setupHttpMethod(toExcludeList);
+
+ doNothing().when(responseMock).setContentType("application/x-tgz");
+ doNothing().when(responseMock).setHeader(Mockito.anyString(),
+ Mockito.anyString());
+
+ final Path outputPath = tempDir.resolve("testEndpoint.tar");
+ when(responseMock.getOutputStream()).thenReturn(
+ new ServletOutputStream() {
+ private final OutputStream fileOutputStream =
Files.newOutputStream(outputPath);
+
+ @Override
+ public boolean isReady() {
+ return true;
+ }
+ @Override
+ public void setWriteListener(WriteListener writeListener) {
+ }
+
+ @Override
+ public void close() throws IOException {
+ fileOutputStream.close();
+ super.close();
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ fileOutputStream.write(b);
+ }
+ });
+
+ when(scmDbCheckpointServletMock.getBootstrapStateLock()).thenReturn(
+ new DBCheckpointServlet.Lock());
+ scmDbCheckpointServletMock.init();
+ long initialCheckpointCount =
+ scmMetrics.getDBCheckpointMetrics().getNumCheckpoints();
+
+ doEndpoint();
+
+ Assertions.assertTrue(outputPath.toFile().length() > 0);
+ Assertions.assertTrue(
+ scmMetrics.getDBCheckpointMetrics().
+ getLastCheckpointCreationTimeTaken() > 0);
+ Assertions.assertTrue(
+ scmMetrics.getDBCheckpointMetrics().
+ getLastCheckpointStreamingTimeTaken() > 0);
+ Assertions.assertTrue(scmMetrics.getDBCheckpointMetrics().
+ getNumCheckpoints() > initialCheckpointCount);
+
+ Mockito.verify(scmDbCheckpointServletMock).writeDbDataToStream(any(),
+ any(), any(), eq(toExcludeList), any(), any());
}
@Test
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
index d4f1f77787..a835944eef 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
@@ -29,6 +29,7 @@ import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
+import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
@@ -67,7 +68,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.commons.io.FileUtils;
import static
org.apache.hadoop.hdds.recon.ReconConfig.ConfigStrings.OZONE_RECON_KERBEROS_PRINCIPAL_KEY;
import static
org.apache.hadoop.hdds.utils.HddsServerUtil.OZONE_RATIS_SNAPSHOT_COMPLETE_FLAG_NAME;
@@ -140,9 +140,10 @@ public class TestOMDbCheckpointServlet {
private Path compactionDirPath;
private DBCheckpoint dbCheckpoint;
private String method;
- private File folder;
+ @TempDir
+ private Path folder;
private static final String FABRICATED_FILE_NAME = "fabricatedFile.sst";
- private FileOutputStream fileOutputStream;
+
/**
* Create a MiniDFSCluster for testing.
* <p>
@@ -151,16 +152,15 @@ public class TestOMDbCheckpointServlet {
* @throws Exception
*/
@BeforeEach
- public void init(@TempDir File tempDir) throws Exception {
- folder = tempDir;
+ void init() throws Exception {
conf = new OzoneConfiguration();
- tempFile = File.createTempFile("temp_" + System
- .currentTimeMillis(), ".tar");
-
- fileOutputStream = new FileOutputStream(tempFile);
+ final Path tempPath = folder.resolve("temp.tar");
+ tempFile = tempPath.toFile();
servletOutputStream = new ServletOutputStream() {
+ private final OutputStream fileOutputStream =
Files.newOutputStream(tempPath);
+
@Override
public boolean isReady() {
return true;
@@ -170,6 +170,12 @@ public class TestOMDbCheckpointServlet {
public void setWriteListener(WriteListener writeListener) {
}
+ @Override
+ public void close() throws IOException {
+ fileOutputStream.close();
+ super.close();
+ }
+
@Override
public void write(int b) throws IOException {
fileOutputStream.write(b);
@@ -185,7 +191,6 @@ public class TestOMDbCheckpointServlet {
if (cluster != null) {
cluster.shutdown();
}
- FileUtils.deleteQuietly(tempFile);
}
private void setupCluster() throws Exception {
@@ -458,7 +463,7 @@ public class TestOMDbCheckpointServlet {
dbCheckpoint = realCheckpoint.get();
// Untar the file into a temp folder to be examined.
- String testDirName = folder.getAbsolutePath();
+ String testDirName = folder.resolve("testDir").toString();
int testDirLength = testDirName.length() + 1;
String newDbDirName = testDirName + OM_KEY_PREFIX + OM_DB_NAME;
int newDbDirLength = newDbDirName.length() + 1;
@@ -556,14 +561,14 @@ public class TestOMDbCheckpointServlet {
.thenReturn(null);
// Get the tarball.
- Path tmpdir = Files.createTempDirectory("bootstrapData");
+ Path tmpdir = folder.resolve("bootstrapData");
try (FileOutputStream fileOutputStream = new FileOutputStream(tempFile)) {
omDbCheckpointServletMock.writeDbDataToStream(dbCheckpoint, requestMock,
fileOutputStream, new ArrayList<>(), new ArrayList<>(), tmpdir);
}
// Untar the file into a temp folder to be examined.
- String testDirName = folder.getAbsolutePath();
+ String testDirName = folder.resolve("testDir").toString();
int testDirLength = testDirName.length() + 1;
FileUtil.unTar(tempFile, new File(testDirName));
@@ -603,14 +608,14 @@ public class TestOMDbCheckpointServlet {
.thenReturn(null);
// Get the tarball.
- Path tmpdir = Files.createTempDirectory("bootstrapData");
+ Path tmpdir = folder.resolve("bootstrapData");
try (FileOutputStream fileOutputStream = new FileOutputStream(tempFile)) {
omDbCheckpointServletMock.writeDbDataToStream(dbCheckpoint, requestMock,
fileOutputStream, toExcludeList, excludedList, tmpdir);
}
// Untar the file into a temp folder to be examined.
- String testDirName = folder.getAbsolutePath();
+ String testDirName = folder.resolve("testDir").toString();
int testDirLength = testDirName.length() + 1;
FileUtil.unTar(tempFile, new File(testDirName));
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
index cd932f6efd..093f1107b5 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
@@ -202,7 +202,7 @@ public class TestOMRatisSnapshots {
@ValueSource(ints = {100})
// tried up to 1000 snapshots and this test works, but some of the
// timeouts have to be increased.
- public void testInstallSnapshot(int numSnapshotsToCreate) throws Exception {
+ void testInstallSnapshot(int numSnapshotsToCreate, @TempDir Path tempDir)
throws Exception {
// Get the leader OM
String leaderOMNodeId = OmFailoverProxyUtil
.getFailoverProxyProvider(objectStore.getClientProxy())
@@ -221,7 +221,7 @@ public class TestOMRatisSnapshots {
FaultInjector faultInjector =
new SnapshotMaxSizeInjector(leaderOM,
followerOM.getOmSnapshotProvider().getSnapshotDir(),
- sstSetList);
+ sstSetList, tempDir);
followerOM.getOmSnapshotProvider().setInjector(faultInjector);
// Create some snapshots, each with new keys
@@ -1186,11 +1186,11 @@ public class TestOMRatisSnapshots {
private final List<Set<String>> sstSetList;
private final Path tempDir;
SnapshotMaxSizeInjector(OzoneManager om, File snapshotDir,
- List<Set<String>> sstSetList) throws IOException {
+ List<Set<String>> sstSetList, Path tempDir) {
this.om = om;
this.snapshotDir = snapshotDir;
this.sstSetList = sstSetList;
- this.tempDir = Files.createTempDirectory("tmpDirPrefix");
+ this.tempDir = tempDir;
init();
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java
index e5d9605711..d25cdf298e 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java
@@ -39,7 +39,6 @@ import org.junit.jupiter.api.io.TempDir;
import org.mockito.Mockito;
import java.io.IOException;
-import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
@@ -65,7 +64,7 @@ public class TestOzoneManagerRatisRequest {
public void testRequestWithNonExistentBucket() throws Exception {
ozoneManager = Mockito.mock(OzoneManager.class);
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
- Files.createTempDirectory(folder, "om").toString());
+ folder.resolve("om").toAbsolutePath().toString());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration,
ozoneManager);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
@@ -106,7 +105,7 @@ public class TestOzoneManagerRatisRequest {
ozoneManager = Mockito.mock(OzoneManager.class);
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
- Files.createTempDirectory(folder, "om").toString());
+ folder.resolve("om").toAbsolutePath().toString());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration,
ozoneManager);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
index 3b70d8af1a..08358054fc 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
@@ -59,7 +59,6 @@ import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
import java.io.IOException;
-import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
@@ -188,7 +187,7 @@ public class TestRangerBGSyncService {
omMetrics = OMMetrics.create();
conf.set(OMConfigKeys.OZONE_OM_DB_DIRS,
- Files.createTempDirectory(folder.toAbsolutePath(), "om").toString());
+ folder.resolve("om").toAbsolutePath().toString());
// No need to conf.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, ...) here
// as we did the trick earlier with mockito.
omMetadataManager = new OmMetadataManagerImpl(conf, ozoneManager);
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
index 5229ea46fb..28af68e253 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
@@ -101,7 +101,6 @@ import org.rocksdb.RocksIterator;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
-import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -207,6 +206,8 @@ public class TestSnapshotDiffManager {
private final OMMetrics omMetrics = OMMetrics.create();
@TempDir
private File dbDir;
+ @TempDir
+ private File snapDiffDir;
@Mock
private RocksDBCheckpointDiffer differ;
@Mock
@@ -431,7 +432,7 @@ public class TestSnapshotDiffManager {
UUID snap1 = UUID.randomUUID();
UUID snap2 = UUID.randomUUID();
- String diffDir = Files.createTempDirectory("snapdiff_dir").toString();
+ String diffDir = snapDiffDir.getAbsolutePath();
Set<String> randomStrings = IntStream.range(0, numberOfFiles)
.mapToObj(i -> RandomStringUtils.randomAlphabetic(10))
.collect(Collectors.toSet());
@@ -526,8 +527,7 @@ public class TestSnapshotDiffManager {
toSnapshotInfo,
false,
Collections.emptyMap(),
- Files.createTempDirectory("snapdiff_dir").toAbsolutePath()
- .toString());
+ snapDiffDir.getAbsolutePath());
assertEquals(deltaStrings, deltaFiles);
}
}
@@ -591,8 +591,7 @@ public class TestSnapshotDiffManager {
toSnapshotInfo,
false,
Collections.emptyMap(),
- Files.createTempDirectory("snapdiff_dir").toAbsolutePath()
- .toString());
+ snapDiffDir.getAbsolutePath());
assertEquals(deltaStrings, deltaFiles);
rcFromSnapshot.close();
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]