This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 71d4f25053 HDDS-10060. Restrict awaitility to test scope (#5923)
71d4f25053 is described below
commit 71d4f25053314bdd86324701eb1cfdb61a94eedf
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Fri Jan 5 23:07:58 2024 +0100
HDDS-10060. Restrict awaitility to test scope (#5923)
---
.../org/apache/hadoop/hdds/ratis/RatisHelper.java | 35 ++++++++++++++++++
hadoop-hdds/framework/pom.xml | 4 ---
.../hadoop/hdds/utils/db/RDBCheckpointUtils.java | 23 +++---------
hadoop-hdds/managed-rocksdb/pom.xml | 4 ---
.../utils/db/managed/ManagedRocksObjectUtils.java | 42 ++++------------------
hadoop-ozone/dist/src/main/license/bin/LICENSE.txt | 2 --
hadoop-ozone/dist/src/main/license/jar-report.txt | 2 --
hadoop-ozone/integration-test/pom.xml | 9 ++---
hadoop-ozone/ozone-manager/pom.xml | 5 +++
pom.xml | 6 ----
10 files changed, 56 insertions(+), 76 deletions(-)
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
index b1d3e98e99..cb7f6f8a3b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdds.ratis;
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
@@ -27,6 +28,7 @@ import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.function.BiFunction;
+import java.util.function.BooleanSupplier;
import java.util.stream.Collectors;
import com.google.common.annotations.VisibleForTesting;
@@ -64,12 +66,17 @@ import org.apache.ratis.rpc.RpcType;
import org.apache.ratis.rpc.SupportedRpcType;
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
import org.apache.ratis.thirdparty.io.netty.buffer.ByteBuf;
+import org.apache.ratis.util.JavaUtils;
import org.apache.ratis.util.JvmPauseMonitor;
+import org.apache.ratis.util.TimeDuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.net.ssl.TrustManager;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.ratis.util.Preconditions.assertTrue;
+
/**
* Ratis helper methods.
*/
@@ -603,4 +610,32 @@ public final class RatisHelper {
// Not re-thrown in order to keep the main exception, if there is any.
}
}
+
+ /**
+ * Similar to {@link JavaUtils#attemptUntilTrue(BooleanSupplier, int, TimeDuration, String, Logger)},
+ * but:
+ * <li>takes max. {@link Duration} instead of number of attempts</li>
+ * <li>accepts {@link Duration} instead of {@link TimeDuration} for sleep time</li>
+ *
+ * @return true if attempt was successful,
+ * false if wait for condition to become true timed out or was interrupted
+ */
+ public static boolean attemptUntilTrue(BooleanSupplier condition, Duration pollInterval, Duration timeout) {
+ try {
+ final int attempts = calculateAttempts(pollInterval, timeout);
+ final TimeDuration sleepTime = TimeDuration.valueOf(pollInterval.toMillis(), MILLISECONDS);
+ JavaUtils.attemptUntilTrue(condition, attempts, sleepTime, null, null);
+ return true;
+ } catch (InterruptedException | IllegalStateException exception) {
+ return false;
+ }
+ }
+
+ public static int calculateAttempts(Duration pollInterval, Duration maxDuration) {
+ final long max = maxDuration.toMillis();
+ final long interval = pollInterval.toMillis();
+ assertTrue(max >= interval, () -> "max: " + maxDuration + " < interval:" + pollInterval);
+ return (int) (max / interval);
+ }
+
}
diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml
index c77684f877..8ad0d11d02 100644
--- a/hadoop-hdds/framework/pom.xml
+++ b/hadoop-hdds/framework/pom.xml
@@ -155,10 +155,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>rocksdb-checkpoint-differ</artifactId>
<version>${hdds.version}</version>
</dependency>
- <dependency>
- <groupId>org.awaitility</groupId>
- <artifactId>awaitility</artifactId>
- </dependency>
<!-- Test dependencies -->
<dependency>
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java
index ee550d7227..c47b176e93 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java
@@ -18,16 +18,13 @@
package org.apache.hadoop.hdds.utils.db;
-import org.awaitility.core.ConditionTimeoutException;
+import org.apache.hadoop.hdds.ratis.RatisHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.time.Duration;
-import java.time.Instant;
-
-import static org.awaitility.Awaitility.with;
/**
* RocksDB Checkpoint Utilities.
@@ -35,7 +32,6 @@ import static org.awaitility.Awaitility.with;
public final class RDBCheckpointUtils {
static final Logger LOG =
LoggerFactory.getLogger(RDBCheckpointUtils.class);
- private static final Duration POLL_DELAY_DURATION = Duration.ZERO;
private static final Duration POLL_INTERVAL_DURATION =
Duration.ofMillis(100);
private static final Duration POLL_MAX_DURATION = Duration.ofSeconds(20);
@@ -50,23 +46,12 @@ public final class RDBCheckpointUtils {
*/
public static boolean waitForCheckpointDirectoryExist(File file,
Duration maxWaitTimeout) {
- Instant start = Instant.now();
- try {
- with().atMost(maxWaitTimeout)
- .pollDelay(POLL_DELAY_DURATION)
- .pollInterval(POLL_INTERVAL_DURATION)
- .await()
- .until(file::exists);
- LOG.info("Waited for {} milliseconds for checkpoint directory {}" +
- " availability.",
- Duration.between(start, Instant.now()).toMillis(),
- file.getAbsoluteFile());
- return true;
- } catch (ConditionTimeoutException exception) {
+ final boolean success = RatisHelper.attemptUntilTrue(file::exists, POLL_INTERVAL_DURATION, maxWaitTimeout);
+ if (!success) {
LOG.info("Checkpoint directory: {} didn't get created in {} secs.",
maxWaitTimeout.getSeconds(), file.getAbsolutePath());
- return false;
}
+ return success;
}
/**
diff --git a/hadoop-hdds/managed-rocksdb/pom.xml b/hadoop-hdds/managed-rocksdb/pom.xml
index 1be630e854..f2f90ae5a9 100644
--- a/hadoop-hdds/managed-rocksdb/pom.xml
+++ b/hadoop-hdds/managed-rocksdb/pom.xml
@@ -40,10 +40,6 @@
<groupId>org.rocksdb</groupId>
<artifactId>rocksdbjni</artifactId>
</dependency>
- <dependency>
- <groupId>org.awaitility</groupId>
- <artifactId>awaitility</artifactId>
- </dependency>
</dependencies>
<build/>
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
index 3d7c08275d..8ae82a688b 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
@@ -19,10 +19,9 @@
package org.apache.hadoop.hdds.utils.db.managed;
import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.ratis.RatisHelper;
import org.apache.hadoop.hdds.utils.LeakDetector;
import org.apache.ratis.util.UncheckedAutoCloseable;
-import org.awaitility.Awaitility;
-import org.awaitility.core.ConditionTimeoutException;
import org.rocksdb.RocksDB;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -31,7 +30,6 @@ import javax.annotation.Nullable;
import java.io.File;
import java.io.IOException;
import java.time.Duration;
-import java.time.Instant;
/**
* Utilities to help assert RocksObject closures.
@@ -43,10 +41,8 @@ public final class ManagedRocksObjectUtils {
public static final Logger LOG =
LoggerFactory.getLogger(ManagedRocksObjectUtils.class);
- private static final Duration POLL_DELAY_DURATION = Duration.ZERO;
private static final Duration POLL_INTERVAL_DURATION =
Duration.ofMillis(100);
-
private static final LeakDetector LEAK_DETECTOR = new
LeakDetector("ManagedRocksObject");
static UncheckedAutoCloseable track(AutoCloseable object) {
@@ -74,34 +70,6 @@ public final class ManagedRocksObjectUtils {
return HddsUtils.formatStackTrace(elements, 4);
}
- /**
- * Wait for file to be deleted.
- * @param file File to be deleted.
- * @param maxDuration poll max duration.
- * @param interval poll interval.
- * @param pollDelayDuration poll delay val.
- */
- public static void waitForFileDelete(File file, Duration maxDuration,
- Duration interval,
- Duration pollDelayDuration)
- throws IOException {
- Instant start = Instant.now();
- try {
- Awaitility.with().atMost(maxDuration)
- .pollDelay(pollDelayDuration)
- .pollInterval(interval)
- .await()
- .until(() -> !file.exists());
- LOG.info("Waited for {} milliseconds for file {} deletion.",
- Duration.between(start, Instant.now()).toMillis(),
- file.getAbsoluteFile());
- } catch (ConditionTimeoutException exception) {
- LOG.info("File: {} didn't get deleted in {} secs.",
- file.getAbsolutePath(), maxDuration.getSeconds());
- throw new IOException(exception);
- }
- }
-
/**
* Wait for file to be deleted.
* @param file File to be deleted.
@@ -110,8 +78,12 @@ public final class ManagedRocksObjectUtils {
*/
public static void waitForFileDelete(File file, Duration maxDuration)
throws IOException {
- waitForFileDelete(file, maxDuration, POLL_INTERVAL_DURATION,
- POLL_DELAY_DURATION);
+ if (!RatisHelper.attemptUntilTrue(() -> !file.exists(), POLL_INTERVAL_DURATION, maxDuration)) {
+ String msg = String.format("File: %s didn't get deleted in %s secs.",
+ file.getAbsolutePath(), maxDuration.getSeconds());
+ LOG.info(msg);
+ throw new IOException(msg);
+ }
}
/**
diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
index 2a5d74877f..f5f6644efe 100644
--- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
+++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
@@ -423,7 +423,6 @@ Apache License 2.0
org.eclipse.jetty:jetty-util-ajax
org.eclipse.jetty:jetty-webapp
org.eclipse.jetty:jetty-xml
- org.hamcrest:hamcrest
org.javassist:javassist
org.jetbrains:annotations
org.jetbrains.kotlin:kotlin-stdlib
@@ -442,7 +441,6 @@ Apache License 2.0
org.xerial:sqlite-jdbc
org.yaml:snakeyaml
software.amazon.ion:ion-java
- org.awaitility:awaitility
MIT
=====================
diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt
index 8792390b2c..2b582ddaf6 100644
--- a/hadoop-ozone/dist/src/main/license/jar-report.txt
+++ b/hadoop-ozone/dist/src/main/license/jar-report.txt
@@ -6,7 +6,6 @@ share/ozone/lib/antlr4-runtime.jar
share/ozone/lib/aopalliance-repackaged.jar
share/ozone/lib/aspectjrt.jar
share/ozone/lib/aspectjweaver.jar
-share/ozone/lib/awaitility.jar
share/ozone/lib/aws-java-sdk-core.jar
share/ozone/lib/aws-java-sdk-kms.jar
share/ozone/lib/aws-java-sdk-s3.jar
@@ -61,7 +60,6 @@ share/ozone/lib/hadoop-hdfs-client.jar
share/ozone/lib/hadoop-hdfs.jar
share/ozone/lib/hadoop-shaded-guava.jar
share/ozone/lib/hadoop-shaded-protobuf_3_7.jar
-share/ozone/lib/hamcrest.jar
share/ozone/lib/hdds-annotation-processing.jar
share/ozone/lib/hdds-client.jar
share/ozone/lib/hdds-common.jar
diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml
index 319cb637a6..21aff1b08f 100644
--- a/hadoop-ozone/integration-test/pom.xml
+++ b/hadoop-ozone/integration-test/pom.xml
@@ -253,14 +253,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
</exclusion>
</exclusions>
</dependency>
- <dependency>
- <groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-core</artifactId>
- </dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>jul-to-slf4j</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.awaitility</groupId>
+ <artifactId>awaitility</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml
index 8438dbbf2c..96f0e762f8 100644
--- a/hadoop-ozone/ozone-manager/pom.xml
+++ b/hadoop-ozone/ozone-manager/pom.xml
@@ -232,6 +232,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>hdds-test-utils</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.awaitility</groupId>
+ <artifactId>awaitility</artifactId>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>org.jmockit</groupId>
<artifactId>jmockit</artifactId>
diff --git a/pom.xml b/pom.xml
index 847fd5fdb9..2d3a9b2039 100644
--- a/pom.xml
+++ b/pom.xml
@@ -218,7 +218,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
<okhttp.version>2.7.5</okhttp.version>
<okio.version>3.6.0</okio.version>
<mockito2.version>3.5.9</mockito2.version>
- <hamcrest.version>1.3</hamcrest.version>
<jmockit.version>1.24</jmockit.version>
<junit4.version>4.13.1</junit4.version>
<junit5.version>5.10.1</junit5.version>
@@ -1280,11 +1279,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
<type>pom</type>
<scope>import</scope>
</dependency>
- <dependency>
- <groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-core</artifactId>
- <version>${hamcrest.version}</version>
- </dependency>
<dependency>
<groupId>org.jmockit</groupId>
<artifactId>jmockit</artifactId>
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]