This is an automated email from the ASF dual-hosted git repository.
yihua pushed a commit to branch branch-0.x
in repository https://gitbox.apache.org/repos/asf/hudi.git
The following commit(s) were added to refs/heads/branch-0.x by this push:
new dfd64a8d2ea7 fix: multiple GH CI issues (#18260)
dfd64a8d2ea7 is described below
commit dfd64a8d2ea7f80f3af0e5e1df72455be28f2592
Author: Lin Liu <[email protected]>
AuthorDate: Fri Feb 27 10:38:04 2026 -0800
fix: multiple GH CI issues (#18260)
---
.github/workflows/bot.yml | 4 -
packaging/bundle-validation/Dockerfile | 6 ++
.../docker_java17/docker_java17_test.sh | 87 +++++++++++++++-------
3 files changed, 65 insertions(+), 32 deletions(-)
diff --git a/.github/workflows/bot.yml b/.github/workflows/bot.yml
index a666aee75122..227f4a51a218 100644
--- a/.github/workflows/bot.yml
+++ b/.github/workflows/bot.yml
@@ -267,7 +267,6 @@ jobs:
java-version: '17'
distribution: 'temurin'
architecture: x64
- cache: maven
- name: Quickstart Test
env:
SCALA_PROFILE: ${{ matrix.scalaProfile }}
@@ -322,7 +321,6 @@ jobs:
java-version: '17'
distribution: 'temurin'
architecture: x64
- cache: maven
- name: Scala UT - Common & Spark
env:
SCALA_PROFILE: ${{ matrix.scalaProfile }}
@@ -371,7 +369,6 @@ jobs:
java-version: '17'
distribution: 'temurin'
architecture: x64
- cache: maven
- name: Quickstart Test
env:
SCALA_PROFILE: ${{ matrix.scalaProfile }}
@@ -426,7 +423,6 @@ jobs:
java-version: '17'
distribution: 'temurin'
architecture: x64
- cache: maven
- name: Scala UT - Common & Spark
env:
SCALA_PROFILE: ${{ matrix.scalaProfile }}
diff --git a/packaging/bundle-validation/Dockerfile b/packaging/bundle-validation/Dockerfile
index 995e11ef828b..e76766c9a3e2 100644
--- a/packaging/bundle-validation/Dockerfile
+++ b/packaging/bundle-validation/Dockerfile
@@ -54,5 +54,11 @@ ENV HDFS_SECONDARYNAMENODE_USER root
ENV YARN_RESOURCEMANAGER_USER root
ENV YARN_NODEMANAGER_USER root
+# JDK 8u272+ backported cgroup v2 container detection from JDK 11 but has an NPE bug
+# (JDK-8278725) when running in Docker containers on hosts with cgroup v2 (GitHub Actions).
+# Disable container support for all JVM processes in this image to avoid the NPE.
+# This affects Derby, HiveServer2, Spark, Hadoop daemons, etc.
+ENV JAVA_TOOL_OPTIONS="-XX:-UseContainerSupport"
+
# for RocksDb
RUN apk add --no-cache libstdc++
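
The JAVA_TOOL_OPTIONS setting added above can be sanity-checked inside the built
image; the snippet below is only an illustrative sketch (the image name is a
placeholder, not something defined in this commit):

  # The JVM prints "Picked up JAVA_TOOL_OPTIONS: ..." on stderr whenever the
  # variable is honored, and -XX:+PrintFlagsFinal reports the effective flag value.
  docker run --rm <bundle-validation-image> java -version
  docker run --rm <bundle-validation-image> sh -c \
    'java -XX:+PrintFlagsFinal -version 2>/dev/null | grep UseContainerSupport'
  # expect UseContainerSupport to be reported as false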
diff --git a/packaging/bundle-validation/docker_java17/docker_java17_test.sh b/packaging/bundle-validation/docker_java17/docker_java17_test.sh
index 7fcc9e5000e3..2897ab7d1cc1 100755
--- a/packaging/bundle-validation/docker_java17/docker_java17_test.sh
+++ b/packaging/bundle-validation/docker_java17/docker_java17_test.sh
@@ -51,28 +51,18 @@ start_datanode () {
echo "::warning::docker_test_java17.sh starting datanode:"$DN
- cat $HADOOP_HOME/hadoop/etc/hdfs-site.xml
- cat $HADOOP_HOME/hadoop/etc/core-site.xml
-
DN_DIR_PREFIX=$DOCKER_TEST_DIR/additional_datanode/
PID_DIR=$DOCKER_TEST_DIR/pid/$1
- if [ -z $DN_DIR_PREFIX ]; then
- mkdir -p $DN_DIR_PREFIX
- fi
-
- if [ -z $PID_DIR ]; then
- mkdir -p $PID_DIR
- fi
-
- export HADOOP_PID_DIR=$PID_PREFIX
+ mkdir -p $DN_DIR_PREFIX $PID_DIR
+ export HADOOP_PID_DIR=$PID_DIR
DN_CONF_OPTS="\
-Dhadoop.tmp.dir=$DN_DIR_PREFIX$DN\
-Ddfs.datanode.address=localhost:5001$DN \
-Ddfs.datanode.http.address=localhost:5008$DN \
-Ddfs.datanode.ipc.address=localhost:5002$DN"
- $HADOOP_HOME/bin/hdfs --daemon start datanode $DN_CONF_OPTS
- $HADOOP_HOME/bin/hdfs dfsadmin -report
+ bash $HADOOP_HOME/bin/hdfs --daemon start datanode $DN_CONF_OPTS
+ bash $HADOOP_HOME/bin/hdfs dfsadmin -report
}
setup_hdfs () {
@@ -80,20 +70,52 @@ setup_hdfs () {
mv /opt/bundle-validation/tmp-conf-dir/hdfs-site.xml $HADOOP_HOME/etc/hadoop/hdfs-site.xml
mv /opt/bundle-validation/tmp-conf-dir/core-site.xml $HADOOP_HOME/etc/hadoop/core-site.xml
- $HADOOP_HOME/bin/hdfs namenode -format
- $HADOOP_HOME/bin/hdfs --daemon start namenode
- echo "::warning::docker_test_java17.sh starting hadoop hdfs"
- $HADOOP_HOME/sbin/start-dfs.sh
+ mkdir -p $DOCKER_TEST_DIR/pid
+ export HADOOP_PID_DIR=$DOCKER_TEST_DIR/pid
+
+ bash $HADOOP_HOME/bin/hdfs namenode -format
+ bash $HADOOP_HOME/bin/hdfs --daemon start namenode
+
+ echo "::warning::docker_test_java17.sh waiting for NameNode to start"
+ NAMENODE_READY=0
+ NAMENODE_PID=""
+ if [ -f "$HADOOP_PID_DIR/hadoop-root-namenode.pid" ]; then
+ NAMENODE_PID=$(cat "$HADOOP_PID_DIR/hadoop-root-namenode.pid")
+ fi
+ for i in $(seq 1 30); do
+ if [ -n "$NAMENODE_PID" ] && ! kill -0 "$NAMENODE_PID" 2>/dev/null; then
+ echo "::error::docker_test_java17.sh NameNode process $NAMENODE_PID died"
+ break
+ fi
+ if timeout 15 bash $HADOOP_HOME/bin/hdfs dfsadmin -report >/dev/null 2>&1; then
+ NAMENODE_READY=1
+ break
+ fi
+ sleep 2
+ done
+ if [ "$NAMENODE_READY" -ne 1 ]; then
+ echo "::error::docker_test_java17.sh NameNode failed to start"
+ echo "::group::NameNode logs"
+ find "$HADOOP_HOME/logs" -name "hadoop-root-namenode*.log" -exec cat {} \;
2>/dev/null || echo "No NameNode logs found"
+ echo "::endgroup::"
+ exit 1
+ fi
+
+ echo "::warning::docker_test_java17.sh starting default datanode"
+ bash $HADOOP_HOME/bin/hdfs --daemon start datanode
- # start datanodes
+ echo "::warning::docker_test_java17.sh waiting for DataNode to register"
+ sleep 10
+
+ # start additional datanodes
for i in $(seq 1 3)
do
start_datanode $i
done
echo "::warning::docker_test_java17.sh starting hadoop hdfs, hdfs report"
- $HADOOP_HOME/bin/hdfs dfs -mkdir -p /user/root
- $HADOOP_HOME/bin/hdfs dfs -ls /user/
+ bash $HADOOP_HOME/bin/hdfs dfs -mkdir -p /user/root
+ bash $HADOOP_HOME/bin/hdfs dfs -ls /user/
if [ "$?" -ne 0 ]; then
echo "::error::docker_test_java17.sh Failed setting up HDFS!"
exit 1
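
For context, the NameNode readiness check in this hunk is a poll-with-deadline
pattern: retry the dfsadmin probe under a timeout and bail out early if the
watched PID dies. A generic bash variant is sketched below; the helper name and
arguments are illustrative only and are not part of this commit:

  # Hypothetical helper (not in this commit): poll a command until it succeeds,
  # a deadline passes, or a watched process dies.
  wait_for_ready () {
    local pid="$1"; shift       # PID to watch; pass "" to skip the liveness check
    local attempts="$1"; shift  # number of polling attempts
    local interval="$1"; shift  # seconds to sleep between attempts
    for _ in $(seq 1 "$attempts"); do
      if [ -n "$pid" ] && ! kill -0 "$pid" 2>/dev/null; then
        return 2                # watched process died
      fi
      if "$@" >/dev/null 2>&1; then
        return 0                # probe succeeded
      fi
      sleep "$interval"
    done
    return 1                    # deadline reached without success
  }
  # e.g.: wait_for_ready "$NAMENODE_PID" 30 2 timeout 15 $HADOOP_HOME/bin/hdfs dfsadmin -report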
@@ -103,7 +125,14 @@ setup_hdfs () {
stop_hdfs() {
use_default_java_runtime
echo "::warning::docker_test_java17.sh stopping hadoop hdfs"
- $HADOOP_HOME/sbin/stop-dfs.sh
+ export HADOOP_PID_DIR=$DOCKER_TEST_DIR/pid
+ bash $HADOOP_HOME/bin/hdfs --daemon stop datanode 2>/dev/null || true
+ for i in 1 2 3; do
+ export HADOOP_PID_DIR=$DOCKER_TEST_DIR/pid/$i
+ bash $HADOOP_HOME/bin/hdfs --daemon stop datanode 2>/dev/null || true
+ done
+ export HADOOP_PID_DIR=$DOCKER_TEST_DIR/pid
+ bash $HADOOP_HOME/bin/hdfs --daemon stop namenode 2>/dev/null || true
}
build_hudi_java8 () {
@@ -123,7 +152,14 @@ build_hudi_java8 () {
mkdir -p $JARS_DIR
fi
- cp ./packaging/hudi-spark-bundle/target/hudi-spark*.jar $JARS_DIR/spark.jar
+ echo "::warning::docker_test_java17.sh copy hudi-spark bundle jar to target
folder"
+ cp ./packaging/hudi-spark-bundle/target/hudi-spark*SNAPSHOT.jar $JARS_DIR/spark.jar
+ echo "::warning::docker_test_java17.sh copy hudi-spark bundle jar to target
folder DONE"
+
+ if [ "$?" -ne 0 ]; then
+ echo "::error::docker_test_java17.sh Failed to copy hudi-spark bundle jar
to target folder"
+ exit 1
+ fi
}
run_docker_tests() {
@@ -156,11 +192,6 @@ run_docker_tests() {
# Execute tests
############################
cd $DOCKER_TEST_DIR
-echo "yxchang: $(PATH)"
-export PATH=/usr/bin:$PATH
-whoami
-which ssh
-whoami
echo "::warning::docker_test_java17.sh Building Hudi with Java 8"
build_hudi_java8