This is an automated email from the ASF dual-hosted git repository.

liurenjie1024 pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/iceberg-rust.git


The following commit(s) were added to refs/heads/main by this push:
     new 946a28fa3 Simplify test setup by starting containers only once. (#2071)
946a28fa3 is described below

commit 946a28fa3714ec4425fb31e295020b2c4ae2d06b
Author: Renjie Liu <[email protected]>
AuthorDate: Wed Jan 28 08:20:00 2026 +0800

    Simplify test setup by starting containers only once. (#2071)
    
    ## Which issue does this PR close?
    
    - Closes #2070.
    
    ## What changes are included in this PR?
    
    1. Removed the CI action that freed up disk space; it was a hack and had
    become quite unstable.
    2. Refactored tests to start containers only once. We also introduced
    utility methods to make namespaces unique across tests (a sketch of these
    helpers follows below).
    
    ## Are these changes tested?
    
    Yes, unit tests.
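    
    For illustration, here is a minimal sketch of what the new helpers could
    look like. The real implementations live in crates/test_utils/src/lib.rs
    and dev/docker-compose.yaml; the macro body, env var name, and default
    port below are assumptions for illustration, not the committed code.
    
    ```rust
    use iceberg::{Catalog, NamespaceIdent};
    
    /// Hypothetical sketch: derive a namespace name unique to the calling
    /// test module, so tests sharing one long-lived set of containers
    /// don't collide with each other or with earlier runs.
    #[macro_export]
    macro_rules! normalize_test_name_with_parts {
        ($($part:expr),+) => {{
            let mut name = module_path!().replace("::", "_");
            $( name.push('_'); name.push_str($part); )+
            name
        }};
    }
    
    /// Hypothetical sketch: endpoint of the shared MinIO container started
    /// by `make docker-up` (env var and port are assumptions).
    pub fn get_minio_endpoint() -> String {
        std::env::var("MINIO_ENDPOINT")
            .unwrap_or_else(|_| "http://localhost:9000".to_string())
    }
    
    /// Hypothetical sketch: best-effort cleanup of leftovers from previous
    /// runs; drop every table in the namespace, then the namespace itself,
    /// ignoring errors such as "not found".
    pub async fn cleanup_namespace(catalog: &impl Catalog, ns: &NamespaceIdent) {
        if let Ok(tables) = catalog.list_tables(ns).await {
            for table in tables {
                let _ = catalog.drop_table(&table).await;
            }
        }
        let _ = catalog.drop_namespace(ns).await;
    }
    ```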
---
 .github/workflows/ci.yml                           |  37 ++--
 Cargo.lock                                         |  25 +--
 Cargo.toml                                         |   2 -
 Makefile                                           |  13 +-
 crates/catalog/glue/Cargo.toml                     |   2 -
 .../glue/testdata/glue_catalog/docker-compose.yaml |  44 -----
 crates/catalog/glue/tests/glue_catalog_test.rs     | 137 +++++++-------
 crates/catalog/hms/Cargo.toml                      |   2 -
 .../hms/testdata/hms_catalog/docker-compose.yaml   |  49 -----
 crates/catalog/hms/tests/hms_catalog_test.rs       | 124 ++++++------
 crates/catalog/rest/Cargo.toml                     |   2 -
 .../rest/testdata/rest_catalog/docker-compose.yaml |  65 -------
 crates/catalog/rest/tests/rest_catalog_test.rs     | 207 +++++++++++++--------
 crates/iceberg/Cargo.toml                          |   1 -
 .../testdata/file_io_gcs/docker-compose.yaml       |  23 ---
 .../testdata/file_io_s3/docker-compose.yaml        |  30 ---
 crates/iceberg/tests/file_io_gcs_test.rs           |  45 +----
 crates/iceberg/tests/file_io_s3_test.rs            |  75 +++-----
 crates/integration_tests/Cargo.toml                |   1 -
 crates/integration_tests/src/lib.rs                |  44 ++++-
 .../integration_tests/testdata/docker-compose.yaml | 108 -----------
 crates/integration_tests/tests/shared.rs           |  20 +-
 crates/integration_tests/tests/shared_tests/mod.rs |   2 +-
 crates/test_utils/Cargo.toml                       |   1 +
 crates/test_utils/src/lib.rs                       |  81 ++++++++
 dev/docker-compose.yaml                            | 202 ++++++++++++++++++++
 .../testdata/hms_catalog => dev/hms}/Dockerfile    |   0
 .../testdata/hms_catalog => dev/hms}/core-site.xml | 102 +++++-----
 .../testdata => dev}/spark/Dockerfile              |   0
 .../testdata => dev}/spark/provision.py            |   0
 .../testdata => dev}/spark/spark-defaults.conf     |   0
 31 files changed, 693 insertions(+), 751 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index af9b812c7..829f64030 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -86,18 +86,6 @@ jobs:
           - macos-latest
           - windows-latest
     steps:
-      - name: Maximize build space (Ubuntu)
-        if: matrix.os == 'ubuntu-latest'
-        uses: easimon/maximize-build-space@master
-        with:
-          remove-dotnet: "true"
-          remove-android: "true"
-          remove-haskell: "true"
-          remove-codeql: "true"
-          remove-docker-images: "true"
-          root-reserve-mb: 10240
-          temp-reserve-mb: 10240
-
       - uses: actions/checkout@v6
 
       - name: Setup Rust toolchain
@@ -134,26 +122,15 @@ jobs:
       - name: Build
         run: cargo build -p iceberg --no-default-features
 
-  unit:
+  tests:
     runs-on: ubuntu-latest
     strategy:
       matrix:
         test-suite:
           - { name: "default", args: "--all-targets --all-features 
--workspace" }
           - { name: "doc", args: "--doc --all-features --workspace" }
-    name: Unit Tests (${{ matrix.test-suite.name }})
+    name: Tests (${{ matrix.test-suite.name }})
     steps:
-      - name: Maximize build space
-        uses: easimon/maximize-build-space@master
-        with:
-          remove-dotnet: "true"
-          remove-android: "true"
-          remove-haskell: "true"
-          remove-codeql: "true"
-          remove-docker-images: "true"
-          root-reserve-mb: 10240
-          temp-reserve-mb: 10240
-
       - uses: actions/checkout@v6
 
       - name: Setup Rust toolchain
@@ -169,9 +146,17 @@ jobs:
         with:
           key: ${{ matrix.test-suite.name }}
 
-      - name: Test
+      - name: Start Docker containers
+        if: matrix.test-suite.name == 'default'
+        run: make docker-up
+
+      - name: Run tests
         run: cargo test --no-fail-fast ${{ matrix.test-suite.args }}
 
+      - name: Stop Docker containers
+        if: always() && matrix.test-suite.name == 'default'
+        run: make docker-down
+
   msrv:
     name: Verify MSRV
     runs-on: ubuntu-latest
diff --git a/Cargo.lock b/Cargo.lock
index 003a52a38..c0c46ce67 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1546,16 +1546,6 @@ dependencies = [
  "memchr",
 ]
 
-[[package]]
-name = "ctor"
-version = "0.2.9"
-source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501"
-dependencies = [
- "quote",
- "syn 2.0.111",
-]
-
 [[package]]
 name = "darling"
 version = "0.20.11"
@@ -3375,7 +3365,6 @@ dependencies = [
  "bimap",
  "bytes",
  "chrono",
- "ctor",
  "derive_builder",
  "expect-test",
  "flate2",
@@ -3431,10 +3420,8 @@ dependencies = [
  "async-trait",
  "aws-config",
  "aws-sdk-glue",
- "ctor",
  "iceberg",
  "iceberg_test_utils",
- "port_scanner",
  "serde_json",
  "tokio",
  "tracing",
@@ -3447,7 +3434,6 @@ dependencies = [
  "anyhow",
  "async-trait",
  "chrono",
- "ctor",
  "faststr",
  "hive_metastore",
  "iceberg",
@@ -3456,7 +3442,6 @@ dependencies = [
  "metainfo",
  "motore-macros",
  "pilota",
- "port_scanner",
  "serde_json",
  "tokio",
  "tracing",
@@ -3486,13 +3471,11 @@ version = "0.8.0"
 dependencies = [
  "async-trait",
  "chrono",
- "ctor",
  "http 1.4.0",
  "iceberg",
  "iceberg_test_utils",
  "itertools 0.13.0",
  "mockito",
- "port_scanner",
  "reqwest",
  "serde",
  "serde_derive",
@@ -3564,7 +3547,6 @@ version = "0.8.0"
 dependencies = [
  "arrow-array",
  "arrow-schema",
- "ctor",
  "datafusion",
  "futures",
  "iceberg",
@@ -3625,6 +3607,7 @@ dependencies = [
 name = "iceberg_test_utils"
 version = "0.8.0"
 dependencies = [
+ "iceberg",
  "tracing",
  "tracing-subscriber",
 ]
@@ -4908,12 +4891,6 @@ version = "0.3.32"
 source = "registry+https://github.com/rust-lang/crates.io-index";
 checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"
 
-[[package]]
-name = "port_scanner"
-version = "0.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index";
-checksum = "325a6d2ac5dee293c3b2612d4993b98aec1dff096b0a2dae70ed7d95784a05da"
-
 [[package]]
 name = "portable-atomic"
 version = "1.11.1"
diff --git a/Cargo.toml b/Cargo.toml
index 148a45449..46ac4736b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -61,7 +61,6 @@ bimap = "0.6"
 bytes = "1.10"
 chrono = "0.4.41"
 clap = { version = "4.5.48", features = ["derive", "cargo"] }
-ctor = "0.2.8"
 dashmap = "6"
 datafusion = "51.0"
 datafusion-cli = "51.0"
@@ -104,7 +103,6 @@ opendal = "0.55.0"
 ordered-float = "4"
 parquet = "57.0"
 pilota = "0.11.10"
-port_scanner = "0.1.5"
 pretty_assertions = "1.4"
 rand = "0.8.5"
 regex = "1.11.3"
diff --git a/Makefile b/Makefile
index 6b8a039b4..14093e182 100644
--- a/Makefile
+++ b/Makefile
@@ -55,7 +55,8 @@ doc-test:
 unit-test: doc-test
        cargo test --no-fail-fast --lib --all-features --workspace
 
-test: doc-test
+test: docker-up
+       @trap '$(MAKE) docker-down' EXIT; \
        cargo test --no-fail-fast --all-targets --all-features --workspace
 
 clean:
@@ -66,3 +67,13 @@ install-mdbook:
 
 site: install-mdbook
        cd website && mdbook serve
+
+# Docker targets for integration tests
+docker-up:
+       docker compose -f dev/docker-compose.yaml up -d --build --wait
+
+docker-down:
+       docker compose -f dev/docker-compose.yaml down -v --remove-orphans --timeout 0
+
+docker-logs:
+       docker compose -f dev/docker-compose.yaml logs -f
diff --git a/crates/catalog/glue/Cargo.toml b/crates/catalog/glue/Cargo.toml
index b6126021f..f42fedeae 100644
--- a/crates/catalog/glue/Cargo.toml
+++ b/crates/catalog/glue/Cargo.toml
@@ -39,6 +39,4 @@ tokio = { workspace = true }
 tracing = { workspace = true }
 
 [dev-dependencies]
-ctor = { workspace = true }
 iceberg_test_utils = { path = "../../test_utils", features = ["tests"] }
-port_scanner = { workspace = true }
diff --git a/crates/catalog/glue/testdata/glue_catalog/docker-compose.yaml b/crates/catalog/glue/testdata/glue_catalog/docker-compose.yaml
deleted file mode 100644
index bb77d5f8b..000000000
--- a/crates/catalog/glue/testdata/glue_catalog/docker-compose.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-services:
-  minio:
-    image: minio/minio:RELEASE.2025-05-24T17-08-30Z
-    expose:
-      - 9000
-      - 9001
-    environment:
-      - MINIO_ROOT_USER=admin
-      - MINIO_ROOT_PASSWORD=password
-      - MINIO_DOMAIN=minio
-    command: [ "server", "/data", "--console-address", ":9001" ]
-
-  mc:
-    depends_on:
-      - minio
-    image: minio/mc:RELEASE.2025-05-21T01-59-54Z
-    environment:
-      - AWS_ACCESS_KEY_ID=admin
-      - AWS_SECRET_ACCESS_KEY=password
-      - AWS_REGION=us-east-1
-    entrypoint: >
-      /bin/sh -c " until (/usr/bin/mc alias set minio http://minio:9000 admin password) do echo '...waiting...' && sleep 1; done; /usr/bin/mc mb minio/warehouse; /usr/bin/mc policy set public minio/warehouse; tail -f /dev/null "
-
-  moto:
-    image: motoserver/moto:5.0.3
-    expose:
-      - 5000
diff --git a/crates/catalog/glue/tests/glue_catalog_test.rs b/crates/catalog/glue/tests/glue_catalog_test.rs
index 491703214..f6e2060c0 100644
--- a/crates/catalog/glue/tests/glue_catalog_test.rs
+++ b/crates/catalog/glue/tests/glue_catalog_test.rs
@@ -16,12 +16,12 @@
 // under the License.
 
 //! Integration tests for glue catalog.
+//!
+//! These tests assume Docker containers are started externally via `make docker-up`.
+//! Each test uses unique namespaces based on module path to avoid conflicts.
 
 use std::collections::HashMap;
-use std::net::SocketAddr;
-use std::sync::RwLock;
 
-use ctor::{ctor, dtor};
 use iceberg::io::{S3_ACCESS_KEY_ID, S3_ENDPOINT, S3_REGION, S3_SECRET_ACCESS_KEY};
 use iceberg::spec::{NestedField, PrimitiveType, Schema, Type};
 use iceberg::transaction::{ApplyTransactionAction, Transaction};
@@ -32,55 +32,18 @@ use iceberg_catalog_glue::{
     AWS_ACCESS_KEY_ID, AWS_REGION_NAME, AWS_SECRET_ACCESS_KEY, GLUE_CATALOG_PROP_URI,
     GLUE_CATALOG_PROP_WAREHOUSE, GlueCatalog, GlueCatalogBuilder,
 };
-use iceberg_test_utils::docker::DockerCompose;
-use iceberg_test_utils::{normalize_test_name, set_up};
-use port_scanner::scan_port_addr;
+use iceberg_test_utils::{
+    cleanup_namespace, get_glue_endpoint, get_minio_endpoint, normalize_test_name_with_parts,
+    set_up,
+};
 use tokio::time::sleep;
 use tracing::info;
 
-const GLUE_CATALOG_PORT: u16 = 5000;
-const MINIO_PORT: u16 = 9000;
-static DOCKER_COMPOSE_ENV: RwLock<Option<DockerCompose>> = RwLock::new(None);
-
-#[ctor]
-fn before_all() {
-    let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
-    let docker_compose = DockerCompose::new(
-        normalize_test_name(module_path!()),
-        format!("{}/testdata/glue_catalog", env!("CARGO_MANIFEST_DIR")),
-    );
-    docker_compose.up();
-    guard.replace(docker_compose);
-}
-
-#[dtor]
-fn after_all() {
-    let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
-    guard.take();
-}
-
 async fn get_catalog() -> GlueCatalog {
     set_up();
 
-    let (glue_catalog_ip, minio_ip) = {
-        let guard = DOCKER_COMPOSE_ENV.read().unwrap();
-        let docker_compose = guard.as_ref().unwrap();
-        (
-            docker_compose.get_container_ip("moto"),
-            docker_compose.get_container_ip("minio"),
-        )
-    };
-    let glue_socket_addr = SocketAddr::new(glue_catalog_ip, GLUE_CATALOG_PORT);
-    let minio_socket_addr = SocketAddr::new(minio_ip, MINIO_PORT);
-    while !scan_port_addr(glue_socket_addr) {
-        info!("Waiting for 1s glue catalog to ready...");
-        sleep(std::time::Duration::from_millis(1000)).await;
-    }
-
-    while !scan_port_addr(minio_socket_addr) {
-        info!("Waiting for 1s minio to ready...");
-        sleep(std::time::Duration::from_millis(1000)).await;
-    }
+    let glue_endpoint = get_glue_endpoint();
+    let minio_endpoint = get_minio_endpoint();
 
     let props = HashMap::from([
         (AWS_ACCESS_KEY_ID.to_string(), "my_access_id".to_string()),
@@ -89,10 +52,7 @@ async fn get_catalog() -> GlueCatalog {
             "my_secret_key".to_string(),
         ),
         (AWS_REGION_NAME.to_string(), "us-east-1".to_string()),
-        (
-            S3_ENDPOINT.to_string(),
-            format!("http://{minio_socket_addr}";),
-        ),
+        (S3_ENDPOINT.to_string(), minio_endpoint),
         (S3_ACCESS_KEY_ID.to_string(), "admin".to_string()),
         (S3_SECRET_ACCESS_KEY.to_string(), "password".to_string()),
         (S3_REGION.to_string(), "us-east-1".to_string()),
@@ -117,10 +77,7 @@ async fn get_catalog() -> GlueCatalog {
     }
 
     let mut glue_props = HashMap::from([
-        (
-            GLUE_CATALOG_PROP_URI.to_string(),
-            format!("http://{glue_socket_addr}";),
-        ),
+        (GLUE_CATALOG_PROP_URI.to_string(), glue_endpoint),
         (
             GLUE_CATALOG_PROP_WAREHOUSE.to_string(),
             "s3a://warehouse/hive".to_string(),
@@ -163,7 +120,11 @@ fn set_table_creation(location: Option<String>, name: impl ToString) -> Result<T
 async fn test_rename_table() -> Result<()> {
     let catalog = get_catalog().await;
     let creation = set_table_creation(None, "my_table")?;
-    let namespace = Namespace::new(NamespaceIdent::new("test_rename_table".into()));
+    // Use unique namespace to avoid conflicts
+    let namespace = Namespace::new(NamespaceIdent::new(normalize_test_name_with_parts!(
+        "test_rename_table"
+    )));
+    cleanup_namespace(&catalog, namespace.name()).await;
 
     catalog
         .create_namespace(namespace.name(), HashMap::new())
@@ -190,7 +151,11 @@ async fn test_rename_table() -> Result<()> {
 async fn test_table_exists() -> Result<()> {
     let catalog = get_catalog().await;
     let creation = set_table_creation(None, "my_table")?;
-    let namespace = Namespace::new(NamespaceIdent::new("test_table_exists".into()));
+    // Use unique namespace to avoid conflicts
+    let namespace = Namespace::new(NamespaceIdent::new(normalize_test_name_with_parts!(
+        "test_table_exists"
+    )));
+    cleanup_namespace(&catalog, namespace.name()).await;
 
     catalog
         .create_namespace(namespace.name(), HashMap::new())
@@ -214,7 +179,11 @@ async fn test_table_exists() -> Result<()> {
 async fn test_drop_table() -> Result<()> {
     let catalog = get_catalog().await;
     let creation = set_table_creation(None, "my_table")?;
-    let namespace = Namespace::new(NamespaceIdent::new("test_drop_table".into()));
+    // Use unique namespace to avoid conflicts
+    let namespace = Namespace::new(NamespaceIdent::new(normalize_test_name_with_parts!(
+        "test_drop_table"
+    )));
+    cleanup_namespace(&catalog, namespace.name()).await;
 
     catalog
         .create_namespace(namespace.name(), HashMap::new())
@@ -235,7 +204,11 @@ async fn test_drop_table() -> Result<()> {
 async fn test_load_table() -> Result<()> {
     let catalog = get_catalog().await;
     let creation = set_table_creation(None, "my_table")?;
-    let namespace = Namespace::new(NamespaceIdent::new("test_load_table".into()));
+    // Use unique namespace to avoid conflicts
+    let namespace = Namespace::new(NamespaceIdent::new(normalize_test_name_with_parts!(
+        "test_load_table"
+    )));
+    cleanup_namespace(&catalog, namespace.name()).await;
 
     catalog
         .create_namespace(namespace.name(), HashMap::new())
@@ -260,7 +233,9 @@ async fn test_load_table() -> Result<()> {
 #[tokio::test]
 async fn test_create_table() -> Result<()> {
     let catalog = get_catalog().await;
-    let namespace = NamespaceIdent::new("test_create_table".to_string());
+    // Use unique namespace to avoid conflicts
+    let namespace = NamespaceIdent::new(normalize_test_name_with_parts!("test_create_table"));
+    cleanup_namespace(&catalog, &namespace).await;
     set_test_namespace(&catalog, &namespace).await?;
     // inject custom location, ignore the namespace prefix
     let creation = set_table_creation(Some("s3a://warehouse/hive".into()), "my_table")?;
@@ -285,7 +260,9 @@ async fn test_create_table() -> Result<()> {
 #[tokio::test]
 async fn test_list_tables() -> Result<()> {
     let catalog = get_catalog().await;
-    let namespace = NamespaceIdent::new("test_list_tables".to_string());
+    // Use unique namespace to avoid conflicts
+    let namespace = NamespaceIdent::new(normalize_test_name_with_parts!("test_list_tables"));
+    cleanup_namespace(&catalog, &namespace).await;
     set_test_namespace(&catalog, &namespace).await?;
 
     let expected = vec![];
@@ -299,7 +276,9 @@ async fn test_list_tables() -> Result<()> {
 #[tokio::test]
 async fn test_drop_namespace() -> Result<()> {
     let catalog = get_catalog().await;
-    let namespace = NamespaceIdent::new("test_drop_namespace".to_string());
+    // Use unique namespace to avoid conflicts
+    let namespace = NamespaceIdent::new(normalize_test_name_with_parts!("test_drop_namespace"));
+    cleanup_namespace(&catalog, &namespace).await;
     set_test_namespace(&catalog, &namespace).await?;
 
     let exists = catalog.namespace_exists(&namespace).await?;
@@ -316,7 +295,9 @@ async fn test_drop_namespace() -> Result<()> {
 #[tokio::test]
 async fn test_update_namespace() -> Result<()> {
     let catalog = get_catalog().await;
-    let namespace = NamespaceIdent::new("test_update_namespace".into());
+    // Use unique namespace to avoid conflicts
+    let namespace = NamespaceIdent::new(normalize_test_name_with_parts!("test_update_namespace"));
+    cleanup_namespace(&catalog, &namespace).await;
     set_test_namespace(&catalog, &namespace).await?;
 
     let before_update = catalog.get_namespace(&namespace).await?;
@@ -340,7 +321,9 @@ async fn test_update_namespace() -> Result<()> {
 async fn test_namespace_exists() -> Result<()> {
     let catalog = get_catalog().await;
 
-    let namespace = NamespaceIdent::new("test_namespace_exists".into());
+    // Use unique namespace to avoid conflicts
+    let namespace = NamespaceIdent::new(normalize_test_name_with_parts!("test_namespace_exists"));
+    cleanup_namespace(&catalog, &namespace).await;
 
     let exists = catalog.namespace_exists(&namespace).await?;
     assert!(!exists);
@@ -357,7 +340,9 @@ async fn test_namespace_exists() -> Result<()> {
 async fn test_get_namespace() -> Result<()> {
     let catalog = get_catalog().await;
 
-    let namespace = NamespaceIdent::new("test_get_namespace".into());
+    // Use unique namespace to avoid conflicts
+    let namespace = NamespaceIdent::new(normalize_test_name_with_parts!("test_get_namespace"));
+    cleanup_namespace(&catalog, &namespace).await;
 
     let does_not_exist = catalog.get_namespace(&namespace).await;
     assert!(does_not_exist.is_err());
@@ -377,7 +362,9 @@ async fn test_create_namespace() -> Result<()> {
     let catalog = get_catalog().await;
 
     let properties = HashMap::new();
-    let namespace = NamespaceIdent::new("test_create_namespace".into());
+    // Use unique namespace to avoid conflicts
+    let namespace = NamespaceIdent::new(normalize_test_name_with_parts!("test_create_namespace"));
+    cleanup_namespace(&catalog, &namespace).await;
 
     let expected = Namespace::new(namespace.clone());
 
@@ -392,7 +379,9 @@ async fn test_create_namespace() -> Result<()> {
 async fn test_list_namespace() -> Result<()> {
     let catalog = get_catalog().await;
 
-    let namespace = NamespaceIdent::new("test_list_namespace".to_string());
+    // Use unique namespace to avoid conflicts
+    let namespace = NamespaceIdent::new(normalize_test_name_with_parts!("test_list_namespace"));
+    cleanup_namespace(&catalog, &namespace).await;
     set_test_namespace(&catalog, &namespace).await?;
 
     let result = catalog.list_namespaces(None).await?;
@@ -408,7 +397,11 @@ async fn test_list_namespace() -> Result<()> {
 async fn test_update_table() -> Result<()> {
     let catalog = get_catalog().await;
     let creation = set_table_creation(None, "my_table")?;
-    let namespace = Namespace::new(NamespaceIdent::new("test_update_table".into()));
+    // Use unique namespace to avoid conflicts
+    let namespace = Namespace::new(NamespaceIdent::new(normalize_test_name_with_parts!(
+        "test_update_table"
+    )));
+    cleanup_namespace(&catalog, namespace.name()).await;
 
     catalog
         .create_namespace(namespace.name(), HashMap::new())
@@ -473,13 +466,13 @@ async fn test_update_table() -> Result<()> {
 #[tokio::test]
 async fn test_register_table() -> Result<()> {
     let catalog = get_catalog().await;
-    let namespace = NamespaceIdent::new("test_register_table".into());
+    // Use unique namespace to avoid conflicts
+    let namespace = NamespaceIdent::new(normalize_test_name_with_parts!("test_register_table"));
+    cleanup_namespace(&catalog, &namespace).await;
     set_test_namespace(&catalog, &namespace).await?;
 
-    let creation = set_table_creation(
-        Some("s3a://warehouse/hive/test_register_table".into()),
-        "my_table",
-    )?;
+    let location = format!("s3a://warehouse/hive/{namespace}");
+    let creation = set_table_creation(Some(location), "my_table")?;
     let table = catalog.create_table(&namespace, creation).await?;
     let metadata_location = table
         .metadata_location()
diff --git a/crates/catalog/hms/Cargo.toml b/crates/catalog/hms/Cargo.toml
index 549dbb9c0..a6517fb7b 100644
--- a/crates/catalog/hms/Cargo.toml
+++ b/crates/catalog/hms/Cargo.toml
@@ -54,9 +54,7 @@ motore-macros = { workspace = true }
 volo = { workspace = true }
 
 [dev-dependencies]
-ctor = { workspace = true }
 iceberg_test_utils = { path = "../../test_utils", features = ["tests"] }
-port_scanner = { workspace = true }
 
 [package.metadata.cargo-machete]
 # These dependencies are added to ensure minimal dependency version
diff --git a/crates/catalog/hms/testdata/hms_catalog/docker-compose.yaml b/crates/catalog/hms/testdata/hms_catalog/docker-compose.yaml
deleted file mode 100644
index 2729f562c..000000000
--- a/crates/catalog/hms/testdata/hms_catalog/docker-compose.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-services:
-  minio:
-    image: minio/minio:RELEASE.2025-05-24T17-08-30Z
-    expose:
-      - 9000
-      - 9001
-    environment:
-      - MINIO_ROOT_USER=admin
-      - MINIO_ROOT_PASSWORD=password
-      - MINIO_DOMAIN=minio
-    command: [ "server", "/data", "--console-address", ":9001" ]
-
-  mc:
-    depends_on:
-      - minio
-    image: minio/mc:RELEASE.2025-05-21T01-59-54Z
-    environment:
-      - AWS_ACCESS_KEY_ID=admin
-      - AWS_SECRET_ACCESS_KEY=password
-      - AWS_REGION=us-east-1
-    entrypoint: >
-      /bin/sh -c " until (/usr/bin/mc alias set minio http://minio:9000 admin password) do echo '...waiting...' && sleep 1; done; /usr/bin/mc mb minio/warehouse; /usr/bin/mc policy set public minio/warehouse; tail -f /dev/null "
-
-  hive-metastore:
-    image: iceberg-hive-metastore
-    build: ./
-    platform: ${DOCKER_DEFAULT_PLATFORM}
-    expose:
-      - 9083
-    environment:
-      SERVICE_NAME: "metastore"
-      SERVICE_OPTS: "-Dmetastore.warehouse.dir=s3a://warehouse/hive/"
diff --git a/crates/catalog/hms/tests/hms_catalog_test.rs b/crates/catalog/hms/tests/hms_catalog_test.rs
index 9793b7f73..bc036d0c6 100644
--- a/crates/catalog/hms/tests/hms_catalog_test.rs
+++ b/crates/catalog/hms/tests/hms_catalog_test.rs
@@ -16,12 +16,12 @@
 // under the License.
 
 //! Integration tests for hms catalog.
+//!
+//! These tests assume Docker containers are started externally via `make docker-up`.
+//! Each test uses unique namespaces based on module path to avoid conflicts.
 
 use std::collections::HashMap;
-use std::net::SocketAddr;
-use std::sync::RwLock;
 
-use ctor::{ctor, dtor};
 use iceberg::io::{S3_ACCESS_KEY_ID, S3_ENDPOINT, S3_REGION, S3_SECRET_ACCESS_KEY};
 use iceberg::spec::{NestedField, PrimitiveType, Schema, Type};
 use iceberg::{Catalog, CatalogBuilder, Namespace, NamespaceIdent, TableCreation, TableIdent};
@@ -29,63 +29,22 @@ use iceberg_catalog_hms::{
     HMS_CATALOG_PROP_THRIFT_TRANSPORT, HMS_CATALOG_PROP_URI, HMS_CATALOG_PROP_WAREHOUSE,
     HmsCatalog, HmsCatalogBuilder, THRIFT_TRANSPORT_BUFFERED,
 };
-use iceberg_test_utils::docker::DockerCompose;
-use iceberg_test_utils::{normalize_test_name, set_up};
-use port_scanner::scan_port_addr;
+use iceberg_test_utils::{
+    cleanup_namespace, get_hms_endpoint, get_minio_endpoint, normalize_test_name_with_parts, set_up,
+};
 use tokio::time::sleep;
 use tracing::info;
 
-const HMS_CATALOG_PORT: u16 = 9083;
-const MINIO_PORT: u16 = 9000;
-static DOCKER_COMPOSE_ENV: RwLock<Option<DockerCompose>> = RwLock::new(None);
 type Result<T> = std::result::Result<T, iceberg::Error>;
 
-#[ctor]
-fn before_all() {
-    let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
-    let docker_compose = DockerCompose::new(
-        normalize_test_name(module_path!()),
-        format!("{}/testdata/hms_catalog", env!("CARGO_MANIFEST_DIR")),
-    );
-    docker_compose.up();
-    guard.replace(docker_compose);
-}
-
-#[dtor]
-fn after_all() {
-    let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
-    guard.take();
-}
-
 async fn get_catalog() -> HmsCatalog {
     set_up();
 
-    let (hms_catalog_ip, minio_ip) = {
-        let guard = DOCKER_COMPOSE_ENV.read().unwrap();
-        let docker_compose = guard.as_ref().unwrap();
-        (
-            docker_compose.get_container_ip("hive-metastore"),
-            docker_compose.get_container_ip("minio"),
-        )
-    };
-    let hms_socket_addr = SocketAddr::new(hms_catalog_ip, HMS_CATALOG_PORT);
-    let minio_socket_addr = SocketAddr::new(minio_ip, MINIO_PORT);
-    while !scan_port_addr(hms_socket_addr) {
-        info!("scan hms_socket_addr {} check", hms_socket_addr);
-        info!("Waiting for 1s hms catalog to ready...");
-        sleep(std::time::Duration::from_millis(1000)).await;
-    }
-
-    while !scan_port_addr(minio_socket_addr) {
-        info!("Waiting for 1s minio to ready...");
-        sleep(std::time::Duration::from_millis(1000)).await;
-    }
+    let hms_endpoint = get_hms_endpoint();
+    let minio_endpoint = get_minio_endpoint();
 
     let props = HashMap::from([
-        (
-            HMS_CATALOG_PROP_URI.to_string(),
-            hms_socket_addr.to_string(),
-        ),
+        (HMS_CATALOG_PROP_URI.to_string(), hms_endpoint),
         (
             HMS_CATALOG_PROP_THRIFT_TRANSPORT.to_string(),
             THRIFT_TRANSPORT_BUFFERED.to_string(),
@@ -94,10 +53,7 @@ async fn get_catalog() -> HmsCatalog {
             HMS_CATALOG_PROP_WAREHOUSE.to_string(),
             "s3a://warehouse/hive".to_string(),
         ),
-        (
-            S3_ENDPOINT.to_string(),
-            format!("http://{minio_socket_addr}";),
-        ),
+        (S3_ENDPOINT.to_string(), minio_endpoint),
         (S3_ACCESS_KEY_ID.to_string(), "admin".to_string()),
         (S3_SECRET_ACCESS_KEY.to_string(), "password".to_string()),
         (S3_REGION.to_string(), "us-east-1".to_string()),
@@ -157,7 +113,12 @@ fn set_table_creation(location: Option<String>, name: impl ToString) -> Result<T
 async fn test_rename_table() -> Result<()> {
     let catalog = get_catalog().await;
     let creation: TableCreation = set_table_creation(None, "my_table")?;
-    let namespace = Namespace::new(NamespaceIdent::new("test_rename_table".into()));
+    // Use unique namespace to avoid conflicts
+    let namespace = Namespace::new(NamespaceIdent::new(normalize_test_name_with_parts!(
+        "test_rename_table"
+    )));
+    // Clean up from any previous test runs
+    cleanup_namespace(&catalog, namespace.name()).await;
     set_test_namespace(&catalog, namespace.name()).await?;
 
     let table: iceberg::table::Table = catalog.create_table(namespace.name(), creation).await?;
@@ -177,7 +138,11 @@ async fn test_rename_table() -> Result<()> {
 async fn test_table_exists() -> Result<()> {
     let catalog = get_catalog().await;
     let creation = set_table_creation(None, "my_table")?;
-    let namespace = Namespace::new(NamespaceIdent::new("test_table_exists".into()));
+    // Use unique namespace to avoid conflicts
+    let namespace = Namespace::new(NamespaceIdent::new(normalize_test_name_with_parts!(
+        "test_table_exists"
+    )));
+    cleanup_namespace(&catalog, namespace.name()).await;
     set_test_namespace(&catalog, namespace.name()).await?;
 
     let table = catalog.create_table(namespace.name(), creation).await?;
@@ -193,7 +158,11 @@ async fn test_table_exists() -> Result<()> {
 async fn test_drop_table() -> Result<()> {
     let catalog = get_catalog().await;
     let creation = set_table_creation(None, "my_table")?;
-    let namespace = Namespace::new(NamespaceIdent::new("test_drop_table".into()));
+    // Use unique namespace to avoid conflicts
+    let namespace = Namespace::new(NamespaceIdent::new(normalize_test_name_with_parts!(
+        "test_drop_table"
+    )));
+    cleanup_namespace(&catalog, namespace.name()).await;
     set_test_namespace(&catalog, namespace.name()).await?;
 
     let table = catalog.create_table(namespace.name(), creation).await?;
@@ -211,7 +180,11 @@ async fn test_drop_table() -> Result<()> {
 async fn test_load_table() -> Result<()> {
     let catalog = get_catalog().await;
     let creation = set_table_creation(None, "my_table")?;
-    let namespace = Namespace::new(NamespaceIdent::new("test_load_table".into()));
+    // Use unique namespace to avoid conflicts
+    let namespace = Namespace::new(NamespaceIdent::new(normalize_test_name_with_parts!(
+        "test_load_table"
+    )));
+    cleanup_namespace(&catalog, namespace.name()).await;
     set_test_namespace(&catalog, namespace.name()).await?;
 
     let expected = catalog.create_table(namespace.name(), creation).await?;
@@ -235,7 +208,11 @@ async fn test_create_table() -> Result<()> {
     let catalog = get_catalog().await;
     // inject custom location, ignore the namespace prefix
     let creation = set_table_creation(Some("s3a://warehouse/hive".into()), "my_table")?;
-    let namespace = Namespace::new(NamespaceIdent::new("test_create_table".into()));
+    // Use unique namespace to avoid conflicts
+    let namespace = Namespace::new(NamespaceIdent::new(normalize_test_name_with_parts!(
+        "test_create_table"
+    )));
+    cleanup_namespace(&catalog, namespace.name()).await;
     set_test_namespace(&catalog, namespace.name()).await?;
 
     let result = catalog.create_table(namespace.name(), creation).await?;
@@ -259,9 +236,14 @@ async fn test_create_table() -> Result<()> {
 #[tokio::test]
 async fn test_list_tables() -> Result<()> {
     let catalog = get_catalog().await;
-    let ns = Namespace::new(NamespaceIdent::new("test_list_tables".into()));
-    let result = catalog.list_tables(ns.name()).await?;
+    // Use unique namespace to avoid conflicts
+    let ns = Namespace::new(NamespaceIdent::new(normalize_test_name_with_parts!(
+        "test_list_tables"
+    )));
+    // Clean up and create namespace, then verify it's empty
+    cleanup_namespace(&catalog, ns.name()).await;
     set_test_namespace(&catalog, ns.name()).await?;
+    let result = catalog.list_tables(ns.name()).await?;
 
     assert_eq!(result, vec![]);
 
@@ -311,10 +293,12 @@ async fn test_create_namespace() -> Result<()> {
         ("key1".to_string(), "value1".to_string()),
     ]);
 
+    // Use unique namespace to avoid conflicts
     let ns = Namespace::with_properties(
-        NamespaceIdent::new("test_create_namespace".into()),
+        NamespaceIdent::new(normalize_test_name_with_parts!("test_create_namespace")),
         properties.clone(),
     );
+    cleanup_namespace(&catalog, ns.name()).await;
 
     let result = catalog.create_namespace(ns.name(), properties).await?;
 
@@ -355,7 +339,11 @@ async fn test_namespace_exists() -> Result<()> {
     let catalog = get_catalog().await;
 
     let ns_exists = Namespace::new(NamespaceIdent::new("default".into()));
-    let ns_not_exists = Namespace::new(NamespaceIdent::new("test_namespace_exists".into()));
+    // Use unique namespace to ensure it doesn't exist
+    let ns_not_exists = Namespace::new(NamespaceIdent::new(normalize_test_name_with_parts!(
+        "test_namespace_exists"
+    )));
+    cleanup_namespace(&catalog, ns_not_exists.name()).await;
 
     let result_exists = catalog.namespace_exists(ns_exists.name()).await?;
     let result_not_exists = catalog.namespace_exists(ns_not_exists.name()).await?;
@@ -370,7 +358,9 @@ async fn test_namespace_exists() -> Result<()> {
 async fn test_update_namespace() -> Result<()> {
     let catalog = get_catalog().await;
 
-    let ns = NamespaceIdent::new("test_update_namespace".into());
+    // Use unique namespace to avoid conflicts
+    let ns = NamespaceIdent::new(normalize_test_name_with_parts!("test_update_namespace"));
+    cleanup_namespace(&catalog, &ns).await;
     set_test_namespace(&catalog, &ns).await?;
     let properties = HashMap::from([("comment".to_string(), "my_update".to_string())]);
 
@@ -390,7 +380,11 @@ async fn test_update_namespace() -> Result<()> {
 async fn test_drop_namespace() -> Result<()> {
     let catalog = get_catalog().await;
 
-    let ns = Namespace::new(NamespaceIdent::new("delete_me".into()));
+    // Use unique namespace to avoid conflicts
+    let ns = Namespace::new(NamespaceIdent::new(normalize_test_name_with_parts!(
+        "test_drop_namespace"
+    )));
+    cleanup_namespace(&catalog, ns.name()).await;
 
     catalog.create_namespace(ns.name(), HashMap::new()).await?;
 
diff --git a/crates/catalog/rest/Cargo.toml b/crates/catalog/rest/Cargo.toml
index 916b5ccf7..de72b6c61 100644
--- a/crates/catalog/rest/Cargo.toml
+++ b/crates/catalog/rest/Cargo.toml
@@ -44,8 +44,6 @@ typed-builder = { workspace = true }
 uuid = { workspace = true, features = ["v4"] }
 
 [dev-dependencies]
-ctor = { workspace = true }
 iceberg_test_utils = { path = "../../test_utils", features = ["tests"] }
 mockito = { workspace = true }
-port_scanner = { workspace = true }
 tokio = { workspace = true }
diff --git a/crates/catalog/rest/testdata/rest_catalog/docker-compose.yaml b/crates/catalog/rest/testdata/rest_catalog/docker-compose.yaml
deleted file mode 100644
index 29b44b2a2..000000000
--- a/crates/catalog/rest/testdata/rest_catalog/docker-compose.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-networks:
-  rest_bridge:
-
-services:
-  rest:
-    image: apache/iceberg-rest-fixture:1.10.0
-    environment:
-      - AWS_ACCESS_KEY_ID=admin
-      - AWS_SECRET_ACCESS_KEY=password
-      - AWS_REGION=us-east-1
-      - CATALOG_CATALOG__IMPL=org.apache.iceberg.jdbc.JdbcCatalog
-      - CATALOG_URI=jdbc:sqlite:file:/tmp/iceberg_rest_mode=memory
-      - CATALOG_WAREHOUSE=s3://icebergdata/demo
-      - CATALOG_IO__IMPL=org.apache.iceberg.aws.s3.S3FileIO
-      - CATALOG_S3_ENDPOINT=http://minio:9000
-    depends_on:
-      - minio
-    networks:
-      rest_bridge:
-    expose:
-      - 8181
-
-  minio:
-    image: minio/minio:RELEASE.2025-05-24T17-08-30Z
-    environment:
-      - MINIO_ROOT_USER=admin
-      - MINIO_ROOT_PASSWORD=password
-      - MINIO_DOMAIN=minio
-    hostname: icebergdata.minio
-    networks:
-      rest_bridge:
-    expose:
-      - 9001
-      - 9000
-    command: ["server", "/data", "--console-address", ":9001"]
-
-  mc:
-    depends_on:
-      - minio
-    image: minio/mc:RELEASE.2025-05-21T01-59-54Z
-    environment:
-      - AWS_ACCESS_KEY_ID=admin
-      - AWS_SECRET_ACCESS_KEY=password
-      - AWS_REGION=us-east-1
-    entrypoint: >
-      /bin/sh -c " until (/usr/bin/mc alias set minio http://minio:9000 admin password) do echo '...waiting...' && sleep 1; done; /usr/bin/mc rm -r --force minio/icebergdata; /usr/bin/mc mb minio/icebergdata; /usr/bin/mc policy set public minio/icebergdata; tail -f /dev/null "
-    networks:
-      rest_bridge:
diff --git a/crates/catalog/rest/tests/rest_catalog_test.rs b/crates/catalog/rest/tests/rest_catalog_test.rs
index 59fea0b51..60c67caab 100644
--- a/crates/catalog/rest/tests/rest_catalog_test.rs
+++ b/crates/catalog/rest/tests/rest_catalog_test.rs
@@ -16,64 +16,55 @@
 // under the License.
 
 //! Integration tests for rest catalog.
+//!
+//! These tests assume Docker containers are started externally via `make docker-up`.
+//! Each test uses unique namespaces based on module path to avoid conflicts.
 
 use std::collections::HashMap;
-use std::net::SocketAddr;
-use std::sync::RwLock;
 
-use ctor::{ctor, dtor};
 use iceberg::spec::{FormatVersion, NestedField, PrimitiveType, Schema, Type};
 use iceberg::transaction::{ApplyTransactionAction, Transaction};
 use iceberg::{Catalog, CatalogBuilder, Namespace, NamespaceIdent, TableCreation, TableIdent};
 use iceberg_catalog_rest::{REST_CATALOG_PROP_URI, RestCatalog, RestCatalogBuilder};
-use iceberg_test_utils::docker::DockerCompose;
-use iceberg_test_utils::{normalize_test_name, set_up};
-use port_scanner::scan_port_addr;
+use iceberg_test_utils::{
+    cleanup_namespace, get_rest_catalog_endpoint, normalize_test_name_with_parts, set_up,
+};
 use tokio::time::sleep;
 use tracing::info;
 
-const REST_CATALOG_PORT: u16 = 8181;
-static DOCKER_COMPOSE_ENV: RwLock<Option<DockerCompose>> = RwLock::new(None);
-
-#[ctor]
-fn before_all() {
-    let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
-    let docker_compose = DockerCompose::new(
-        normalize_test_name(module_path!()),
-        format!("{}/testdata/rest_catalog", env!("CARGO_MANIFEST_DIR")),
-    );
-    docker_compose.up();
-    guard.replace(docker_compose);
-}
-
-#[dtor]
-fn after_all() {
-    let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
-    guard.take();
-}
-
 async fn get_catalog() -> RestCatalog {
     set_up();
 
-    let rest_catalog_ip = {
-        let guard = DOCKER_COMPOSE_ENV.read().unwrap();
-        let docker_compose = guard.as_ref().unwrap();
-        docker_compose.get_container_ip("rest")
-    };
+    let rest_endpoint = get_rest_catalog_endpoint();
 
-    let rest_socket_addr = SocketAddr::new(rest_catalog_ip, REST_CATALOG_PORT);
-    while !scan_port_addr(rest_socket_addr) {
-        info!("Waiting for 1s rest catalog to ready...");
-        sleep(std::time::Duration::from_millis(1000)).await;
+    // Wait for catalog to be ready
+    let client = reqwest::Client::new();
+    let mut retries = 0;
+    while retries < 30 {
+        match client
+            .get(format!("{rest_endpoint}/v1/config"))
+            .send()
+            .await
+        {
+            Ok(resp) if resp.status().is_success() => {
+                info!("REST catalog is ready at {}", rest_endpoint);
+                break;
+            }
+            _ => {
+                info!(
+                    "Waiting for REST catalog to be ready... (attempt {})",
+                    retries + 1
+                );
+                sleep(std::time::Duration::from_millis(1000)).await;
+                retries += 1;
+            }
+        }
     }
 
     RestCatalogBuilder::default()
         .load(
             "rest",
-            HashMap::from([(
-                REST_CATALOG_PROP_URI.to_string(),
-                format!("http://{rest_socket_addr}";),
-            )]),
+            HashMap::from([(REST_CATALOG_PROP_URI.to_string(), rest_endpoint)]),
         )
         .await
         .unwrap()
@@ -83,9 +74,14 @@ async fn get_catalog() -> RestCatalog {
 async fn test_get_non_exist_namespace() {
     let catalog = get_catalog().await;
 
-    let result = catalog
-        
.get_namespace(&NamespaceIdent::from_strs(["test_get_non_exist_namespace"]).unwrap())
-        .await;
+    // Use unique namespace name to ensure it doesn't exist
+    let ns_ident = NamespaceIdent::new(normalize_test_name_with_parts!(
+        "test_get_non_exist_namespace"
+    ));
+    // Clean up from any previous test runs
+    cleanup_namespace(&catalog, &ns_ident).await;
+
+    let result = catalog.get_namespace(&ns_ident).await;
 
     assert!(result.is_err());
     assert!(result.unwrap_err().to_string().contains("does not exist"));
@@ -95,14 +91,23 @@ async fn test_get_non_exist_namespace() {
 async fn test_get_namespace() {
     let catalog = get_catalog().await;
 
+    // Use unique namespace to avoid conflicts with other tests
     let ns = Namespace::with_properties(
-        NamespaceIdent::from_strs(["apple", "ios"]).unwrap(),
+        NamespaceIdent::from_strs([
+            "apple",
+            "ios",
+            &normalize_test_name_with_parts!("test_get_namespace"),
+        ])
+        .unwrap(),
         HashMap::from([
             ("owner".to_string(), "ray".to_string()),
             ("community".to_string(), "apache".to_string()),
         ]),
     );
 
+    // Clean up from any previous test runs
+    cleanup_namespace(&catalog, ns.name()).await;
+
     // Verify that namespace doesn't exist
     assert!(catalog.get_namespace(ns.name()).await.is_err());
 
@@ -125,8 +130,12 @@ async fn test_get_namespace() {
 async fn test_list_namespace() {
     let catalog = get_catalog().await;
 
+    // Use unique parent namespace to avoid conflicts
+    let parent_ns_name = normalize_test_name_with_parts!("test_list_namespace");
+    let parent_ident = NamespaceIdent::from_strs([&parent_ns_name]).unwrap();
+
     let ns1 = Namespace::with_properties(
-        NamespaceIdent::from_strs(["test_list_namespace", "ios"]).unwrap(),
+        NamespaceIdent::from_strs([&parent_ns_name, "ios"]).unwrap(),
         HashMap::from([
             ("owner".to_string(), "ray".to_string()),
             ("community".to_string(), "apache".to_string()),
@@ -134,22 +143,20 @@ async fn test_list_namespace() {
     );
 
     let ns2 = Namespace::with_properties(
-        NamespaceIdent::from_strs(["test_list_namespace", "macos"]).unwrap(),
+        NamespaceIdent::from_strs([&parent_ns_name, "macos"]).unwrap(),
         HashMap::from([
             ("owner".to_string(), "xuanwo".to_string()),
             ("community".to_string(), "apache".to_string()),
         ]),
     );
 
+    // Clean up from any previous test runs
+    cleanup_namespace(&catalog, ns1.name()).await;
+    cleanup_namespace(&catalog, ns2.name()).await;
+    cleanup_namespace(&catalog, &parent_ident).await;
+
     // Currently this namespace doesn't exist, so it should return error.
-    assert!(
-        catalog
-            .list_namespaces(Some(
-                &NamespaceIdent::from_strs(["test_list_namespace"]).unwrap()
-            ))
-            .await
-            .is_err()
-    );
+    assert!(catalog.list_namespaces(Some(&parent_ident)).await.is_err());
 
     // Create namespaces
     catalog
@@ -162,12 +169,7 @@ async fn test_list_namespace() {
         .unwrap();
 
     // List namespace
-    let nss = catalog
-        .list_namespaces(Some(
-            &NamespaceIdent::from_strs(["test_list_namespace"]).unwrap(),
-        ))
-        .await
-        .unwrap();
+    let nss = catalog.list_namespaces(Some(&parent_ident)).await.unwrap();
 
     assert!(nss.contains(ns1.name()));
     assert!(nss.contains(ns2.name()));
@@ -177,14 +179,23 @@ async fn test_list_namespace() {
 async fn test_list_empty_namespace() {
     let catalog = get_catalog().await;
 
+    // Use unique namespace to avoid conflicts
     let ns_apple = Namespace::with_properties(
-        NamespaceIdent::from_strs(["test_list_empty_namespace", 
"apple"]).unwrap(),
+        NamespaceIdent::from_strs([
+            "list_empty",
+            "apple",
+            &normalize_test_name_with_parts!("test_list_empty_namespace"),
+        ])
+        .unwrap(),
         HashMap::from([
             ("owner".to_string(), "ray".to_string()),
             ("community".to_string(), "apache".to_string()),
         ]),
     );
 
+    // Clean up from any previous test runs
+    cleanup_namespace(&catalog, ns_apple.name()).await;
+
     // Currently this namespace doesn't exist, so it should return error.
     assert!(
         catalog
@@ -211,8 +222,12 @@ async fn test_list_empty_namespace() {
 async fn test_list_root_namespace() {
     let catalog = get_catalog().await;
 
+    // Use unique root namespace to avoid conflicts
+    let root_ns_name = normalize_test_name_with_parts!("test_list_root_namespace");
+    let root_ident = NamespaceIdent::from_strs([&root_ns_name]).unwrap();
+
     let ns1 = Namespace::with_properties(
-        NamespaceIdent::from_strs(["test_list_root_namespace", "apple", 
"ios"]).unwrap(),
+        NamespaceIdent::from_strs([&root_ns_name, "apple", "ios"]).unwrap(),
         HashMap::from([
             ("owner".to_string(), "ray".to_string()),
             ("community".to_string(), "apache".to_string()),
@@ -220,22 +235,20 @@ async fn test_list_root_namespace() {
     );
 
     let ns2 = Namespace::with_properties(
-        NamespaceIdent::from_strs(["test_list_root_namespace", "google", 
"android"]).unwrap(),
+        NamespaceIdent::from_strs([&root_ns_name, "google", "android"]).unwrap(),
         HashMap::from([
             ("owner".to_string(), "xuanwo".to_string()),
             ("community".to_string(), "apache".to_string()),
         ]),
     );
 
+    // Clean up from any previous test runs
+    cleanup_namespace(&catalog, ns1.name()).await;
+    cleanup_namespace(&catalog, ns2.name()).await;
+    cleanup_namespace(&catalog, &root_ident).await;
+
     // Currently this namespace doesn't exist, so it should return error.
-    assert!(
-        catalog
-            .list_namespaces(Some(
-                &NamespaceIdent::from_strs(["test_list_root_namespace"]).unwrap()
-            ))
-            .await
-            .is_err()
-    );
+    assert!(catalog.list_namespaces(Some(&root_ident)).await.is_err());
 
     // Create namespaces
     catalog
@@ -249,21 +262,31 @@ async fn test_list_root_namespace() {
 
     // List namespace
     let nss = catalog.list_namespaces(None).await.unwrap();
-    assert!(nss.contains(&NamespaceIdent::from_strs(["test_list_root_namespace"]).unwrap()));
+    assert!(nss.contains(&root_ident));
 }
 
 #[tokio::test]
 async fn test_create_table() {
     let catalog = get_catalog().await;
 
+    // Use unique namespace to avoid conflicts
     let ns = Namespace::with_properties(
-        NamespaceIdent::from_strs(["test_create_table", "apple", 
"ios"]).unwrap(),
+        NamespaceIdent::from_strs([
+            "create_table",
+            "apple",
+            "ios",
+            &normalize_test_name_with_parts!("test_create_table"),
+        ])
+        .unwrap(),
         HashMap::from([
             ("owner".to_string(), "ray".to_string()),
             ("community".to_string(), "apache".to_string()),
         ]),
     );
 
+    // Clean up from any previous test runs
+    cleanup_namespace(&catalog, ns.name()).await;
+
     // Create namespaces
     catalog
         .create_namespace(ns.name(), ns.properties().clone())
@@ -311,14 +334,24 @@ async fn test_create_table() {
 async fn test_update_table() {
     let catalog = get_catalog().await;
 
+    // Use unique namespace to avoid conflicts
     let ns = Namespace::with_properties(
-        NamespaceIdent::from_strs(["test_update_table", "apple", 
"ios"]).unwrap(),
+        NamespaceIdent::from_strs([
+            "update_table",
+            "apple",
+            "ios",
+            &normalize_test_name_with_parts!("test_update_table"),
+        ])
+        .unwrap(),
         HashMap::from([
             ("owner".to_string(), "ray".to_string()),
             ("community".to_string(), "apache".to_string()),
         ]),
     );
 
+    // Clean up from any previous test runs
+    cleanup_namespace(&catalog, ns.name()).await;
+
     // Create namespaces
     catalog
         .create_namespace(ns.name(), ns.properties().clone())
@@ -380,15 +413,24 @@ fn assert_map_contains(map1: &HashMap<String, String>, map2: &HashMap<String, St
 async fn test_list_empty_multi_level_namespace() {
     let catalog = get_catalog().await;
 
+    // Use unique namespace to avoid conflicts
     let ns_apple = Namespace::with_properties(
-        NamespaceIdent::from_strs(["test_list_empty_multi_level_namespace", 
"a_a", "apple"])
-            .unwrap(),
+        NamespaceIdent::from_strs([
+            "multi_level",
+            "a_a",
+            "apple",
+            &normalize_test_name_with_parts!("test_list_empty_multi_level_namespace"),
+        ])
+        .unwrap(),
         HashMap::from([
             ("owner".to_string(), "ray".to_string()),
             ("community".to_string(), "apache".to_string()),
         ]),
     );
 
+    // Clean up from any previous test runs
+    cleanup_namespace(&catalog, ns_apple.name()).await;
+
     // Currently this namespace doesn't exist, so it should return error.
     assert!(
         catalog
@@ -405,10 +447,7 @@ async fn test_list_empty_multi_level_namespace() {
 
     // List namespace
     let nss = catalog
-        .list_namespaces(Some(
-            
&NamespaceIdent::from_strs(["test_list_empty_multi_level_namespace", "a_a", 
"apple"])
-                .unwrap(),
-        ))
+        .list_namespaces(Some(ns_apple.name()))
         .await
         .unwrap();
     assert!(nss.is_empty());
@@ -418,8 +457,12 @@ async fn test_list_empty_multi_level_namespace() {
 async fn test_register_table() {
     let catalog = get_catalog().await;
 
-    // Create namespace
-    let ns = NamespaceIdent::from_strs(["ns"]).unwrap();
+    // Create unique namespace to avoid conflicts
+    let ns = NamespaceIdent::new(normalize_test_name_with_parts!("test_register_table"));
+
+    // Clean up from any previous test runs
+    cleanup_namespace(&catalog, &ns).await;
+
     catalog.create_namespace(&ns, HashMap::new()).await.unwrap();
 
     // Create the table, store the metadata location, drop the table
@@ -434,7 +477,7 @@ async fn test_register_table() {
     let metadata_location = table.metadata_location().unwrap();
     catalog.drop_table(table.identifier()).await.unwrap();
 
-    let new_table_identifier = TableIdent::from_strs(["ns", "t2"]).unwrap();
+    let new_table_identifier = TableIdent::new(ns.clone(), "t2".to_string());
     let table_registered = catalog
         .register_table(&new_table_identifier, metadata_location.to_string())
         .await
diff --git a/crates/iceberg/Cargo.toml b/crates/iceberg/Cargo.toml
index 9a5de7736..3835e77d1 100644
--- a/crates/iceberg/Cargo.toml
+++ b/crates/iceberg/Cargo.toml
@@ -92,7 +92,6 @@ uuid = { workspace = true }
 zstd = { workspace = true }
 
 [dev-dependencies]
-ctor = { workspace = true }
 expect-test = { workspace = true }
 iceberg_test_utils = { path = "../test_utils", features = ["tests"] }
 mockall = { workspace = true }
diff --git a/crates/iceberg/testdata/file_io_gcs/docker-compose.yaml b/crates/iceberg/testdata/file_io_gcs/docker-compose.yaml
deleted file mode 100644
index 6935a0864..000000000
--- a/crates/iceberg/testdata/file_io_gcs/docker-compose.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-services:
-  gcs-server:
-    image: fsouza/fake-gcs-server@sha256:36b0116fae5236e8def76ccb07761a9ca323e476f366a5f4bf449cac19deaf2d
-    expose:
-      - 4443
-    command: --scheme http
diff --git a/crates/iceberg/testdata/file_io_s3/docker-compose.yaml b/crates/iceberg/testdata/file_io_s3/docker-compose.yaml
deleted file mode 100644
index cbce31864..000000000
--- a/crates/iceberg/testdata/file_io_s3/docker-compose.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-services:
-  minio:
-    image: minio/minio:RELEASE.2024-02-26T09-33-48Z
-    expose:
-      - 9000
-      - 9001
-    environment:
-      MINIO_ROOT_USER: 'admin'
-      MINIO_ROOT_PASSWORD: 'password'
-      MINIO_ADDRESS: ':9000'
-      MINIO_CONSOLE_ADDRESS: ':9001'
-    entrypoint: sh
-    command: -c 'mkdir -p /data/bucket1 && /usr/bin/minio server /data'
diff --git a/crates/iceberg/tests/file_io_gcs_test.rs b/crates/iceberg/tests/file_io_gcs_test.rs
index 9fbcdadd0..005b0f397 100644
--- a/crates/iceberg/tests/file_io_gcs_test.rs
+++ b/crates/iceberg/tests/file_io_gcs_test.rs
@@ -16,59 +16,30 @@
 // under the License.
 
 //! Integration tests for FileIO Google Cloud Storage (GCS).
+//!
+//! These tests assume Docker containers are started externally via `make docker-up`.
 
 #[cfg(all(test, feature = "storage-gcs"))]
 mod tests {
     use std::collections::HashMap;
-    use std::net::SocketAddr;
-    use std::sync::RwLock;
 
     use bytes::Bytes;
-    use ctor::{ctor, dtor};
     use iceberg::io::{FileIO, FileIOBuilder, GCS_NO_AUTH, GCS_SERVICE_PATH};
-    use iceberg_test_utils::docker::DockerCompose;
-    use iceberg_test_utils::{normalize_test_name, set_up};
+    use iceberg_test_utils::{get_gcs_endpoint, set_up};
 
-    static DOCKER_COMPOSE_ENV: RwLock<Option<DockerCompose>> = RwLock::new(None);
-    static FAKE_GCS_PORT: u16 = 4443;
     static FAKE_GCS_BUCKET: &str = "test-bucket";
 
-    #[ctor]
-    fn before_all() {
-        let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
-        let docker_compose = DockerCompose::new(
-            normalize_test_name(module_path!()),
-            format!("{}/testdata/file_io_gcs", env!("CARGO_MANIFEST_DIR")),
-        );
-        docker_compose.up();
-        guard.replace(docker_compose);
-    }
-
-    #[dtor]
-    fn after_all() {
-        let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
-        guard.take();
-    }
-
     async fn get_file_io_gcs() -> FileIO {
         set_up();
 
-        let ip = DOCKER_COMPOSE_ENV
-            .read()
-            .unwrap()
-            .as_ref()
-            .unwrap()
-            .get_container_ip("gcs-server");
-        let addr = SocketAddr::new(ip, FAKE_GCS_PORT);
+        let gcs_endpoint = get_gcs_endpoint();
 
         // A bucket must exist for FileIO
-        create_bucket(FAKE_GCS_BUCKET, addr.to_string())
-            .await
-            .unwrap();
+        create_bucket(FAKE_GCS_BUCKET, &gcs_endpoint).await.unwrap();
 
         FileIOBuilder::new("gcs")
             .with_props(vec![
-                (GCS_SERVICE_PATH, format!("http://{addr}")),
+                (GCS_SERVICE_PATH, gcs_endpoint),
                 (GCS_NO_AUTH, "true".to_string()),
             ])
             .build()
@@ -76,12 +47,12 @@ mod tests {
     }
 
     // Create a bucket against the emulated GCS storage server.
-    async fn create_bucket(name: &str, server_addr: String) -> anyhow::Result<()> {
+    async fn create_bucket(name: &str, server_endpoint: &str) -> anyhow::Result<()> {
         let mut bucket_data = HashMap::new();
         bucket_data.insert("name", name);
 
         let client = reqwest::Client::new();
-        let endpoint = format!("http://{server_addr}/storage/v1/b");
+        let endpoint = format!("{server_endpoint}/storage/v1/b");
         client.post(endpoint).json(&bucket_data).send().await?;
         Ok(())
     }
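
For illustration, any later test against the shared fake-gcs-server can build on the same helper; a hypothetical sketch, not part of this commit (the `gcs://` path scheme is an assumption and should match whatever scheme this FileIO accepts):

    #[tokio::test]
    async fn test_gcs_roundtrip() {
        // get_file_io_gcs() already created FAKE_GCS_BUCKET above.
        let file_io = get_file_io_gcs().await;
        let path = format!("gcs://{FAKE_GCS_BUCKET}/roundtrip");
        file_io.new_output(&path).unwrap().write("hello".into()).await.unwrap();
        let buffer = file_io.new_input(&path).unwrap().read().await.unwrap();
        assert_eq!(buffer, bytes::Bytes::from("hello"));
    }
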
diff --git a/crates/iceberg/tests/file_io_s3_test.rs b/crates/iceberg/tests/file_io_s3_test.rs
index b04412832..f28538e73 100644
--- a/crates/iceberg/tests/file_io_s3_test.rs
+++ b/crates/iceberg/tests/file_io_s3_test.rs
@@ -16,51 +16,30 @@
 // under the License.
 
 //! Integration tests for FileIO S3.
+//!
+//! These tests assume Docker containers are started externally via `make docker-up`.
+//! Each test uses unique file paths based on the module path to avoid conflicts.
 #[cfg(all(test, feature = "storage-s3"))]
 mod tests {
-    use std::net::{IpAddr, SocketAddr};
-    use std::sync::{Arc, RwLock};
+    use std::sync::Arc;
 
     use async_trait::async_trait;
-    use ctor::{ctor, dtor};
     use iceberg::io::{
         CustomAwsCredentialLoader, FileIO, FileIOBuilder, S3_ACCESS_KEY_ID, S3_ENDPOINT, S3_REGION,
         S3_SECRET_ACCESS_KEY,
     };
-    use iceberg_test_utils::docker::DockerCompose;
-    use iceberg_test_utils::{normalize_test_name, set_up};
+    use iceberg_test_utils::{get_minio_endpoint, normalize_test_name_with_parts, set_up};
     use reqsign::{AwsCredential, AwsCredentialLoad};
     use reqwest::Client;
 
-    const MINIO_PORT: u16 = 9000;
-    static DOCKER_COMPOSE_ENV: RwLock<Option<DockerCompose>> = RwLock::new(None);
-
-    #[ctor]
-    fn before_all() {
-        let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
-        let docker_compose = DockerCompose::new(
-            normalize_test_name(module_path!()),
-            format!("{}/testdata/file_io_s3", env!("CARGO_MANIFEST_DIR")),
-        );
-        docker_compose.up();
-        guard.replace(docker_compose);
-    }
-
-    #[dtor]
-    fn after_all() {
-        let mut guard = DOCKER_COMPOSE_ENV.write().unwrap();
-        guard.take();
-    }
-
     async fn get_file_io() -> FileIO {
         set_up();
 
-        let container_ip = get_container_ip("minio");
-        let minio_socket_addr = SocketAddr::new(container_ip, MINIO_PORT);
+        let minio_endpoint = get_minio_endpoint();
 
         FileIOBuilder::new("s3")
             .with_props(vec![
-                (S3_ENDPOINT, format!("http://{minio_socket_addr}")),
+                (S3_ENDPOINT, minio_endpoint),
                 (S3_ACCESS_KEY_ID, "admin".to_string()),
                 (S3_SECRET_ACCESS_KEY, "password".to_string()),
                 (S3_REGION, "us-east-1".to_string()),
@@ -69,12 +48,6 @@ mod tests {
             .unwrap()
     }
 
-    fn get_container_ip(service_name: &str) -> IpAddr {
-        let guard = DOCKER_COMPOSE_ENV.read().unwrap();
-        let docker_compose = guard.as_ref().unwrap();
-        docker_compose.get_container_ip(service_name)
-    }
-
     #[tokio::test]
     async fn test_file_io_s3_exists() {
         let file_io = get_file_io().await;
@@ -85,23 +58,35 @@ mod tests {
     #[tokio::test]
     async fn test_file_io_s3_output() {
         let file_io = get_file_io().await;
-        assert!(!file_io.exists("s3://bucket1/test_output").await.unwrap());
-        let output_file = file_io.new_output("s3://bucket1/test_output").unwrap();
+        // Use a unique file path based on the module path to avoid conflicts
+        let output_path = format!(
+            "s3://bucket1/{}",
+            normalize_test_name_with_parts!("test_file_io_s3_output")
+        );
+        // Clean up from any previous test runs
+        let _ = file_io.delete(&output_path).await;
+        assert!(!file_io.exists(&output_path).await.unwrap());
+        let output_file = file_io.new_output(&output_path).unwrap();
         {
             output_file.write("123".into()).await.unwrap();
         }
-        assert!(file_io.exists("s3://bucket1/test_output").await.unwrap());
+        assert!(file_io.exists(&output_path).await.unwrap());
     }
 
     #[tokio::test]
     async fn test_file_io_s3_input() {
         let file_io = get_file_io().await;
-        let output_file = file_io.new_output("s3://bucket1/test_input").unwrap();
+        // Use a unique file path based on the module path to avoid conflicts
+        let file_path = format!(
+            "s3://bucket1/{}",
+            normalize_test_name_with_parts!("test_file_io_s3_input")
+        );
+        let output_file = file_io.new_output(&file_path).unwrap();
         {
             output_file.write("test_input".into()).await.unwrap();
         }
 
-        let input_file = file_io.new_input("s3://bucket1/test_input").unwrap();
+        let input_file = file_io.new_input(&file_path).unwrap();
 
         {
             let buffer = input_file.read().await.unwrap();
@@ -200,15 +185,13 @@ mod tests {
         let mock_loader = MockCredentialLoader::new_minio();
         let custom_loader = CustomAwsCredentialLoader::new(Arc::new(mock_loader));
 
-        // Get container info for endpoint
-        let container_ip = get_container_ip("minio");
-        let minio_socket_addr = SocketAddr::new(container_ip, MINIO_PORT);
+        let minio_endpoint = get_minio_endpoint();
 
         // Build FileIO with custom credential loader
         let file_io_with_custom_creds = FileIOBuilder::new("s3")
             .with_extension(custom_loader)
             .with_props(vec![
-                (S3_ENDPOINT, format!("http://{minio_socket_addr}")),
+                (S3_ENDPOINT, minio_endpoint),
                 (S3_REGION, "us-east-1".to_string()),
             ])
             .build()
@@ -229,15 +212,13 @@ mod tests {
         let mock_loader = MockCredentialLoader::new(None);
         let custom_loader = CustomAwsCredentialLoader::new(Arc::new(mock_loader));
 
-        // Get container info for endpoint
-        let container_ip = get_container_ip("minio");
-        let minio_socket_addr = SocketAddr::new(container_ip, MINIO_PORT);
+        let minio_endpoint = get_minio_endpoint();
 
         // Build FileIO with custom credential loader
         let file_io_with_custom_creds = FileIOBuilder::new("s3")
             .with_extension(custom_loader)
             .with_props(vec![
-                (S3_ENDPOINT, format!("http://{minio_socket_addr}")),
+                (S3_ENDPOINT, minio_endpoint),
                 (S3_REGION, "us-east-1".to_string()),
             ])
             .build()
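
Later S3 tests should follow the same unique-path convention; a hypothetical sketch, not part of this commit:

    #[tokio::test]
    async fn test_file_io_s3_delete() {
        let file_io = get_file_io().await;
        // A module-path-derived key keeps parallel test binaries from
        // clobbering each other's objects in the shared bucket.
        let path = format!(
            "s3://bucket1/{}",
            normalize_test_name_with_parts!("test_file_io_s3_delete")
        );
        let _ = file_io.delete(&path).await; // tolerate leftovers from earlier runs
        file_io.new_output(&path).unwrap().write("x".into()).await.unwrap();
        assert!(file_io.exists(&path).await.unwrap());
        file_io.delete(&path).await.unwrap();
        assert!(!file_io.exists(&path).await.unwrap());
    }
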
diff --git a/crates/integration_tests/Cargo.toml b/crates/integration_tests/Cargo.toml
index 07eea5f37..07291f29e 100644
--- a/crates/integration_tests/Cargo.toml
+++ b/crates/integration_tests/Cargo.toml
@@ -27,7 +27,6 @@ version = { workspace = true }
 [dependencies]
 arrow-array = { workspace = true }
 arrow-schema = { workspace = true }
-ctor = { workspace = true }
 datafusion = { workspace = true }
 futures = { workspace = true }
 iceberg = { workspace = true }
diff --git a/crates/integration_tests/src/lib.rs b/crates/integration_tests/src/lib.rs
index 44f6c3024..e99c74df6 100644
--- a/crates/integration_tests/src/lib.rs
+++ b/crates/integration_tests/src/lib.rs
@@ -16,19 +16,61 @@
 // under the License.
 
 use std::collections::HashMap;
+use std::sync::OnceLock;
 
 use iceberg::io::{S3_ACCESS_KEY_ID, S3_ENDPOINT, S3_REGION, S3_SECRET_ACCESS_KEY};
 use iceberg_catalog_rest::REST_CATALOG_PROP_URI;
 use iceberg_test_utils::docker::DockerCompose;
-use iceberg_test_utils::{normalize_test_name, set_up};
+use iceberg_test_utils::{
+    get_minio_endpoint, get_rest_catalog_endpoint, normalize_test_name, set_up,
+};
 
 const REST_CATALOG_PORT: u16 = 8181;
 
+/// Test fixture that manages Docker containers.
+/// This is kept for backward compatibility but deprecated in favor of GlobalTestFixture.
 pub struct TestFixture {
     pub _docker_compose: DockerCompose,
     pub catalog_config: HashMap<String, String>,
 }
 
+/// Global test fixture that uses environment-based configuration.
+/// This assumes Docker containers are started externally (e.g., via `make docker-up`).
+pub struct GlobalTestFixture {
+    pub catalog_config: HashMap<String, String>,
+}
+
+static GLOBAL_FIXTURE: OnceLock<GlobalTestFixture> = OnceLock::new();
+
+impl GlobalTestFixture {
+    /// Creates a new GlobalTestFixture from environment variables.
+    /// Uses default localhost endpoints if environment variables are not set.
+    pub fn from_env() -> Self {
+        set_up();
+
+        let rest_endpoint = get_rest_catalog_endpoint();
+        let minio_endpoint = get_minio_endpoint();
+
+        let catalog_config = HashMap::from([
+            (REST_CATALOG_PROP_URI.to_string(), rest_endpoint),
+            (S3_ENDPOINT.to_string(), minio_endpoint),
+            (S3_ACCESS_KEY_ID.to_string(), "admin".to_string()),
+            (S3_SECRET_ACCESS_KEY.to_string(), "password".to_string()),
+            (S3_REGION.to_string(), "us-east-1".to_string()),
+        ]);
+
+        GlobalTestFixture { catalog_config }
+    }
+}
+
+/// Returns a reference to the global test fixture.
+/// This fixture assumes Docker containers are started externally.
+pub fn get_test_fixture() -> &'static GlobalTestFixture {
+    GLOBAL_FIXTURE.get_or_init(GlobalTestFixture::from_env)
+}
+
+/// Legacy function to create a test fixture with Docker container management.
+/// Deprecated: prefer using `get_test_fixture()` with externally managed containers.
 pub fn set_test_fixture(func: &str) -> TestFixture {
     set_up();
     let docker_compose = DockerCompose::new(
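
Consumers migrate by swapping set_test_fixture for the OnceLock-backed accessor; a minimal sketch of the consuming side (hypothetical test, not part of this commit):

    use iceberg_catalog_rest::REST_CATALOG_PROP_URI;
    use iceberg_integration_tests::get_test_fixture;

    #[test]
    fn fixture_exposes_rest_uri() {
        // Created once per process; later calls return the same instance.
        let fixture = get_test_fixture();
        let uri = fixture
            .catalog_config
            .get(REST_CATALOG_PROP_URI)
            .expect("from_env always sets the REST URI");
        assert!(uri.starts_with("http"));
    }
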
diff --git a/crates/integration_tests/testdata/docker-compose.yaml b/crates/integration_tests/testdata/docker-compose.yaml
deleted file mode 100644
index 30fc3914f..000000000
--- a/crates/integration_tests/testdata/docker-compose.yaml
+++ /dev/null
@@ -1,108 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-networks:
-  rest_bridge:
-
-services:
-  rest:
-    image: apache/iceberg-rest-fixture:1.10.0
-    environment:
-      - AWS_ACCESS_KEY_ID=admin
-      - AWS_SECRET_ACCESS_KEY=password
-      - AWS_REGION=us-east-1
-      - CATALOG_CATALOG__IMPL=org.apache.iceberg.jdbc.JdbcCatalog
-      - CATALOG_URI=jdbc:sqlite:file:/tmp/iceberg_rest_mode=memory
-      - CATALOG_WAREHOUSE=s3://icebergdata/demo
-      - CATALOG_IO__IMPL=org.apache.iceberg.aws.s3.S3FileIO
-      - CATALOG_S3_ENDPOINT=http://minio:9000
-    depends_on:
-      - minio
-    networks:
-      rest_bridge:
-    ports:
-      - 8181:8181
-    expose:
-      - 8181
-
-  minio:
-    image: minio/minio:RELEASE.2025-05-24T17-08-30Z
-    environment:
-      - MINIO_ROOT_USER=admin
-      - MINIO_ROOT_PASSWORD=password
-      - MINIO_DOMAIN=minio
-      - MINIO_DEFAULT_BUCKETS=icebergdata
-    hostname: icebergdata.minio
-    networks:
-      rest_bridge:
-    ports:
-      - 9001:9001
-    expose:
-      - 9001
-      - 9000
-    command: ["server", "/data", "--console-address", ":9001"]
-
-  mc:
-    depends_on:
-      - minio
-    image: minio/mc:RELEASE.2025-05-21T01-59-54Z
-    environment:
-      - AWS_ACCESS_KEY_ID=admin
-      - AWS_SECRET_ACCESS_KEY=password
-      - AWS_REGION=us-east-1
-    entrypoint: >
-      /bin/sh -c " until (/usr/bin/mc alias set minio http://minio:9000 admin password) do echo '...waiting...' && sleep 1; done; /usr/bin/mc rm -r --force minio/icebergdata; /usr/bin/mc mb minio/icebergdata; /usr/bin/mc policy set public minio/icebergdata; tail -f /dev/null "
-    networks:
-      rest_bridge:
-
-  spark-iceberg:
-    build: spark/
-    networks:
-      rest_bridge:
-    depends_on:
-      - rest
-      - minio
-    environment:
-      - AWS_ACCESS_KEY_ID=admin
-      - AWS_SECRET_ACCESS_KEY=password
-      - AWS_REGION=us-east-1
-    ports:
-      - 15002:15002 # Spark Connect
-      - 4040:4040 # Spark UI
-    healthcheck:
-      test: ["CMD", "sh", "-c", "netstat -an | grep 15002 | grep LISTEN"]
-      interval: 30s
-      timeout: 10s
-      retries: 5
-      start_period: 90s
-
-  provision:
-    image: python:3.12-slim
-    networks:
-      rest_bridge:
-    depends_on:
-      spark-iceberg:
-        condition: service_healthy
-    entrypoint: ["/bin/sh", "-c", "pip install -q 'pyspark[connect]==4.0.1' && python3 /opt/spark/provision.py && touch /tmp/provision_complete && tail -f /dev/null"]
-    volumes:
-      - ./spark/provision.py:/opt/spark/provision.py:ro
-    healthcheck:
-      test: ["CMD-SHELL", "[ -f /tmp/provision_complete ]"]
-      interval: 2s
-      timeout: 2s
-      retries: 90
-      start_period: 20s
diff --git a/crates/integration_tests/tests/shared.rs b/crates/integration_tests/tests/shared.rs
index 6bdddaa6c..ab85e4d1a 100644
--- a/crates/integration_tests/tests/shared.rs
+++ b/crates/integration_tests/tests/shared.rs
@@ -15,22 +15,12 @@
 // specific language governing permissions and limitations
 // under the License.
 
-use std::sync::{Arc, OnceLock};
-
-use ctor::dtor;
-use iceberg_integration_tests::{TestFixture, set_test_fixture};
+use iceberg_integration_tests::{GlobalTestFixture, get_test_fixture};
 
 pub mod shared_tests;
 
-static DOCKER_CONTAINERS: OnceLock<Arc<TestFixture>> = OnceLock::new();
-
-pub fn get_shared_containers() -> &'static Arc<TestFixture> {
-    DOCKER_CONTAINERS.get_or_init(|| Arc::new(set_test_fixture("shared_tests")))
-}
-
-#[dtor]
-fn shutdown() {
-    if let Some(fixture) = DOCKER_CONTAINERS.get() {
-        fixture._docker_compose.down()
-    }
+/// Returns a reference to the shared test fixture.
+/// This assumes Docker containers are started externally via `make docker-up`.
+pub fn get_shared_containers() -> &'static GlobalTestFixture {
+    get_test_fixture()
 }
diff --git a/crates/integration_tests/tests/shared_tests/mod.rs b/crates/integration_tests/tests/shared_tests/mod.rs
index 065b14d5d..1e695c588 100644
--- a/crates/integration_tests/tests/shared_tests/mod.rs
+++ b/crates/integration_tests/tests/shared_tests/mod.rs
@@ -54,7 +54,7 @@ pub async fn random_ns() -> Namespace {
     ns
 }
 
-fn test_schema() -> Schema {
+pub fn test_schema() -> Schema {
     Schema::builder()
         .with_schema_id(1)
         .with_identifier_field_ids(vec![2])
diff --git a/crates/test_utils/Cargo.toml b/crates/test_utils/Cargo.toml
index 9a486b15e..58f930779 100644
--- a/crates/test_utils/Cargo.toml
+++ b/crates/test_utils/Cargo.toml
@@ -26,6 +26,7 @@ license = { workspace = true }
 repository = { workspace = true }
 
 [dependencies]
+iceberg = { workspace = true }
 tracing = { workspace = true }
 tracing-subscriber = { workspace = true }
 
diff --git a/crates/test_utils/src/lib.rs b/crates/test_utils/src/lib.rs
index 6d1104a1b..54ec54193 100644
--- a/crates/test_utils/src/lib.rs
+++ b/crates/test_utils/src/lib.rs
@@ -31,6 +31,8 @@ pub use common::*;
 mod common {
     use std::sync::Once;
 
+    use iceberg::{Catalog, NamespaceIdent};
+
     static INIT: Once = Once::new();
     pub fn set_up() {
         INIT.call_once(tracing_subscriber::fmt::init);
@@ -38,4 +40,83 @@ mod common {
     pub fn normalize_test_name(s: impl ToString) -> String {
         s.to_string().replace("::", "__").replace('.', "_")
     }
+
+    // Environment variable names for service endpoints
+    pub const ENV_MINIO_ENDPOINT: &str = "ICEBERG_TEST_MINIO_ENDPOINT";
+    pub const ENV_REST_CATALOG_ENDPOINT: &str = "ICEBERG_TEST_REST_ENDPOINT";
+    pub const ENV_HMS_ENDPOINT: &str = "ICEBERG_TEST_HMS_ENDPOINT";
+    pub const ENV_GLUE_ENDPOINT: &str = "ICEBERG_TEST_GLUE_ENDPOINT";
+    pub const ENV_GCS_ENDPOINT: &str = "ICEBERG_TEST_GCS_ENDPOINT";
+
+    // Default ports matching dev/docker-compose.yaml
+    pub const DEFAULT_MINIO_PORT: u16 = 9000;
+    pub const DEFAULT_REST_CATALOG_PORT: u16 = 8181;
+    pub const DEFAULT_HMS_PORT: u16 = 9083;
+    pub const DEFAULT_GLUE_PORT: u16 = 5000;
+    pub const DEFAULT_GCS_PORT: u16 = 4443;
+
+    /// Returns the MinIO S3-compatible endpoint.
+    /// Checks ICEBERG_TEST_MINIO_ENDPOINT env var, otherwise returns localhost default.
+    pub fn get_minio_endpoint() -> String {
+        std::env::var(ENV_MINIO_ENDPOINT)
+            .unwrap_or_else(|_| format!("http://localhost:{DEFAULT_MINIO_PORT}"))
+    }
+
+    /// Returns the REST catalog endpoint.
+    /// Checks ICEBERG_TEST_REST_ENDPOINT env var, otherwise returns localhost default.
+    pub fn get_rest_catalog_endpoint() -> String {
+        std::env::var(ENV_REST_CATALOG_ENDPOINT)
+            .unwrap_or_else(|_| format!("http://localhost:{DEFAULT_REST_CATALOG_PORT}"))
+    }
+
+    /// Returns the HMS (Hive Metastore) endpoint.
+    /// Checks ICEBERG_TEST_HMS_ENDPOINT env var, otherwise returns localhost default.
+    pub fn get_hms_endpoint() -> String {
+        std::env::var(ENV_HMS_ENDPOINT).unwrap_or_else(|_| format!("localhost:{DEFAULT_HMS_PORT}"))
+    }
+
+    /// Returns the Glue (Moto mock) endpoint.
+    /// Checks ICEBERG_TEST_GLUE_ENDPOINT env var, otherwise returns localhost default.
+    pub fn get_glue_endpoint() -> String {
+        std::env::var(ENV_GLUE_ENDPOINT)
+            .unwrap_or_else(|_| format!("http://localhost:{DEFAULT_GLUE_PORT}"))
+    }
+
+    /// Returns the GCS (fake-gcs-server) endpoint.
+    /// Checks ICEBERG_TEST_GCS_ENDPOINT env var, otherwise returns localhost default.
+    pub fn get_gcs_endpoint() -> String {
+        std::env::var(ENV_GCS_ENDPOINT)
+            .unwrap_or_else(|_| format!("http://localhost:{DEFAULT_GCS_PORT}"))
+    }
+
+    /// Helper to clean up a namespace and its tables before a test runs.
+    /// This handles the case where previous test runs left data in the persistent database.
+    pub async fn cleanup_namespace<C: Catalog>(catalog: &C, ns: &NamespaceIdent) {
+        // Try to drop all tables in the namespace first
+        if let Ok(tables) = catalog.list_tables(ns).await {
+            for table in tables {
+                let _ = catalog.drop_table(&table).await;
+            }
+        }
+        // Then try to drop the namespace itself
+        let _ = catalog.drop_namespace(ns).await;
+    }
+
+    /// Macro to generate a normalized test name with module path prefix.
+    /// Takes one or more string parts and joins them with the module path.
+    ///
+    /// Example:
+    /// ```ignore
+    /// // Returns something like "rest_catalog_test__test_create_table"
+    /// let name = normalize_test_name_with_parts!("test_create_table");
+    ///
+    /// // Returns something like "rest_catalog_test__apple__ios"
+    /// let name = normalize_test_name_with_parts!("apple", "ios");
+    /// ```
+    #[macro_export]
+    macro_rules! normalize_test_name_with_parts {
+        ($($part:expr),+) => {
+            $crate::normalize_test_name([module_path!(), $($part),+].join("_"))
+        };
+    }
 }
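
Taken together, the helpers let a catalog test reset its state up front; a hypothetical sketch, not part of this commit, generic over any Catalog implementation:

    use std::collections::HashMap;

    use iceberg::{Catalog, NamespaceIdent};
    use iceberg_test_utils::cleanup_namespace;

    /// Drops leftovers from previous runs, then recreates the namespace empty.
    async fn fresh_namespace<C: Catalog>(catalog: &C, name: &str) -> NamespaceIdent {
        let ns = NamespaceIdent::new(name.to_string());
        // "Did not exist" errors are swallowed inside cleanup_namespace.
        cleanup_namespace(catalog, &ns).await;
        catalog
            .create_namespace(&ns, HashMap::new())
            .await
            .expect("namespace should be creatable after cleanup");
        ns
    }
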
diff --git a/dev/docker-compose.yaml b/dev/docker-compose.yaml
new file mode 100644
index 000000000..0df017a75
--- /dev/null
+++ b/dev/docker-compose.yaml
@@ -0,0 +1,202 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Unified Docker Compose for all integration tests.
+# This file combines services needed for REST, HMS, Glue, S3, and GCS tests.
+# Start once with: docker compose -f dev/docker-compose.yaml up -d --wait
+# Stop with: docker compose -f dev/docker-compose.yaml down -v
+
+networks:
+  iceberg_test:
+
+services:
+  # =============================================================================
+  # MinIO - S3-compatible storage (shared by all tests)
+  # =============================================================================
+  minio:
+    image: minio/minio:RELEASE.2025-05-24T17-08-30Z
+    environment:
+      - MINIO_ROOT_USER=admin
+      - MINIO_ROOT_PASSWORD=password
+      - MINIO_DOMAIN=minio
+    hostname: minio
+    networks:
+      iceberg_test:
+        # Add aliases for virtual-hosted style bucket access
+        aliases:
+          - icebergdata.minio
+          - warehouse.minio
+          - bucket1.minio
+    ports:
+      - "9000:9000"
+      - "9001:9001"
+    command: ["server", "/data", "--console-address", ":9001"]
+    healthcheck:
+      test: ["CMD", "mc", "ready", "local"]
+      interval: 5s
+      timeout: 5s
+      retries: 5
+
+  # MinIO client - creates buckets for tests
+  mc:
+    depends_on:
+      minio:
+        condition: service_healthy
+    image: minio/mc:RELEASE.2025-05-21T01-59-54Z
+    environment:
+      - AWS_ACCESS_KEY_ID=admin
+      - AWS_SECRET_ACCESS_KEY=password
+      - AWS_REGION=us-east-1
+    entrypoint: >
+      /bin/sh -c "
+        /usr/bin/mc alias set minio http://minio:9000 admin password;
+        /usr/bin/mc mb --ignore-existing minio/icebergdata;
+        /usr/bin/mc mb --ignore-existing minio/warehouse;
+        /usr/bin/mc mb --ignore-existing minio/bucket1;
+        /usr/bin/mc policy set public minio/icebergdata;
+        /usr/bin/mc policy set public minio/warehouse;
+        /usr/bin/mc policy set public minio/bucket1;
+        echo 'Buckets created successfully';
+        tail -f /dev/null
+      "
+    networks:
+      iceberg_test:
+
+  # =============================================================================
+  # REST Catalog - Apache Iceberg REST Catalog
+  # =============================================================================
+  rest:
+    image: apache/iceberg-rest-fixture:1.10.0
+    environment:
+      - AWS_ACCESS_KEY_ID=admin
+      - AWS_SECRET_ACCESS_KEY=password
+      - AWS_REGION=us-east-1
+      - CATALOG_CATALOG__IMPL=org.apache.iceberg.jdbc.JdbcCatalog
+      - CATALOG_URI=jdbc:sqlite:file:/tmp/iceberg_rest_mode=memory
+      - CATALOG_WAREHOUSE=s3://icebergdata/demo
+      - CATALOG_IO__IMPL=org.apache.iceberg.aws.s3.S3FileIO
+      - CATALOG_S3_ENDPOINT=http://minio:9000
+    depends_on:
+      minio:
+        condition: service_healthy
+    networks:
+      iceberg_test:
+    ports:
+      - "8181:8181"
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8181/v1/config";]
+      interval: 5s
+      timeout: 5s
+      retries: 10
+      start_period: 10s
+
+  # =============================================================================
+  # Hive Metastore - HMS Catalog
+  # =============================================================================
+  hive-metastore:
+    build:
+      context: ./hms
+      dockerfile: Dockerfile
+    platform: ${DOCKER_DEFAULT_PLATFORM:-linux/amd64}
+    depends_on:
+      minio:
+        condition: service_healthy
+    networks:
+      iceberg_test:
+    ports:
+      - "9083:9083"
+    environment:
+      SERVICE_NAME: "metastore"
+      SERVICE_OPTS: "-Dmetastore.warehouse.dir=s3a://warehouse/hive/"
+    healthcheck:
+      test: ["CMD", "bash", "-c", "cat < /dev/null > /dev/tcp/localhost/9083"]
+      interval: 5s
+      timeout: 5s
+      retries: 20
+      start_period: 30s
+
+  # =============================================================================
+  # Moto - AWS Glue Mock for Glue Catalog tests
+  # =============================================================================
+  moto:
+    image: motoserver/moto:5.0.3
+    networks:
+      iceberg_test:
+    ports:
+      - "5000:5000"
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:5000/moto-api/";]
+      interval: 5s
+      timeout: 5s
+      retries: 5
+
+  # =============================================================================
+  # Fake GCS Server - GCS emulator for GCS tests
+  # =============================================================================
+  gcs-server:
+    image: fsouza/fake-gcs-server@sha256:36b0116fae5236e8def76ccb07761a9ca323e476f366a5f4bf449cac19deaf2d
+    networks:
+      iceberg_test:
+    ports:
+      - "4443:4443"
+    command: --scheme http
+
+  # =============================================================================
+  # Spark - Spark with Iceberg for integration tests
+  # =============================================================================
+  spark-iceberg:
+    build:
+      context: ./spark
+      dockerfile: Dockerfile
+    networks:
+      iceberg_test:
+    depends_on:
+      rest:
+        condition: service_healthy
+      minio:
+        condition: service_healthy
+    environment:
+      - AWS_ACCESS_KEY_ID=admin
+      - AWS_SECRET_ACCESS_KEY=password
+      - AWS_REGION=us-east-1
+    ports:
+      - "15002:15002"
+      - "4040:4040"
+    healthcheck:
+      test: ["CMD", "sh", "-c", "netstat -an | grep 15002 | grep LISTEN"]
+      interval: 30s
+      timeout: 10s
+      retries: 5
+      start_period: 90s
+
+  # Provision service - creates test data via Spark
+  provision:
+    image: python:3.12-slim
+    networks:
+      iceberg_test:
+    depends_on:
+      spark-iceberg:
+        condition: service_healthy
+    entrypoint: ["/bin/sh", "-c", "pip install -q 'pyspark[connect]==4.0.1' && python3 /opt/spark/provision.py && touch /tmp/provision_complete && tail -f /dev/null"]
+    volumes:
+      - ./spark/provision.py:/opt/spark/provision.py:ro
+    healthcheck:
+      test: ["CMD-SHELL", "[ -f /tmp/provision_complete ]"]
+      interval: 2s
+      timeout: 2s
+      retries: 90
+      start_period: 20s
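
Before running `cargo test`, a harness can verify the unified stack is actually up by probing the same endpoint the compose healthcheck polls; a hypothetical sketch (assumes reqwest, which the FileIO tests above already depend on):

    use iceberg_test_utils::get_rest_catalog_endpoint;

    /// Returns true once the REST catalog answers /v1/config, i.e. the same
    /// condition the `rest` service healthcheck above waits for.
    async fn rest_catalog_is_ready() -> bool {
        let url = format!("{}/v1/config", get_rest_catalog_endpoint());
        reqwest::get(&url)
            .await
            .map(|resp| resp.status().is_success())
            .unwrap_or(false)
    }
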
diff --git a/crates/catalog/hms/testdata/hms_catalog/Dockerfile b/dev/hms/Dockerfile
similarity index 100%
rename from crates/catalog/hms/testdata/hms_catalog/Dockerfile
rename to dev/hms/Dockerfile
diff --git a/crates/catalog/hms/testdata/hms_catalog/core-site.xml b/dev/hms/core-site.xml
similarity index 96%
rename from crates/catalog/hms/testdata/hms_catalog/core-site.xml
rename to dev/hms/core-site.xml
index f0583a0bc..f23efa888 100644
--- a/crates/catalog/hms/testdata/hms_catalog/core-site.xml
+++ b/dev/hms/core-site.xml
@@ -1,51 +1,51 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-    <property>
-        <name>fs.defaultFS</name>
-        <value>s3a://warehouse/hive</value>
-    </property>
-    <property>
-        <name>fs.s3a.impl</name>
-        <value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>
-    </property>
-    <property>
-        <name>fs.s3a.fast.upload</name>
-        <value>true</value>
-    </property>
-    <property>
-      <name>fs.s3a.endpoint</name>
-      <value>http://minio:9000</value>
-    </property>
-    <property>
-      <name>fs.s3a.access.key</name>
-      <value>admin</value>
-    </property>
-    <property>
-      <name>fs.s3a.secret.key</name>
-      <value>password</value>
-    </property>
-    <property>
-      <name>fs.s3a.connection.ssl.enabled</name>
-      <value>false</value>
-    </property>
-    <property>
-      <name>fs.s3a.path.style.access</name>
-      <value>true</value>
-    </property>
-</configuration>
\ No newline at end of file
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+    <property>
+        <name>fs.defaultFS</name>
+        <value>s3a://warehouse/hive</value>
+    </property>
+    <property>
+        <name>fs.s3a.impl</name>
+        <value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>
+    </property>
+    <property>
+        <name>fs.s3a.fast.upload</name>
+        <value>true</value>
+    </property>
+    <property>
+      <name>fs.s3a.endpoint</name>
+      <value>http://minio:9000</value>
+    </property>
+    <property>
+      <name>fs.s3a.access.key</name>
+      <value>admin</value>
+    </property>
+    <property>
+      <name>fs.s3a.secret.key</name>
+      <value>password</value>
+    </property>
+    <property>
+      <name>fs.s3a.connection.ssl.enabled</name>
+      <value>false</value>
+    </property>
+    <property>
+      <name>fs.s3a.path.style.access</name>
+      <value>true</value>
+    </property>
+</configuration>
diff --git a/crates/integration_tests/testdata/spark/Dockerfile b/dev/spark/Dockerfile
similarity index 100%
rename from crates/integration_tests/testdata/spark/Dockerfile
rename to dev/spark/Dockerfile
diff --git a/crates/integration_tests/testdata/spark/provision.py b/dev/spark/provision.py
old mode 100755
new mode 100644
similarity index 100%
rename from crates/integration_tests/testdata/spark/provision.py
rename to dev/spark/provision.py
diff --git a/crates/integration_tests/testdata/spark/spark-defaults.conf b/dev/spark/spark-defaults.conf
similarity index 100%
rename from crates/integration_tests/testdata/spark/spark-defaults.conf
rename to dev/spark/spark-defaults.conf

