This is an automated email from the ASF dual-hosted git repository.

gavinchou pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new 68f8369ee49 [cloud](storage vault) Rewrite vaults regression test and 
fix two errors (#42411) (#43207)
68f8369ee49 is described below

commit 68f8369ee49704ee3f2b82df3698ba3c8e2ee0fe
Author: Lei Zhang <[email protected]>
AuthorDate: Tue Nov 5 21:16:59 2024 +0800

    [cloud](storage vault) Rewrite vaults regression test and fix two errors 
(#42411) (#43207)
    
    * Rewrite vaults regression test
    * Fix the `show storage vault` statement displaying the `default` column incorrectly
    * Fix `create table` failing to find a storage vault immediately after the vault is created
---
 cloud/src/meta-service/meta_service_resource.cpp   |   2 +
 .../org/apache/doris/catalog/StorageVault.java     |   2 +-
 .../java/org/apache/doris/qe/ShowExecutor.java     |   3 +-
 .../org/apache/doris/regression/suite/Suite.groovy |  22 ++-
 .../vault_p0/alter/test_alter_hdfs_vault.groovy    |  98 +++++++++++
 .../vault_p0/alter/test_alter_s3_vault.groovy      | 106 ++++++++++++
 .../create/test_create_vault.groovy}               | 179 +++++++++----------
 .../vault_p0/default/test_default_vault.groovy     | 136 +++++++++++++++
 .../forbid/test_forbid_vault.groovy}               |  10 +-
 .../vault_p0/privilege/test_vault_privilege.groovy | 190 +++++++++++++++++++++
 .../privilege/test_vault_privilege_restart.groovy  | 187 ++++++++++++++++++++
 .../suites/vaults/alter/alter_hdfs.groovy          | 119 -------------
 .../suites/vaults/alter/alter_s3.groovy            | 124 --------------
 .../suites/vaults/default/default.groovy           | 139 ---------------
 regression-test/suites/vaults/privilege.groovy     | 183 --------------------
 .../suites/vaults/privilege_restart.groovy         | 178 -------------------
 16 files changed, 832 insertions(+), 846 deletions(-)

diff --git a/cloud/src/meta-service/meta_service_resource.cpp 
b/cloud/src/meta-service/meta_service_resource.cpp
index b8bef65c91b..399e0964f4d 100644
--- a/cloud/src/meta-service/meta_service_resource.cpp
+++ b/cloud/src/meta-service/meta_service_resource.cpp
@@ -989,6 +989,7 @@ void 
MetaServiceImpl::alter_storage_vault(google::protobuf::RpcController* contr
             instance.set_default_storage_vault_id(vault.id());
             instance.set_default_storage_vault_name(vault.name());
         }
+        response->set_storage_vault_id(vault.id());
         LOG_INFO("try to put storage vault_id={}, vault_name={}, 
vault_key={}", vault.id(),
                  vault.name(), hex(vault_key));
     } break;
@@ -1006,6 +1007,7 @@ void 
MetaServiceImpl::alter_storage_vault(google::protobuf::RpcController* contr
             
instance.set_default_storage_vault_id(*instance.resource_ids().rbegin());
             
instance.set_default_storage_vault_name(*instance.storage_vault_names().rbegin());
         }
+        response->set_storage_vault_id(request->vault().id());
         break;
     }
     case AlterObjStoreInfoRequest::ADD_BUILT_IN_VAULT: {
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java 
b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java
index 11867bcfb96..df9310526e4 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java
@@ -229,7 +229,7 @@ public abstract class StorageVault {
         }
 
         int vaultIdIndex = IntStream.range(0, columns.size())
-                .filter(i -> columns.get(i).getName().equals("StorageVaultId"))
+                .filter(i -> columns.get(i).getName().equals("Id"))
                 .findFirst()
                 .orElse(-1);
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java 
b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java
index 153823c2edc..6c584f7255a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java
@@ -3407,8 +3407,7 @@ public class ShowExecutor {
             UserIdentity user = ctx.getCurrentUserIdentity();
             rows = resp.getStorageVaultList().stream()
                     .filter(storageVault -> auth.checkStorageVaultPriv(user, 
storageVault.getName(),
-                            PrivPredicate.USAGE)
-                    )
+                            PrivPredicate.USAGE))
                     .map(StorageVault::convertToShowStorageVaultProperties)
                     .collect(Collectors.toList());
             if (resp.hasDefaultStorageVaultId()) {
diff --git 
a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy
 
b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy
index 4aab3f774d7..cb3cdeb869e 100644
--- 
a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy
+++ 
b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy
@@ -17,16 +17,20 @@
 
 package org.apache.doris.regression.suite
 
-import org.awaitility.Awaitility
 import static java.util.concurrent.TimeUnit.SECONDS
-import groovy.json.JsonOutput
+
+import com.google.common.base.Strings
+import com.google.common.collect.ImmutableList
 import com.google.common.collect.Maps
 import com.google.common.util.concurrent.Futures
 import com.google.common.util.concurrent.ListenableFuture
 import com.google.common.util.concurrent.MoreExecutors
 import com.google.gson.Gson
+import groovy.json.JsonOutput
 import groovy.json.JsonSlurper
-import com.google.common.collect.ImmutableList
+import groovy.util.logging.Slf4j
+
+import org.awaitility.Awaitility
 import org.apache.commons.lang3.ObjectUtils
 import org.apache.doris.regression.Config
 import org.apache.doris.regression.RegressionTest
@@ -53,7 +57,6 @@ import org.jetbrains.annotations.NotNull
 import org.junit.jupiter.api.Assertions
 import org.slf4j.Logger
 import org.slf4j.LoggerFactory
-import groovy.util.logging.Slf4j
 
 import java.sql.Connection
 import java.io.File
@@ -1512,12 +1515,13 @@ class Suite implements GroovyInterceptable {
     }
 
     boolean enableStoragevault() {
-        boolean ret = false;
-        if (context.config.metaServiceHttpAddress == null || 
context.config.metaServiceHttpAddress.isEmpty() ||
-                context.config.instanceId == null || 
context.config.instanceId.isEmpty() ||
-                context.config.metaServiceToken == null || 
context.config.metaServiceToken.isEmpty()) {
-            return ret;
+        if (Strings.isNullOrEmpty(context.config.metaServiceHttpAddress)
+                || Strings.isNullOrEmpty(context.config.instanceId)
+                || Strings.isNullOrEmpty(context.config.metaServiceToken)) {
+            return false;
         }
+
+        boolean ret = false;
         def getInstanceInfo = { check_func ->
             httpTest {
                 endpoint context.config.metaServiceHttpAddress
diff --git a/regression-test/suites/vault_p0/alter/test_alter_hdfs_vault.groovy 
b/regression-test/suites/vault_p0/alter/test_alter_hdfs_vault.groovy
new file mode 100644
index 00000000000..3893d43c02a
--- /dev/null
+++ b/regression-test/suites/vault_p0/alter/test_alter_hdfs_vault.groovy
@@ -0,0 +1,98 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_alter_hdfs_vault", "nonConcurrent") {
+    def suiteName = name;
+    if (!isCloudMode()) {
+        logger.info("skip ${name} case, because not cloud mode")
+        return
+    }
+
+    if (!enableStoragevault()) {
+        logger.info("skip ${name} case, because storage vault not enabled")
+        return
+    }
+
+    sql """
+        CREATE STORAGE VAULT IF NOT EXISTS ${suiteName}
+        PROPERTIES (
+            "type"="HDFS",
+            "fs.defaultFS"="${getHmsHdfsFs()}",
+            "path_prefix" = "${suiteName}",
+            "hadoop.username" = "hadoop"
+        );
+    """
+
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${suiteName}
+            PROPERTIES (
+                "type"="hdfs",
+                "path_prefix" = "${suiteName}"
+            );
+        """
+    }, "Alter property")
+
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${suiteName}
+            PROPERTIES (
+                "type"="hdfs",
+                "fs.defaultFS" = "not_exist_vault"
+            );
+        """
+    }, "Alter property")
+
+    def vaultName = suiteName
+    String properties;
+
+    def vaultInfos = try_sql """show storage vault"""
+
+    for (int i = 0; i < vaultInfos.size(); i++) {
+        def name = vaultInfos[i][0]
+        if (name.equals(vaultName)) {
+            properties = vaultInfos[i][2]
+        }
+    }
+
+    def newVaultName = suiteName + "_new";
+    sql """
+        ALTER STORAGE VAULT ${vaultName}
+        PROPERTIES (
+            "type"="hdfs",
+            "VAULT_NAME" = "${newVaultName}",
+            "hadoop.username" = "hdfs"
+        );
+    """
+
+    vaultInfos = sql """ SHOW STORAGE VAULT; """
+    boolean exist = false
+
+    for (int i = 0; i < vaultInfos.size(); i++) {
+        def name = vaultInfos[i][0]
+        logger.info("name is ${name}, info ${vaultInfos[i]}")
+        if (name.equals(vaultName)) {
+            assertTrue(false);
+        }
+        if (name.equals(newVaultName)) {
+            assertTrue(vaultInfos[i][2].contains("""user: "hdfs" """))
+            exist = true
+        }
+    }
+    assertTrue(exist)
+    expectExceptionLike({sql """insert into ${suiteName} values("2", 
"2");"""}, "")
+}
diff --git a/regression-test/suites/vault_p0/alter/test_alter_s3_vault.groovy 
b/regression-test/suites/vault_p0/alter/test_alter_s3_vault.groovy
new file mode 100644
index 00000000000..723422c6e0b
--- /dev/null
+++ b/regression-test/suites/vault_p0/alter/test_alter_s3_vault.groovy
@@ -0,0 +1,106 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_alter_s3_vault", "nonConcurrent") {
+    def suiteName = name;
+    if (!isCloudMode()) {
+        logger.info("skip ${suiteName} case, because not cloud mode")
+        return
+    }
+
+    if (!enableStoragevault()) {
+        logger.info("skip ${suiteName} case, because storage vault not 
enabled")
+        return
+    }
+
+    sql """
+        CREATE STORAGE VAULT IF NOT EXISTS ${suiteName}
+        PROPERTIES (
+            "type"="S3",
+            "s3.endpoint"="${getS3Endpoint()}",
+            "s3.region" = "${getS3Region()}",
+            "s3.access_key" = "${getS3AK()}",
+            "s3.secret_key" = "${getS3SK()}",
+            "s3.root.path" = "${suiteName}",
+            "s3.bucket" = "${getS3BucketName()}",
+            "s3.external_endpoint" = "",
+            "provider" = "${getS3Provider()}"
+        );
+    """
+
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${suiteName}
+            PROPERTIES (
+            "type"="S3",
+            "s3.bucket" = "error_bucket"
+            );
+        """
+    }, "Alter property")
+
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${suiteName}
+            PROPERTIES (
+            "type"="S3",
+            "provider" = "${getS3Provider()}"
+            );
+        """
+    }, "Alter property")
+
+
+    def vaultName = suiteName
+    String properties;
+
+    def vaultInfos = try_sql """show storage vault"""
+
+    for (int i = 0; i < vaultInfos.size(); i++) {
+        def name = vaultInfos[i][0]
+        if (name.equals(vaultName)) {
+            properties = vaultInfos[i][2]
+        }
+    }
+
+    def newVaultName = suiteName + "_new";
+
+    sql """
+        ALTER STORAGE VAULT ${vaultName}
+        PROPERTIES (
+            "type"="S3",
+            "VAULT_NAME" = "${newVaultName}",
+            "s3.access_key" = "new_ak"
+        );
+    """
+
+    vaultInfos = sql """SHOW STORAGE VAULT;"""
+    boolean exist = false
+
+    for (int i = 0; i < vaultInfos.size(); i++) {
+        def name = vaultInfos[i][0]
+        logger.info("name is ${name}, info ${vaultInfos[i]}")
+        if (name.equals(vaultName)) {
+            assertTrue(false);
+        }
+        if (name.equals(newVaultName)) {
+            assertTrue(vaultInfos[i][2].contains("new_ak"))
+            exist = true
+        }
+    }
+    assertTrue(exist)
+    // failed to insert due to the wrong ak
+    expectExceptionLike({ sql """insert into alter_s3_vault_tbl values("2", 
"2");""" }, "")
+}
diff --git a/regression-test/suites/vaults/create/create.groovy 
b/regression-test/suites/vault_p0/create/test_create_vault.groovy
similarity index 54%
rename from regression-test/suites/vaults/create/create.groovy
rename to regression-test/suites/vault_p0/create/test_create_vault.groovy
index 32f22dbd89a..bf6ddc756df 100644
--- a/regression-test/suites/vaults/create/create.groovy
+++ b/regression-test/suites/vault_p0/create/test_create_vault.groovy
@@ -15,9 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-suite("create_vault", "nonConcurrent") {
+suite("test_create_vault", "nonConcurrent") {
+    if (!isCloudMode()) {
+        logger.info("skip test_create_vault case because not cloud mode")
+        return
+    }
+
     if (!enableStoragevault()) {
-        logger.info("skip create storgage vault case")
+        logger.info("skip test_create_vault case")
         return
     }
 
@@ -25,65 +30,57 @@ suite("create_vault", "nonConcurrent") {
         sql """
             CREATE STORAGE VAULT IF NOT EXISTS failed_vault
             PROPERTIES (
-            "type"="S3",
-            "fs.defaultFS"="${getHmsHdfsFs()}",
-            "path_prefix" = "ssb_sf1_p2",
-            "hadoop.username" = "hadoop"
+                "type"="S3",
+                "fs.defaultFS"="${getHmsHdfsFs()}",
+                "path_prefix" = "ssb_sf1_p2",
+                "hadoop.username" = "hadoop"
             );
-        """
-    }, "Missing")
+           """
+    }, "Missing [s3.endpoint] in properties")
 
     expectExceptionLike({
         sql """
             CREATE STORAGE VAULT IF NOT EXISTS failed_vault
             PROPERTIES (
-            "type"="hdfs",
-            "s3.bucket"="${getHmsHdfsFs()}",
-            "path_prefix" = "ssb_sf1_p2",
-            "hadoop.username" = "hadoop"
+                "type"="hdfs",
+                "s3.bucket"="${getHmsHdfsFs()}",
+                "path_prefix" = "ssb_sf1_p2",
+                "hadoop.username" = "hadoop"
             );
-        """
+            """
     }, "invalid fs_name")
 
     expectExceptionLike({
-        sql """
-            CREATE STORAGE VAULT IF NOT EXISTS failed_vault
-            PROPERTIES (
-            );
-        """
-    }, "Encountered")
+        sql """ CREATE STORAGE VAULT IF NOT EXISTS failed_vault PROPERTIES (); 
"""
+    }, "mismatched input ')'")
 
 
     sql """
         CREATE STORAGE VAULT IF NOT EXISTS create_hdfs_vault
         PROPERTIES (
-        "type"="hdfs",
-        "fs.defaultFS"="${getHmsHdfsFs()}",
-        "path_prefix" = "default_vault_ssb_hdfs_vault",
-        "hadoop.username" = "hadoop"
+            "type"="hdfs",
+            "fs.defaultFS"="${getHmsHdfsFs()}",
+            "path_prefix" = "default_vault_ssb_hdfs_vault",
+            "hadoop.username" = "hadoop"
         );
-    """
+        """
+
+    try_sql """ DROP TABLE IF EXISTS create_table_use_vault FORCE; """
 
-    try_sql """
-        drop table create_table_use_vault
-    """
-    
     sql """
         CREATE TABLE IF NOT EXISTS create_table_use_vault (
                 C_CUSTKEY     INTEGER NOT NULL,
                 C_NAME        INTEGER NOT NULL
-                )
-                DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                PROPERTIES (
-                "replication_num" = "1",
-                "storage_vault_name" = "create_hdfs_vault"
-                )
-    """
+        )
+        DUPLICATE KEY(C_CUSTKEY, C_NAME)
+        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+        PROPERTIES (
+            "replication_num" = "1",
+            "storage_vault_name" = "create_hdfs_vault"
+        )
+        """
 
-    String create_stmt = sql """
-        show create table create_table_use_vault
-    """
+    String create_stmt = sql """ SHOW CREATE TABLE create_table_use_vault """
 
     logger.info("the create table stmt is ${create_stmt}")
     assertTrue(create_stmt.contains("create_hdfs_vault"))
@@ -92,9 +89,9 @@ suite("create_vault", "nonConcurrent") {
         sql """
             CREATE STORAGE VAULT create_hdfs_vault
             PROPERTIES (
-            "type"="hdfs",
-            "fs.defaultFS"="${getHmsHdfsFs()}",
-            "path_prefix" = "default_vault_ssb_hdfs_vault"
+                "type"="hdfs",
+                "fs.defaultFS"="${getHmsHdfsFs()}",
+                "path_prefix" = "default_vault_ssb_hdfs_vault"
             );
         """
     }, "already created")
@@ -103,60 +100,54 @@ suite("create_vault", "nonConcurrent") {
     sql """
         CREATE STORAGE VAULT IF NOT EXISTS create_s3_vault
         PROPERTIES (
-        "type"="S3",
-        "s3.endpoint"="${getS3Endpoint()}",
-        "s3.region" = "${getS3Region()}",
-        "s3.access_key" = "${getS3AK()}",
-        "s3.secret_key" = "${getS3SK()}",
-        "s3.root.path" = "ssb_sf1_p2_s3",
-        "s3.bucket" = "${getS3BucketName()}",
-        "s3.external_endpoint" = "",
-        "provider" = "${getS3Provider()}"
-        );
-    """
-
-    expectExceptionLike({
-        sql """
-            CREATE STORAGE VAULT create_s3_vault
-            PROPERTIES (
             "type"="S3",
             "s3.endpoint"="${getS3Endpoint()}",
             "s3.region" = "${getS3Region()}",
             "s3.access_key" = "${getS3AK()}",
             "s3.secret_key" = "${getS3SK()}",
-            "s3.root.path" = "ssb_sf1_p2_s3",
+            "s3.root.path" = "test_create_s3_vault",
             "s3.bucket" = "${getS3BucketName()}",
             "s3.external_endpoint" = "",
             "provider" = "${getS3Provider()}"
+        );
+    """
+
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT create_s3_vault
+            PROPERTIES (
+                "type"="S3",
+                "s3.endpoint"="${getS3Endpoint()}",
+                "s3.region" = "${getS3Region()}",
+                "s3.access_key" = "${getS3AK()}",
+                "s3.secret_key" = "${getS3SK()}",
+                "s3.root.path" = "test_create_s3_vault",
+                "s3.bucket" = "${getS3BucketName()}",
+                "s3.external_endpoint" = "",
+                "provider" = "${getS3Provider()}"
             );
         """
     }, "already created")
 
     sql """
         CREATE TABLE IF NOT EXISTS create_table_use_s3_vault (
-                C_CUSTKEY     INTEGER NOT NULL,
-                C_NAME        INTEGER NOT NULL
-                )
-                DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                PROPERTIES (
-                "replication_num" = "1",
-                "storage_vault_name" = "create_s3_vault"
-                )
+            C_CUSTKEY     INTEGER NOT NULL,
+            C_NAME        INTEGER NOT NULL
+        )
+        DUPLICATE KEY(C_CUSTKEY, C_NAME)
+        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+        PROPERTIES (
+            "replication_num" = "1",
+            "storage_vault_name" = "create_s3_vault"
+        )
     """
 
-    sql """
-        insert into create_table_use_s3_vault values(1,1);
-    """
+    sql """ insert into create_table_use_s3_vault values(1,1); """
 
-    sql """
-        select * from create_table_use_s3_vault;
-    """
+    sql """ select * from create_table_use_s3_vault; """
 
 
-    def vaults_info = try_sql """
-        show storage vault
-    """
+    def vaults_info = try_sql """ show storage vault """
 
     
     boolean create_hdfs_vault_exist = false;
@@ -182,16 +173,32 @@ suite("create_vault", "nonConcurrent") {
         sql """
             CREATE STORAGE VAULT IF NOT EXISTS built_in_storage_vault
             PROPERTIES (
-            "type"="S3",
-            "s3.endpoint"="${getS3Endpoint()}",
-            "s3.region" = "${getS3Region()}",
-            "s3.access_key" = "${getS3AK()}",
-            "s3.secret_key" = "${getS3SK()}",
-            "s3.root.path" = "ssb_sf1_p2_s3",
-            "s3.bucket" = "${getS3BucketName()}",
-            "s3.external_endpoint" = "",
-            "provider" = "${getS3Provider()}"
+                "type"="S3",
+                "s3.endpoint"="${getS3Endpoint()}",
+                "s3.region" = "${getS3Region()}",
+                "s3.access_key" = "${getS3AK()}",
+                "s3.secret_key" = "${getS3SK()}",
+                "s3.root.path" = "test_built_in_storage_vault",
+                "s3.bucket" = "${getS3BucketName()}",
+                "s3.external_endpoint" = "",
+                "provider" = "${getS3Provider()}"
             );
         """
     }, "already created")
+
+
+    expectExceptionLike({
+        sql """
+            CREATE TABLE IF NOT EXISTS create_table_with_not_exist_vault (
+                C_CUSTKEY     INTEGER NOT NULL,
+                C_NAME        INTEGER NOT NULL
+            )
+            DUPLICATE KEY(C_CUSTKEY, C_NAME)
+            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+            PROPERTIES (
+                "replication_num" = "1",
+                "storage_vault_name" = "not_exist_vault"
+            )
+        """
+    }, "Storage vault 'not_exist_vault' does not exist")
 }
diff --git a/regression-test/suites/vault_p0/default/test_default_vault.groovy 
b/regression-test/suites/vault_p0/default/test_default_vault.groovy
new file mode 100644
index 00000000000..0ee871458b0
--- /dev/null
+++ b/regression-test/suites/vault_p0/default/test_default_vault.groovy
@@ -0,0 +1,136 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_default_vault", "nonConcurrent") {
+    if (!isCloudMode()) {
+        logger.info("skip ${name} case, because not cloud mode")
+        return
+    }
+
+    if (!enableStoragevault()) {
+        logger.info("skip ${name} case")
+        return
+    }
+
+    try {
+        sql """ UNSET DEFAULT STORAGE VAULT; """
+
+        expectExceptionLike({
+            sql """ set not_exist as default storage vault """
+        }, "invalid storage vault name")
+
+        def tableName = "table_use_vault"
+        sql "DROP TABLE IF EXISTS ${tableName}"
+
+        expectExceptionLike({
+            sql """
+                CREATE TABLE ${tableName} (
+                    `key` INT,
+                    value INT
+                ) DUPLICATE KEY (`key`) DISTRIBUTED BY HASH (`key`) BUCKETS 1
+                PROPERTIES ('replication_num' = '1')
+            """
+        }, "No default storage vault")
+
+        sql """
+            CREATE STORAGE VAULT IF NOT EXISTS create_s3_vault_for_default
+            PROPERTIES (
+                "type"="S3",
+                "s3.endpoint"="${getS3Endpoint()}",
+                "s3.region" = "${getS3Region()}",
+                "s3.access_key" = "${getS3AK()}",
+                "s3.secret_key" = "${getS3SK()}",
+                "s3.root.path" = "create_s3_vault_for_default",
+                "s3.bucket" = "${getS3BucketName()}",
+                "s3.external_endpoint" = "",
+                "provider" = "${getS3Provider()}",
+                "set_as_default" = "true"
+            );
+        """
+
+        sql """ set create_s3_vault_for_default as default storage vault """
+        def vaultInfos = sql """ SHOW STORAGE VAULT """
+        // check if create_s3_vault_for_default is set as default
+        for (int i = 0; i < vaultInfos.size(); i++) {
+            def name = vaultInfos[i][0]
+            if (name.equals("create_s3_vault_for_default")) {
+                // isDefault is true
+                assertEquals(vaultInfos[i][3], "true")
+            }
+        }
+
+        sql """ UNSET DEFAULT STORAGE VAULT; """
+        vaultInfos = sql """ SHOW STORAGE VAULT """
+        for (int i = 0; i < vaultInfos.size(); i++) {
+            assertEquals(vaultInfos[i][3], "false")
+        }
+
+
+        sql """ set built_in_storage_vault as default storage vault """
+
+        sql "DROP TABLE IF EXISTS ${tableName} FORCE;"
+        sql """
+            CREATE TABLE ${tableName} (
+                `key` INT,
+                value INT
+            ) DUPLICATE KEY (`key`) DISTRIBUTED BY HASH (`key`) BUCKETS 1
+            PROPERTIES ('replication_num' = '1')
+        """
+
+        sql """ insert into ${tableName} values(1, 1); """
+        result """ select * from ${tableName}; """
+        assertEqual(result.size(), 1)
+        assertEqual(result[0][0], 1)
+
+        def create_table_stmt = sql """ show create table ${tableName} """
+        assertTrue(create_table_stmt[0][1].contains("built_in_storage_vault"))
+
+        sql """
+            CREATE STORAGE VAULT IF NOT EXISTS create_default_hdfs_vault
+            PROPERTIES (
+                "type"="hdfs",
+                "fs.defaultFS"="${getHmsHdfsFs()}",
+                "path_prefix" = "default_vault_ssb_hdfs_vault",
+                "hadoop.username" = "hadoop"
+            );
+        """
+
+        sql """ set create_default_hdfs_vault as default storage vault """
+
+        sql "DROP TABLE IF EXISTS ${tableName} FORCE;"
+        sql """
+            CREATE TABLE ${tableName} (
+                `key` INT,
+                value INT
+            ) DUPLICATE KEY (`key`) DISTRIBUTED BY HASH (`key`) BUCKETS 1
+            PROPERTIES ('replication_num' = '1')
+        """
+
+        create_table_stmt = sql """ show create table ${tableName} """
+        
assertTrue(create_table_stmt[0][1].contains("create_default_hdfs_vault"))
+
+        expectExceptionLike({
+            sql """
+                alter table ${tableName} set("storage_vault_name" = 
"built_in_storage_vault");
+            """
+        }, "You can not modify")
+
+    } finally {
+        sql """ set built_in_storage_vault as default storage vault """
+        sql """ set built_in_storage_vault as default storage vault """
+    }
+}
diff --git a/regression-test/suites/vaults/forbid/forbid.groovy 
b/regression-test/suites/vault_p0/forbid/test_forbid_vault.groovy
similarity index 89%
rename from regression-test/suites/vaults/forbid/forbid.groovy
rename to regression-test/suites/vault_p0/forbid/test_forbid_vault.groovy
index 15fba18fc6d..da31ae532af 100644
--- a/regression-test/suites/vaults/forbid/forbid.groovy
+++ b/regression-test/suites/vault_p0/forbid/test_forbid_vault.groovy
@@ -15,14 +15,14 @@
 // specific language governing permissions and limitations
 // under the License.
 
-suite("forbid_vault") {
-    if (enableStoragevault()) {
-        logger.info("skip forbid storage vault case because storage vault 
enabled")
+suite("test_forbid_vault") {
+    if (!isCloudMode()) {
+        logger.info("skip ${name} case, because not cloud mode")
         return
     }
 
-    if (!isCloudMode()) {
-        logger.info("skip forbid storage vault case because not cloud mode")
+    if (enableStoragevault()) {
+        logger.info("skip ${name} case, because storage vault enabled")
         return
     }
 
diff --git 
a/regression-test/suites/vault_p0/privilege/test_vault_privilege.groovy 
b/regression-test/suites/vault_p0/privilege/test_vault_privilege.groovy
new file mode 100644
index 00000000000..feedbadb3b7
--- /dev/null
+++ b/regression-test/suites/vault_p0/privilege/test_vault_privilege.groovy
@@ -0,0 +1,190 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import java.util.stream.Collectors;
+
+suite("test_vault_privilege", "nonConcurrent") {
+    if (!isCloudMode()) {
+        logger.info("skip ${name} case, because not cloud mode")
+        return
+    }
+
+    if (!enableStoragevault()) {
+        logger.info("skip ${name} case, because storage vault not enabled")
+        return
+    }
+
+    try {
+        def vault1 = "test_privilege_vault1"
+        def table1 = "test_privilege_vault_t1"
+        def table2 = "test_privilege_vault_t2"
+        def table3 = "test_privilege_vault_t3"
+
+        sql """
+            CREATE STORAGE VAULT IF NOT EXISTS ${vault1}
+            PROPERTIES (
+                "type"="hdfs",
+                "fs.defaultFS"="${getHmsHdfsFs()}",
+                "path_prefix" = "test_vault_privilege"
+            );
+        """
+
+        def storageVaults = (sql " SHOW STORAGE VAULT; ").stream().map(row -> 
row[0]).collect(Collectors.toSet())
+        assertTrue(storageVaults.contains(vault1))
+
+        sql """
+            SET ${vault1} AS DEFAULT STORAGE VAULT
+        """
+        sql """
+            UNSET DEFAULT STORAGE VAULT
+        """
+
+        sql """
+            DROP TABLE IF EXISTS ${table1};
+        """
+
+        sql """
+            CREATE TABLE IF NOT EXISTS ${table1} (
+                C_CUSTKEY     INTEGER NOT NULL,
+                C_NAME        INTEGER NOT NULL
+            )
+            DUPLICATE KEY(C_CUSTKEY, C_NAME)
+            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+            PROPERTIES (
+                "replication_num" = "1",
+                "storage_vault_name" = ${vault1}
+            )
+        """
+
+        def user1 = "test_privilege_vault_user1"
+        sql """drop user if exists ${user1}"""
+        sql """create user ${user1} identified by 'Cloud12345'"""
+        sql """ GRANT create_priv ON *.*.* TO '${user1}'; """
+
+        def vault2 = "test_privilege_vault2"
+        // Only users with admin role can create storage vault
+        connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
+            expectExceptionLike({
+                sql """
+                    CREATE STORAGE VAULT IF NOT EXISTS ${vault2}
+                    PROPERTIES (
+                    "type"="hdfs",
+                    "fs.defaultFS"="${getHmsHdfsFs()}",
+                    "path_prefix" = "test_vault_privilege"
+                    );
+                """
+            }, "denied")
+        }
+
+        // Only users with admin role can set/unset default storage vault
+        connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
+            expectExceptionLike({
+                sql """
+                    SET ${vault1} AS DEFAULT STORAGE VAULT
+                """
+            }, "denied")
+        }
+        connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
+            expectExceptionLike({
+                sql """
+                    UNSET DEFAULT STORAGE VAULT
+                """
+            }, "denied")
+        }
+
+        def result = connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
+                sql " SHOW STORAGE VAULT; "
+        }
+        assertTrue(result.isEmpty())
+
+        sql """
+            DROP TABLE IF EXISTS ${table2};
+        """
+        connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
+            expectExceptionLike({
+                sql """
+                    CREATE TABLE IF NOT EXISTS ${table2} (
+                            C_CUSTKEY     INTEGER NOT NULL,
+                            C_NAME        INTEGER NOT NULL
+                            )
+                            DUPLICATE KEY(C_CUSTKEY, C_NAME)
+                            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+                            PROPERTIES (
+                            "replication_num" = "1",
+                            "storage_vault_name" = ${vault1}
+                            )
+                """
+            }, "USAGE denied")
+        }
+
+        sql """
+            GRANT usage_priv ON STORAGE VAULT '${vault1}' TO '${user1}';
+        """
+
+        result = connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
+                sql " SHOW STORAGE VAULT; "
+        }
+        storageVaults = result.stream().map(row -> 
row[0]).collect(Collectors.toSet())
+        assertTrue(storageVaults.contains(vault1))
+
+        connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
+            sql """
+                CREATE TABLE IF NOT EXISTS ${table2} (
+                        C_CUSTKEY     INTEGER NOT NULL,
+                        C_NAME        INTEGER NOT NULL
+                        )
+                        DUPLICATE KEY(C_CUSTKEY, C_NAME)
+                        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+                        PROPERTIES (
+                        "replication_num" = "1",
+                        "storage_vault_name" = ${vault1}
+                        )
+            """
+        }
+
+        sql """
+            REVOKE usage_priv ON STORAGE VAULT '${vault1}' FROM '${user1}';
+        """
+
+        result = connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
+                sql " SHOW STORAGE VAULT; "
+        }
+        assertTrue(result.isEmpty())
+
+        sql """
+            DROP TABLE IF EXISTS ${table3};
+        """
+        connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
+            expectExceptionLike({
+                sql """
+                    CREATE TABLE IF NOT EXISTS ${table3} (
+                            C_CUSTKEY     INTEGER NOT NULL,
+                            C_NAME        INTEGER NOT NULL
+                            )
+                            DUPLICATE KEY(C_CUSTKEY, C_NAME)
+                            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+                            PROPERTIES (
+                            "replication_num" = "1",
+                            "storage_vault_name" = ${vault1}
+                            )
+                """
+            }, "USAGE denied")
+        }
+    } finally {
+        sql """ set built_in_storage_vault as default storage vault """
+    }
+}
\ No newline at end of file
diff --git 
a/regression-test/suites/vault_p0/privilege/test_vault_privilege_restart.groovy 
b/regression-test/suites/vault_p0/privilege/test_vault_privilege_restart.groovy
new file mode 100644
index 00000000000..7ff5ec0792b
--- /dev/null
+++ 
b/regression-test/suites/vault_p0/privilege/test_vault_privilege_restart.groovy
@@ -0,0 +1,187 @@
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import java.util.stream.Collectors;
+
+// This test suite is intended to test the granted privilege for specific user 
will
+// not disappear
+suite("test_vault_privilege_restart", "nonConcurrent") {
+    if (!isCloudMode()) {
+        logger.info("skip ${name} case, because not cloud mode")
+        return
+    }
+
+    if (!enableStoragevault()) {
+        logger.info("skip ${name} case, because storage vault not enabled")
+        return
+    }
+
+    try {
+        // user1 will be kept before and after running this test in order to 
check
+        // the granted vault privilege is persisted well even though FE 
restarts many times
+        def user1 = "test_privilege_vault_restart_user1"
+        def passwd = "Cloud12345"
+
+        def vault1 = "test_privilege_vault_restart_vault1"
+        // this value is derived from current file location: 
regression-test/vaults
+        def db = context.dbName
+
+        def table1 = "test_privilege_vault_restart_t1"
+        def table2 = "test_privilege_vault_restart_t2"
+        def hdfsLinkWeDontReallyCare = "127.0.0.1:10086" // a dummy link, it 
doesn't need to work
+
+        
//==========================================================================
+        // prepare the basic vault and tables for further check
+        
//==========================================================================
+        sql """
+            CREATE STORAGE VAULT IF NOT EXISTS ${vault1}
+            PROPERTIES (
+            "type"="hdfs",
+            "fs.defaultFS"="${hdfsLinkWeDontReallyCare}",
+            "path_prefix" = "test_vault_privilege_restart"
+            );
+        """
+
+        def storageVaults = (sql " SHOW STORAGE VAULT; ").stream().map(row -> 
row[0]).collect(Collectors.toSet())
+        logger.info("all vaults: ${storageVaults}")
+        org.junit.Assert.assertTrue("${vault1} is not present after creating, 
all vaults: ${storageVaults}", storageVaults.contains(vault1))
+
+        def allTables = (sql " SHOW tables").stream().map(row -> 
row[0]).collect(Collectors.toSet())
+        logger.info("all tables ${allTables}")
+
+        // table1 is the sign to check if the user1 has been created and 
granted well
+        def targetTableExist = allTables.contains(table1) 
+
+        if (targetTableExist) { 
+            // the grant procedure at least run once before, user1 has been 
granted vault1
+            logger.info("${user1} has been granted with usage_priv to 
${vault1} before")
+        } else {
+            logger.info("this is the frist run, or there was a crash during 
the very first run, ${user1} has not been granted with usage_priv to ${vault1} 
before")
+            // create user and grant storage vault and create a table with 
that vault
+            sql """drop user if exists ${user1}"""
+            sql """create user ${user1} identified by '${passwd}'"""
+            sql """
+                GRANT usage_priv ON storage vault ${vault1} TO '${user1}';
+            """
+            sql """
+                GRANT create_priv ON *.*.* TO '${user1}';
+            """
+
+            // ATTN: create table1, if successful, the sign has been set
+            //       there won't be any excuse that user1 misses the privilege 
to vault1 from now on
+            sql """
+                CREATE TABLE IF NOT EXISTS ${table1} (
+                        C_CUSTKEY     INTEGER NOT NULL,
+                        C_NAME        INTEGER NOT NULL
+                        )
+                        DUPLICATE KEY(C_CUSTKEY, C_NAME)
+                        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+                        PROPERTIES (
+                        "replication_num" = "1",
+                        "storage_vault_name" = ${vault1}
+                        )
+            """
+        }
+
+        
//==========================================================================
+        // check the prepared users and tables
+        
//==========================================================================
+        def allUsers = (sql " SHOW all grants ").stream().map(row -> 
row[0]).collect(Collectors.toSet())
+        logger.info("all users: ${allUsers}")
+        def userPresent = !(allUsers.stream().filter(i -> 
i.contains(user1)).collect(Collectors.toSet()).isEmpty())
+        org.junit.Assert.assertTrue("${user1} is not in the priv table 
${allUsers}", userPresent)
+
+        allTables = (sql " SHOW tables").stream().map(row -> 
row[0]).collect(Collectors.toSet())
+        logger.info("all tables: ${allTables}")
+        org.junit.Assert.assertTrue("${table1} is not present, all tables: 
${allUsers}", allTables.contains(table1))
+
+        // Test user privilege, the newly created user cannot create or set 
default vault
+        // Only users with admin role can create storage vault
+        connect(user = user1, password = passwd, url = context.config.jdbcUrl) 
{
+            sql """use ${db}"""
+            expectExceptionLike({
+                sql """
+                    CREATE STORAGE VAULT IF NOT EXISTS ${vault1}
+                    PROPERTIES (
+                    "type"="hdfs",
+                    "fs.defaultFS"="${hdfsLinkWeDontReallyCare}",
+                    "path_prefix" = "test_vault_privilege"
+                    );
+                """
+            }, "denied")
+        }
+        // Only users with admin role can set/unset default storage vault
+        connect(user = user1, password = passwd, url = context.config.jdbcUrl) 
{
+            sql """use ${db}"""
+            expectExceptionLike({
+                sql """
+                    SET ${vault1} AS DEFAULT STORAGE VAULT
+                """
+            }, "denied")
+        }
+        connect(user = user1, password = passwd, url = context.config.jdbcUrl) 
{
+            sql """use ${db}"""
+            expectExceptionLike({
+                sql """
+                    UNSET DEFAULT STORAGE VAULT
+                """
+            }, "denied")
+        }
+
+        // user1 should see vault1
+        def result = connect(user = user1, password = passwd, url = 
context.config.jdbcUrl) {
+            sql """use ${db}"""
+            sql " SHOW STORAGE VAULT; "
+        }
+        storageVaults = result.stream().map(row -> 
row[0]).collect(Collectors.toSet())
+        org.junit.Assert.assertTrue("${user1} cannot see granted vault 
${vault1} in result ${result}", storageVaults.contains(vault1))
+
+
+        
//==========================================================================
+        // to test that user1 has the privilege of vault1 to create new tables
+        // this is the main test for granted vault privilege after restarting 
FE
+        
//==========================================================================
+        sql """
+            DROP TABLE IF EXISTS ${table2} force;
+        """
+        connect(user = user1, password = passwd, url = context.config.jdbcUrl) 
{
+            sql """use ${db}"""
+            sql """
+                CREATE TABLE ${table2} (
+                        C_CUSTKEY     INTEGER NOT NULL,
+                        C_NAME        INTEGER NOT NULL
+                        )
+                        DUPLICATE KEY(C_CUSTKEY, C_NAME)
+                        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+                        PROPERTIES (
+                        "replication_num" = "1",
+                        "storage_vault_name" = ${vault1}
+                        )
+            """
+        }
+
+        result = connect(user = user1, password = passwd, url = 
context.config.jdbcUrl) {
+            sql """use ${db}"""
+            sql " SHOW create table ${table2}; "
+        }
+        logger.info("show create table ${table2}, result ${result}")
+        org.junit.Assert.assertTrue("missing storage vault properties 
${vault1} in table ${table2}", result.toString().contains(vault1))
+    } finally {
+        sql """ set built_in_storage_vault as default storage vault """
+    }
+}
diff --git a/regression-test/suites/vaults/alter/alter_hdfs.groovy 
b/regression-test/suites/vaults/alter/alter_hdfs.groovy
deleted file mode 100644
index 1a1299a93cc..00000000000
--- a/regression-test/suites/vaults/alter/alter_hdfs.groovy
+++ /dev/null
@@ -1,119 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-suite("alter_hdfs_vault", "nonConcurrent") {
-    if (!enableStoragevault()) {
-        logger.info("skip alter hdfs storgage vault case")
-        return
-    }
-
-    sql """
-        CREATE STORAGE VAULT IF NOT EXISTS alter_hdfs_vault
-        PROPERTIES (
-        "type"="HDFS",
-        "fs.defaultFS"="${getHmsHdfsFs()}",
-        "path_prefix" = "ssb_sf1_p2",
-        "hadoop.username" = "hadoop"
-        );
-    """
-
-    sql """
-        CREATE TABLE IF NOT EXISTS alter_hdfs_vault_tbl (
-                C_CUSTKEY     INTEGER NOT NULL,
-                C_NAME        INTEGER NOT NULL
-                )
-                DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                PROPERTIES (
-                "replication_num" = "1",
-                "storage_vault_name" = "alter_hdfs_vault"
-                )
-    """
-
-    sql """
-        insert into alter_hdfs_vault_tbl values("1", "1");
-    """
-
-    expectExceptionLike({
-        sql """
-            ALTER STORAGE VAULT alter_hdfs_vault
-            PROPERTIES (
-            "type"="hdfs",
-            "path_prefix" = "ssb_sf1_p3"
-            );
-        """
-    }, "Alter property")
-
-    expectExceptionLike({
-        sql """
-            ALTER STORAGE VAULT alter_hdfs_vault
-            PROPERTIES (
-            "type"="hdfs",
-            "fs.defaultFS" = "ssb_sf1_p3"
-            );
-        """
-    }, "Alter property")
-
-    def vault_name = "alter_hdfs_vault"
-    String properties;
-
-    def vaults_info = try_sql """
-        show storage vault
-    """
-
-    for (int i = 0; i < vaults_info.size(); i++) {
-        def name = vaults_info[i][0]
-        if (name.equals(vault_name)) {
-            properties = vaults_info[i][2]
-        }
-    }
-    
-    sql """
-        ALTER STORAGE VAULT alter_hdfs_vault
-        PROPERTIES (
-        "type"="hdfs",
-        "VAULT_NAME" = "alter_hdfs_vault_new_name",
-        "hadoop.username" = "hdfs"
-        );
-    """
-
-    def new_vault_name = "alter_hdfs_vault_new_name"
-
-    vaults_info = sql """
-        SHOW STORAGE VAULT;
-    """
-    boolean exist = false
-
-    for (int i = 0; i < vaults_info.size(); i++) {
-        def name = vaults_info[i][0]
-        logger.info("name is ${name}, info ${vaults_info[i]}")
-        if (name.equals(vault_name)) {
-            exist = true
-        }
-        if (name.equals(new_vault_name)) {
-            assertTrue(vaults_info[i][2].contains(""""hadoop.username" = 
"hdfs"""""))
-        }
-    }
-    assertFalse(exist)
-
-    // failed to insert due to the wrong ak
-    expectExceptionLike({
-        sql """
-            insert into alter_hdfs_vault_tbl values("2", "2");
-        """
-    }, "")
-}
diff --git a/regression-test/suites/vaults/alter/alter_s3.groovy 
b/regression-test/suites/vaults/alter/alter_s3.groovy
deleted file mode 100644
index 37f9edd0415..00000000000
--- a/regression-test/suites/vaults/alter/alter_s3.groovy
+++ /dev/null
@@ -1,124 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-suite("alter_s3_vault", "nonConcurrent") {
-    if (!enableStoragevault()) {
-        logger.info("skip alter s3 storgage vault case")
-        return
-    }
-
-    sql """
-        CREATE STORAGE VAULT IF NOT EXISTS alter_s3_vault
-        PROPERTIES (
-        "type"="S3",
-        "s3.endpoint"="${getS3Endpoint()}",
-        "s3.region" = "${getS3Region()}",
-        "s3.access_key" = "${getS3AK()}",
-        "s3.secret_key" = "${getS3SK()}",
-        "s3.root.path" = "ssb_sf1_p2_s3",
-        "s3.bucket" = "${getS3BucketName()}",
-        "s3.external_endpoint" = "",
-        "provider" = "${getS3Provider()}"
-        );
-    """
-
-    sql """
-        CREATE TABLE IF NOT EXISTS alter_s3_vault_tbl (
-                C_CUSTKEY     INTEGER NOT NULL,
-                C_NAME        INTEGER NOT NULL
-                )
-                DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                PROPERTIES (
-                "replication_num" = "1",
-                "storage_vault_name" = "alter_s3_vault"
-                )
-    """
-
-    sql """
-        insert into alter_s3_vault_tbl values("1", "1");
-    """
-
-    expectExceptionLike({
-        sql """
-            ALTER STORAGE VAULT alter_s3_vault
-            PROPERTIES (
-            "type"="S3",
-            "s3.bucket" = "error_bucket"
-            );
-        """
-    }, "Alter property")
-    expectExceptionLike({
-        sql """
-            ALTER STORAGE VAULT alter_s3_vault
-            PROPERTIES (
-            "type"="S3",
-            "provider" = "${getS3Provider()}"
-            );
-        """
-    }, "Alter property")
-
-    def vault_name = "alter_s3_vault"
-    String properties;
-
-    def vaults_info = try_sql """
-        show storage vault
-    """
-
-    for (int i = 0; i < vaults_info.size(); i++) {
-        def name = vaults_info[i][0]
-        if (name.equals(vault_name)) {
-            properties = vaults_info[i][2]
-        }
-    }
-    
-    sql """
-        ALTER STORAGE VAULT alter_s3_vault
-        PROPERTIES (
-        "type"="S3",
-        "VAULT_NAME" = "alter_s3_vault",
-        "s3.access_key" = "new_ak"
-        );
-    """
-
-    def new_vault_name = "alter_s3_vault_new"
-
-    vaults_info = sql """
-        SHOW STORAGE VAULT;
-    """
-    boolean exist = false
-
-    for (int i = 0; i < vaults_info.size(); i++) {
-        def name = vaults_info[i][0]
-        logger.info("name is ${name}, info ${vaults_info[i]}")
-        if (name.equals(vault_name)) {
-            exist = true
-        }
-        if (name.equals(new_vault_name)) {
-            assertTrue(vaults_info[i][2].contains(""""s3.access_key" = 
"new_ak"""""))
-        }
-    }
-    assertFalse(exist)
-
-    // failed to insert due to the wrong ak
-    expectExceptionLike({
-        sql """
-            insert into alter_s3_vault_tbl values("2", "2");
-        """
-    }, "")
-
-}
diff --git a/regression-test/suites/vaults/default/default.groovy 
b/regression-test/suites/vaults/default/default.groovy
deleted file mode 100644
index 6d3f5e3d3de..00000000000
--- a/regression-test/suites/vaults/default/default.groovy
+++ /dev/null
@@ -1,139 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-suite("default_vault", "nonConcurrent") {
-    if (!enableStoragevault()) {
-        logger.info("skip create storgage vault case")
-        return
-    }
-    expectExceptionLike({
-        sql """
-            set not_exist as default storage vault
-        """
-    }, "invalid storage vault name")
-
-    def tableName = "table_use_vault"
-
-    expectExceptionLike({
-        sql "DROP TABLE IF EXISTS ${tableName}"
-        sql """
-            CREATE TABLE ${tableName} (
-                `key` INT,
-                value INT
-            ) DUPLICATE KEY (`key`) DISTRIBUTED BY HASH (`key`) BUCKETS 1
-            PROPERTIES ('replication_num' = '1')
-        """
-    }, "supply")
-
-    sql """
-        CREATE STORAGE VAULT IF NOT EXISTS create_s3_vault_for_default
-        PROPERTIES (
-        "type"="S3",
-        "s3.endpoint"="${getS3Endpoint()}",
-        "s3.region" = "${getS3Region()}",
-        "s3.access_key" = "${getS3AK()}",
-        "s3.secret_key" = "${getS3SK()}",
-        "s3.root.path" = "ssb_sf1_p2_s3",
-        "s3.bucket" = "${getS3BucketName()}",
-        "s3.external_endpoint" = "",
-        "provider" = "${getS3Provider()}",
-        "set_as_default" = "true"
-        );
-    """
-
-    def vaults_info = sql """
-        show storage vault
-    """
-
-    // check if create_s3_vault_for_default is set as default
-    for (int i = 0; i < vaults_info.size(); i++) {
-        def name = vaults_info[i][0]
-        if (name.equals("create_s3_vault_for_default")) {
-            // isDefault is true
-            assertEquals(vaults_info[i][3], "true")
-        }
-    }
-
-
-    sql """
-        set built_in_storage_vault as default storage vault
-    """
-
-
-    sql "DROP TABLE IF EXISTS ${tableName}"
-    sql """
-        CREATE TABLE ${tableName} (
-            `key` INT,
-            value INT
-        ) DUPLICATE KEY (`key`) DISTRIBUTED BY HASH (`key`) BUCKETS 1
-        PROPERTIES ('replication_num' = '1')
-    """
-
-
-    sql """
-        set built_in_storage_vault as default storage vault
-    """
-
-    sql """
-        CREATE STORAGE VAULT IF NOT EXISTS create_default_hdfs_vault
-        PROPERTIES (
-        "type"="hdfs",
-        "fs.defaultFS"="${getHmsHdfsFs()}",
-        "path_prefix" = "default_vault_ssb_hdfs_vault",
-        "hadoop.username" = "hadoop"
-        );
-    """
-
-    sql """
-        set create_default_hdfs_vault as default storage vault
-    """
-
-    sql "DROP TABLE IF EXISTS ${tableName}"
-    sql """
-        CREATE TABLE ${tableName} (
-            `key` INT,
-            value INT
-        ) DUPLICATE KEY (`key`) DISTRIBUTED BY HASH (`key`) BUCKETS 1
-        PROPERTIES ('replication_num' = '1')
-    """
-    sql """
-        insert into ${tableName} values(1, 1);
-    """
-    sql """
-        select * from ${tableName};
-    """
-
-    def create_table_stmt = sql """
-        show create table ${tableName}
-    """
-
-    assertTrue(create_table_stmt[0][1].contains("create_default_hdfs_vault"))
-
-    expectExceptionLike({
-        sql """
-            alter table ${tableName} set("storage_vault_name" = 
"built_in_storage_vault");
-        """
-    }, "You can not modify")
-
-    try {
-        sql """
-            set null as default storage vault
-        """
-    } catch (Exception e) {
-    }
-
-}
diff --git a/regression-test/suites/vaults/privilege.groovy 
b/regression-test/suites/vaults/privilege.groovy
deleted file mode 100644
index 3225c6a2915..00000000000
--- a/regression-test/suites/vaults/privilege.groovy
+++ /dev/null
@@ -1,183 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-import java.util.stream.Collectors;
-
-suite("test_privilege_vault", "nonConcurrent") {
-    if (!enableStoragevault()) {
-        logger.info("skip test_privilege_vault case")
-        return
-    }
-
-    def vault1 = "test_privilege_vault1"
-    def table1 = "test_privilege_vault_t1"
-    def table2 = "test_privilege_vault_t2"
-    def table3 = "test_privilege_vault_t3"
-
-    sql """
-        CREATE STORAGE VAULT IF NOT EXISTS ${vault1}
-        PROPERTIES (
-        "type"="hdfs",
-        "fs.defaultFS"="${getHmsHdfsFs()}",
-        "path_prefix" = "test_vault_privilege"
-        );
-    """
-
-    def storageVaults = (sql " SHOW STORAGE VAULT; ").stream().map(row -> 
row[0]).collect(Collectors.toSet())
-    assertTrue(storageVaults.contains(vault1))
-
-    sql """
-        SET ${vault1} AS DEFAULT STORAGE VAULT
-    """
-    sql """
-        UNSET DEFAULT STORAGE VAULT
-    """
-
-    sql """
-        DROP TABLE IF EXISTS ${table1};
-    """
-
-    sql """
-        CREATE TABLE IF NOT EXISTS ${table1} (
-                C_CUSTKEY     INTEGER NOT NULL,
-                C_NAME        INTEGER NOT NULL
-                )
-                DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                PROPERTIES (
-                "replication_num" = "1",
-                "storage_vault_name" = ${vault1}
-                )
-    """
-
-    def user1 = "test_privilege_vault_user1"
-    sql """drop user if exists ${user1}"""
-    sql """create user ${user1} identified by 'Cloud12345'"""
-    sql """
-        GRANT create_priv ON *.*.* TO '${user1}';
-    """
-
-    def vault2 = "test_privilege_vault2"
-    // Only users with admin role can create storage vault
-    connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
-        expectExceptionLike({
-            sql """
-                CREATE STORAGE VAULT IF NOT EXISTS ${vault2}
-                PROPERTIES (
-                "type"="hdfs",
-                "fs.defaultFS"="${getHmsHdfsFs()}",
-                "path_prefix" = "test_vault_privilege"
-                );
-            """
-        }, "denied")
-    }
-
-    // Only users with admin role can set/unset default storage vault
-    connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
-        expectExceptionLike({
-            sql """
-                SET ${vault1} AS DEFAULT STORAGE VAULT
-            """
-        }, "denied")
-    }
-    connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
-        expectExceptionLike({
-            sql """
-                UNSET DEFAULT STORAGE VAULT
-            """
-        }, "denied")
-    }
-
-    def result = connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
-            sql " SHOW STORAGE VAULT; "
-    }
-    assertTrue(result.isEmpty())
-
-    sql """
-        DROP TABLE IF EXISTS ${table2};
-    """
-    connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
-        expectExceptionLike({
-            sql """
-                CREATE TABLE IF NOT EXISTS ${table2} (
-                        C_CUSTKEY     INTEGER NOT NULL,
-                        C_NAME        INTEGER NOT NULL
-                        )
-                        DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                        PROPERTIES (
-                        "replication_num" = "1",
-                        "storage_vault_name" = ${vault1}
-                        )
-            """
-        }, "USAGE denied")
-    }
-
-    sql """
-        GRANT usage_priv ON STORAGE VAULT '${vault1}' TO '${user1}';
-    """
-
-    result = connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
-            sql " SHOW STORAGE VAULT; "
-    }
-    storageVaults = result.stream().map(row -> 
row[0]).collect(Collectors.toSet())
-    assertTrue(storageVaults.contains(vault1))
-
-    connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
-        sql """
-            CREATE TABLE IF NOT EXISTS ${table2} (
-                    C_CUSTKEY     INTEGER NOT NULL,
-                    C_NAME        INTEGER NOT NULL
-                    )
-                    DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                    DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                    PROPERTIES (
-                    "replication_num" = "1",
-                    "storage_vault_name" = ${vault1}
-                    )
-        """
-    }
-
-    sql """
-        REVOKE usage_priv ON STORAGE VAULT '${vault1}' FROM '${user1}';
-    """
-
-    result = connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
-            sql " SHOW STORAGE VAULT; "
-    }
-    assertTrue(result.isEmpty())
-
-    sql """
-        DROP TABLE IF EXISTS ${table3};
-    """
-    connect(user = user1, password = 'Cloud12345', url = 
context.config.jdbcUrl) {
-        expectExceptionLike({
-            sql """
-                CREATE TABLE IF NOT EXISTS ${table3} (
-                        C_CUSTKEY     INTEGER NOT NULL,
-                        C_NAME        INTEGER NOT NULL
-                        )
-                        DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                        PROPERTIES (
-                        "replication_num" = "1",
-                        "storage_vault_name" = ${vault1}
-                        )
-            """
-        }, "USAGE denied")
-    }
-}
\ No newline at end of file
diff --git a/regression-test/suites/vaults/privilege_restart.groovy 
b/regression-test/suites/vaults/privilege_restart.groovy
deleted file mode 100644
index 4e8c8fcc04d..00000000000
--- a/regression-test/suites/vaults/privilege_restart.groovy
+++ /dev/null
@@ -1,178 +0,0 @@
-
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-import java.util.stream.Collectors;
-
-// This test suite is intent to test the granted privilege for specific user 
will
-// not disappear
-suite("test_privilege_vault_restart", "nonConcurrent") {
-    if (!enableStoragevault()) {
-        logger.info("skip test_privilege_vault_restart case")
-        return
-    }
-
-    // user1 will be kept before and after running this test in order to check
-    // the granted vault privilege is persisted well eventhough FE restarts 
many times
-    def user1 = "test_privilege_vault_restart_user1"
-    def passwd = "Cloud12345"
-
-    def vault1 = "test_privilege_vault_restart_vault1"
-    // this vaule is derived from current file location: regression-test/vaults
-    def db = "regression_test_vaults"
-    def table1 = "test_privilege_vault_restart_t1"
-    def table2 = "test_privilege_vault_restart_t2"
-    def hdfsLinkWeDontReallyCare = "127.0.0.1:10086" // a dummy link, it 
doesn't need to work
-
-    
//==========================================================================
-    // prepare the basic vault and tables for further check
-    
//==========================================================================
-    sql """
-        CREATE STORAGE VAULT IF NOT EXISTS ${vault1}
-        PROPERTIES (
-        "type"="hdfs",
-        "fs.defaultFS"="${hdfsLinkWeDontReallyCare}",
-        "path_prefix" = "test_vault_privilege_restart"
-        );
-    """
-
-    def storageVaults = (sql " SHOW STORAGE VAULT; ").stream().map(row -> 
row[0]).collect(Collectors.toSet())
-    logger.info("all vaults: ${storageVaults}")
-    org.junit.Assert.assertTrue("${vault1} is not present after creating, all 
vaults: ${storageVaults}", storageVaults.contains(vault1))
-
-    def allTables = (sql " SHOW tables").stream().map(row -> 
row[0]).collect(Collectors.toSet())
-    logger.info("all tables ${allTables}")
-
-    // table1 is the sign to check if the user1 has been created and granted 
well
-    def targetTableExist = allTables.contains(table1) 
-
-    if (targetTableExist) { 
-        // the grant procedure at least run once before, user1 has been 
granted vault1
-        logger.info("${user1} has been granted with usage_priv to ${vault1} 
before")
-    } else {
-        logger.info("this is the frist run, or there was a crash during the 
very first run, ${user1} has not been granted with usage_priv to ${vault1} 
before")
-        // create user and grant storage vault and create a table with that 
vault
-        sql """drop user if exists ${user1}"""
-        sql """create user ${user1} identified by '${passwd}'"""
-        sql """
-            GRANT usage_priv ON storage vault ${vault1} TO '${user1}';
-        """
-        sql """
-            GRANT create_priv ON *.*.* TO '${user1}';
-        """
-
-        // ATTN: create table1, if successful, the sign has been set
-        //       there wont be any execuse that user1 misses the privilege to 
vault1 from now on
-        sql """
-            CREATE TABLE IF NOT EXISTS ${table1} (
-                    C_CUSTKEY     INTEGER NOT NULL,
-                    C_NAME        INTEGER NOT NULL
-                    )
-                    DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                    DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                    PROPERTIES (
-                    "replication_num" = "1",
-                    "storage_vault_name" = ${vault1}
-                    )
-        """
-    }
-
-    
//==========================================================================
-    // check the prepared users and tables
-    
//==========================================================================
-    def allUsers = (sql " SHOW all grants ").stream().map(row -> 
row[0]).collect(Collectors.toSet())
-    logger.info("all users: ${allUsers}")
-    def userPresent = !(allUsers.stream().filter(i -> 
i.contains(user1)).collect(Collectors.toSet()).isEmpty())
-    org.junit.Assert.assertTrue("${user1} is not in the priv table 
${allUsers}", userPresent)
-
-    allTables = (sql " SHOW tables").stream().map(row -> 
row[0]).collect(Collectors.toSet())
-    logger.info("all tables: ${allTables}")
-    org.junit.Assert.assertTrue("${table1} is not present, all tables: 
${allUsers}", allTables.contains(table1))
-
-    // Test user privilege, the newly created user cannot create or set 
default vault
-    // Only users with admin role can create storage vault
-    connect(user = user1, password = passwd, url = context.config.jdbcUrl) {
-        sql """use ${db}"""
-        expectExceptionLike({
-            sql """
-                CREATE STORAGE VAULT IF NOT EXISTS ${vault1}
-                PROPERTIES (
-                "type"="hdfs",
-                "fs.defaultFS"="${hdfsLinkWeDontReallyCare}",
-                "path_prefix" = "test_vault_privilege"
-                );
-            """
-        }, "denied")
-    }
-    // Only users with admin role can set/unset default storage vault
-    connect(user = user1, password = passwd, url = context.config.jdbcUrl) {
-        sql """use ${db}"""
-        expectExceptionLike({
-            sql """
-                SET ${vault1} AS DEFAULT STORAGE VAULT
-            """
-        }, "denied")
-    }
-    connect(user = user1, password = passwd, url = context.config.jdbcUrl) {
-        sql """use ${db}"""
-        expectExceptionLike({
-            sql """
-                UNSET DEFAULT STORAGE VAULT
-            """
-        }, "denied")
-    }
-
-    // user1 should see vault1
-    def result = connect(user = user1, password = passwd, url = 
context.config.jdbcUrl) {
-        sql """use ${db}"""
-        sql " SHOW STORAGE VAULT; "
-    }
-    storageVaults = result.stream().map(row -> 
row[0]).collect(Collectors.toSet())
-    org.junit.Assert.assertTrue("${user1} cannot see granted vault ${vault1} 
in result ${result}", storageVaults.contains(vault1))
-
-
-    
//==========================================================================
-    // to test that user1 has the privilege of vault1 to create new tables
-    // this is the main test for granted vault privilege after restarting FE
-    
//==========================================================================
-    sql """
-        DROP TABLE IF EXISTS ${table2} force;
-    """
-    connect(user = user1, password = passwd, url = context.config.jdbcUrl) {
-        sql """use ${db}"""
-        sql """
-            CREATE TABLE ${table2} (
-                    C_CUSTKEY     INTEGER NOT NULL,
-                    C_NAME        INTEGER NOT NULL
-                    )
-                    DUPLICATE KEY(C_CUSTKEY, C_NAME)
-                    DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
-                    PROPERTIES (
-                    "replication_num" = "1",
-                    "storage_vault_name" = ${vault1}
-                    )
-        """
-    }
-
-    result = connect(user = user1, password = passwd, url = 
context.config.jdbcUrl) {
-        sql """use ${db}"""
-        sql " SHOW create table ${table2}; "
-    }
-    logger.info("show create table ${table2}, result ${result}")
-    org.junit.Assert.assertTrue("missing storage vault properties ${vault1} in 
table ${table2}", result.toString().contains(vault1))
-
-}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to