This is an automated email from the ASF dual-hosted git repository.

maxyang pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/cloudberry-backup.git


The following commit(s) were added to refs/heads/main by this push:
     new 4648d84e fix(test): Prevent panic in AfterSuite when BeforeSuite fails
4648d84e is described below

commit 4648d84ec126c540678b462ceb57ae099f943a81
Author: Robert Mu <[email protected]>
AuthorDate: Wed Oct 15 18:01:19 2025 +0800

    fix(test): Prevent panic in AfterSuite when BeforeSuite fails
    
    If the BeforeSuite hook fails before the database connection is
    initialized, the `connectionPool` variable remains nil. A common cause
    is a misconfigured test environment, such as when `greenplum_path.sh`
    is not sourced, leading to a "command not found" error for `createdb`.
    
    The previous AfterSuite logic would then attempt to dereference this
    nil pointer, causing a panic.
    
    ```
    Running Suite: database query tests - /home/cbdb/Projects/gpbackup/integration
    ==============================================================================
    Random Seed: 1760521199 - will randomize all specs
    
    Will run 549 of 552 specs
    ------------------------------
    [BeforeSuite] [FAILED] [0.003 seconds]
    [BeforeSuite]
    /home/cbdb/Projects/gpbackup/integration/integration_suite_test.go:54
    
      [FAILED] Cannot create database testdb; is GPDB running?
      In [BeforeSuite] at: /home/cbdb/Projects/gpbackup/integration/integration_suite_test.go:58 @ 10/15/25 17:40:06.839
    ------------------------------
    [AfterSuite] [PANICKED] [0.000 seconds]
    [AfterSuite]
    /home/cbdb/Projects/gpbackup/integration/integration_suite_test.go:127
    
      [PANICKED] Test Panicked
      In [AfterSuite] at: /usr/lib/go-1.23/src/runtime/panic.go:262 @ 10/15/25 17:40:06.839
    
      runtime error: invalid memory address or nil pointer dereference
    
      Full Stack Trace
        github.com/apache/cloudberry-backup/integration.init.func10()
            /home/cbdb/Projects/gpbackup/integration/integration_suite_test.go:129 +0x26
    ------------------------------
    
    Summarizing 2 Failures:
      [FAIL] [BeforeSuite]
      /home/cbdb/Projects/gpbackup/integration/integration_suite_test.go:58
      [PANICKED!] [AfterSuite]
      /usr/lib/go-1.23/src/runtime/panic.go:262
    
    Ran 0 of 552 Specs in 0.023 seconds
    FAIL! -- A BeforeSuite node failed so all tests were skipped.
    --- FAIL: TestQueries (0.03s)
    FAIL
    
    Ginkgo ran 1 suite in 7.163251311s
    ```
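    
    The panic is ordinary Go nil-pointer behavior: the old AfterSuite read
    `connectionPool.Version` (integration_suite_test.go:129) through a nil
    `*DBConn`. A minimal standalone sketch, using hypothetical stand-in
    types rather than the project's own, reproduces the same runtime error:
    
    ```go
    package main
    
    import "fmt"
    
    // DBVersion and DBConn are hypothetical stand-ins for the real dbconn
    // types; the suite keeps a package-level *DBConn named connectionPool.
    type DBVersion struct{}
    
    func (v DBVersion) IsGPDB() bool { return true }
    
    type DBConn struct{ Version DBVersion }
    
    func main() {
        var connectionPool *DBConn // stays nil when BeforeSuite fails early
    
        defer func() {
            // Prints: runtime error: invalid memory address or nil pointer dereference
            fmt.Println("recovered:", recover())
        }()
    
        // Reading a field through a nil pointer panics, just like the
        // connectionPool.Version access in the old AfterSuite.
        _ = connectionPool.Version.IsGPDB()
    }
    ```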
---
 integration/integration_suite_test.go | 45 ++++++++++++++++++++++-------------
 1 file changed, 28 insertions(+), 17 deletions(-)

diff --git a/integration/integration_suite_test.go b/integration/integration_suite_test.go
index 15de88ea..da5627b5 100644
--- a/integration/integration_suite_test.go
+++ b/integration/integration_suite_test.go
@@ -126,28 +126,39 @@ var _ = BeforeEach(func() {
 
 var _ = AfterSuite(func() {
        CleanupBuildArtifacts()
-       if connectionPool.Version.IsGPDB() && connectionPool.Version.Before("6") {
-               testutils.DestroyTestFilespace(connectionPool)
-       } else {
-               remoteOutput := testCluster.GenerateAndExecuteCommand(
-                       "Removing /tmp/test_dir* directories on all hosts",
-                       cluster.ON_HOSTS|cluster.INCLUDE_COORDINATOR,
-                       func(contentID int) string {
-                               return fmt.Sprintf("rm -rf /tmp/test_dir*")
-                       })
-               if remoteOutput.NumErrors != 0 {
-                       Fail("Could not remove /tmp/testdir* directories on 1 
or more hosts")
-               }
-       }
+
+       // connectionPool can be nil if BeforeSuite fails before the connection is established.
+       // This can happen if the test environment is not set up correctly, for example if
+       // greenplum_path.sh was not sourced and the `createdb` command cannot be found.
+       // A nil check is necessary to prevent a panic during cleanup.
        if connectionPool != nil {
+               if connectionPool.Version.IsGPDB() && connectionPool.Version.Before("6") {
+                       testutils.DestroyTestFilespace(connectionPool)
+               } else {
+                       remoteOutput := testCluster.GenerateAndExecuteCommand(
+                               "Removing /tmp/test_dir* directories on all hosts",
+                               cluster.ON_HOSTS|cluster.INCLUDE_COORDINATOR,
+                               func(contentID int) string {
+                                       return fmt.Sprintf("rm -rf /tmp/test_dir*")
+                               })
+                       if remoteOutput.NumErrors != 0 {
+                               Fail("Could not remove /tmp/testdir* directories on 1 or more hosts")
+                       }
+               }
+
                connectionPool.Close()
                err := exec.Command("dropdb", "testdb").Run()
                Expect(err).To(BeNil())
+
+               // The following cleanup also depends on a successful BeforeSuite.
+               // It creates a new connection which requires the logger to be initialized,
+               // and it drops roles that would only have been created if setup succeeded.
+               template1Conn := testutils.SetupTestDbConn("template1")
+               testhelper.AssertQueryRuns(template1Conn, "DROP ROLE testrole")
+               testhelper.AssertQueryRuns(template1Conn, "DROP ROLE anothertestrole")
+               template1Conn.Close()
        }
-       connection1 := testutils.SetupTestDbConn("template1")
-       testhelper.AssertQueryRuns(connection1, "DROP ROLE testrole")
-       testhelper.AssertQueryRuns(connection1, "DROP ROLE anothertestrole")
-       connection1.Close()
+
        _ = os.RemoveAll("/tmp/helper_test")
        _ = os.RemoveAll(examplePluginTestDir)
 })
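
For readers skimming past the diff: the essential shape of the fix is the
nil guard around every cleanup step that dereferences the connection. A
minimal, self-contained Ginkgo v2 sketch of that pattern, using hypothetical
names (pool, conn, TestGuardedCleanup) rather than the project's actual
suite:

```go
package integration_test

import (
    "testing"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

// conn is a hypothetical stand-in for the suite's database connection; it
// remains nil if BeforeSuite fails before the assignment below runs.
type conn struct{}

func (c *conn) Close() {}

var pool *conn

var _ = BeforeSuite(func() {
    // Any failure before this line leaves pool == nil for AfterSuite.
    pool = &conn{}
})

var _ = AfterSuite(func() {
    // Cleanup that needs no database can run unconditionally here;
    // everything that dereferences pool must sit behind the nil check.
    if pool != nil {
        pool.Close()
        pool = nil
    }
})

var _ = Describe("guarded cleanup", func() {
    It("only sees a pool when setup succeeded", func() {
        Expect(pool).NotTo(BeNil())
    })
})

func TestGuardedCleanup(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "Guarded Cleanup Suite")
}
```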


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
