This is an automated email from the ASF dual-hosted git repository.

gfphoenix78 pushed a commit to branch sync-with-upstream
in repository https://gitbox.apache.org/repos/asf/cloudberry-gpbackup.git


The following commit(s) were added to refs/heads/sync-with-upstream by this 
push:
     new af1fedf7 fix(test): Skip resize cluster test for Cloudberry due to 
replicated table issue (#36)
af1fedf7 is described below

commit af1fedf7c0ddda9ddea7ec7114210f9d90593302
Author: Robert Mu <[email protected]>
AuthorDate: Fri Sep 5 15:17:17 2025 +0800

    fix(test): Skip resize cluster test for Cloudberry due to replicated table 
issue (#36)
    
    Skip resize cluster test in end-to-end suite for Cloudberry due to
    COPY FROM ON SEGMENT issue with replicated tables. Add a detailed TODO
    comment in restore/data.go explaining the root cause of row count
    validation failures during resize operations.
    
    References Cloudberry issue #1298 for tracking the upstream fix.
    Ensures test stability while maintaining compatibility with
    Greenplum behavior.
    
    The test is temporarily disabled for Cloudberry until the upstream
    issue with the COPY command returning incorrect row counts for
    replicated tables is resolved.
    
    See: https://github.com/apache/cloudberry/issues/1298
---
 end_to_end/end_to_end_suite_test.go | 8 ++++++++
 restore/data.go                     | 7 +++++++
 2 files changed, 15 insertions(+)

diff --git a/end_to_end/end_to_end_suite_test.go 
b/end_to_end/end_to_end_suite_test.go
index af9b575c..2e9ef036 100644
--- a/end_to_end/end_to_end_suite_test.go
+++ b/end_to_end/end_to_end_suite_test.go
@@ -2288,6 +2288,14 @@ LANGUAGE plpgsql NO SQL;`)
                                        if useOldBackupVersion {
                                                Skip("Resize-cluster was only 
added in version 1.26")
                                        }
+
+                                       // TODO: Re-enable this test for 
Cloudberry once the following issue is fixed:
+                                       // 
https://github.com/apache/cloudberry/issues/1298
+                                       if restoreConn.Version.IsCBDB() {
+                                               Skip(`This test is skipped for 
Cloudberry due to an issue with COPY FROM ON SEGMENT for
+                                                       replicated tables 
(https://github.com/apache/cloudberry/issues/1298).`)
+                                       }
+
                                        extractDirectory := 
extractSavedTarFile(backupDir, tarBaseName)
                                        defer 
testhelper.AssertQueryRuns(restoreConn, `DROP SCHEMA IF EXISTS schemaone 
CASCADE;`)
 
diff --git a/restore/data.go b/restore/data.go
index 18c0bcd1..2709af18 100644
--- a/restore/data.go
+++ b/restore/data.go
@@ -140,6 +140,13 @@ func restoreSingleTableData(fpInfo *filepath.FilePathInfo, 
entry toc.Coordinator
                numRowsRestored /= int64(destSize)
        }
 
+       // TODO: When restoring a replicated table to a cluster with a 
different number of segments (resize),
+       // this check may fail for Cloudberry. This is because gprestore 
assumes that the COPY
+       // command for a replicated table returns the total number of rows 
copied across all segments
+       // (N * rows_per_segment) and divides the result by N to get the actual 
row count.
+       // However, due to Cloudberry issue 
https://github.com/apache/cloudberry/issues/1298,
+       // its COPY ON SEGMENT command returns only the base row count 
(rows_per_segment).
+       // The subsequent division in gprestore leads to a miscalculated row 
count, causing this check to fail.
        err := CheckRowsRestored(numRowsRestored, numRowsBackedUp, tableName)
        if err != nil {
                gplog.Error(err.Error())


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to