This is an automated email from the ASF dual-hosted git repository.
gfphoenix78 pushed a commit to branch sync-with-upstream
in repository https://gitbox.apache.org/repos/asf/cloudberry-gpbackup.git
The following commit(s) were added to refs/heads/sync-with-upstream by this push:
     new 91a99d51 feat(backup): Add support for Cloudberry remote storage objects (#16)
91a99d51 is described below
commit 91a99d518f279ba1f9d1fe80383542fa0c58f3be
Author: Robert Mu <[email protected]>
AuthorDate: Thu Aug 21 08:53:11 2025 +0800
feat(backup): Add support for Cloudberry remote storage objects (#16)
This commit introduces comprehensive support for backing up and restoring
Cloudberry-specific remote storage objects, including remote tablespaces,
storage servers, and storage user mappings.

Key changes include:

1. Generic Option Parsing Refactoring (see the sketch after this list)
2. Enhanced Tablespace Backup
3. Storage Server & User Mapping Support
4. Comprehensive Testing
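
To make item 1 concrete, here is a minimal, hypothetical sketch of how the
new utils.ParseOptions helper (added in utils/util.go below) is meant to be
called. The import path is assumed from the upstream gpbackup layout, and
the option values are illustrative only:

    package main

    import (
    	"fmt"

    	"github.com/greenplum-db/gpbackup/utils" // import path is an assumption
    )

    func main() {
    	// Catalog options arrive as "key=value" pairs joined with ", ",
    	// e.g. from array_to_string(spcoptions, ', ') in the queries below.
    	opts := utils.ParseOptions("server=s3_server, path='/bucket/path', random_page_cost=4")
    	fmt.Println(opts["server"])           // s3_server
    	fmt.Println(opts["path"])             // '/bucket/path' (quotes are preserved)
    	fmt.Println(opts["random_page_cost"]) // 4
    }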
---
backup/backup.go | 5 ++
backup/dependencies.go | 4 +
backup/metadata_globals.go | 125 ++++++++++++++++++++++++--
backup/metadata_globals_test.go | 70 ++++++++++++++-
backup/queries_acl.go | 4 +
backup/queries_globals.go | 109 +++++++++++++++++++++-
backup/wrappers.go | 16 ++++
integration/metadata_globals_create_test.go | 134 ++++++++++++++++++++++++++++
restore/restore.go | 3 +-
testutils/functions.go | 6 +-
toc/toc.go | 4 +
utils/util.go | 20 +++++
12 files changed, 487 insertions(+), 13 deletions(-)
diff --git a/backup/backup.go b/backup/backup.go
index cb3c83be..f6c5762f 100644
--- a/backup/backup.go
+++ b/backup/backup.go
@@ -231,6 +231,11 @@ func backupGlobals(metadataFile *utils.FileWithByteCount) {
backupDatabaseGUCs(metadataFile)
backupRoleGUCs(metadataFile)
+ if connectionPool.Version.IsCBDB() {
+ backupStorageServers(metadataFile)
+ backupStorageUserMappings(metadataFile)
+ }
+
logCompletionMessage("Global database metadata backup")
}
diff --git a/backup/dependencies.go b/backup/dependencies.go
index 81e81f5c..eac546bb 100644
--- a/backup/dependencies.go
+++ b/backup/dependencies.go
@@ -57,6 +57,10 @@ var (
PG_TYPE_OID uint32 = 1247
PG_USER_MAPPING_OID uint32 = 1418
+ // CBDB only
+ GP_STORAGE_USER_MAPPING_OID uint32 = 6131
+ GP_STORAGE_SERVER_OID uint32 = 6015
+
FIRST_NORMAL_OBJECT_ID uint32 = 16384
)
diff --git a/backup/metadata_globals.go b/backup/metadata_globals.go
index 8dfa2970..c200c8ef 100644
--- a/backup/metadata_globals.go
+++ b/backup/metadata_globals.go
@@ -2,6 +2,7 @@ package backup
import (
"fmt"
+ "sort"
"strconv"
"strings"
@@ -414,24 +415,132 @@ func PrintRoleMembershipStatements(metadataFile *utils.FileWithByteCount, objToc
 func PrintCreateTablespaceStatements(metadataFile *utils.FileWithByteCount, objToc *toc.TOC, tablespaces []Tablespace, tablespaceMetadata MetadataMap) {
for _, tablespace := range tablespaces {
start := metadataFile.ByteCount
+
+ // Step 1: Use the utility function to parse options into a map.
+ optionsMap := utils.ParseOptions(tablespace.Options)
+
+		// Step 2: Determine the location string, handling remote tablespaces.
locationStr := ""
-		if tablespace.SegmentLocations == nil {
-			locationStr = fmt.Sprintf("FILESPACE %s", tablespace.FileLocation)
-		} else if len(tablespace.SegmentLocations) == 0 {
-			locationStr = fmt.Sprintf("LOCATION %s", tablespace.FileLocation)
+		if remotePath, ok := optionsMap["path"]; ok {
+			locationStr = fmt.Sprintf("LOCATION %s", remotePath)
+			delete(optionsMap, "path")
 		} else {
-			locationStr = fmt.Sprintf("LOCATION %s\n\tWITH (%s)", tablespace.FileLocation, strings.Join(tablespace.SegmentLocations, ", "))
+			// Fallback to standard tablespace location logic.
+			if tablespace.SegmentLocations == nil {
+				locationStr = fmt.Sprintf("FILESPACE %s", tablespace.FileLocation)
+			} else if len(tablespace.SegmentLocations) == 0 {
+				locationStr = fmt.Sprintf("LOCATION %s", tablespace.FileLocation)
+			} else {
+				locationStr = fmt.Sprintf("LOCATION %s\n\tWITH (%s)", tablespace.FileLocation, strings.Join(tablespace.SegmentLocations, ", "))
+			}
+		}
+
+		// Step 3: Separate special options for CREATE: 'server' and 'storage'.
+ server, hasServer := optionsMap["server"]
+ if hasServer {
+ delete(optionsMap, "server")
+ }
+
+ withOptions := []string{}
+ if storageVal, ok := optionsMap["storage"]; ok {
+			withOptions = append(withOptions, fmt.Sprintf("storage = %s", storageVal))
+ delete(optionsMap, "storage")
+ }
+
+		// Step 4: Rebuild the remaining options string for the ALTER statement.
+ alterOptionsKeys := make([]string, 0, len(optionsMap))
+ for k := range optionsMap {
+ alterOptionsKeys = append(alterOptionsKeys, k)
}
-		metadataFile.MustPrintf("\n\nCREATE TABLESPACE %s %s;", tablespace.Tablespace, locationStr)
+ sort.Strings(alterOptionsKeys)
+
+ alterOptions := make([]string, len(alterOptionsKeys))
+ for i, k := range alterOptionsKeys {
+			alterOptions[i] = fmt.Sprintf("%s = %s", k, optionsMap[k])
+ }
+ alterOptionsStr := strings.Join(alterOptions, ", ")
+
+ // Step 5: Construct and print the CREATE TABLESPACE statement.
+		metadataFile.MustPrintf("\n\nCREATE TABLESPACE %s %s", tablespace.Tablespace, locationStr)
+		// Add WITH clause only for options like 'storage', assuming it doesn't conflict with the segment-location WITH clause.
+		if len(withOptions) > 0 {
+			metadataFile.MustPrintf(" WITH (%s)", strings.Join(withOptions, ", "))
+ }
+
+ if hasServer {
+ metadataFile.MustPrintf(" SERVER %s", server)
+			if tablespace.Spcfilehandlerbin != "" && tablespace.Spcfilehandlersrc != "" {
+				metadataFile.MustPrintf(" HANDLER '%s, %s'", tablespace.Spcfilehandlerbin, tablespace.Spcfilehandlersrc)
+ }
+ }
+ metadataFile.MustPrintf(";")
section, entry := tablespace.GetMetadataEntry()
 		objToc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount, []uint32{0, 0})
- if tablespace.Options != "" {
+		// Step 6: If there are any remaining "alterable" options, print the ALTER TABLESPACE statement.
+ if alterOptionsStr != "" {
start = metadataFile.ByteCount
-			metadataFile.MustPrintf("\n\nALTER TABLESPACE %s SET (%s);\n", tablespace.Tablespace, tablespace.Options)
+			metadataFile.MustPrintf("\n\nALTER TABLESPACE %s SET (%s);\n", tablespace.Tablespace, alterOptionsStr)
 			objToc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount, []uint32{0, 0})
}
 		PrintObjectMetadata(metadataFile, objToc, tablespaceMetadata[tablespace.GetUniqueID()], tablespace, "", []uint32{0, 0})
}
}
+
+func buildStorageOptionsString(optionsMap map[string]string) string {
+ // Sort the keys for deterministic, consistent backup output.
+ keys := make([]string, 0, len(optionsMap))
+ for k := range optionsMap {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ optionParts := make([]string, len(keys))
+ for i, k := range keys {
+		// Assuming all option values should be single-quoted in the DDL.
+ optionParts[i] = fmt.Sprintf("%s '%s'", k, optionsMap[k])
+ }
+ return strings.Join(optionParts, ", ")
+}
+
+func PrintCreateStorageServerStatements(metadataFile *utils.FileWithByteCount, toc *toc.TOC, servers []StorageServer, serverMetadata MetadataMap) {
+ for _, server := range servers {
+ start := metadataFile.ByteCount
+
+ optionsMap := utils.ParseOptions(server.ServerOptions)
+ if len(optionsMap) > 0 {
+ optionsStr := buildStorageOptionsString(optionsMap)
+			metadataFile.MustPrintf("\n\nCREATE STORAGE SERVER %s OPTIONS(%s);", server.Server, optionsStr)
+		} else {
+			// It's possible for a storage server to have no options, though unlikely.
+			metadataFile.MustPrintf("\n\nCREATE STORAGE SERVER %s;", server.Server)
+ }
+
+ section, entry := server.GetMetadataEntry()
+		toc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount, []uint32{0, 0})
+
+		// TODO: Re-enable metadata printing for STORAGE SERVER once Cloudberry supports
+		// `ALTER STORAGE SERVER ... OWNER TO ...` and `COMMENT ON STORAGE SERVER ...`.
+		// These DDLs currently cause a syntax error.
+		//PrintObjectMetadata(metadataFile, toc, serverMetadata[server.GetUniqueID()], server, "", []uint32{0, 0})
+ }
+}
+
+func PrintCreateStorageUserMappingStatements(metadataFile *utils.FileWithByteCount, toc *toc.TOC, users []StorageUserMapping) {
+ for _, user := range users {
+ start := metadataFile.ByteCount
+
+ optionsMap := utils.ParseOptions(user.Options)
+ if len(optionsMap) > 0 {
+ optionsStr := buildStorageOptionsString(optionsMap)
+			metadataFile.MustPrintf("\n\nCREATE STORAGE USER MAPPING FOR %s STORAGE SERVER %s OPTIONS (%s);", user.User, user.Server, optionsStr)
+		} else {
+			// A user mapping without options may also be possible.
+			metadataFile.MustPrintf("\n\nCREATE STORAGE USER MAPPING FOR %s STORAGE SERVER %s;", user.User, user.Server)
+ }
+
+ section, entry := user.GetMetadataEntry()
+		toc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount, []uint32{0, 0})
+ }
+}
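
A note on determinism: buildStorageOptionsString sorts option keys before
joining them, so repeated backups of the same catalog state emit
byte-identical DDL. A self-contained sketch of that idea, re-implemented
here for illustration since the helper itself is unexported:

    package main

    import (
    	"fmt"
    	"sort"
    	"strings"
    )

    // buildOptions mirrors the sort-then-quote logic used above.
    func buildOptions(optionsMap map[string]string) string {
    	keys := make([]string, 0, len(optionsMap))
    	for k := range optionsMap {
    		keys = append(keys, k)
    	}
    	sort.Strings(keys) // stable output regardless of map iteration order
    	parts := make([]string, len(keys))
    	for i, k := range keys {
    		parts[i] = fmt.Sprintf("%s '%s'", k, optionsMap[k])
    	}
    	return strings.Join(parts, ", ")
    }

    func main() {
    	opts := map[string]string{"region": "us-east-1", "protocol": "s3", "endpoint": "s3.example.com"}
    	fmt.Println(buildOptions(opts))
    	// endpoint 's3.example.com', protocol 's3', region 'us-east-1'
    }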
diff --git a/backup/metadata_globals_test.go b/backup/metadata_globals_test.go
index e32adf0c..7e223877 100644
--- a/backup/metadata_globals_test.go
+++ b/backup/metadata_globals_test.go
@@ -465,7 +465,75 @@ GRANT ALL ON TABLESPACE test_tablespace TO testrole;`,
 			testutils.ExpectEntry(tocfile.GlobalEntries, 0, "", "", "test_tablespace", toc.OBJ_TABLESPACE)
 			testutils.ExpectEntry(tocfile.GlobalEntries, 1, "", "", "test_tablespace", toc.OBJ_TABLESPACE)
 			expectedStatements := []string{`CREATE TABLESPACE test_tablespace LOCATION '/data/dir';`,
-				`ALTER TABLESPACE test_tablespace SET (param1=val1, param2=val2);`}
+				`ALTER TABLESPACE test_tablespace SET (param1 = val1, param2 = val2);`}
+			testutils.AssertBufferContents(tocfile.GlobalEntries, buffer, expectedStatements...)
+		})
+		It("prints a remote tablespace with server, path and handler options", func() {
+			expectedTablespace := backup.Tablespace{
+				Oid:               1,
+				Tablespace:        "remote_ts",
+				FileLocation:      "", // Ignored when path is present
+				Options:           "server=s3_server, path='/bucket/path', random_page_cost=4",
+				Spcfilehandlerbin: "$libdir/dfs_tablespace",
+				Spcfilehandlersrc: "remote_file_handler",
+			}
+			emptyMetadataMap := backup.MetadataMap{}
+			backup.PrintCreateTablespaceStatements(backupfile, tocfile, []backup.Tablespace{expectedTablespace}, emptyMetadataMap)
+
+			expectedStatements := []string{
+				`CREATE TABLESPACE remote_ts LOCATION '/bucket/path' SERVER s3_server HANDLER '$libdir/dfs_tablespace, remote_file_handler';`,
+				`ALTER TABLESPACE remote_ts SET (random_page_cost = 4);`,
+			}
+			testutils.AssertBufferContents(tocfile.GlobalEntries, buffer, expectedStatements...)
+		})
+	})
+
+	Describe("PrintCreateStorageServerStatements", func() {
+		It("prints a storage server without options", func() {
+			expectedServer := backup.StorageServer{Oid: 1, Server: "test_server"}
+			emptyMetadataMap := backup.MetadataMap{}
+
+			backup.PrintCreateStorageServerStatements(backupfile, tocfile, []backup.StorageServer{expectedServer}, emptyMetadataMap)
+
+			testutils.AssertBufferContents(tocfile.GlobalEntries, buffer, `CREATE STORAGE SERVER test_server;`)
+		})
+
+		It("prints a storage server with options", func() {
+			expectedServer := backup.StorageServer{
+				Oid:           1,
+				Server:        "test_server",
+				ServerOptions: "protocol=s3, region=us-east-1, endpoint=s3.example.com",
+			}
+			emptyMetadataMap := backup.MetadataMap{}
+
+			backup.PrintCreateStorageServerStatements(backupfile, tocfile, []backup.StorageServer{expectedServer}, emptyMetadataMap)
+
+			// Note: The order is deterministic because we sort the keys.
+			expectedStatements := []string{`CREATE STORAGE SERVER test_server OPTIONS(endpoint 's3.example.com', protocol 's3', region 'us-east-1');`}
+			testutils.AssertBufferContents(tocfile.GlobalEntries, buffer, expectedStatements...)
+		})
+	})
+
+	Describe("PrintCreateStorageUserMappingStatements", func() {
+		It("prints a storage user mapping without options", func() {
+			expectedUserMapping := backup.StorageUserMapping{Oid: 1, User: "testrole", Server: "test_server"}
+
+			backup.PrintCreateStorageUserMappingStatements(backupfile, tocfile, []backup.StorageUserMapping{expectedUserMapping})
+
+			testutils.AssertBufferContents(tocfile.GlobalEntries, buffer, `CREATE STORAGE USER MAPPING FOR testrole STORAGE SERVER test_server;`)
+		})
+
+		It("prints a storage user mapping with options", func() {
+			expectedUserMapping := backup.StorageUserMapping{
+				Oid:     1,
+				User:    "testrole",
+				Server:  "test_server",
+				Options: "secretkey=mysecret, accesskey=mykey",
+			}
+
+			backup.PrintCreateStorageUserMappingStatements(backupfile, tocfile, []backup.StorageUserMapping{expectedUserMapping})
+
+			// Note: The order is deterministic because we sort the keys.
+			expectedStatements := []string{`CREATE STORAGE USER MAPPING FOR testrole STORAGE SERVER test_server OPTIONS (accesskey 'mykey', secretkey 'mysecret');`}
 			testutils.AssertBufferContents(tocfile.GlobalEntries, buffer, expectedStatements...)
 		})
 	})
diff --git a/backup/queries_acl.go b/backup/queries_acl.go
index 06cfa54f..4163cb65 100644
--- a/backup/queries_acl.go
+++ b/backup/queries_acl.go
@@ -68,6 +68,7 @@ var (
TYPE_TRIGGER MetadataQueryParams
TYPE_TYPE MetadataQueryParams
TYPE_POLICY MetadataQueryParams
+ TYPE_STORAGE_SERVER MetadataQueryParams // CBDB only
)
func InitializeMetadataParams(connectionPool *dbconn.DBConn) {
@@ -118,6 +119,9 @@ func InitializeMetadataParams(connectionPool *dbconn.DBConn) {
 	if (connectionPool.Version.IsGPDB() && connectionPool.Version.AtLeast("6")) || connectionPool.Version.IsCBDB() {
 		TYPE_TYPE.ACLField = "typacl"
 	}
+
+	// CBDB only
+	TYPE_STORAGE_SERVER = MetadataQueryParams{ObjectType: toc.OBJ_STORAGE_SERVER, NameField: "srvname", OidField: "oid", ACLField: "srvacl", OwnerField: "srvowner", CatalogTable: "gp_storage_server", Shared: true}
}
type MetadataQueryStruct struct {
diff --git a/backup/queries_globals.go b/backup/queries_globals.go
index a387b2e1..9be9d49d 100644
--- a/backup/queries_globals.go
+++ b/backup/queries_globals.go
@@ -551,6 +551,9 @@ type Tablespace struct {
FileLocation string // FILESPACE in 5, LOCATION in 6 and later
SegmentLocations []string
Options string
+ // CBDB only
+ Spcfilehandlerbin string
+ Spcfilehandlersrc string
}
func (t Tablespace) GetMetadataEntry() (string, toc.MetadataEntry) {
@@ -591,17 +594,36 @@ func GetTablespaces(connectionPool *dbconn.DBConn) []Tablespace {
WHERE spcname != 'pg_default'
AND spcname != 'pg_global'`
+ cbdbQuery := `
+ SELECT oid,
+ quote_ident(spcname) AS tablespace,
+ CASE
+ WHEN spcfilehandlerbin IS NOT NULL THEN ''
+			ELSE '''' || pg_catalog.pg_tablespace_location(oid)::text || ''''
+ END AS filelocation,
+ coalesce(array_to_string(spcoptions, ', '), '') AS options,
+ coalesce(spcfilehandlerbin, '') AS spcfilehandlerbin,
+ coalesce(spcfilehandlersrc, '') AS spcfilehandlersrc
+ FROM pg_tablespace
+ WHERE spcname != 'pg_default'
+ AND spcname != 'pg_global'`
+
results := make([]Tablespace, 0)
var err error
 	if connectionPool.Version.IsGPDB() && connectionPool.Version.Before("6") {
err = connectionPool.Select(&results, before6Query)
+ } else if connectionPool.Version.IsCBDB() {
+ err = connectionPool.Select(&results, cbdbQuery)
} else {
err = connectionPool.Select(&results, atLeast6Query)
+ }
+ gplog.FatalOnError(err)
+
+	if (connectionPool.Version.IsGPDB() && connectionPool.Version.AtLeast("6")) || connectionPool.Version.IsCBDB() {
for i := 0; i < len(results); i++ {
 			results[i].SegmentLocations = GetSegmentTablespaces(connectionPool, results[i].Oid)
}
}
- gplog.FatalOnError(err)
return results
}
@@ -624,3 +646,88 @@ func GetDBSize(connectionPool *dbconn.DBConn) string {
gplog.FatalOnError(err)
return size.DBSize
}
+
+type StorageUserMapping struct {
+ Oid uint32
+ User string
+ Server string
+ Options string
+}
+
+func (sum StorageUserMapping) GetMetadataEntry() (string, toc.MetadataEntry) {
+ return "global",
+ toc.MetadataEntry{
+ Schema: "",
+ Name: sum.FQN(),
+ ObjectType: toc.OBJ_STORAGE_USER_MAPPING,
+ ReferenceObject: "",
+ StartByte: 0,
+ EndByte: 0,
+ }
+}
+
+func (sum StorageUserMapping) GetUniqueID() UniqueID {
+ return UniqueID{ClassID: GP_STORAGE_USER_MAPPING_OID, Oid: sum.Oid}
+}
+
+func (sum StorageUserMapping) FQN() string {
+ return fmt.Sprintf("%s ON %s", sum.User, sum.Server)
+}
+
+func GetStorageUserMapping(connectionPool *dbconn.DBConn) []StorageUserMapping {
+ usersQuery := `SELECT
+ u.oid as Oid,
+ quote_ident(pg_get_userbyid(u.umuser)) as User,
+ quote_ident(s.srvname) as Server,
+ coalesce(array_to_string(u.umoptions, ', '), '') AS options
+	FROM gp_storage_user_mapping u join gp_storage_server s on u.umserver = s.oid`
+
+ users := make([]StorageUserMapping, 0)
+ if err := connectionPool.Select(&users, usersQuery); err != nil {
+ gplog.FatalOnError(err)
+ }
+ return users
+}
+
+type StorageServer struct {
+ Oid uint32
+ Server string
+ ServerOwner string
+ ServerOptions string
+}
+
+func (ss StorageServer) GetMetadataEntry() (string, toc.MetadataEntry) {
+ return "global",
+ toc.MetadataEntry{
+ Schema: "",
+ Name: ss.FQN(),
+ ObjectType: toc.OBJ_STORAGE_SERVER,
+ ReferenceObject: "",
+ StartByte: 0,
+ EndByte: 0,
+ }
+}
+
+func (ss StorageServer) GetUniqueID() UniqueID {
+ return UniqueID{ClassID: GP_STORAGE_SERVER_OID, Oid: ss.Oid}
+}
+
+func (ss StorageServer) FQN() string {
+ return ss.Server
+}
+
+func GetStorageServers(connectionPool *dbconn.DBConn) []StorageServer {
+ serversQuery := `SELECT
+ s.oid as Oid,
+ quote_ident(s.srvname) as Server,
+ quote_ident(pg_get_userbyid(s.srvowner)) as ServerOwner,
+	coalesce(array_to_string(s.srvoptions, ', '), '') AS Serveroptions
+ FROM gp_storage_server s
+ where srvname != 'local_server'`
+
+ servers := make([]StorageServer, 0)
+ if err := connectionPool.Select(&servers, serversQuery); err != nil {
+ gplog.FatalOnError(err)
+ }
+ return servers
+}
diff --git a/backup/wrappers.go b/backup/wrappers.go
index 4516a21e..1898897a 100644
--- a/backup/wrappers.go
+++ b/backup/wrappers.go
@@ -762,3 +762,19 @@ func backupIncrementalMetadata() {
aoTableEntries := GetAOIncrementalMetadata(connectionPool)
globalTOC.IncrementalMetadata.AO = aoTableEntries
}
+
+func backupStorageServers(metadataFile *utils.FileWithByteCount) {
+	gplog.Verbose("Writing CREATE STORAGE SERVER statements to metadata file")
+ servers := GetStorageServers(connectionPool)
+ objectCounts[toc.OBJ_STORAGE_SERVER] = len(servers)
+	serverMetadata := GetMetadataForObjectType(connectionPool, TYPE_STORAGE_SERVER)
+	PrintCreateStorageServerStatements(metadataFile, globalTOC, servers, serverMetadata)
+}
+
+func backupStorageUserMappings(metadataFile *utils.FileWithByteCount) {
+	gplog.Verbose("Writing CREATE STORAGE USER MAPPING statements to metadata file")
+ users := GetStorageUserMapping(connectionPool)
+ objectCounts[toc.OBJ_STORAGE_USER_MAPPING] = len(users)
+
+ PrintCreateStorageUserMappingStatements(metadataFile, globalTOC, users)
+}
diff --git a/integration/metadata_globals_create_test.go b/integration/metadata_globals_create_test.go
index 2c6a2032..f3bda727 100644
--- a/integration/metadata_globals_create_test.go
+++ b/integration/metadata_globals_create_test.go
@@ -589,5 +589,139 @@ var _ = Describe("backup integration create statement tests", func() {
}
Fail("Tablespace 'test_tablespace' was not created")
})
+
+		It("creates a basic remote tablespace", func() {
+			// TODO: Re-enable this test once Cloudberry natively supports remote tablespaces.
+			// This test is skipped because creating a remote tablespace correctly requires a
+			// specific database extension to be installed. That extension intercepts the
+			// 'CREATE TABLESPACE' command to prevent the database from checking for a local
+			// directory, which does not exist for a remote tablespace and would otherwise
+			// cause an error.
+			Skip("Skipping remote tablespace test: requires a specific database plugin not present in the test environment.")
+
+			// The test logic below is preserved for when this test can be re-enabled.
+			if !connectionPool.Version.IsCBDB() {
+				Skip("Test is for CBDB remote tablespaces only")
+			}
+			testhelper.AssertQueryRuns(connectionPool, `CREATE STORAGE SERVER test_server OPTIONS(protocol 's3', endpoint 's3.example.com')`)
+			defer testhelper.AssertQueryRuns(connectionPool, `DROP STORAGE SERVER test_server`)
+
+			tablespaceToCreate := backup.Tablespace{
+				Tablespace:        "test_remote_tablespace_basic",
+				Options:           "server=test_server, path='/test/path', random_page_cost=4",
+				Spcfilehandlerbin: "$libdir/dfs_tablespace",
+				Spcfilehandlersrc: "remote_file_handler",
+			}
+			emptyMetadataMap := backup.MetadataMap{}
+			numTablespaces := len(backup.GetTablespaces(connectionPool))
+
+			backup.PrintCreateTablespaceStatements(backupfile, tocfile, []backup.Tablespace{tablespaceToCreate}, emptyMetadataMap)
+
+			gbuffer := BufferWithBytes([]byte(buffer.String()))
+			entries, _ := testutils.SliceBufferByEntries(tocfile.GlobalEntries, gbuffer)
+			for _, entry := range entries {
+				testhelper.AssertQueryRuns(connectionPool, entry)
+			}
+			defer testhelper.AssertQueryRuns(connectionPool, "DROP TABLESPACE test_remote_tablespace_basic")
+
+			resultTablespaces := backup.GetTablespaces(connectionPool)
+			Expect(resultTablespaces).To(HaveLen(numTablespaces + 1))
+
+			var resultTablespace backup.Tablespace
+			for _, ts := range resultTablespaces {
+				if ts.Tablespace == "test_remote_tablespace_basic" {
+					resultTablespace = ts
+					break
+				}
+			}
+			if resultTablespace.Tablespace == "" {
+				Fail("Tablespace 'test_remote_tablespace_basic' was not created")
+			}
+
+			Expect(resultTablespace.Options).To(ContainSubstring("server=test_server"))
+			Expect(resultTablespace.Options).To(ContainSubstring("path='/test/path'"))
+			Expect(resultTablespace.Options).To(ContainSubstring("random_page_cost=4"))
+			Expect(resultTablespace.Spcfilehandlerbin).To(Equal(tablespaceToCreate.Spcfilehandlerbin))
+			Expect(resultTablespace.Spcfilehandlersrc).To(Equal(tablespaceToCreate.Spcfilehandlersrc))
+			Expect(resultTablespace.FileLocation).To(BeEmpty())
+		})
+	})
+
+	Describe("PrintCreateStorageServerStatements", func() {
+		It("creates a basic storage server with owner and comment", func() {
+			if !connectionPool.Version.IsCBDB() {
+				Skip("Test is for CBDB storage servers only")
+			}
+
+			serverToCreate := backup.StorageServer{
+				Oid:           1,
+				Server:        "test_server",
+				ServerOptions: "protocol=s3, region=us-east-1, endpoint=s3.example.com",
+			}
+			serverMetadataMap := testutils.DefaultMetadataMap(toc.OBJ_STORAGE_SERVER, false, true, true, false)
+			serverMetadata := serverMetadataMap[serverToCreate.GetUniqueID()]
+			serverMetadata.Owner = "testrole"
+			numServers := len(backup.GetStorageServers(connectionPool))
+
+			backup.PrintCreateStorageServerStatements(backupfile, tocfile, []backup.StorageServer{serverToCreate}, serverMetadataMap)
+			testhelper.AssertQueryRuns(connectionPool, buffer.String())
+			defer testhelper.AssertQueryRuns(connectionPool, "DROP STORAGE SERVER test_server")
+
+			resultServers := backup.GetStorageServers(connectionPool)
+			Expect(resultServers).To(HaveLen(numServers + 1))
+			var resultServer backup.StorageServer
+			for _, srv := range resultServers {
+				if srv.Server == "test_server" {
+					resultServer = srv
+					break
+				}
+			}
+			if resultServer.Server == "" {
+				Fail("Storage Server 'test_server' was not created")
+			}
+			Expect(resultServer.ServerOptions).To(ContainSubstring("protocol=s3"))
+			Expect(resultServer.ServerOptions).To(ContainSubstring("region=us-east-1"))
+			Expect(resultServer.ServerOptions).To(ContainSubstring("endpoint=s3.example.com"))
+		})
+	})
+
+	Describe("PrintCreateStorageUserMappingStatements", func() {
+		It("creates a basic storage user mapping with a comment", func() {
+			if !connectionPool.Version.IsCBDB() {
+				Skip("Test is for CBDB storage user mappings only")
+			}
+
+			testhelper.AssertQueryRuns(connectionPool, `CREATE STORAGE SERVER test_server_for_mapping OPTIONS(protocol 's3', endpoint 's3.example.com')`)
+			defer testhelper.AssertQueryRuns(connectionPool, `DROP STORAGE SERVER test_server_for_mapping`)
+
+			mappingToCreate := backup.StorageUserMapping{
+				Oid:     1,
+				User:    "testrole",
+				Server:  "test_server_for_mapping",
+				Options: "accesskey=mykey, secretkey=mysecret",
+			}
+			numMappings := len(backup.GetStorageUserMapping(connectionPool))
+
+			backup.PrintCreateStorageUserMappingStatements(backupfile, tocfile, []backup.StorageUserMapping{mappingToCreate})
+
+			testhelper.AssertQueryRuns(connectionPool, buffer.String())
+			defer testhelper.AssertQueryRuns(connectionPool, "DROP STORAGE USER MAPPING FOR testrole STORAGE SERVER test_server_for_mapping")
+
+			resultMappings := backup.GetStorageUserMapping(connectionPool)
+			Expect(resultMappings).To(HaveLen(numMappings + 1))
+			var resultMapping backup.StorageUserMapping
+			for _, m := range resultMappings {
+				if m.User == "testrole" && m.Server == "test_server_for_mapping" {
+					resultMapping = m
+					break
+				}
+			}
+			if resultMapping.User == "" {
+				Fail("Storage User Mapping for 'testrole' on 'test_server_for_mapping' was not created")
+			}
+			Expect(resultMapping.Options).To(ContainSubstring("accesskey=mykey"))
+			Expect(resultMapping.Options).To(ContainSubstring("secretkey=mysecret"))
+		})
})
})
diff --git a/restore/restore.go b/restore/restore.go
index 72878e1a..6fcb3abe 100644
--- a/restore/restore.go
+++ b/restore/restore.go
@@ -209,7 +209,8 @@ func createDatabase(metadataFilename string) {
func restoreGlobal(metadataFilename string) {
 	objectTypes := []string{toc.OBJ_SESSION_GUC, toc.OBJ_DATABASE_GUC, toc.OBJ_DATABASE_METADATA,
-		toc.OBJ_RESOURCE_QUEUE, toc.OBJ_RESOURCE_GROUP, toc.OBJ_ROLE, toc.OBJ_ROLE_GUC, toc.OBJ_ROLE_GRANT, toc.OBJ_TABLESPACE}
+		toc.OBJ_RESOURCE_QUEUE, toc.OBJ_RESOURCE_GROUP, toc.OBJ_ROLE, toc.OBJ_ROLE_GUC, toc.OBJ_ROLE_GRANT, toc.OBJ_TABLESPACE,
+		toc.OBJ_STORAGE_USER_MAPPING, toc.OBJ_STORAGE_SERVER}
if MustGetFlagBool(options.CREATE_DB) {
objectTypes = append(objectTypes, toc.OBJ_DATABASE)
}
diff --git a/testutils/functions.go b/testutils/functions.go
index b4586e3b..416f1114 100644
--- a/testutils/functions.go
+++ b/testutils/functions.go
@@ -253,6 +253,8 @@ var objNameToClassID = map[string]uint32{
toc.OBJ_RULE: 2618,
toc.OBJ_SCHEMA: 2615,
toc.OBJ_SEQUENCE: 1259,
+ toc.OBJ_STORAGE_SERVER: 6015,
+ toc.OBJ_STORAGE_USER_MAPPING: 6131,
toc.OBJ_TABLE: 1259,
toc.OBJ_TABLESPACE: 1213,
toc.OBJ_TEXT_SEARCH_CONFIGURATION: 3602,
@@ -280,7 +282,7 @@ func DefaultACLForType(grantee string, objType string) backup.ACL {
 		Truncate:   objType == toc.OBJ_TABLE || objType == toc.OBJ_VIEW || objType == toc.OBJ_MATERIALIZED_VIEW,
 		References: objType == toc.OBJ_TABLE || objType == toc.OBJ_VIEW || objType == toc.OBJ_FOREIGN_TABLE || objType == toc.OBJ_MATERIALIZED_VIEW,
 		Trigger:    objType == toc.OBJ_TABLE || objType == toc.OBJ_VIEW || objType == toc.OBJ_FOREIGN_TABLE || objType == toc.OBJ_MATERIALIZED_VIEW,
-		Usage:      objType == toc.OBJ_LANGUAGE || objType == toc.OBJ_SCHEMA || objType == toc.OBJ_SEQUENCE || objType == toc.OBJ_FOREIGN_DATA_WRAPPER || objType == toc.OBJ_FOREIGN_SERVER,
+		Usage:      objType == toc.OBJ_LANGUAGE || objType == toc.OBJ_SCHEMA || objType == toc.OBJ_SEQUENCE || objType == toc.OBJ_FOREIGN_DATA_WRAPPER || objType == toc.OBJ_FOREIGN_SERVER || objType == toc.OBJ_STORAGE_SERVER,
 		Execute:    objType == toc.OBJ_FUNCTION || objType == toc.OBJ_AGGREGATE,
 		Create:     objType == toc.OBJ_DATABASE || objType == toc.OBJ_SCHEMA || objType == toc.OBJ_TABLESPACE,
 		Temporary:  objType == toc.OBJ_DATABASE,
@@ -298,7 +300,7 @@ func DefaultACLForTypeWithGrant(grantee string, objType string) backup.ACL {
 		TruncateWithGrant:   objType == toc.OBJ_TABLE || objType == toc.OBJ_VIEW || objType == toc.OBJ_MATERIALIZED_VIEW,
 		ReferencesWithGrant: objType == toc.OBJ_TABLE || objType == toc.OBJ_VIEW || objType == toc.OBJ_MATERIALIZED_VIEW,
 		TriggerWithGrant:    objType == toc.OBJ_TABLE || objType == toc.OBJ_VIEW || objType == toc.OBJ_MATERIALIZED_VIEW,
-		UsageWithGrant:      objType == toc.OBJ_LANGUAGE || objType == toc.OBJ_SCHEMA || objType == toc.OBJ_SEQUENCE || objType == toc.OBJ_FOREIGN_DATA_WRAPPER || objType == toc.OBJ_FOREIGN_SERVER,
+		UsageWithGrant:      objType == toc.OBJ_LANGUAGE || objType == toc.OBJ_SCHEMA || objType == toc.OBJ_SEQUENCE || objType == toc.OBJ_FOREIGN_DATA_WRAPPER || objType == toc.OBJ_FOREIGN_SERVER || objType == toc.OBJ_STORAGE_SERVER,
 		ExecuteWithGrant:    objType == toc.OBJ_FUNCTION,
 		CreateWithGrant:     objType == toc.OBJ_DATABASE || objType == toc.OBJ_SCHEMA || objType == toc.OBJ_TABLESPACE,
 		TemporaryWithGrant:  objType == toc.OBJ_DATABASE,
diff --git a/toc/toc.go b/toc/toc.go
index 18a1de81..7ca8eece 100644
--- a/toc/toc.go
+++ b/toc/toc.go
@@ -120,6 +120,10 @@ const (
OBJ_TYPE = "TYPE"
OBJ_USER_MAPPING = "USER MAPPING"
OBJ_VIEW = "VIEW"
+
+ // CBDB only
+ OBJ_STORAGE_SERVER = "STORAGE SERVER"
+ OBJ_STORAGE_USER_MAPPING = "STORAGE USER MAPPING"
)
func NewTOC(filename string) *TOC {
diff --git a/utils/util.go b/utils/util.go
index 9ecd4855..9da47572 100644
--- a/utils/util.go
+++ b/utils/util.go
@@ -304,3 +304,23 @@ func GetFileHash(filename string) ([32]byte, error) {
}
return filehash, nil
}
+
+/*
+ * Parses a string of the form "key1=val1, key2=val2, ..." into a map.
+ * This is useful for parsing options columns from PostgreSQL/GreenplumDB.
+ */
+func ParseOptions(optionsStr string) map[string]string {
+ optionsMap := make(map[string]string)
+ if optionsStr == "" {
+ return optionsMap
+ }
+
+ pairs := strings.Split(optionsStr, ", ")
+ for _, pair := range pairs {
+ parts := strings.SplitN(pair, "=", 2)
+ if len(parts) == 2 {
+ optionsMap[parts[0]] = parts[1]
+ }
+ }
+ return optionsMap
+}
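
One caveat for reviewers: ParseOptions splits strictly on ", " and on the
first "=", so an option value that itself contains ", " would be split
apart and partially dropped. A small sketch of the edge case, with the
parsing logic copied inline for a standalone demo (the input string is
hypothetical; the options exercised by this patch do not appear to hit it):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // parseOptions duplicates utils.ParseOptions so the demo is self-contained.
    func parseOptions(optionsStr string) map[string]string {
    	optionsMap := make(map[string]string)
    	if optionsStr == "" {
    		return optionsMap
    	}
    	for _, pair := range strings.Split(optionsStr, ", ") {
    		if parts := strings.SplitN(pair, "=", 2); len(parts) == 2 {
    			optionsMap[parts[0]] = parts[1]
    		}
    	}
    	return optionsMap
    }

    func main() {
    	// The ", " inside the quoted path is treated as a pair separator;
    	// the fragment "b'" has no "=" and is silently dropped.
    	m := parseOptions("path='/a, b', server=s1")
    	fmt.Println(m["path"])   // '/a
    	fmt.Println(m["server"]) // s1
    }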