On Wed, Mar 24, 2021 at 12:05:27PM -0400, Jan Wieck wrote:
> On 3/24/21 12:04 PM, Jan Wieck wrote:
>> In any case I changed the options so that they behave the same way the
>> existing -o and -O (for old/new postmaster options) work. I don't think
>> it would be wise to have option forwarding work differently between
>> options for postmaster and options for pg_dump/pg_restore.
>
> Attaching the actual diff might help.

I'd like to revive this thread, so I've created a commitfest entry [0] and
attached a hastily rebased patch that compiles and passes the tests. I am
aiming to spend some more time on this in the near future.
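
For reference, with the patch applied the new pass-through options would be
used roughly like this (the paths and the batch size are only illustrative,
not recommendations):

    pg_upgrade -b /old/bin -B /new/bin -d /old/data -D /new/data \
        --restore-options='--restore-blob-batch-size=1000'

pg_upgrade forwards --restore-blob-batch-size=1000 to pg_restore, which then
wraps the large object restore commands in explicit transactions of up to
1000 objects each, along these lines (the OID is made up for the example):

    BEGIN;
    SELECT pg_catalog.lo_create('16393');
    ...    -- up to 1000 lo_create()/comment/ACL groups
    COMMIT;

instead of consuming one XID per large object.
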
[0] https://commitfest.postgresql.org/39/3841/

--
Nathan Bossart
Amazon Web Services: https://aws.amazon.com

diff --git a/src/bin/pg_dump/parallel.c b/src/bin/pg_dump/parallel.c
index c8a70d9bc1..faf1953e18 100644
--- a/src/bin/pg_dump/parallel.c
+++ b/src/bin/pg_dump/parallel.c
@@ -858,6 +858,11 @@ RunWorker(ArchiveHandle *AH, ParallelSlot *slot)
*/
WaitForCommands(AH, pipefd);
+ /*
+ * Close any still-open BLOB batch transaction.
+ */
+ CommitBlobTransaction((Archive *)AH);
+
/*
* Disconnect from database and clean up.
*/
diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index fcc5f6bd05..f16ecdecc0 100644
--- a/src/bin/pg_dump/pg_backup.h
+++ b/src/bin/pg_dump/pg_backup.h
@@ -220,6 +220,8 @@ typedef struct Archive
int numWorkers; /* number of parallel processes */
char *sync_snapshot_id; /* sync snapshot id for parallel operation */
+ int blobBatchSize; /* # of blobs to restore per transaction */
+
/* info needed for string escaping */
int encoding; /* libpq code for client_encoding */
bool std_strings; /* standard_conforming_strings */
@@ -290,6 +292,7 @@ extern void WriteData(Archive *AH, const void *data, size_t dLen);
extern int StartBlob(Archive *AH, Oid oid);
extern int EndBlob(Archive *AH, Oid oid);
+extern void CommitBlobTransaction(Archive *AH);
extern void CloseArchive(Archive *AH);
extern void SetArchiveOptions(Archive *AH, DumpOptions *dopt, RestoreOptions *ropt);
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 233198afc0..7cfbed5e75 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -68,6 +68,7 @@ typedef struct _parallelReadyList
bool sorted; /* are valid entries currently sorted? */
} ParallelReadyList;
+static int blobBatchCount = 0;
static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt,
const int compression, bool dosync, ArchiveMode mode,
@@ -266,6 +267,8 @@ CloseArchive(Archive *AHX)
int res = 0;
ArchiveHandle *AH = (ArchiveHandle *) AHX;
+ CommitBlobTransaction(AHX);
+
AH->ClosePtr(AH);
/* Close the output */
@@ -279,6 +282,23 @@ CloseArchive(Archive *AHX)
pg_fatal("could not close output file: %m");
}
+/* Public */
+void
+CommitBlobTransaction(Archive *AHX)
+{
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
+
+ if (blobBatchCount > 0)
+ {
+ ahprintf(AH, "--\n");
+ ahprintf(AH, "-- End BLOB restore batch\n");
+ ahprintf(AH, "--\n");
+ ahprintf(AH, "COMMIT;\n\n");
+
+ blobBatchCount = 0;
+ }
+}
+
/* Public */
void
SetArchiveOptions(Archive *AH, DumpOptions *dopt, RestoreOptions *ropt)
@@ -3489,6 +3509,57 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData)
{
RestoreOptions *ropt = AH->public.ropt;
+ /* We restore BLOBs in batches to reduce XID consumption */
+ if (strcmp(te->desc, "BLOB") == 0 && AH->public.blobBatchSize > 0)
+ {
+ if (blobBatchCount > 0)
+ {
+ /* We are inside a BLOB restore transaction */
+ if (blobBatchCount >= AH->public.blobBatchSize)
+ {
+ /*
+ * We reached the batch size with the previous BLOB.
+ * Commit and start a new batch.
+ */
+ ahprintf(AH, "--\n");
+ ahprintf(AH, "-- BLOB batch size reached\n");
+ ahprintf(AH, "--\n");
+ ahprintf(AH, "COMMIT;\n");
+ ahprintf(AH, "BEGIN;\n\n");
+
+ blobBatchCount = 1;
+ }
+ else
+ {
+ /* This one still fits into the current batch */
+ blobBatchCount++;
+ }
+ }
+ else
+ {
+ /* Not inside a transaction, start a new batch */
+ ahprintf(AH, "--\n");
+ ahprintf(AH, "-- Start BLOB restore batch\n");
+ ahprintf(AH, "--\n");
+ ahprintf(AH, "BEGIN;\n\n");
+
+ blobBatchCount = 1;
+ }
+ }
+ else
+ {
+ /* Not a BLOB. If we have a BLOB batch open, close it. */
+ if (blobBatchCount > 0)
+ {
+ ahprintf(AH, "--\n");
+ ahprintf(AH, "-- End BLOB restore batch\n");
+ ahprintf(AH, "--\n");
+ ahprintf(AH, "COMMIT;\n\n");
+
+ blobBatchCount = 0;
+ }
+ }
+
/* Select owner, schema, tablespace and default AM as necessary */
_becomeOwner(AH, te);
_selectOutputSchema(AH, te->namespace);
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index d25709ad5f..17c0dd7f0c 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -196,11 +196,20 @@ static inline void dumpComment(Archive *fout, const char *type,
const char *name, const char *namespace,
const char *owner, CatalogId catalogId,
int subid, DumpId dumpId);
+static bool dumpCommentQuery(Archive *fout, PQExpBuffer query, PQExpBuffer tag,
+ const char *type, const char *name,
+ const char *namespace, const char *owner,
+ CatalogId catalogId, int subid, DumpId dumpId,
+ const char *initdb_comment);
static int findComments(Oid classoid, Oid objoid, CommentItem **items);
static void collectComments(Archive *fout);
static void dumpSecLabel(Archive *fout, const char *type, const char *name,
const char *namespace, const char *owner,
CatalogId catalogId, int subid, DumpId dumpId);
+static bool dumpSecLabelQuery(Archive *fout, PQExpBuffer query, PQExpBuffer tag,
+ const char *type, const char *name,
+ const char *namespace, const char *owner,
+ CatalogId catalogId, int subid, DumpId dumpId);
static int findSecLabels(Oid classoid, Oid objoid, SecLabelItem **items);
static void collectSecLabels(Archive *fout);
static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
@@ -256,6 +265,12 @@ static DumpId dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
const char *type, const char *name, const char *subname,
const char *nspname, const char *owner,
const DumpableAcl *dacl);
+static bool dumpACLQuery(Archive *fout, PQExpBuffer query, PQExpBuffer tag,
+ DumpId objDumpId, DumpId altDumpId,
+ const char *type, const char *name,
+ const char *subname,
+ const char *nspname, const char *owner,
+ const DumpableAcl *dacl);
static void getDependencies(Archive *fout);
static void BuildArchiveDependencies(Archive *fout);
@@ -3477,11 +3492,43 @@ dumpBlob(Archive *fout, const BlobInfo *binfo)
{
PQExpBuffer cquery = createPQExpBuffer();
PQExpBuffer dquery = createPQExpBuffer();
+ PQExpBuffer tag = createPQExpBuffer();
+ teSection section = SECTION_PRE_DATA;
appendPQExpBuffer(cquery,
"SELECT pg_catalog.lo_create('%s');\n",
binfo->dobj.name);
+ /*
+ * In binary upgrade mode we put all the queries needed to restore
+ * one large object into a single TOC entry and emit it as
+ * SECTION_DATA so that large objects can be restored in parallel.
+ */
+ if (fout->dopt->binary_upgrade)
+ {
+ section = SECTION_DATA;
+
+ /* Dump comment if any */
+ if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
+ dumpCommentQuery(fout, cquery, tag, "LARGE OBJECT",
+ binfo->dobj.name, NULL, binfo->rolname,
+ binfo->dobj.catId, 0, binfo->dobj.dumpId, NULL);
+
+ /* Dump security label if any */
+ if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
+ dumpSecLabelQuery(fout, cquery, tag, "LARGE OBJECT",
+ binfo->dobj.name,
+ NULL, binfo->rolname,
+ binfo->dobj.catId, 0, binfo->dobj.dumpId);
+
+ /* Dump ACL if any */
+ if (binfo->dobj.dump & DUMP_COMPONENT_ACL)
+ dumpACLQuery(fout, cquery, tag,
+ binfo->dobj.dumpId, InvalidDumpId, "LARGE OBJECT",
+ binfo->dobj.name, NULL,
+ NULL, binfo->rolname, &binfo->dacl);
+ }
+
appendPQExpBuffer(dquery,
"SELECT pg_catalog.lo_unlink('%s');\n",
binfo->dobj.name);
@@ -3491,27 +3538,30 @@ dumpBlob(Archive *fout, const BlobInfo *binfo)
ARCHIVE_OPTS(.tag = binfo->dobj.name,
.owner = binfo->rolname,
.description = "BLOB",
- .section = SECTION_PRE_DATA,
+ .section = section,
.createStmt = cquery->data,
.dropStmt = dquery->data));
- /* Dump comment if any */
- if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
- dumpComment(fout, "LARGE OBJECT", binfo->dobj.name,
- NULL, binfo->rolname,
- binfo->dobj.catId, 0, binfo->dobj.dumpId);
+ if (!fout->dopt->binary_upgrade)
+ {
+ /* Dump comment if any */
+ if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
+ dumpComment(fout, "LARGE OBJECT", binfo->dobj.name,
+ NULL, binfo->rolname,
+ binfo->dobj.catId, 0, binfo->dobj.dumpId);
- /* Dump security label if any */
- if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
- dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name,
- NULL, binfo->rolname,
- binfo->dobj.catId, 0, binfo->dobj.dumpId);
+ /* Dump security label if any */
+ if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
+ dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name,
+ NULL, binfo->rolname,
+ binfo->dobj.catId, 0, binfo->dobj.dumpId);
- /* Dump ACL if any */
- if (binfo->dobj.dump & DUMP_COMPONENT_ACL)
- dumpACL(fout, binfo->dobj.dumpId, InvalidDumpId, "LARGE OBJECT",
- binfo->dobj.name, NULL,
- NULL, binfo->rolname, &binfo->dacl);
+ /* Dump ACL if any */
+ if (binfo->dobj.dump & DUMP_COMPONENT_ACL)
+ dumpACL(fout, binfo->dobj.dumpId, InvalidDumpId, "LARGE OBJECT",
+ binfo->dobj.name, NULL,
+ NULL, binfo->rolname, &binfo->dacl);
+ }
destroyPQExpBuffer(cquery);
destroyPQExpBuffer(dquery);
@@ -9442,6 +9492,38 @@ dumpCommentExtended(Archive *fout, const char *type,
const char *owner, CatalogId catalogId,
int subid, DumpId dumpId,
const char *initdb_comment)
+{
+ PQExpBuffer query = createPQExpBuffer();
+ PQExpBuffer tag = createPQExpBuffer();
+
+ if (dumpCommentQuery(fout, query, tag, type, name, namespace, owner,
+ catalogId, subid, dumpId, initdb_comment))
+ {
+ /*
+ * We mark comments as SECTION_NONE because they really belong in the
+ * same section as their parent, whether that is pre-data or
+ * post-data.
+ */
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
+ ARCHIVE_OPTS(.tag = tag->data,
+ .namespace = namespace,
+ .owner = owner,
+ .description = "COMMENT",
+ .section = SECTION_NONE,
+ .createStmt = query->data,
+ .deps = &dumpId,
+ .nDeps = 1));
+ }
+ destroyPQExpBuffer(query);
+ destroyPQExpBuffer(tag);
+}
+
+static bool
+dumpCommentQuery(Archive *fout, PQExpBuffer query, PQExpBuffer tag,
+ const char *type, const char *name,
+ const char *namespace, const char *owner,
+ CatalogId catalogId, int subid, DumpId dumpId,
+ const char *initdb_comment)
{
DumpOptions *dopt = fout->dopt;
CommentItem *comments;
@@ -9449,19 +9531,19 @@ dumpCommentExtended(Archive *fout, const char *type,
/* do nothing, if --no-comments is supplied */
if (dopt->no_comments)
- return;
+ return false;
/* Comments are schema not data ... except blob comments are data */
if (strcmp(type, "LARGE OBJECT") != 0)
{
if (dopt->dataOnly)
- return;
+ return false;
}
else
{
/* We do dump blob comments in binary-upgrade mode */
if (dopt->schemaOnly && !dopt->binary_upgrade)
- return;
+ return false;
}
/* Search for comments associated with catalogId, using table */
@@ -9499,9 +9581,6 @@ dumpCommentExtended(Archive *fout, const char *type,
/* If a comment exists, build COMMENT ON statement */
if (ncomments > 0)
{
- PQExpBuffer query = createPQExpBuffer();
- PQExpBuffer tag = createPQExpBuffer();
-
appendPQExpBuffer(query, "COMMENT ON %s ", type);
if (namespace && *namespace)
appendPQExpBuffer(query, "%s.", fmtId(namespace));
@@ -9511,24 +9590,10 @@ dumpCommentExtended(Archive *fout, const char *type,
appendPQExpBuffer(tag, "%s %s", type, name);
- /*
- * We mark comments as SECTION_NONE because they really belong in the
- * same section as their parent, whether that is pre-data or
- * post-data.
- */
- ArchiveEntry(fout, nilCatalogId, createDumpId(),
- ARCHIVE_OPTS(.tag = tag->data,
- .namespace = namespace,
- .owner = owner,
- .description = "COMMENT",
- .section = SECTION_NONE,
- .createStmt = query->data,
- .deps = &dumpId,
- .nDeps = 1));
-
- destroyPQExpBuffer(query);
- destroyPQExpBuffer(tag);
+ return true;
}
+
+ return false;
}
/*
@@ -14423,23 +14488,65 @@ dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
const DumpableAcl *dacl)
{
DumpId aclDumpId = InvalidDumpId;
+ PQExpBuffer query = createPQExpBuffer();
+ PQExpBuffer tag = createPQExpBuffer();
+
+ if (dumpACLQuery(fout, query, tag, objDumpId, altDumpId,
+ type, name, subname, nspname, owner, dacl))
+ {
+ DumpId aclDeps[2];
+ int nDeps = 0;
+
+ if (subname)
+ appendPQExpBuffer(tag, "COLUMN %s.%s", name, subname);
+ else
+ appendPQExpBuffer(tag, "%s %s", type, name);
+
+ aclDeps[nDeps++] = objDumpId;
+ if (altDumpId != InvalidDumpId)
+ aclDeps[nDeps++] = altDumpId;
+
+ aclDumpId = createDumpId();
+
+ ArchiveEntry(fout, nilCatalogId, aclDumpId,
+ ARCHIVE_OPTS(.tag = tag->data,
+ .namespace = nspname,
+ .owner = owner,
+ .description = "ACL",
+ .section = SECTION_NONE,
+ .createStmt = query->data,
+ .deps = aclDeps,
+ .nDeps = nDeps));
+
+ }
+
+ destroyPQExpBuffer(query);
+ destroyPQExpBuffer(tag);
+
+ return aclDumpId;
+}
+
+static bool
+dumpACLQuery(Archive *fout, PQExpBuffer query, PQExpBuffer tag,
+ DumpId objDumpId, DumpId altDumpId,
+ const char *type, const char *name, const char *subname,
+ const char *nspname, const char *owner,
+ const DumpableAcl *dacl)
+{
DumpOptions *dopt = fout->dopt;
const char *acls = dacl->acl;
const char *acldefault = dacl->acldefault;
char privtype = dacl->privtype;
const char *initprivs = dacl->initprivs;
const char *baseacls;
- PQExpBuffer sql;
/* Do nothing if ACL dump is not enabled */
if (dopt->aclsSkip)
- return InvalidDumpId;
+ return false;
/* --data-only skips ACLs *except* BLOB ACLs */
if (dopt->dataOnly && strcmp(type, "LARGE OBJECT") != 0)
- return InvalidDumpId;
-
- sql = createPQExpBuffer();
+ return false;
/*
* In binary upgrade mode, we don't run an extension's script but instead
@@ -14457,13 +14564,13 @@ dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
if (dopt->binary_upgrade && privtype == 'e' &&
initprivs && *initprivs != '\0')
{
- appendPQExpBufferStr(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\n");
+ appendPQExpBufferStr(query, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\n");
if (!buildACLCommands(name, subname, nspname, type,
initprivs, acldefault, owner,
- "", fout->remoteVersion, sql))
+ "", fout->remoteVersion, query))
pg_fatal("could not parse initial ACL list (%s) or default (%s) for object \"%s\" (%s)",
initprivs, acldefault, name, type);
- appendPQExpBufferStr(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\n");
+ appendPQExpBufferStr(query, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\n");
}
/*
@@ -14485,43 +14592,19 @@ dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
if (!buildACLCommands(name, subname, nspname, type,
acls, baseacls, owner,
- "", fout->remoteVersion, sql))
+ "", fout->remoteVersion, query))
pg_fatal("could not parse ACL list (%s) or default (%s) for object \"%s\" (%s)",
acls, baseacls, name, type);
- if (sql->len > 0)
+ if (query->len > 0 && tag != NULL)
{
- PQExpBuffer tag = createPQExpBuffer();
- DumpId aclDeps[2];
- int nDeps = 0;
-
if (subname)
appendPQExpBuffer(tag, "COLUMN %s.%s", name, subname);
else
appendPQExpBuffer(tag, "%s %s", type, name);
-
- aclDeps[nDeps++] = objDumpId;
- if (altDumpId != InvalidDumpId)
- aclDeps[nDeps++] = altDumpId;
-
- aclDumpId = createDumpId();
-
- ArchiveEntry(fout, nilCatalogId, aclDumpId,
- ARCHIVE_OPTS(.tag = tag->data,
- .namespace = nspname,
- .owner = owner,
- .description = "ACL",
- .section = SECTION_NONE,
- .createStmt = sql->data,
- .deps = aclDeps,
- .nDeps = nDeps));
-
- destroyPQExpBuffer(tag);
}
- destroyPQExpBuffer(sql);
-
- return aclDumpId;
+ return true;
}
/*
@@ -14546,35 +14629,59 @@ static void
dumpSecLabel(Archive *fout, const char *type, const char *name,
const char *namespace, const char *owner,
CatalogId catalogId, int subid, DumpId dumpId)
+{
+ PQExpBuffer query = createPQExpBuffer();
+ PQExpBuffer tag = createPQExpBuffer();
+
+ if (dumpSecLabelQuery(fout, query, tag, type, name,
+ namespace, owner, catalogId, subid, dumpId))
+ {
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
+ ARCHIVE_OPTS(.tag = tag->data,
+ .namespace = namespace,
+ .owner = owner,
+ .description = "SECURITY LABEL",
+ .section = SECTION_NONE,
+ .createStmt = query->data,
+ .deps = &dumpId,
+ .nDeps = 1));
+ }
+
+ destroyPQExpBuffer(query);
+ destroyPQExpBuffer(tag);
+}
+
+static bool
+dumpSecLabelQuery(Archive *fout, PQExpBuffer query, PQExpBuffer tag,
+ const char *type, const char *name,
+ const char *namespace, const char *owner,
+ CatalogId catalogId, int subid, DumpId dumpId)
{
DumpOptions *dopt = fout->dopt;
SecLabelItem *labels;
int nlabels;
int i;
- PQExpBuffer query;
/* do nothing, if --no-security-labels is supplied */
if (dopt->no_security_labels)
- return;
+ return false;
/* Security labels are schema not data ... except blob labels are data */
if (strcmp(type, "LARGE OBJECT") != 0)
{
if (dopt->dataOnly)
- return;
+ return false;
}
else
{
/* We do dump blob security labels in binary-upgrade mode */
if (dopt->schemaOnly && !dopt->binary_upgrade)
- return;
+ return false;
}
/* Search for security labels associated with catalogId, using table */
nlabels = findSecLabels(catalogId.tableoid, catalogId.oid, &labels);
- query = createPQExpBuffer();
-
for (i = 0; i < nlabels; i++)
{
/*
@@ -14595,22 +14702,11 @@ dumpSecLabel(Archive *fout, const char *type, const char *name,
if (query->len > 0)
{
- PQExpBuffer tag = createPQExpBuffer();
-
appendPQExpBuffer(tag, "%s %s", type, name);
- ArchiveEntry(fout, nilCatalogId, createDumpId(),
- ARCHIVE_OPTS(.tag = tag->data,
- .namespace = namespace,
- .owner = owner,
- .description = "SECURITY LABEL",
- .section = SECTION_NONE,
- .createStmt = query->data,
- .deps = &dumpId,
- .nDeps = 1));
- destroyPQExpBuffer(tag);
+ return true;
}
- destroyPQExpBuffer(query);
+ return false;
}
/*
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c
index 049a100634..2159f72ffb 100644
--- a/src/bin/pg_dump/pg_restore.c
+++ b/src/bin/pg_dump/pg_restore.c
@@ -60,6 +60,7 @@ main(int argc, char **argv)
int c;
int exit_code;
int numWorkers = 1;
+ int blobBatchSize = 0;
Archive *AH;
char *inputFileSpec;
static int disable_triggers = 0;
@@ -123,6 +124,7 @@ main(int argc, char **argv)
{"no-publications", no_argument, &no_publications, 1},
{"no-security-labels", no_argument, &no_security_labels, 1},
{"no-subscriptions", no_argument, &no_subscriptions, 1},
+ {"restore-blob-batch-size", required_argument, NULL, 4},
{NULL, 0, NULL, 0}
};
@@ -286,6 +288,10 @@ main(int argc, char **argv)
set_dump_section(optarg, &(opts->dumpSections));
break;
+ case 4: /* # of blobs to restore per transaction */
+ blobBatchSize = atoi(optarg);
+ break;
+
default:
/* getopt_long already emitted a complaint */
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
@@ -405,6 +411,7 @@ main(int argc, char **argv)
SortTocFromFile(AH);
AH->numWorkers = numWorkers;
+ AH->blobBatchSize = blobBatchSize;
if (opts->tocSummary)
PrintTOCSummary(AH);
@@ -478,6 +485,8 @@ usage(const char *progname)
printf(_(" --use-set-session-authorization\n"
" use SET SESSION AUTHORIZATION commands instead of\n"
" ALTER OWNER commands to set ownership\n"));
+ printf(_(" --restore-blob-batch-size=NUM\n"
+ " attempt to restore NUM large objects per transaction\n"));
printf(_("\nConnection options:\n"));
printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
diff --git a/src/bin/pg_upgrade/dump.c b/src/bin/pg_upgrade/dump.c
index 29b9e44f78..9b838c88e5 100644
--- a/src/bin/pg_upgrade/dump.c
+++ b/src/bin/pg_upgrade/dump.c
@@ -53,8 +53,11 @@ generate_old_dump(void)
parallel_exec_prog(log_file_name, NULL,
"\"%s/pg_dump\" %s --schema-only --quote-all-identifiers "
+ "%s "
"--binary-upgrade --format=custom %s --file=\"%s/%s\" %s",
new_cluster.bindir, cluster_conn_opts(&old_cluster),
+ user_opts.pg_dump_opts ?
+ user_opts.pg_dump_opts : "",
log_opts.verbose ? "--verbose" : "",
log_opts.dumpdir,
sql_file_name, escaped_connstr.data);
diff --git a/src/bin/pg_upgrade/option.c b/src/bin/pg_upgrade/option.c
index fbab1c4fb7..4bcd925874 100644
--- a/src/bin/pg_upgrade/option.c
+++ b/src/bin/pg_upgrade/option.c
@@ -56,6 +56,8 @@ parseCommandLine(int argc, char *argv[])
{"socketdir", required_argument, NULL, 's'},
{"verbose", no_argument, NULL, 'v'},
{"clone", no_argument, NULL, 1},
+ {"dump-options", required_argument, NULL, 2},
+ {"restore-options", required_argument, NULL, 3},
{NULL, 0, NULL, 0}
};
@@ -194,6 +196,34 @@ parseCommandLine(int argc, char *argv[])
user_opts.transfer_mode = TRANSFER_MODE_CLONE;
break;
+ case 2:
+ /* append to any previously given --dump-options */
+ if (!user_opts.pg_dump_opts)
+ user_opts.pg_dump_opts = pg_strdup(optarg);
+ else
+ {
+ char *old_opts = user_opts.pg_dump_opts;
+
+ user_opts.pg_dump_opts = psprintf("%s %s",
+ old_opts, optarg);
+ free(old_opts);
+ }
+ break;
+
+ case 3:
+ /* append to any previously given --restore-options */
+ if (!user_opts.pg_restore_opts)
+ user_opts.pg_restore_opts = pg_strdup(optarg);
+ else
+ {
+ char *old_opts = user_opts.pg_restore_opts;
+
+ user_opts.pg_restore_opts = psprintf("%s %s",
+ old_opts, optarg);
+ free(old_opts);
+ }
+ break;
+
default:
fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
os_info.progname);
@@ -283,6 +313,8 @@ usage(void)
printf(_(" -v, --verbose enable verbose internal logging\n"));
printf(_(" -V, --version display version information, then exit\n"));
printf(_(" --clone clone instead of copying files to new cluster\n"));
+ printf(_(" --dump-options=OPTIONS options to pass to pg_dump\n"));
+ printf(_(" --restore-options=OPTIONS options to pass to pg_restore\n"));
printf(_(" -?, --help show this help, then exit\n"));
printf(_("\n"
"Before running pg_upgrade you must:\n"
diff --git a/src/bin/pg_upgrade/pg_upgrade.c b/src/bin/pg_upgrade/pg_upgrade.c
index 115faa222e..3b98312ed2 100644
--- a/src/bin/pg_upgrade/pg_upgrade.c
+++ b/src/bin/pg_upgrade/pg_upgrade.c
@@ -457,10 +457,13 @@ create_new_objects(void)
true,
true,
"\"%s/pg_restore\" %s %s --exit-on-error --verbose "
+ "%s "
"--dbname postgres \"%s/%s\"",
new_cluster.bindir,
cluster_conn_opts(&new_cluster),
create_opts,
+ user_opts.pg_restore_opts ?
+ user_opts.pg_restore_opts : "",
log_opts.dumpdir,
sql_file_name);
@@ -495,10 +498,13 @@ create_new_objects(void)
parallel_exec_prog(log_file_name,
NULL,
"\"%s/pg_restore\" %s %s --exit-on-error --verbose "
+ "%s "
"--dbname template1 \"%s/%s\"",
new_cluster.bindir,
cluster_conn_opts(&new_cluster),
create_opts,
+ user_opts.pg_restore_opts ?
+ user_opts.pg_restore_opts : "",
log_opts.dumpdir,
sql_file_name);
}
diff --git a/src/bin/pg_upgrade/pg_upgrade.h b/src/bin/pg_upgrade/pg_upgrade.h
index 60c3c8dd68..477de6f717 100644
--- a/src/bin/pg_upgrade/pg_upgrade.h
+++ b/src/bin/pg_upgrade/pg_upgrade.h
@@ -295,6 +295,8 @@ typedef struct
transferMode transfer_mode; /* copy files or link them? */
int jobs; /* number of processes/threads to use */
char *socketdir; /* directory to use for Unix sockets */
+ char *pg_dump_opts; /* options to pass to pg_dump */
+ char *pg_restore_opts; /* options to pass to pg_restore */
} UserOpts;
typedef struct