On 3/24/21 12:04 PM, Jan Wieck wrote:
In any case, I changed the options so that they behave the same way the
existing -o and -O options (for old/new postmaster options) do. I don't think
it would be wise to have option forwarding work differently between
options for the postmaster and options for pg_dump/pg_restore.

Attaching the actual diff might help.

--
Jan Wieck
Principal Database Engineer
Amazon Web Services
diff --git a/src/bin/pg_dump/parallel.c b/src/bin/pg_dump/parallel.c
index c7351a4..4a611d0 100644
--- a/src/bin/pg_dump/parallel.c
+++ b/src/bin/pg_dump/parallel.c
@@ -864,6 +864,11 @@ RunWorker(ArchiveHandle *AH, ParallelSlot *slot)
 	WaitForCommands(AH, pipefd);
 
 	/*
+	 * Close any BLOB batch transaction that may still be open.
+	 */
+	CommitBlobTransaction((Archive *)AH);
+
+	/*
 	 * Disconnect from database and clean up.
 	 */
 	set_cancel_slot_archive(slot, NULL);
diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index 0296b9b..cd8a590 100644
--- a/src/bin/pg_dump/pg_backup.h
+++ b/src/bin/pg_dump/pg_backup.h
@@ -203,6 +203,8 @@ typedef struct Archive
 	int			numWorkers;		/* number of parallel processes */
 	char	   *sync_snapshot_id;	/* sync snapshot id for parallel operation */
 
+	int			blobBatchSize;	/* # of blobs to restore per transaction */
+
 	/* info needed for string escaping */
 	int			encoding;		/* libpq code for client_encoding */
 	bool		std_strings;	/* standard_conforming_strings */
@@ -269,6 +271,7 @@ extern void WriteData(Archive *AH, const void *data, size_t dLen);
 extern int	StartBlob(Archive *AH, Oid oid);
 extern int	EndBlob(Archive *AH, Oid oid);
 
+extern void	CommitBlobTransaction(Archive *AH);
 extern void CloseArchive(Archive *AH);
 
 extern void SetArchiveOptions(Archive *AH, DumpOptions *dopt, RestoreOptions *ropt);
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 1f82c64..8331e8a 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -68,6 +68,7 @@ typedef struct _parallelReadyList
 	bool		sorted;			/* are valid entries currently sorted? */
 } ParallelReadyList;
 
+static int		blobBatchCount = 0;
 
 static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt,
 							   const int compression, bool dosync, ArchiveMode mode,
@@ -265,6 +266,8 @@ CloseArchive(Archive *AHX)
 	int			res = 0;
 	ArchiveHandle *AH = (ArchiveHandle *) AHX;
 
+	CommitBlobTransaction(AHX);
+
 	AH->ClosePtr(AH);
 
 	/* Close the output */
@@ -279,6 +282,23 @@ CloseArchive(Archive *AHX)
 
 /* Public */
 void
+CommitBlobTransaction(Archive *AHX)
+{
+	ArchiveHandle *AH = (ArchiveHandle *) AHX;
+
+	if (blobBatchCount > 0)
+	{
+		ahprintf(AH, "--\n");
+		ahprintf(AH, "-- End BLOB restore batch\n");
+		ahprintf(AH, "--\n");
+		ahprintf(AH, "COMMIT;\n\n");
+
+		blobBatchCount = 0;
+	}
+}
+
+/* Public */
+void
 SetArchiveOptions(Archive *AH, DumpOptions *dopt, RestoreOptions *ropt)
 {
 	/* Caller can omit dump options, in which case we synthesize them */
@@ -3531,6 +3551,57 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData)
 {
 	RestoreOptions *ropt = AH->public.ropt;
 
+	/* We restore BLOBs in batches to reduce XID consumption */
+	if (strcmp(te->desc, "BLOB") == 0 && AH->public.blobBatchSize > 0)
+	{
+		if (blobBatchCount > 0)
+		{
+			/* We are inside a BLOB restore transaction */
+			if (blobBatchCount >= AH->public.blobBatchSize)
+			{
+				/*
+				 * We did reach the batch size with the previous BLOB.
+				 * Commit and start a new batch.
+				 */
+				ahprintf(AH, "--\n");
+				ahprintf(AH, "-- BLOB batch size reached\n");
+				ahprintf(AH, "--\n");
+				ahprintf(AH, "COMMIT;\n");
+				ahprintf(AH, "BEGIN;\n\n");
+
+				blobBatchCount = 1;
+			}
+			else
+			{
+				/* This one still fits into the current batch */
+				blobBatchCount++;
+			}
+		}
+		else
+		{
+			/* Not inside a transaction, start a new batch */
+			ahprintf(AH, "--\n");
+			ahprintf(AH, "-- Start BLOB restore batch\n");
+			ahprintf(AH, "--\n");
+			ahprintf(AH, "BEGIN;\n\n");
+
+			blobBatchCount = 1;
+		}
+	}
+	else
+	{
+		/* Not a BLOB. If we have a BLOB batch open, close it. */
+		if (blobBatchCount > 0)
+		{
+			ahprintf(AH, "--\n");
+			ahprintf(AH, "-- End BLOB restore batch\n");
+			ahprintf(AH, "--\n");
+			ahprintf(AH, "COMMIT;\n\n");
+
+			blobBatchCount = 0;
+		}
+	}
+
 	/* Select owner, schema, tablespace and default AM as necessary */
 	_becomeOwner(AH, te);
 	_selectOutputSchema(AH, te->namespace);
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index f8bec3f..f153f08 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -165,12 +165,20 @@ static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
 static void dumpComment(Archive *fout, const char *type, const char *name,
 						const char *namespace, const char *owner,
 						CatalogId catalogId, int subid, DumpId dumpId);
+static bool dumpCommentQuery(Archive *fout, PQExpBuffer query, PQExpBuffer tag,
+							 const char *type, const char *name,
+							 const char *namespace, const char *owner,
+							 CatalogId catalogId, int subid, DumpId dumpId);
 static int	findComments(Archive *fout, Oid classoid, Oid objoid,
 						 CommentItem **items);
 static int	collectComments(Archive *fout, CommentItem **items);
 static void dumpSecLabel(Archive *fout, const char *type, const char *name,
 						 const char *namespace, const char *owner,
 						 CatalogId catalogId, int subid, DumpId dumpId);
+static bool dumpSecLabelQuery(Archive *fout, PQExpBuffer query, PQExpBuffer tag,
+							  const char *type, const char *name,
+							  const char *namespace, const char *owner,
+							  CatalogId catalogId, int subid, DumpId dumpId);
 static int	findSecLabels(Archive *fout, Oid classoid, Oid objoid,
 						  SecLabelItem **items);
 static int	collectSecLabels(Archive *fout, SecLabelItem **items);
@@ -227,6 +235,13 @@ static DumpId dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
 					  const char *nspname, const char *owner,
 					  const char *acls, const char *racls,
 					  const char *initacls, const char *initracls);
+static bool dumpACLQuery(Archive *fout, PQExpBuffer query, PQExpBuffer tag,
+						 DumpId objDumpId, DumpId altDumpId,
+						 const char *type, const char *name,
+						 const char *subname,
+						 const char *nspname, const char *owner,
+						 const char *acls, const char *racls,
+						 const char *initacls, const char *initracls);
 
 static void getDependencies(Archive *fout);
 static void BuildArchiveDependencies(Archive *fout);
@@ -3468,11 +3483,44 @@ dumpBlob(Archive *fout, const BlobInfo *binfo)
 {
 	PQExpBuffer cquery = createPQExpBuffer();
 	PQExpBuffer dquery = createPQExpBuffer();
+	PQExpBuffer tag    = createPQExpBuffer();
+	teSection	section = SECTION_PRE_DATA;
 
 	appendPQExpBuffer(cquery,
 					  "SELECT pg_catalog.lo_create('%s');\n",
 					  binfo->dobj.name);
 
+	/*
+	 * In binary upgrade mode we put all the queries to restore
+	 * one large object into a single TOC entry and emit it as
+	 * SECTION_DATA so that they can be restored in parallel.
+	 */
+	if (fout->dopt->binary_upgrade)
+	{
+		section = SECTION_DATA;
+
+		/* Dump comment if any */
+		if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
+			dumpCommentQuery(fout, cquery, tag, "LARGE OBJECT",
+							 binfo->dobj.name, NULL, binfo->rolname,
+							 binfo->dobj.catId, 0, binfo->dobj.dumpId);
+
+		/* Dump security label if any */
+		if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
+			dumpSecLabelQuery(fout, cquery, tag, "LARGE OBJECT",
+							  binfo->dobj.name,
+							  NULL, binfo->rolname,
+							  binfo->dobj.catId, 0, binfo->dobj.dumpId);
+
+		/* Dump ACL if any */
+		if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
+			dumpACLQuery(fout, cquery, tag,
+						 binfo->dobj.dumpId, InvalidDumpId, "LARGE OBJECT",
+						 binfo->dobj.name, NULL,
+						 NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
+						 binfo->initblobacl, binfo->initrblobacl);
+	}
+
 	appendPQExpBuffer(dquery,
 					  "SELECT pg_catalog.lo_unlink('%s');\n",
 					  binfo->dobj.name);
@@ -3482,28 +3530,31 @@ dumpBlob(Archive *fout, const BlobInfo *binfo)
 					 ARCHIVE_OPTS(.tag = binfo->dobj.name,
 								  .owner = binfo->rolname,
 								  .description = "BLOB",
-								  .section = SECTION_PRE_DATA,
+								  .section = section,
 								  .createStmt = cquery->data,
 								  .dropStmt = dquery->data));
 
-	/* Dump comment if any */
-	if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
-		dumpComment(fout, "LARGE OBJECT", binfo->dobj.name,
-					NULL, binfo->rolname,
-					binfo->dobj.catId, 0, binfo->dobj.dumpId);
-
-	/* Dump security label if any */
-	if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
-		dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name,
-					 NULL, binfo->rolname,
-					 binfo->dobj.catId, 0, binfo->dobj.dumpId);
-
-	/* Dump ACL if any */
-	if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
-		dumpACL(fout, binfo->dobj.dumpId, InvalidDumpId, "LARGE OBJECT",
-				binfo->dobj.name, NULL,
-				NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
-				binfo->initblobacl, binfo->initrblobacl);
+	if (!fout->dopt->binary_upgrade)
+	{
+		/* Dump comment if any */
+		if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
+			dumpComment(fout, "LARGE OBJECT", binfo->dobj.name,
+						NULL, binfo->rolname,
+						binfo->dobj.catId, 0, binfo->dobj.dumpId);
+
+		/* Dump security label if any */
+		if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
+			dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name,
+						 NULL, binfo->rolname,
+						 binfo->dobj.catId, 0, binfo->dobj.dumpId);
+
+		/* Dump ACL if any */
+		if (binfo->blobacl && (binfo->dobj.dump & DUMP_COMPONENT_ACL))
+			dumpACL(fout, binfo->dobj.dumpId, InvalidDumpId, "LARGE OBJECT",
+					binfo->dobj.name, NULL,
+					NULL, binfo->rolname, binfo->blobacl, binfo->rblobacl,
+					binfo->initblobacl, binfo->initrblobacl);
+	}
 
 	destroyPQExpBuffer(cquery);
 	destroyPQExpBuffer(dquery);
@@ -9868,25 +9919,56 @@ dumpComment(Archive *fout, const char *type, const char *name,
 			const char *namespace, const char *owner,
 			CatalogId catalogId, int subid, DumpId dumpId)
 {
+	PQExpBuffer query = createPQExpBuffer();
+	PQExpBuffer tag = createPQExpBuffer();
+
+	if (dumpCommentQuery(fout, query, tag, type, name, namespace, owner,
+						 catalogId, subid, dumpId))
+	{
+		/*
+		 * We mark comments as SECTION_NONE because they really belong in the
+		 * same section as their parent, whether that is pre-data or
+		 * post-data.
+		 */
+		ArchiveEntry(fout, nilCatalogId, createDumpId(),
+					 ARCHIVE_OPTS(.tag = tag->data,
+								  .namespace = namespace,
+								  .owner = owner,
+								  .description = "COMMENT",
+								  .section = SECTION_NONE,
+								  .createStmt = query->data,
+								  .deps = &dumpId,
+								  .nDeps = 1));
+	}
+	destroyPQExpBuffer(query);
+	destroyPQExpBuffer(tag);
+}
+
+static bool
+dumpCommentQuery(Archive *fout, PQExpBuffer query, PQExpBuffer tag,
+				 const char *type, const char *name,
+				 const char *namespace, const char *owner,
+				 CatalogId catalogId, int subid, DumpId dumpId)
+{
 	DumpOptions *dopt = fout->dopt;
 	CommentItem *comments;
 	int			ncomments;
 
 	/* do nothing, if --no-comments is supplied */
 	if (dopt->no_comments)
-		return;
+		return false;
 
 	/* Comments are schema not data ... except blob comments are data */
 	if (strcmp(type, "LARGE OBJECT") != 0)
 	{
 		if (dopt->dataOnly)
-			return;
+			return false;
 	}
 	else
 	{
 		/* We do dump blob comments in binary-upgrade mode */
 		if (dopt->schemaOnly && !dopt->binary_upgrade)
-			return;
+			return false;
 	}
 
 	/* Search for comments associated with catalogId, using table */
@@ -9905,9 +9987,6 @@ dumpComment(Archive *fout, const char *type, const char *name,
 	/* If a comment exists, build COMMENT ON statement */
 	if (ncomments > 0)
 	{
-		PQExpBuffer query = createPQExpBuffer();
-		PQExpBuffer tag = createPQExpBuffer();
-
 		appendPQExpBuffer(query, "COMMENT ON %s ", type);
 		if (namespace && *namespace)
 			appendPQExpBuffer(query, "%s.", fmtId(namespace));
@@ -9917,24 +9996,10 @@ dumpComment(Archive *fout, const char *type, const char *name,
 
 		appendPQExpBuffer(tag, "%s %s", type, name);
 
-		/*
-		 * We mark comments as SECTION_NONE because they really belong in the
-		 * same section as their parent, whether that is pre-data or
-		 * post-data.
-		 */
-		ArchiveEntry(fout, nilCatalogId, createDumpId(),
-					 ARCHIVE_OPTS(.tag = tag->data,
-								  .namespace = namespace,
-								  .owner = owner,
-								  .description = "COMMENT",
-								  .section = SECTION_NONE,
-								  .createStmt = query->data,
-								  .deps = &dumpId,
-								  .nDeps = 1));
-
-		destroyPQExpBuffer(query);
-		destroyPQExpBuffer(tag);
+		return true;
 	}
+
+	return false;
 }
 
 /*
@@ -15070,18 +15135,63 @@ dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
 		const char *initacls, const char *initracls)
 {
 	DumpId		aclDumpId = InvalidDumpId;
+	PQExpBuffer query = createPQExpBuffer();
+	PQExpBuffer tag = createPQExpBuffer();
+
+	if (dumpACLQuery(fout, query, tag, objDumpId, altDumpId,
+					 type, name, subname, nspname, owner,
+					 acls, racls, initacls, initracls))
+	{
+		DumpId		aclDeps[2];
+		int			nDeps = 0;
+
+		if (subname)
+			appendPQExpBuffer(tag, "COLUMN %s.%s", name, subname);
+		else
+			appendPQExpBuffer(tag, "%s %s", type, name);
+
+		aclDeps[nDeps++] = objDumpId;
+		if (altDumpId != InvalidDumpId)
+			aclDeps[nDeps++] = altDumpId;
+
+		aclDumpId = createDumpId();
+
+		ArchiveEntry(fout, nilCatalogId, aclDumpId,
+					 ARCHIVE_OPTS(.tag = tag->data,
+								  .namespace = nspname,
+								  .owner = owner,
+								  .description = "ACL",
+								  .section = SECTION_NONE,
+								  .createStmt = query->data,
+								  .deps = aclDeps,
+								  .nDeps = nDeps));
+
+	}
+
+	destroyPQExpBuffer(query);
+	destroyPQExpBuffer(tag);
+
+	return aclDumpId;
+}
+
+static bool
+dumpACLQuery(Archive *fout, PQExpBuffer query, PQExpBuffer tag,
+			 DumpId objDumpId, DumpId altDumpId,
+			 const char *type, const char *name, const char *subname,
+			 const char *nspname, const char *owner,
+			 const char *acls, const char *racls,
+			 const char *initacls, const char *initracls)
+{
 	DumpOptions *dopt = fout->dopt;
-	PQExpBuffer sql;
+	bool		haveACL = false;
 
 	/* Do nothing if ACL dump is not enabled */
 	if (dopt->aclsSkip)
-		return InvalidDumpId;
+		return false;
 
 	/* --data-only skips ACLs *except* BLOB ACLs */
 	if (dopt->dataOnly && strcmp(type, "LARGE OBJECT") != 0)
-		return InvalidDumpId;
-
-	sql = createPQExpBuffer();
+		return false;
 
 	/*
 	 * Check to see if this object has had any initial ACLs included for it.
@@ -15093,54 +15203,31 @@ dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
 	 */
 	if (strlen(initacls) != 0 || strlen(initracls) != 0)
 	{
-		appendPQExpBufferStr(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\n");
+		haveACL = true;
+		appendPQExpBufferStr(query, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\n");
 		if (!buildACLCommands(name, subname, nspname, type,
 							  initacls, initracls, owner,
-							  "", fout->remoteVersion, sql))
+							  "", fout->remoteVersion, query))
 			fatal("could not parse initial GRANT ACL list (%s) or initial REVOKE ACL list (%s) for object \"%s\" (%s)",
 				  initacls, initracls, name, type);
-		appendPQExpBufferStr(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\n");
+		appendPQExpBufferStr(query, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\n");
 	}
 
 	if (!buildACLCommands(name, subname, nspname, type,
 						  acls, racls, owner,
-						  "", fout->remoteVersion, sql))
+						  "", fout->remoteVersion, query))
 		fatal("could not parse GRANT ACL list (%s) or REVOKE ACL list (%s) for object \"%s\" (%s)",
 			  acls, racls, name, type);
 
-	if (sql->len > 0)
+	if (haveACL && tag != NULL)
 	{
-		PQExpBuffer tag = createPQExpBuffer();
-		DumpId		aclDeps[2];
-		int			nDeps = 0;
-
 		if (subname)
 			appendPQExpBuffer(tag, "COLUMN %s.%s", name, subname);
 		else
 			appendPQExpBuffer(tag, "%s %s", type, name);
-
-		aclDeps[nDeps++] = objDumpId;
-		if (altDumpId != InvalidDumpId)
-			aclDeps[nDeps++] = altDumpId;
-
-		aclDumpId = createDumpId();
-
-		ArchiveEntry(fout, nilCatalogId, aclDumpId,
-					 ARCHIVE_OPTS(.tag = tag->data,
-								  .namespace = nspname,
-								  .owner = owner,
-								  .description = "ACL",
-								  .section = SECTION_NONE,
-								  .createStmt = sql->data,
-								  .deps = aclDeps,
-								  .nDeps = nDeps));
-
-		destroyPQExpBuffer(tag);
 	}
 
-	destroyPQExpBuffer(sql);
-
-	return aclDumpId;
+	return haveACL;
 }
 
 /*
@@ -15166,34 +15253,58 @@ dumpSecLabel(Archive *fout, const char *type, const char *name,
 			 const char *namespace, const char *owner,
 			 CatalogId catalogId, int subid, DumpId dumpId)
 {
+	PQExpBuffer query = createPQExpBuffer();
+	PQExpBuffer tag = createPQExpBuffer();
+
+	if (dumpSecLabelQuery(fout, query, tag, type, name,
+						  namespace, owner, catalogId, subid, dumpId))
+	{
+		ArchiveEntry(fout, nilCatalogId, createDumpId(),
+					 ARCHIVE_OPTS(.tag = tag->data,
+								  .namespace = namespace,
+								  .owner = owner,
+								  .description = "SECURITY LABEL",
+								  .section = SECTION_NONE,
+								  .createStmt = query->data,
+								  .deps = &dumpId,
+								  .nDeps = 1));
+	}
+
+	destroyPQExpBuffer(query);
+	destroyPQExpBuffer(tag);
+}
+
+static bool
+dumpSecLabelQuery(Archive *fout, PQExpBuffer query, PQExpBuffer tag,
+				  const char *type, const char *name,
+				  const char *namespace, const char *owner,
+				  CatalogId catalogId, int subid, DumpId dumpId)
+{
 	DumpOptions *dopt = fout->dopt;
 	SecLabelItem *labels;
 	int			nlabels;
 	int			i;
-	PQExpBuffer query;
 
 	/* do nothing, if --no-security-labels is supplied */
 	if (dopt->no_security_labels)
-		return;
+		return false;
 
 	/* Security labels are schema not data ... except blob labels are data */
 	if (strcmp(type, "LARGE OBJECT") != 0)
 	{
 		if (dopt->dataOnly)
-			return;
+			return false;
 	}
 	else
 	{
 		/* We do dump blob security labels in binary-upgrade mode */
 		if (dopt->schemaOnly && !dopt->binary_upgrade)
-			return;
+			return false;
 	}
 
 	/* Search for security labels associated with catalogId, using table */
 	nlabels = findSecLabels(fout, catalogId.tableoid, catalogId.oid, &labels);
 
-	query = createPQExpBuffer();
-
 	for (i = 0; i < nlabels; i++)
 	{
 		/*
@@ -15214,22 +15325,11 @@ dumpSecLabel(Archive *fout, const char *type, const char *name,
 
 	if (query->len > 0)
 	{
-		PQExpBuffer tag = createPQExpBuffer();
-
 		appendPQExpBuffer(tag, "%s %s", type, name);
-		ArchiveEntry(fout, nilCatalogId, createDumpId(),
-					 ARCHIVE_OPTS(.tag = tag->data,
-								  .namespace = namespace,
-								  .owner = owner,
-								  .description = "SECURITY LABEL",
-								  .section = SECTION_NONE,
-								  .createStmt = query->data,
-								  .deps = &dumpId,
-								  .nDeps = 1));
-		destroyPQExpBuffer(tag);
+		return true;
 	}
 
-	destroyPQExpBuffer(query);
+	return false;
 }
 
 /*
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c
index 589b4ae..b16db03 100644
--- a/src/bin/pg_dump/pg_restore.c
+++ b/src/bin/pg_dump/pg_restore.c
@@ -59,6 +59,7 @@ main(int argc, char **argv)
 	int			c;
 	int			exit_code;
 	int			numWorkers = 1;
+	int			blobBatchSize = 0;
 	Archive    *AH;
 	char	   *inputFileSpec;
 	static int	disable_triggers = 0;
@@ -120,6 +121,7 @@ main(int argc, char **argv)
 		{"no-publications", no_argument, &no_publications, 1},
 		{"no-security-labels", no_argument, &no_security_labels, 1},
 		{"no-subscriptions", no_argument, &no_subscriptions, 1},
+		{"restore-blob-batch-size", required_argument, NULL, 4},
 
 		{NULL, 0, NULL, 0}
 	};
@@ -280,6 +282,10 @@ main(int argc, char **argv)
 				set_dump_section(optarg, &(opts->dumpSections));
 				break;
 
+			case 4:				/* # of blobs to restore per transaction */
+				blobBatchSize = atoi(optarg);
+				break;
+
 			default:
 				fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
 				exit_nicely(1);
@@ -434,6 +440,7 @@ main(int argc, char **argv)
 		SortTocFromFile(AH);
 
 	AH->numWorkers = numWorkers;
+	AH->blobBatchSize = blobBatchSize;
 
 	if (opts->tocSummary)
 		PrintTOCSummary(AH);
@@ -506,6 +513,8 @@ usage(const char *progname)
 	printf(_("  --use-set-session-authorization\n"
 			 "                               use SET SESSION AUTHORIZATION commands instead of\n"
 			 "                               ALTER OWNER commands to set ownership\n"));
+	printf(_("  --restore-blob-batch-size=NUM\n"
+			 "                               attempt to restore NUM large objects per transaction\n"));
 
 	printf(_("\nConnection options:\n"));
 	printf(_("  -h, --host=HOSTNAME      database server host or socket directory\n"));
diff --git a/src/bin/pg_upgrade/dump.c b/src/bin/pg_upgrade/dump.c
index 33d9591..183bb6d 100644
--- a/src/bin/pg_upgrade/dump.c
+++ b/src/bin/pg_upgrade/dump.c
@@ -52,8 +52,11 @@ generate_old_dump(void)
 
 		parallel_exec_prog(log_file_name, NULL,
 						   "\"%s/pg_dump\" %s --schema-only --quote-all-identifiers "
+						   "%s "
 						   "--binary-upgrade --format=custom %s %s --file=\"%s\" %s",
 						   new_cluster.bindir, cluster_conn_opts(&old_cluster),
+						   user_opts.pg_dump_opts ?
+								user_opts.pg_dump_opts : "",
 						   log_opts.verbose ? "--verbose" : "",
 						   user_opts.ind_coll_unknown ?
 						   "--index-collation-versions-unknown" : "",
diff --git a/src/bin/pg_upgrade/option.c b/src/bin/pg_upgrade/option.c
index 9c9b313..d0efb9f 100644
--- a/src/bin/pg_upgrade/option.c
+++ b/src/bin/pg_upgrade/option.c
@@ -57,6 +57,8 @@ parseCommandLine(int argc, char *argv[])
 		{"verbose", no_argument, NULL, 'v'},
 		{"clone", no_argument, NULL, 1},
 		{"index-collation-versions-unknown", no_argument, NULL, 2},
+		{"dump-options", required_argument, NULL, 3},
+		{"restore-options", required_argument, NULL, 4},
 
 		{NULL, 0, NULL, 0}
 	};
@@ -208,6 +210,34 @@ parseCommandLine(int argc, char *argv[])
 				user_opts.ind_coll_unknown = true;
 				break;
 
+			case 3:
+				/* append option? */
+				if (!user_opts.pg_dump_opts)
+					user_opts.pg_dump_opts = pg_strdup(optarg);
+				else
+				{
+					char	   *old_opts = user_opts.pg_dump_opts;
+
+					user_opts.pg_dump_opts = psprintf("%s %s",
+													  old_opts, optarg);
+					free(old_opts);
+				}
+				break;
+
+			case 4:
+				/* append option? */
+				if (!user_opts.pg_restore_opts)
+					user_opts.pg_restore_opts = pg_strdup(optarg);
+				else
+				{
+					char	   *old_opts = user_opts.pg_restore_opts;
+
+					user_opts.pg_restore_opts = psprintf("%s %s",
+														 old_opts, optarg);
+					free(old_opts);
+				}
+				break;
+
 			default:
 				fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
 						os_info.progname);
@@ -314,6 +344,8 @@ usage(void)
 	printf(_("  --clone                       clone instead of copying files to new cluster\n"));
 	printf(_("  --index-collation-versions-unknown\n"));
 	printf(_("                                mark text indexes as needing to be rebuilt\n"));
+	printf(_("  --dump-options=OPTIONS        options to pass to pg_dump\n"));
+	printf(_("  --restore-options=OPTIONS     options to pass to pg_restore\n"));
 	printf(_("  -?, --help                    show this help, then exit\n"));
 	printf(_("\n"
 			 "Before running pg_upgrade you must:\n"
diff --git a/src/bin/pg_upgrade/pg_upgrade.c b/src/bin/pg_upgrade/pg_upgrade.c
index e23b8ca..6f6b12d 100644
--- a/src/bin/pg_upgrade/pg_upgrade.c
+++ b/src/bin/pg_upgrade/pg_upgrade.c
@@ -348,10 +348,13 @@ create_new_objects(void)
 				  true,
 				  true,
 				  "\"%s/pg_restore\" %s %s --exit-on-error --verbose "
+				  "%s "
 				  "--dbname postgres \"%s\"",
 				  new_cluster.bindir,
 				  cluster_conn_opts(&new_cluster),
 				  create_opts,
+				  user_opts.pg_restore_opts ?
+						user_opts.pg_restore_opts : "",
 				  sql_file_name);
 
 		break;					/* done once we've processed template1 */
@@ -385,10 +388,13 @@ create_new_objects(void)
 		parallel_exec_prog(log_file_name,
 						   NULL,
 						   "\"%s/pg_restore\" %s %s --exit-on-error --verbose "
+						   "%s "
 						   "--dbname template1 \"%s\"",
 						   new_cluster.bindir,
 						   cluster_conn_opts(&new_cluster),
 						   create_opts,
+						   user_opts.pg_restore_opts ?
+								user_opts.pg_restore_opts : "",
 						   sql_file_name);
 	}
 
diff --git a/src/bin/pg_upgrade/pg_upgrade.h b/src/bin/pg_upgrade/pg_upgrade.h
index 919a784..4b7959e 100644
--- a/src/bin/pg_upgrade/pg_upgrade.h
+++ b/src/bin/pg_upgrade/pg_upgrade.h
@@ -293,6 +293,8 @@ typedef struct
 	int			jobs;			/* number of processes/threads to use */
 	char	   *socketdir;		/* directory to use for Unix sockets */
 	bool		ind_coll_unknown;	/* mark unknown index collation versions */
+	char	   *pg_dump_opts;	/* options to pass to pg_dump */
+	char	   *pg_restore_opts;	/* options to pass to pg_restore */
 } UserOpts;
 
 typedef struct

Reply via email to