On Wed, Feb 04, 2026 at 10:06:29AM -0600, Nathan Bossart wrote:
> IIUC your critique is that this doesn't explain the overwriting behavior
> like the older comment does.  I'll work on adding that.
> 
> [...]
> 
> I'm considering a couple of options here, but it seems like the easiest
> thing to do is to move the TRUNCATE commands to the end of the dump file.
> At least, that seems to be sufficient for our existing tests.  If that
> seems okay to you, I can work on putting together a patch.

Here is a rough first draft of a patch that does this.
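
To illustrate the intent, here is a rough sketch of the shape I have in mind (illustrative only, not taken verbatim from the patch; the helper name is invented): the TRUNCATE gets registered as a SECTION_POST_DATA archive entry, so it sorts after the large objects' COMMENT and SECURITY LABEL commands and runs just before pg_upgrade transfers the files.

    /*
     * Illustrative sketch only -- the helper name is invented.  Registering
     * the TRUNCATE as a post-data archive entry makes it sort after the
     * COMMENT/SECURITY LABEL commands for large objects.
     */
    static void
    dumpLOMetadataTruncate(Archive *fout)
    {
        PQExpBuffer q = createPQExpBuffer();

        appendPQExpBufferStr(q, "TRUNCATE pg_catalog.pg_largeobject_metadata;\n");

        ArchiveEntry(fout, nilCatalogId, createDumpId(),
                     ARCHIVE_OPTS(.tag = "pg_largeobject_metadata",
                                  .description = "pg_largeobject_metadata",
                                  .section = SECTION_POST_DATA,
                                  .createStmt = q->data));

        destroyPQExpBuffer(q);
    }

In the attached patch this effectively happens by moving the existing binary-upgrade handling for pg_largeobject and pg_largeobject_metadata out of dumpDatabase() into a new dumpLOTruncation() and switching those archive entries from SECTION_PRE_DATA to SECTION_POST_DATA.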

-- 
nathan
>From c911f5b16132514a461f72b550ee8cd86dfadc1d Mon Sep 17 00:00:00 2001
From: Nathan Bossart <[email protected]>
Date: Wed, 4 Feb 2026 14:04:18 -0600
Subject: [PATCH v1 1/1] fix pg_largeobject_metadata file transfer

---
 src/bin/pg_dump/pg_dump.c | 39 ++++++++++++++++++++++++++++-----------
 1 file changed, 28 insertions(+), 11 deletions(-)

diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 2bebefd0ba2..3ba57492fa6 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -379,6 +379,7 @@ static const char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts)
 static void getLOs(Archive *fout);
 static void dumpLO(Archive *fout, const LoInfo *loinfo);
 static int     dumpLOs(Archive *fout, const void *arg);
+static void dumpLOTruncation(Archive *fout);
 static void dumpPolicy(Archive *fout, const PolicyInfo *polinfo);
 static void dumpPublication(Archive *fout, const PublicationInfo *pubinfo);
 static void dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo);
@@ -1157,7 +1158,10 @@ main(int argc, char **argv)
                 * subsequent COMMENT and SECURITY LABEL commands work.  pg_upgrade
                 * can't copy/link the files from older versions because aclitem
                 * (needed by pg_largeobject_metadata.lomacl) changed its storage
-                * format in v16.
+                * format in v16.  At the end of the dump, we'll generate a TRUNCATE
+                * command for pg_largeobject_metadata so that its contents are
+                * cleared in preparation for the subsequent file transfer by
+                * pg_upgrade.
                 */
                if (fout->remoteVersion >= 160000)
                        lo_metadata->dataObj->filtercond = "WHERE oid IN "
@@ -1243,6 +1247,13 @@ main(int argc, char **argv)
        for (i = 0; i < numObjs; i++)
                dumpDumpableObject(fout, dobjs[i]);
 
+       /*
+        * For binary upgrades, set relfrozenxids, relminmxids, and relfilenodes
+        * of pg_largeobject and maybe pg_largeobject_metadata, and remove all
+        * their files.  We will transfer them from the old cluster as needed.
+        */
+       dumpLOTruncation(fout);
+
        /*
         * Set up options info to ensure we dump what we want.
         */
@@ -3671,6 +3682,20 @@ dumpDatabase(Archive *fout)
                                                                  .dropStmt = delQry->data,
                                                                  .deps = &dbDumpId));
 
+       PQclear(res);
+
+       free(qdatname);
+       destroyPQExpBuffer(dbQry);
+       destroyPQExpBuffer(delQry);
+       destroyPQExpBuffer(creaQry);
+       destroyPQExpBuffer(labelq);
+}
+
+static void
+dumpLOTruncation(Archive *fout)
+{
+       DumpOptions *dopt = fout->dopt;
+
        /*
         * pg_largeobject comes from the old system intact, so set its
         * relfrozenxids, relminmxids and relfilenode.
@@ -3769,14 +3794,14 @@ dumpDatabase(Archive *fout)
                ArchiveEntry(fout, nilCatalogId, createDumpId(),
                                         ARCHIVE_OPTS(.tag = "pg_largeobject",
                                                                  .description = "pg_largeobject",
-                                                                 .section = SECTION_PRE_DATA,
+                                                                 .section = SECTION_POST_DATA,
                                                                  .createStmt = loOutQry->data));
 
                if (fout->remoteVersion >= 160000)
                        ArchiveEntry(fout, nilCatalogId, createDumpId(),
                                                 ARCHIVE_OPTS(.tag = "pg_largeobject_metadata",
                                                                          .description = "pg_largeobject_metadata",
-                                                                         .section = SECTION_PRE_DATA,
+                                                                         .section = SECTION_POST_DATA,
                                                                          .createStmt = lomOutQry->data));
 
                PQclear(lo_res);
@@ -3787,14 +3812,6 @@ dumpDatabase(Archive *fout)
                destroyPQExpBuffer(loOutQry);
                destroyPQExpBuffer(lomOutQry);
        }
-
-       PQclear(res);
-
-       free(qdatname);
-       destroyPQExpBuffer(dbQry);
-       destroyPQExpBuffer(delQry);
-       destroyPQExpBuffer(creaQry);
-       destroyPQExpBuffer(labelq);
 }
 
 /*
-- 
2.50.1 (Apple Git-155)
