I overflowed my homedir while testing with pg_restore, and got:
|pg_restore: error: could not write to large object (result:
18446744073709551615, expected: 30)
src/bin/pg_dump/pg_backup_archiver.c
if (res != AH->lo_buf_used)
fatal("could not write to large object (result: %lu, expected:
%lu)",
(unsigned long) res, (unsigned long) AH->lo_buf_used);
; 18446744073709551615 - 1<<64
-1
I guess casting to unsigned long was the best option c. 2002 (commit 6faf8024f)
but I gather the modern way is with %zd.
I confirmed this fixes the message.
|pg_restore: error: could not write to large object (result: -1, expected:
16384)
--
Justin
>From 38d1f4ca314b9381a8fe5cbf90d4bc9b390b2fca Mon Sep 17 00:00:00 2001
From: Justin Pryzby <[email protected]>
Date: Sat, 17 Oct 2020 19:28:25 -0500
Subject: [PATCH v1] print size_t with %zd rather than casting to %lu
See also:
6faf8024facacd9cc30ce37b7ec9abb75238e0fd
be11f8400d7d99e8ae6602f3175e04b4f0c99376
---
src/bin/pg_dump/pg_backup_archiver.c | 18 +++++++++---------
src/bin/pg_dump/pg_backup_tar.c | 6 +++---
2 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index d61b290d2a..86dc355c9b 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -1640,13 +1640,13 @@ dump_lo_buf(ArchiveHandle *AH)
size_t res;
res = lo_write(AH->connection, AH->loFd, AH->lo_buf, AH->lo_buf_used);
- pg_log_debug(ngettext("wrote %lu byte of large object data (result = %lu)",
- "wrote %lu bytes of large object data (result = %lu)",
+ pg_log_debug(ngettext("wrote %zd byte of large object data (result = %zd)",
+ "wrote %zd bytes of large object data (result = %zd)",
AH->lo_buf_used),
- (unsigned long) AH->lo_buf_used, (unsigned long) res);
+ AH->lo_buf_used, res);
if (res != AH->lo_buf_used)
- fatal("could not write to large object (result: %lu, expected: %lu)",
- (unsigned long) res, (unsigned long) AH->lo_buf_used);
+ fatal("could not write to large object (result: %zd, expected: %zd)",
+ res, AH->lo_buf_used);
}
else
{
@@ -2130,8 +2130,8 @@ _discoverArchiveFormat(ArchiveHandle *AH)
if (ferror(fh))
fatal("could not read input file: %m");
else
- fatal("input file is too short (read %lu, expected 5)",
- (unsigned long) cnt);
+ fatal("input file is too short (read %zd, expected 5)",
+ cnt);
}
/* Save it, just in case we need it later */
@@ -3794,8 +3794,8 @@ ReadHead(ArchiveHandle *AH)
AH->intSize = AH->ReadBytePtr(AH);
if (AH->intSize > 32)
- fatal("sanity check on integer size (%lu) failed",
- (unsigned long) AH->intSize);
+ fatal("sanity check on integer size (%zd) failed",
+ AH->intSize);
if (AH->intSize > sizeof(int))
pg_log_warning("archive was made on a machine with larger integers, some operations might fail");
diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c
index 54e708875c..1751e12929 100644
--- a/src/bin/pg_dump/pg_backup_tar.c
+++ b/src/bin/pg_dump/pg_backup_tar.c
@@ -1233,10 +1233,10 @@ _tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th)
return 0;
if (len != TAR_BLOCK_SIZE)
- fatal(ngettext("incomplete tar header found (%lu byte)",
- "incomplete tar header found (%lu bytes)",
+ fatal(ngettext("incomplete tar header found (%zd byte)",
+ "incomplete tar header found (%zd bytes)",
len),
- (unsigned long) len);
+ len);
/* Calc checksum */
chk = tarChecksum(h);
--
2.17.0