I have removed the unnecessary memsets (for structs with no padding). With
these changes, and with the two WAL-related Valgrind suppressions removed,
`make installcheck` passes cleanly under Valgrind. The second patch is a
small addition to the hash index test that exercises the "vacuum one page"
path we discussed above.
diff --git a/src/backend/access/brin/brin_pageops.c b/src/backend/access/brin/brin_pageops.c
index 7da97be..ba3b1c2 100644
--- a/src/backend/access/brin/brin_pageops.c
+++ b/src/backend/access/brin/brin_pageops.c
@@ -271,6 +271,8 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange,
XLogRecPtr recptr;
uint8 info;
+ memset(&xlrec, 0, sizeof(xlrec));
+
info = XLOG_BRIN_UPDATE | (extended ? XLOG_BRIN_INIT_PAGE : 0);
xlrec.insert.offnum = newoff;
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 3d3a9da..0021db0 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -461,6 +461,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
Buffer lbuffer = InvalidBuffer;
Page newrootpg = NULL;
+ memset(&data, 0, sizeof(data));
+
/* Get a new index page to become the right page */
rbuffer = GinNewBuffer(btree->index);
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index f50848e..d83aecb 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -230,6 +230,8 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
int cleanupSize;
bool needWal;
+ memset(&data, 0, sizeof(data));
+
if (collector->ntuples == 0)
return;
@@ -571,6 +573,8 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead,
Buffer buffers[GIN_NDELETE_AT_ONCE];
BlockNumber freespace[GIN_NDELETE_AT_ONCE];
+ memset(&data, 0, sizeof(data));
+
data.ndeleted = 0;
while (data.ndeleted < GIN_NDELETE_AT_ONCE && blknoToDelete != newHead)
{
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index ff92727..76be56e 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -650,6 +650,8 @@ ginUpdateStats(Relation index, const GinStatsData *stats, bool is_build)
XLogRecPtr recptr;
ginxlogUpdateMeta data;
+ memset(&data, 0, sizeof(data));
+
data.locator = index->rd_locator;
data.ntuples = 0;
data.newRightlink = data.prevTail = InvalidBlockNumber;
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 840543e..1c524de 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -214,6 +214,8 @@ ginDeletePostingPage(GinVacuumState *gvs, Buffer dBuffer, Buffer lBuffer,
XLogRecPtr recptr;
ginxlogDeletePage data;
+ memset(&data, 0, sizeof(data));
+
/*
* We can't pass REGBUF_STANDARD for the deleted page, because we
* didn't set pd_lower on pre-9.4 versions. The page might've been
diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c
index c783838..1028db4 100644
--- a/src/backend/access/gist/gistxlog.c
+++ b/src/backend/access/gist/gistxlog.c
@@ -501,6 +501,8 @@ gistXLogSplit(bool page_is_leaf,
XLogRecPtr recptr;
int i;
+ memset(&xlrec, 0, sizeof(xlrec));
+
for (ptr = dist; ptr; ptr = ptr->next)
npage++;
@@ -671,6 +673,8 @@ gistXLogDelete(Buffer buffer, OffsetNumber *todelete, int ntodelete,
gistxlogDelete xlrec;
XLogRecPtr recptr;
+ memset(&xlrec, 0, sizeof(xlrec));
+
xlrec.isCatalogRel = RelationIsAccessibleInLogicalDecoding(heaprel);
xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
xlrec.ntodelete = ntodelete;
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index 0cefbac..6f30bd0 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -426,6 +426,7 @@ _hash_vacuum_one_page(Relation rel, Relation hrel, Buffer metabuf, Buffer buf)
xl_hash_vacuum_one_page xlrec;
XLogRecPtr recptr;
+ memset(&xlrec, 0, sizeof(xlrec));
xlrec.isCatalogRel = RelationIsAccessibleInLogicalDecoding(hrel);
xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
xlrec.ntuples = ndeletable;
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 8f1c11a..b11dba8 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -2428,6 +2428,8 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
int npages = 0;
int npages_used = 0;
+ memset(&scratch, 0, sizeof(scratch));
+
/* currently not needed (thus unsupported) for heap_multi_insert() */
Assert(!(options & HEAP_INSERT_NO_LOGICAL));
@@ -6654,6 +6656,7 @@ heap_inplace_update_and_unlock(Relation relation,
BlockNumber blkno;
XLogRecPtr recptr;
+ memset(&xlrec, 0, sizeof(xlrec));
xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
xlrec.dbId = MyDatabaseId;
xlrec.tsId = MyDatabaseTableSpace;
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 6beeb69..54b4a5a 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -2058,6 +2058,7 @@ heap_log_freeze_cmp(const void *arg1, const void *arg2)
static inline void
heap_log_freeze_new_plan(xlhp_freeze_plan *plan, HeapTupleFreeze *frz)
{
+ memset(plan, 0, sizeof(*plan));
plan->xmax = frz->xmax;
plan->t_infomask2 = frz->t_infomask2;
plan->t_infomask = frz->t_infomask;
@@ -2182,7 +2183,9 @@ log_heap_prune_and_freeze(Relation relation, Buffer buffer,
Assert((vmflags & VISIBILITYMAP_VALID_BITS) == vmflags);
- xlrec.flags = 0;
+ memset(&xlrec, 0, sizeof(xlrec));
+ memset(&freeze_plans, 0, sizeof(freeze_plans));
+
regbuf_flags_heap = REGBUF_STANDARD;
/*
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index 6b19ac3..6c94ae1 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -842,6 +842,7 @@ logical_heap_rewrite_flush_mappings(RewriteState state)
else
dboid = MyDatabaseId;
+ memset(&xlrec, 0, sizeof(xlrec));
xlrec.num_mappings = num_mappings;
xlrec.mapped_rel = RelationGetRelid(state->rs_old_rel);
xlrec.mapped_xid = src->xid;
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 91bb37d..87ac75b 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -1337,6 +1337,7 @@ _bt_insertonpg(Relation rel,
XLogRecPtr recptr;
uint16 upostingoff;
+ memset(&xlmeta, 0, sizeof(xlmeta));
xlrec.offnum = newitemoff;
XLogBeginInsert();
@@ -2602,6 +2603,7 @@ _bt_newlevel(Relation rel, Relation heaprel, Buffer lbuf, Buffer rbuf)
XLogRecPtr recptr;
xl_btree_metadata md;
+ memset(&md, 0, sizeof(md));
xlrec.rootblk = rootblknum;
xlrec.level = metad->btm_level;
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 9aa7806..1fb6ced 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -288,6 +288,7 @@ _bt_set_cleanup_info(Relation rel, BlockNumber num_delpages)
xl_btree_metadata md;
XLogRecPtr recptr;
+ memset(&md, 0, sizeof(md));
XLogBeginInsert();
XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
@@ -476,6 +477,7 @@ _bt_getroot(Relation rel, Relation heaprel, int access)
XLogRecPtr recptr;
xl_btree_metadata md;
+ memset(&md, 0, sizeof(md));
XLogBeginInsert();
XLogRegisterBuffer(0, rootbuf, REGBUF_WILL_INIT);
XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
@@ -2255,6 +2257,7 @@ _bt_mark_page_halfdead(Relation rel, Relation heaprel, Buffer leafbuf,
xl_btree_mark_page_halfdead xlrec;
XLogRecPtr recptr;
+ memset(&xlrec, 0, sizeof(xlrec));
xlrec.poffset = poffset;
xlrec.leafblk = leafblkno;
if (topparent != leafblkno)
@@ -2678,6 +2681,8 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, BlockNumber scanblkno,
uint8 xlinfo;
XLogRecPtr recptr;
+ memset(&xlrec, 0, sizeof(xlrec));
+ memset(&xlmeta, 0, sizeof(xlmeta));
XLogBeginInsert();
XLogRegisterBuffer(0, buf, REGBUF_WILL_INIT);
diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c
index 7c7371c..4cc4768 100644
--- a/src/backend/access/spgist/spgdoinsert.c
+++ b/src/backend/access/spgist/spgdoinsert.c
@@ -403,6 +403,8 @@ moveLeafs(Relation index, SpGistState *state,
char *leafdata,
*leafptr;
+ memset(&xlrec, 0, sizeof(xlrec));
+
/* This doesn't work on root page */
Assert(parent->buffer != InvalidBuffer);
Assert(parent->buffer != current->buffer);
@@ -710,6 +712,8 @@ doPickSplit(Relation index, SpGistState *state,
nToInsert,
maxToInclude;
+ memset(&xlrec, 0, sizeof(xlrec));
+
in.level = level;
/*
@@ -1514,6 +1518,8 @@ spgAddNodeAction(Relation index, SpGistState *state,
SpGistInnerTuple newInnerTuple;
spgxlogAddNode xlrec;
+ memset(&xlrec, 0, sizeof(xlrec));
+
/* Should not be applied to nulls */
Assert(!SpGistPageStoresNulls(current->page));
diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c
index 6b7117b..9e6acb8 100644
--- a/src/backend/access/spgist/spgvacuum.c
+++ b/src/backend/access/spgist/spgvacuum.c
@@ -140,6 +140,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
OffsetNumber i,
max = PageGetMaxOffsetNumber(page);
+ memset(&xlrec, 0, sizeof(xlrec));
memset(predecessor, 0, sizeof(predecessor));
memset(deletable, 0, sizeof(deletable));
nDeletable = 0;
@@ -414,6 +415,8 @@ vacuumLeafRoot(spgBulkDeleteState *bds, Relation index, Buffer buffer)
OffsetNumber i,
max = PageGetMaxOffsetNumber(page);
+ memset(&xlrec, 0, sizeof(xlrec));
+
xlrec.nDelete = 0;
/* Scan page, identify tuples to delete, accumulate stats */
@@ -505,6 +508,8 @@ vacuumRedirectAndPlaceholder(Relation index, Relation heaprel, Buffer buffer)
spgxlogVacuumRedirect xlrec;
GlobalVisState *vistest;
+ memset(&xlrec, 0, sizeof(xlrec));
+
xlrec.isCatalogRel = RelationIsAccessibleInLogicalDecoding(heaprel);
xlrec.nToPlaceholder = 0;
xlrec.snapshotConflictHorizon = InvalidTransactionId;
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 55b9f38..4406e34 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -1040,6 +1040,9 @@ save_state_data(const void *data, uint32 len)
}
memcpy(records.tail->data + records.tail->len, data, len);
+ /* Zero MAXALIGN padding to avoid uninitialized bytes in WAL */
+ if (padlen > len)
+ memset(records.tail->data + records.tail->len + len, 0, padlen - len);
records.tail->len += padlen;
records.bytes_free -= padlen;
records.total_len += padlen;
@@ -1077,6 +1080,7 @@ StartPrepare(GlobalTransaction gxact)
records.total_len = 0;
/* Create header */
+ memset(&hdr, 0, sizeof(hdr));
hdr.magic = TWOPHASE_MAGIC;
hdr.total_len = 0; /* EndPrepare will fill this in */
hdr.xid = xid;
@@ -1275,6 +1279,7 @@ RegisterTwoPhaseRecord(TwoPhaseRmgrId rmid, uint16 info,
{
TwoPhaseRecordOnDisk record;
+ memset(&record, 0, sizeof(record));
record.rmid = rmid;
record.info = info;
record.len = len;
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index b9b678f..f5b3616 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -5149,6 +5149,7 @@ BootStrapXLOG(uint32 data_checksum_version)
* segment with logid=0 logseg=1. The very first WAL segment, 0/0, is not
* used, so that we can use 0/0 to mean "before any valid WAL segment".
*/
+ memset(&checkPoint, 0, sizeof(checkPoint));
checkPoint.redo = wal_segment_size + SizeOfXLogLongPHD;
checkPoint.ThisTimeLineID = BootstrapTimeLineID;
checkPoint.PrevTimeLineID = BootstrapTimeLineID;
@@ -8252,6 +8253,7 @@ XLogReportParameters(void)
xl_parameter_change xlrec;
XLogRecPtr recptr;
+ memset(&xlrec, 0, sizeof(xlrec));
xlrec.MaxConnections = MaxConnections;
xlrec.max_worker_processes = max_worker_processes;
xlrec.max_wal_senders = max_wal_senders;
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index cd6d720..9b5ab9c 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -2318,6 +2318,8 @@ ExecuteTruncateGuts(List *explicit_rels,
xl_heap_truncate xlrec;
int i = 0;
+ memset(&xlrec, 0, sizeof(xlrec));
+
/* should only get here if effective_wal_level is 'logical' */
Assert(XLogLogicalInfoActive());
diff --git a/src/backend/replication/logical/message.c b/src/backend/replication/logical/message.c
index 06825d6..64a41c3 100644
--- a/src/backend/replication/logical/message.c
+++ b/src/backend/replication/logical/message.c
@@ -46,6 +46,8 @@ LogLogicalMessage(const char *prefix, const char *message, size_t size,
xl_logical_message xlrec;
XLogRecPtr lsn;
+ memset(&xlrec, 0, sizeof(xlrec));
+
/*
* Force xid to be allocated if we're emitting a transactional message.
*/
diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c
index 26afd8f..df971f5 100644
--- a/src/backend/replication/logical/origin.c
+++ b/src/backend/replication/logical/origin.c
@@ -1011,6 +1011,7 @@ replorigin_advance(ReplOriginId node,
{
xl_replorigin_set xlrec;
+ memset(&xlrec, 0, sizeof(xlrec));
xlrec.remote_lsn = remote_commit;
xlrec.node_id = node;
xlrec.force = go_backward;
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index f3ad90c..4ae5be2 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -1357,6 +1357,8 @@ LogCurrentRunningXacts(RunningTransactions CurrRunningXacts)
xl_running_xacts xlrec;
XLogRecPtr recptr;
+ memset(&xlrec, 0, sizeof(xlrec));
+
xlrec.xcnt = CurrRunningXacts->xcnt;
xlrec.subxcnt = CurrRunningXacts->subxcnt;
xlrec.subxid_overflow = (CurrRunningXacts->subxid_status != SUBXIDS_IN_ARRAY);
diff --git a/src/backend/utils/activity/pgstat_relation.c b/src/backend/utils/activity/pgstat_relation.c
index bc8c43b..25d329e 100644
--- a/src/backend/utils/activity/pgstat_relation.c
+++ b/src/backend/utils/activity/pgstat_relation.c
@@ -695,6 +695,7 @@ AtPrepare_PgStat_Relations(PgStat_SubXactStatus *xact_state)
PgStat_TableStatus *tabstat PG_USED_FOR_ASSERTS_ONLY;
TwoPhasePgStatRecord record;
+ memset(&record, 0, sizeof(record));
Assert(trans->nest_level == 1);
Assert(trans->upper == NULL);
tabstat = trans->parent;
diff --git a/src/tools/valgrind.supp b/src/tools/valgrind.supp
index d56794b..f2ab0c8 100644
--- a/src/tools/valgrind.supp
+++ b/src/tools/valgrind.supp
@@ -23,23 +23,6 @@
fun:pgstat_write_statsfiles
}
-{
- padding_XLogRecData_CRC
- Memcheck:Value8
-
- fun:pg_comp_crc32c*
- fun:XLogRecordAssemble
-}
-
-{
- padding_XLogRecData_write
- Memcheck:Param
- pwrite64(buf)
-
- ...
- fun:XLogWrite
-}
-
{
padding_relcache
Memcheck:Param
diff --git a/src/test/regress/expected/hash_index.out b/src/test/regress/expected/hash_index.out
index 0403540..258d501 100644
--- a/src/test/regress/expected/hash_index.out
+++ b/src/test/regress/expected/hash_index.out
@@ -333,6 +333,25 @@ ROLLBACK;
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 50) as i;
CHECKPOINT;
VACUUM hash_cleanup_heap;
+-- Test insert-driven cleanup of dead index tuples (_hash_vacuum_one_page).
+TRUNCATE hash_cleanup_heap;
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1000) as i;
+DELETE FROM hash_cleanup_heap
+ WHERE ctid IN ('(0,5)','(0,10)','(0,15)','(0,20)','(0,25)',
+ '(0,30)','(0,35)','(0,40)','(0,45)','(0,50)');
+SET enable_seqscan = off;
+SET enable_bitmapscan = off;
+SELECT count(*) FROM hash_cleanup_heap WHERE keycol = 1;
+ count
+-------
+ 990
+(1 row)
+
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 200) as i;
+RESET enable_seqscan;
+RESET enable_bitmapscan;
-- Clean up.
DROP TABLE hash_cleanup_heap;
-- Index on temp table.
diff --git a/src/test/regress/sql/hash_index.sql b/src/test/regress/sql/hash_index.sql
index 60571f6..1c85d98 100644
--- a/src/test/regress/sql/hash_index.sql
+++ b/src/test/regress/sql/hash_index.sql
@@ -314,6 +314,21 @@ INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 50) as i;
CHECKPOINT;
VACUUM hash_cleanup_heap;
+-- Test insert-driven cleanup of dead index tuples (_hash_vacuum_one_page).
+TRUNCATE hash_cleanup_heap;
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1000) as i;
+DELETE FROM hash_cleanup_heap
+ WHERE ctid IN ('(0,5)','(0,10)','(0,15)','(0,20)','(0,25)',
+ '(0,30)','(0,35)','(0,40)','(0,45)','(0,50)');
+SET enable_seqscan = off;
+SET enable_bitmapscan = off;
+SELECT count(*) FROM hash_cleanup_heap WHERE keycol = 1;
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 200) as i;
+RESET enable_seqscan;
+RESET enable_bitmapscan;
+
-- Clean up.
DROP TABLE hash_cleanup_heap;