On Thu, Nov 28, 2013 at 05:38:05PM -0500, Bruce Momjian wrote:
> > I wonder if we ought to mark each page as all-visible in
> > raw_heap_insert() when we first initialize it, and then clear the flag
> > when we come across a tuple that isn't all-visible.  We could try to
> > set hint bits on the tuple before placing it on the page, too, though
> > I'm not sure of the details.
> 
> I went with the per-page approach because I wanted to re-use the vacuum
> lazy function.  Is there some other code that does this already?  I am
> trying to avoid yet-another set of routines that would need to be
> maintained or could be buggy.  This hint bit setting is tricky.
> 
> And thanks much for the review!
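
For comparison, here is a rough, untested sketch of the alternative quoted
above -- start each new page with PD_ALL_VISIBLE set and clear it as soon as
a tuple that isn't known-visible lands on the page.  The function name is
made up and the hint-bit test is a simplification, so treat it as an
illustration of the shape of that approach, not something I am proposing in
place of the attached patch:

    /*
     * Sketch only, not part of the attached patch.  Imagined to be called
     * from raw_heap_insert() right after a tuple has been copied onto the
     * new page, with the page having been marked all-visible right after
     * PageInit().
     */
    #include "postgres.h"
    #include "access/htup_details.h"
    #include "storage/bufpage.h"

    static void
    clear_all_visible_if_needed(Page page, HeapTuple tup)
    {
        HeapTupleHeader th = tup->t_data;

        /*
         * If the tuple's xmin is not hinted committed, or its xmax is not
         * hinted invalid, we can no longer claim the whole page is visible
         * to everyone, so clear the flag we set optimistically at page
         * initialization time.
         */
        if (!(th->t_infomask & HEAP_XMIN_COMMITTED) ||
            !(th->t_infomask & HEAP_XMAX_INVALID))
            PageClearAllVisible(page);
    }

As quoted above, that would also require setting hint bits on each tuple
before it goes onto the page, which is the part I find tricky; reusing
heap_page_is_all_visible() avoids maintaining a second copy of that
visibility logic.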
So, should I put this in the next commit fest?  I still have an open question
about the block number to use here:

! 		/* XXX use 0 or real offset? */
! 		ItemPointerSet(&(tuple.t_self), BufferIsValid(buf) ?
! 					   BufferGetBlockNumber(buf) : 0, offnum);

Is everyone else OK with this approach?  Updated patch attached.

-- 
  Bruce Momjian  <br...@momjian.us>        http://momjian.us
  EnterpriseDB                             http://enterprisedb.com

  + Everyone has their own god. +
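
To be concrete about that XXX before the patch below: when
heap_page_is_all_visible() is called from the rewrite path there is no pinned
buffer, so the code currently falls back to block 0 for tuple.t_self.  One
possible answer is to pass the real block number down from update_page_vm(),
which already knows it as its blkno argument.  A hypothetical helper, only to
show the shape -- set_tuple_self() and fallback_blkno are made-up names, not
part of the patch:

    #include "postgres.h"
    #include "storage/bufmgr.h"
    #include "storage/itemptr.h"

    /*
     * Hypothetical helper, not in the attached patch: choose the block
     * number for tuple.t_self.  With a valid buffer, use its block number;
     * without one, use a caller-supplied block number rather than
     * hard-coding 0.
     */
    static void
    set_tuple_self(ItemPointer tid, Buffer buf,
                   BlockNumber fallback_blkno, OffsetNumber offnum)
    {
        ItemPointerSet(tid,
                       BufferIsValid(buf) ? BufferGetBlockNumber(buf) :
                       fallback_blkno,
                       offnum);
    }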
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
new file mode 100644
index 951894c..44ae5d8
*** a/src/backend/access/heap/rewriteheap.c
--- b/src/backend/access/heap/rewriteheap.c
***************
*** 107,112 ****
--- 107,114 ----
  #include "access/rewriteheap.h"
  #include "access/transam.h"
  #include "access/tuptoaster.h"
+ #include "access/visibilitymap.h"
+ #include "commands/vacuum.h"
  #include "storage/bufmgr.h"
  #include "storage/smgr.h"
  #include "utils/memutils.h"
*************** typedef OldToNewMappingData *OldToNewMap
*** 172,177 ****
--- 174,180 ----
  
  /* prototypes for internal functions */
  static void raw_heap_insert(RewriteState state, HeapTuple tup);
+ static void update_page_vm(Relation relation, Page page, BlockNumber blkno);
  
  
  /*
*************** end_heap_rewrite(RewriteState state)
*** 280,285 ****
--- 283,289 ----
  						state->rs_buffer);
  		RelationOpenSmgr(state->rs_new_rel);
  
+ 		update_page_vm(state->rs_new_rel, state->rs_buffer, state->rs_blockno);
  		PageSetChecksumInplace(state->rs_buffer, state->rs_blockno);
  
  		smgrextend(state->rs_new_rel->rd_smgr, MAIN_FORKNUM, state->rs_blockno,
*************** raw_heap_insert(RewriteState state, Heap
*** 632,637 ****
--- 636,642 ----
  		 */
  		RelationOpenSmgr(state->rs_new_rel);
  
+ 		update_page_vm(state->rs_new_rel, page, state->rs_blockno);
  		PageSetChecksumInplace(page, state->rs_blockno);
  
  		smgrextend(state->rs_new_rel->rd_smgr, MAIN_FORKNUM,
*************** raw_heap_insert(RewriteState state, Heap
*** 677,679 ****
--- 682,704 ----
  	if (heaptup != tup)
  		heap_freetuple(heaptup);
  }
+ 
+ static void
+ update_page_vm(Relation relation, Page page, BlockNumber blkno)
+ {
+ 	Buffer		vmbuffer = InvalidBuffer;
+ 	TransactionId visibility_cutoff_xid;
+ 
+ 	visibilitymap_pin(relation, blkno, &vmbuffer);
+ 	Assert(BufferIsValid(vmbuffer));
+ 
+ 	if (!visibilitymap_test(relation, blkno, &vmbuffer) &&
+ 		heap_page_is_all_visible(relation, InvalidBuffer, page,
+ 								 &visibility_cutoff_xid))
+ 	{
+ 		PageSetAllVisible(page);
+ 		visibilitymap_set(relation, blkno, InvalidBlockNumber,
+ 						  InvalidXLogRecPtr, vmbuffer, visibility_cutoff_xid);
+ 	}
+ 	ReleaseBuffer(vmbuffer);
+ }
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
new file mode 100644
index 7f40d89..a42511b
*** a/src/backend/access/heap/visibilitymap.c
--- b/src/backend/access/heap/visibilitymap.c
*************** visibilitymap_set(Relation rel, BlockNum
*** 278,284 ****
  		map[mapByte] |= (1 << mapBit);
  		MarkBufferDirty(vmBuf);
  
! 		if (RelationNeedsWAL(rel))
  		{
  			if (XLogRecPtrIsInvalid(recptr))
  			{
--- 278,284 ----
  		map[mapByte] |= (1 << mapBit);
  		MarkBufferDirty(vmBuf);
  
! 		if (RelationNeedsWAL(rel) && BufferIsValid(heapBuf))
  		{
  			if (XLogRecPtrIsInvalid(recptr))
  			{
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
new file mode 100644
index fe2d9e7..4f6578f
*** a/src/backend/commands/vacuumlazy.c
--- b/src/backend/commands/vacuumlazy.c
*************** static void lazy_record_dead_tuple(LVRel
*** 151,158 ****
  				  ItemPointer itemptr);
  static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
  static int	vac_cmp_itemptr(const void *left, const void *right);
- static bool heap_page_is_all_visible(Relation rel, Buffer buf,
- 						 TransactionId *visibility_cutoff_xid);
  
  
  /*
--- 151,156 ----
*************** lazy_vacuum_page(Relation onerel, BlockN
*** 1197,1203 ****
  	 * check if the page has become all-visible.
  	 */
  	if (!visibilitymap_test(onerel, blkno, vmbuffer) &&
! 		heap_page_is_all_visible(onerel, buffer, &visibility_cutoff_xid))
  	{
  		Assert(BufferIsValid(*vmbuffer));
  		PageSetAllVisible(page);
--- 1195,1201 ----
  	 * check if the page has become all-visible.
  	 */
  	if (!visibilitymap_test(onerel, blkno, vmbuffer) &&
! 		heap_page_is_all_visible(onerel, buffer, NULL, &visibility_cutoff_xid))
  	{
  		Assert(BufferIsValid(*vmbuffer));
  		PageSetAllVisible(page);
*************** vac_cmp_itemptr(const void *left, const
*** 1704,1717 ****
   * transactions. Also return the visibility_cutoff_xid which is the highest
   * xmin amongst the visible tuples.
   */
! static bool
! heap_page_is_all_visible(Relation rel, Buffer buf, TransactionId *visibility_cutoff_xid)
  {
- 	Page		page = BufferGetPage(buf);
  	OffsetNumber offnum,
  				maxoff;
  	bool		all_visible = true;
  
  	*visibility_cutoff_xid = InvalidTransactionId;
  
  	/*
--- 1702,1718 ----
   * transactions. Also return the visibility_cutoff_xid which is the highest
   * xmin amongst the visible tuples.
   */
! bool
! heap_page_is_all_visible(Relation rel, Buffer buf, Page page,
! 						 TransactionId *visibility_cutoff_xid)
  {
  	OffsetNumber offnum,
  				maxoff;
  	bool		all_visible = true;
  
+ 	if (BufferIsValid(buf))
+ 		page = BufferGetPage(buf);
+ 
  	*visibility_cutoff_xid = InvalidTransactionId;
  
  	/*
*************** heap_page_is_all_visible(Relation rel, B
*** 1732,1738 ****
  		if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
  			continue;
  
! 		ItemPointerSet(&(tuple.t_self), BufferGetBlockNumber(buf), offnum);
  
  		/*
  		 * Dead line pointers can have index pointers pointing to them. So
--- 1733,1741 ----
  		if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
  			continue;
  
! 		/* XXX use 0 or real offset? */
! 		ItemPointerSet(&(tuple.t_self), BufferIsValid(buf) ?
! 					   BufferGetBlockNumber(buf) : 0, offnum);
  
  		/*
  		 * Dead line pointers can have index pointers pointing to them. So
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
new file mode 100644
index 1ebc5ff..c53e2fb
*** a/src/backend/utils/time/tqual.c
--- b/src/backend/utils/time/tqual.c
*************** static inline void
*** 108,113 ****
--- 108,117 ----
  SetHintBits(HeapTupleHeader tuple, Buffer buffer,
  			uint16 infomask, TransactionId xid)
  {
+ 	/* we might not have a buffer if we are doing raw_heap_insert() */
+ 	if (!BufferIsValid(buffer))
+ 		return;
+ 
  	if (TransactionIdIsValid(xid))
  	{
  		/* NB: xid must be known committed here! */
diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h
new file mode 100644
index 44a3c3b..7e3ff9c
*** a/src/include/commands/vacuum.h
--- b/src/include/commands/vacuum.h
***************
*** 19,24 ****
--- 19,25 ----
  #include "catalog/pg_type.h"
  #include "nodes/parsenodes.h"
  #include "storage/buf.h"
+ #include "storage/bufpage.h"
  #include "storage/lock.h"
  #include "utils/relcache.h"
  
*************** extern void vacuum_delay_point(void);
*** 168,173 ****
--- 169,176 ----
  /* in commands/vacuumlazy.c */
  extern void lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
  				BufferAccessStrategy bstrategy);
+ extern bool heap_page_is_all_visible(Relation rel, Buffer buf, Page page,
+ 				TransactionId *visibility_cutoff_xid);
  
  /* in commands/analyze.c */
  extern void analyze_rel(Oid relid, VacuumStmt *vacstmt,