On 08/13/2014 04:15 PM, Heikki Linnakangas wrote:
Hmm, thinking about this some more, there is one sensible way to split
this patch: We can add the XLogReplayBuffer() function and rewrite all
the redo routines to use it, without changing any WAL record formats or
anything in the way the WAL records are constructed. In the patch,
XLogReplayBuffer() takes one input argument, the block reference ID, and
it fetches the RelFileNode and BlockNumber of the block based on that.
Without the WAL format changes, the information isn't there in the
record, but we can require the callers to pass the RelFileNode and
BlockNumber. The final patch will remove those arguments from every
caller, but that's a very mechanical change.

As in the attached patch. I only modified the heapam redo routines to
use the new XLogReplayBuffer() idiom; the idea is to do that for every
redo routine.

After applying such a patch, the main WAL format changing patch becomes
much smaller, and makes it easier to see from the redo routines where
significant changes to the WAL record formats have been made. This also
allows us to split the bikeshedding; we can discuss the name of
XLogReplayBuffer() first :-).

Here's a full version of this refactoring patch, all the rmgr's have now been updated to use XLogReplayBuffer(). I think this is a worthwhile change on its own, even if we drop the ball on the rest of the WAL format patch, because it makes the redo-routines more readable. I propose to commit this as soon as someone has reviewed it, and we agree on a name for what's currently called XLogReplayBuffer(). I have tested this with my page-image comparison tool.

- Heikki

commit 4c6c7571e91f34919aff2c2981fe474537dc557b
Author: Heikki Linnakangas <heikki.linnakan...@iki.fi>
Date:   Wed Aug 13 15:39:08 2014 +0300

    Refactor redo routines to use XLogReplayBuffer

diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c
index ffabf4e..acbb840 100644
--- a/src/backend/access/gin/ginxlog.c
+++ b/src/backend/access/gin/ginxlog.c
@@ -20,25 +20,23 @@
 static MemoryContext opCtx;		/* working memory for operations */
 
 static void
-ginRedoClearIncompleteSplit(XLogRecPtr lsn, RelFileNode node, BlockNumber blkno)
+ginRedoClearIncompleteSplit(XLogRecPtr lsn, int block_index,
+							RelFileNode node, BlockNumber blkno)
 {
 	Buffer		buffer;
 	Page		page;
 
-	buffer = XLogReadBuffer(node, blkno, false);
-	if (!BufferIsValid(buffer))
-		return;					/* page was deleted, nothing to do */
-	page = (Page) BufferGetPage(buffer);
-
-	if (lsn > PageGetLSN(page))
+	if (XLogReplayBuffer(block_index, node, blkno, &buffer) == BLK_NEEDS_REDO)
 	{
+		page = (Page) BufferGetPage(buffer);
+
 		GinPageGetOpaque(page)->flags &= ~GIN_INCOMPLETE_SPLIT;
 
 		PageSetLSN(page, lsn);
 		MarkBufferDirty(buffer);
 	}
-
-	UnlockReleaseBuffer(buffer);
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 }
 
 static void
@@ -351,26 +349,14 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
 		rightChildBlkno = BlockIdGetBlockNumber((BlockId) payload);
 		payload += sizeof(BlockIdData);
 
-		if (record->xl_info & XLR_BKP_BLOCK(0))
-			(void) RestoreBackupBlock(lsn, record, 0, false, false);
-		else
-			ginRedoClearIncompleteSplit(lsn, data->node, leftChildBlkno);
+		ginRedoClearIncompleteSplit(lsn, 0, data->node, leftChildBlkno);
 	}
 
-	/* If we have a full-page image, restore it and we're done */
-	if (record->xl_info & XLR_BKP_BLOCK(isLeaf ? 0 : 1))
+	if (XLogReplayBuffer(isLeaf ? 0 : 1,
+						 data->node, data->blkno, &buffer) == BLK_NEEDS_REDO)
 	{
-		(void) RestoreBackupBlock(lsn, record, isLeaf ? 0 : 1, false, false);
-		return;
-	}
-
-	buffer = XLogReadBuffer(data->node, data->blkno, false);
-	if (!BufferIsValid(buffer))
-		return;					/* page was deleted, nothing to do */
-	page = (Page) BufferGetPage(buffer);
+		page = BufferGetPage(buffer);
 
-	if (lsn > PageGetLSN(page))
-	{
 		/* How to insert the payload is tree-type specific */
 		if (data->flags & GIN_INSERT_ISDATA)
 		{
@@ -386,8 +372,8 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
 		PageSetLSN(page, lsn);
 		MarkBufferDirty(buffer);
 	}
-
-	UnlockReleaseBuffer(buffer);
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 }
 
 static void
@@ -476,12 +462,7 @@ ginRedoSplit(XLogRecPtr lsn, XLogRecord *record)
 	 * split
 	 */
 	if (!isLeaf)
-	{
-		if (record->xl_info & XLR_BKP_BLOCK(0))
-			(void) RestoreBackupBlock(lsn, record, 0, false, false);
-		else
-			ginRedoClearIncompleteSplit(lsn, data->node, data->leftChildBlkno);
-	}
+		ginRedoClearIncompleteSplit(lsn, 0, data->node, data->leftChildBlkno);
 
 	flags = 0;
 	if (isLeaf)
@@ -607,29 +588,19 @@ ginRedoVacuumDataLeafPage(XLogRecPtr lsn, XLogRecord *record)
 	Buffer		buffer;
 	Page		page;
 
-	/* If we have a full-page image, restore it and we're done */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
+	if (XLogReplayBuffer(0, xlrec->node, xlrec->blkno, &buffer) == BLK_NEEDS_REDO)
 	{
-		(void) RestoreBackupBlock(lsn, record, 0, false, false);
-		return;
-	}
-
-	buffer = XLogReadBuffer(xlrec->node, xlrec->blkno, false);
-	if (!BufferIsValid(buffer))
-		return;
-	page = (Page) BufferGetPage(buffer);
+		page = (Page) BufferGetPage(buffer);
 
-	Assert(GinPageIsLeaf(page));
-	Assert(GinPageIsData(page));
+		Assert(GinPageIsLeaf(page));
+		Assert(GinPageIsData(page));
 
-	if (lsn > PageGetLSN(page))
-	{
 		ginRedoRecompress(page, &xlrec->data);
 		PageSetLSN(page, lsn);
 		MarkBufferDirty(buffer);
 	}
-
-	UnlockReleaseBuffer(buffer);
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 }
 
 static void
@@ -641,62 +612,39 @@ ginRedoDeletePage(XLogRecPtr lsn, XLogRecord *record)
 	Buffer		lbuffer;
 	Page		page;
 
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-		dbuffer = RestoreBackupBlock(lsn, record, 0, false, true);
-	else
+	if (XLogReplayBuffer(0, data->node, data->blkno, &dbuffer) == BLK_NEEDS_REDO)
 	{
-		dbuffer = XLogReadBuffer(data->node, data->blkno, false);
-		if (BufferIsValid(dbuffer))
-		{
-			page = BufferGetPage(dbuffer);
-			if (lsn > PageGetLSN(page))
-			{
-				Assert(GinPageIsData(page));
-				GinPageGetOpaque(page)->flags = GIN_DELETED;
-				PageSetLSN(page, lsn);
-				MarkBufferDirty(dbuffer);
-			}
-		}
+		page = BufferGetPage(dbuffer);
+
+		Assert(GinPageIsData(page));
+		GinPageGetOpaque(page)->flags = GIN_DELETED;
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(dbuffer);
 	}
 
-	if (record->xl_info & XLR_BKP_BLOCK(1))
-		pbuffer = RestoreBackupBlock(lsn, record, 1, false, true);
-	else
+	if (XLogReplayBuffer(1, data->node, data->parentBlkno, &pbuffer) == BLK_NEEDS_REDO)
 	{
-		pbuffer = XLogReadBuffer(data->node, data->parentBlkno, false);
-		if (BufferIsValid(pbuffer))
-		{
-			page = BufferGetPage(pbuffer);
-			if (lsn > PageGetLSN(page))
-			{
-				Assert(GinPageIsData(page));
-				Assert(!GinPageIsLeaf(page));
-				GinPageDeletePostingItem(page, data->parentOffset);
-				PageSetLSN(page, lsn);
-				MarkBufferDirty(pbuffer);
-			}
-		}
+		page = BufferGetPage(pbuffer);
+
+		Assert(GinPageIsData(page));
+		Assert(!GinPageIsLeaf(page));
+		GinPageDeletePostingItem(page, data->parentOffset);
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(pbuffer);
 	}
 
-	if (record->xl_info & XLR_BKP_BLOCK(2))
-		(void) RestoreBackupBlock(lsn, record, 2, false, false);
-	else if (data->leftBlkno != InvalidBlockNumber)
+	if (XLogReplayBuffer(2, data->node, data->leftBlkno, &lbuffer) == BLK_NEEDS_REDO)
 	{
-		lbuffer = XLogReadBuffer(data->node, data->leftBlkno, false);
-		if (BufferIsValid(lbuffer))
-		{
-			page = BufferGetPage(lbuffer);
-			if (lsn > PageGetLSN(page))
-			{
-				Assert(GinPageIsData(page));
-				GinPageGetOpaque(page)->rightlink = data->rightLink;
-				PageSetLSN(page, lsn);
-				MarkBufferDirty(lbuffer);
-			}
-			UnlockReleaseBuffer(lbuffer);
-		}
+		page = BufferGetPage(lbuffer);
+
+		Assert(GinPageIsData(page));
+		GinPageGetOpaque(page)->rightlink = data->rightLink;
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(lbuffer);
 	}
 
+	if (BufferIsValid(lbuffer))
+		UnlockReleaseBuffer(lbuffer);
 	if (BufferIsValid(pbuffer))
 		UnlockReleaseBuffer(pbuffer);
 	if (BufferIsValid(dbuffer))
@@ -730,74 +678,61 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record)
 		/*
 		 * insert into tail page
 		 */
-		if (record->xl_info & XLR_BKP_BLOCK(0))
-			(void) RestoreBackupBlock(lsn, record, 0, false, false);
-		else
+		if (XLogReplayBuffer(0, data->node, data->metadata.tail, &buffer) == BLK_NEEDS_REDO)
 		{
-			buffer = XLogReadBuffer(data->node, data->metadata.tail, false);
-			if (BufferIsValid(buffer))
-			{
-				Page		page = BufferGetPage(buffer);
+			Page		page = BufferGetPage(buffer);
+			OffsetNumber off;
+			int			i;
+			Size		tupsize;
+			IndexTuple	tuples;
 
-				if (lsn > PageGetLSN(page))
-				{
-					OffsetNumber l,
-								off = (PageIsEmpty(page)) ? FirstOffsetNumber :
-					OffsetNumberNext(PageGetMaxOffsetNumber(page));
-					int			i,
-								tupsize;
-					IndexTuple	tuples = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogUpdateMeta));
+			tuples = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogUpdateMeta));
 
-					for (i = 0; i < data->ntuples; i++)
-					{
-						tupsize = IndexTupleSize(tuples);
+			if (PageIsEmpty(page))
+				off = FirstOffsetNumber;
+			else
+				off = OffsetNumberNext(PageGetMaxOffsetNumber(page));
 
-						l = PageAddItem(page, (Item) tuples, tupsize, off, false, false);
+			for (i = 0; i < data->ntuples; i++)
+			{
+				tupsize = IndexTupleSize(tuples);
 
-						if (l == InvalidOffsetNumber)
-							elog(ERROR, "failed to add item to index page");
+				if (PageAddItem(page, (Item) tuples, tupsize, off,
+								false, false) == InvalidOffsetNumber)
+					elog(ERROR, "failed to add item to index page");
 
-						tuples = (IndexTuple) (((char *) tuples) + tupsize);
+				tuples = (IndexTuple) (((char *) tuples) + tupsize);
 
-						off++;
-					}
+				off++;
+			}
 
-					/*
-					 * Increase counter of heap tuples
-					 */
-					GinPageGetOpaque(page)->maxoff++;
+			/*
+			 * Increase counter of heap tuples
+			 */
+			GinPageGetOpaque(page)->maxoff++;
 
-					PageSetLSN(page, lsn);
-					MarkBufferDirty(buffer);
-				}
-				UnlockReleaseBuffer(buffer);
-			}
+			PageSetLSN(page, lsn);
+			MarkBufferDirty(buffer);
 		}
+		if (BufferIsValid(buffer))
+			UnlockReleaseBuffer(buffer);
 	}
 	else if (data->prevTail != InvalidBlockNumber)
 	{
 		/*
 		 * New tail
 		 */
-		if (record->xl_info & XLR_BKP_BLOCK(0))
-			(void) RestoreBackupBlock(lsn, record, 0, false, false);
-		else
+		if (XLogReplayBuffer(0, data->node, data->prevTail, &buffer) == BLK_NEEDS_REDO)
 		{
-			buffer = XLogReadBuffer(data->node, data->prevTail, false);
-			if (BufferIsValid(buffer))
-			{
-				Page		page = BufferGetPage(buffer);
+			Page		page = BufferGetPage(buffer);
 
-				if (lsn > PageGetLSN(page))
-				{
-					GinPageGetOpaque(page)->rightlink = data->newRightlink;
+			GinPageGetOpaque(page)->rightlink = data->newRightlink;
 
-					PageSetLSN(page, lsn);
-					MarkBufferDirty(buffer);
-				}
-				UnlockReleaseBuffer(buffer);
-			}
+			PageSetLSN(page, lsn);
+			MarkBufferDirty(buffer);
 		}
+		if (BufferIsValid(buffer))
+			UnlockReleaseBuffer(buffer);
 	}
 
 	UnlockReleaseBuffer(metabuffer);
diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c
index 7d36b2a..e921777 100644
--- a/src/backend/access/gist/gistxlog.c
+++ b/src/backend/access/gist/gistxlog.c
@@ -48,31 +48,26 @@ gistRedoClearFollowRight(XLogRecPtr lsn, XLogRecord *record, int block_index,
 {
 	Buffer		buffer;
 	Page		page;
-
-	if (record->xl_info & XLR_BKP_BLOCK(block_index))
-		buffer = RestoreBackupBlock(lsn, record, block_index, false, true);
-	else
-	{
-		buffer = XLogReadBuffer(node, childblkno, false);
-		if (!BufferIsValid(buffer))
-			return;				/* page was deleted, nothing to do */
-	}
-	page = (Page) BufferGetPage(buffer);
+	XLogReplayResult rc;
 
 	/*
-	 * Note that we still update the page even if page LSN is equal to the LSN
-	 * of this record, because the updated NSN is not included in the full
-	 * page image.
+	 * Note that we still update the page even if it was restored from a
+	 * full page image, because the updated NSN is not included in the
+	 * image.
 	 */
-	if (lsn >= PageGetLSN(page))
+	rc = XLogReplayBuffer(block_index, node, childblkno, &buffer);
+	if (rc == BLK_NEEDS_REDO || rc == BLK_RESTORED)
 	{
+		page = BufferGetPage(buffer);
+
 		GistPageSetNSN(page, lsn);
 		GistClearFollowRight(page);
 
 		PageSetLSN(page, lsn);
 		MarkBufferDirty(buffer);
 	}
-	UnlockReleaseBuffer(buffer);
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 }
 
 /*
@@ -87,104 +82,85 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
 	Page		page;
 	char	   *data;
 
-	/*
-	 * We need to acquire and hold lock on target page while updating the left
-	 * child page.  If we have a full-page image of target page, getting the
-	 * lock is a side-effect of restoring that image.  Note that even if the
-	 * target page no longer exists, we'll still attempt to replay the change
-	 * on the child page.
-	 */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-		buffer = RestoreBackupBlock(lsn, record, 0, false, true);
-	else
-		buffer = XLogReadBuffer(xldata->node, xldata->blkno, false);
-
-	/* Fix follow-right data on left child page */
-	if (BlockNumberIsValid(xldata->leftchild))
-		gistRedoClearFollowRight(lsn, record, 1,
-								 xldata->node, xldata->leftchild);
-
-	/* Done if target page no longer exists */
-	if (!BufferIsValid(buffer))
-		return;
-
-	/* nothing more to do if page was backed up (and no info to do it with) */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
+	if (XLogReplayBuffer(0, xldata->node, xldata->blkno, &buffer) == BLK_NEEDS_REDO)
 	{
-		UnlockReleaseBuffer(buffer);
-		return;
-	}
-
-	page = (Page) BufferGetPage(buffer);
-
-	/* nothing more to do if change already applied */
-	if (lsn <= PageGetLSN(page))
-	{
-		UnlockReleaseBuffer(buffer);
-		return;
-	}
-
-	data = begin + sizeof(gistxlogPageUpdate);
+		page = (Page) BufferGetPage(buffer);
 
-	/* Delete old tuples */
-	if (xldata->ntodelete > 0)
-	{
-		int			i;
-		OffsetNumber *todelete = (OffsetNumber *) data;
+		data = begin + sizeof(gistxlogPageUpdate);
 
-		data += sizeof(OffsetNumber) * xldata->ntodelete;
+		/* Delete old tuples */
+		if (xldata->ntodelete > 0)
+		{
+			int			i;
+			OffsetNumber *todelete = (OffsetNumber *) data;
 
-		for (i = 0; i < xldata->ntodelete; i++)
-			PageIndexTupleDelete(page, todelete[i]);
-		if (GistPageIsLeaf(page))
-			GistMarkTuplesDeleted(page);
-	}
+			data += sizeof(OffsetNumber) * xldata->ntodelete;
 
-	/* add tuples */
-	if (data - begin < record->xl_len)
-	{
-		OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber :
-		OffsetNumberNext(PageGetMaxOffsetNumber(page));
+			for (i = 0; i < xldata->ntodelete; i++)
+				PageIndexTupleDelete(page, todelete[i]);
+			if (GistPageIsLeaf(page))
+				GistMarkTuplesDeleted(page);
+		}
 
-		while (data - begin < record->xl_len)
+		/* add tuples */
+		if (data - begin < record->xl_len)
 		{
-			IndexTuple	itup = (IndexTuple) data;
-			Size		sz = IndexTupleSize(itup);
-			OffsetNumber l;
-
-			data += sz;
+			OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber :
+				OffsetNumberNext(PageGetMaxOffsetNumber(page));
+
+			while (data - begin < record->xl_len)
+			{
+				IndexTuple	itup = (IndexTuple) data;
+				Size		sz = IndexTupleSize(itup);
+				OffsetNumber l;
+
+				data += sz;
+
+				l = PageAddItem(page, (Item) itup, sz, off, false, false);
+				if (l == InvalidOffsetNumber)
+					elog(ERROR, "failed to add item to GiST index page, size %d bytes",
+						 (int) sz);
+				off++;
+			}
+		}
+		else
+		{
+			/*
+			 * special case: leafpage, nothing to insert, nothing to delete,
+			 * then vacuum marks page
+			 */
+			if (GistPageIsLeaf(page) && xldata->ntodelete == 0)
+				GistClearTuplesDeleted(page);
+		}
 
-			l = PageAddItem(page, (Item) itup, sz, off, false, false);
-			if (l == InvalidOffsetNumber)
-				elog(ERROR, "failed to add item to GiST index page, size %d bytes",
-					 (int) sz);
-			off++;
+		if (!GistPageIsLeaf(page) &&
+			PageGetMaxOffsetNumber(page) == InvalidOffsetNumber &&
+			xldata->blkno == GIST_ROOT_BLKNO)
+		{
+			/*
+			 * all links on non-leaf root page was deleted by vacuum full, so
+			 * root page becomes a leaf
+			 */
+			GistPageSetLeaf(page);
 		}
-	}
-	else
-	{
-		/*
-		 * special case: leafpage, nothing to insert, nothing to delete, then
-		 * vacuum marks page
-		 */
-		if (GistPageIsLeaf(page) && xldata->ntodelete == 0)
-			GistClearTuplesDeleted(page);
-	}
 
-	if (!GistPageIsLeaf(page) &&
-		PageGetMaxOffsetNumber(page) == InvalidOffsetNumber &&
-		xldata->blkno == GIST_ROOT_BLKNO)
-	{
-		/*
-		 * all links on non-leaf root page was deleted by vacuum full, so root
-		 * page becomes a leaf
-		 */
-		GistPageSetLeaf(page);
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
 	}
 
-	PageSetLSN(page, lsn);
-	MarkBufferDirty(buffer);
-	UnlockReleaseBuffer(buffer);
+	/*
+	 * Fix follow-right data on left child page
+	 *
+	 * This must be done while still holding the lock on the target page.
+	 * Note that even if the target page no longer exists, we'll still attempt
+	 * to replay the change on the child page.
+	 */
+	if (BlockNumberIsValid(xldata->leftchild))
+		gistRedoClearFollowRight(lsn, record, 1,
+								 xldata->node, xldata->leftchild);
+
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 }
 
 static void
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index d731f98..bf863af 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -7137,15 +7137,13 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record)
 {
 	xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
 	Buffer		buffer;
-	Page		page;
-	OffsetNumber *end;
-	OffsetNumber *redirected;
-	OffsetNumber *nowdead;
-	OffsetNumber *nowunused;
-	int			nredirected;
-	int			ndead;
-	int			nunused;
-	Size		freespace;
+	Size		freespace = 0;
+	RelFileNode	rnode;
+	BlockNumber	blkno;
+	XLogReplayResult rc;
+
+	rnode = xlrec->node;
+	blkno = xlrec->block;
 
 	/*
 	 * We're about to remove tuples. In Hot Standby mode, ensure that there's
@@ -7156,65 +7154,62 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record)
 	 * latestRemovedXid is invalid, skip conflict processing.
 	 */
 	if (InHotStandby && TransactionIdIsValid(xlrec->latestRemovedXid))
-		ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid,
-											xlrec->node);
+		ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rnode);
 
 	/*
 	 * If we have a full-page image, restore it (using a cleanup lock) and
 	 * we're done.
 	 */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-	{
-		(void) RestoreBackupBlock(lsn, record, 0, true, false);
-		return;
-	}
+	rc = XLogReplayBufferExtended(0, rnode, MAIN_FORKNUM, blkno,
+								  RBM_NORMAL, true, &buffer);
+	if (rc == BLK_NEEDS_REDO)
+	{
+		Page		page = (Page) BufferGetPage(buffer);
+		OffsetNumber *end;
+		OffsetNumber *redirected;
+		OffsetNumber *nowdead;
+		OffsetNumber *nowunused;
+		int			nredirected;
+		int			ndead;
+		int			nunused;
+
+		nredirected = xlrec->nredirected;
+		ndead = xlrec->ndead;
+		end = (OffsetNumber *) ((char *) xlrec + record->xl_len);
+		redirected = (OffsetNumber *) ((char *) xlrec + SizeOfHeapClean);
+		nowdead = redirected + (nredirected * 2);
+		nowunused = nowdead + ndead;
+		nunused = (end - nowunused);
+		Assert(nunused >= 0);
+
+		/* Update all item pointers per the record, and repair fragmentation */
+		heap_page_prune_execute(buffer,
+								redirected, nredirected,
+								nowdead, ndead,
+								nowunused, nunused);
+
+		freespace = PageGetHeapFreeSpace(page);		/* needed to update FSM below */
 
-	buffer = XLogReadBufferExtended(xlrec->node, MAIN_FORKNUM, xlrec->block, RBM_NORMAL);
-	if (!BufferIsValid(buffer))
-		return;
-	LockBufferForCleanup(buffer);
-	page = (Page) BufferGetPage(buffer);
+		/*
+		 * Note: we don't worry about updating the page's prunability hints. At
+		 * worst this will cause an extra prune cycle to occur soon.
+		 */
 
-	if (lsn <= PageGetLSN(page))
-	{
-		UnlockReleaseBuffer(buffer);
-		return;
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
 	}
-
-	nredirected = xlrec->nredirected;
-	ndead = xlrec->ndead;
-	end = (OffsetNumber *) ((char *) xlrec + record->xl_len);
-	redirected = (OffsetNumber *) ((char *) xlrec + SizeOfHeapClean);
-	nowdead = redirected + (nredirected * 2);
-	nowunused = nowdead + ndead;
-	nunused = (end - nowunused);
-	Assert(nunused >= 0);
-
-	/* Update all item pointers per the record, and repair fragmentation */
-	heap_page_prune_execute(buffer,
-							redirected, nredirected,
-							nowdead, ndead,
-							nowunused, nunused);
-
-	freespace = PageGetHeapFreeSpace(page);		/* needed to update FSM below */
-
-	/*
-	 * Note: we don't worry about updating the page's prunability hints. At
-	 * worst this will cause an extra prune cycle to occur soon.
-	 */
-
-	PageSetLSN(page, lsn);
-	MarkBufferDirty(buffer);
-	UnlockReleaseBuffer(buffer);
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 
 	/*
 	 * Update the FSM as well.
 	 *
-	 * XXX: We don't get here if the page was restored from full page image.
+	 * XXX: Don't do this if the page was restored from full page image.
 	 * We don't bother to update the FSM in that case, it doesn't need to be
 	 * totally accurate anyway.
 	 */
-	XLogRecordPageWithFreeSpace(xlrec->node, xlrec->block, freespace);
+	if (rc == BLK_NEEDS_REDO)
+		XLogRecordPageWithFreeSpace(xlrec->node, xlrec->block, freespace);
 }
 
 /*
@@ -7229,6 +7224,15 @@ static void
 heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
 {
 	xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record);
+	Buffer		buffer;
+	Page		page;
+	Buffer		vmbuffer = InvalidBuffer;
+	RelFileNode	rnode;
+	BlockNumber	blkno;
+	XLogReplayResult rc;
+
+	rnode = xlrec->node;
+	blkno = xlrec->block;
 
 	/*
 	 * If there are any Hot Standby transactions running that have an xmin
@@ -7240,60 +7244,45 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
 	 * rather than killing the transaction outright.
 	 */
 	if (InHotStandby)
-		ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, xlrec->node);
+		ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rnode);
 
 	/*
-	 * If heap block was backed up, restore it. This can only happen with
-	 * checksums enabled.
+	 * Read the heap page, if it still exists. If the heap file has been
+	 * dropped or truncated later in recovery, we don't need to update the
+	 * page, but we'd better still update the visibility map.
 	 */
-	if (record->xl_info & XLR_BKP_BLOCK(1))
+	rc = XLogReplayBuffer(1, rnode, blkno, &buffer);
+	if (rc == BLK_NEEDS_REDO)
 	{
-		Assert(DataChecksumsEnabled());
-		(void) RestoreBackupBlock(lsn, record, 1, false, false);
+		/*
+		 * We don't bump the LSN of the heap page when setting the
+		 * visibility map bit (unless checksums are enabled, in which case
+		 * we must), because that would generate an unworkable volume of
+		 * full-page writes.  This exposes us to torn page hazards, but
+		 * since we're not inspecting the existing page contents in any
+		 * way, we don't care.
+		 *
+		 * However, all operations that clear the visibility map bit *do*
+		 * bump the LSN, and those operations will only be replayed if the
+		 * XLOG LSN follows the page LSN.  Thus, if the page LSN has
+		 * advanced past our XLOG record's LSN, we mustn't mark the page
+		 * all-visible, because the subsequent update won't be replayed to
+		 * clear the flag.
+		 */
+		page = BufferGetPage(buffer);
+		PageSetAllVisible(page);
+		MarkBufferDirty(buffer);
 	}
-	else
+	else if (rc == BLK_RESTORED)
 	{
-		Buffer		buffer;
-		Page		page;
-
 		/*
-		 * Read the heap page, if it still exists. If the heap file has been
-		 * dropped or truncated later in recovery, we don't need to update the
-		 * page, but we'd better still update the visibility map.
+		 * If heap block was backed up, restore it. This can only happen with
+		 * checksums enabled.
 		 */
-		buffer = XLogReadBufferExtended(xlrec->node, MAIN_FORKNUM,
-										xlrec->block, RBM_NORMAL);
-		if (BufferIsValid(buffer))
-		{
-			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
-
-			page = (Page) BufferGetPage(buffer);
-
-			/*
-			 * We don't bump the LSN of the heap page when setting the
-			 * visibility map bit (unless checksums are enabled, in which case
-			 * we must), because that would generate an unworkable volume of
-			 * full-page writes.  This exposes us to torn page hazards, but
-			 * since we're not inspecting the existing page contents in any
-			 * way, we don't care.
-			 *
-			 * However, all operations that clear the visibility map bit *do*
-			 * bump the LSN, and those operations will only be replayed if the
-			 * XLOG LSN follows the page LSN.  Thus, if the page LSN has
-			 * advanced past our XLOG record's LSN, we mustn't mark the page
-			 * all-visible, because the subsequent update won't be replayed to
-			 * clear the flag.
-			 */
-			if (lsn > PageGetLSN(page))
-			{
-				PageSetAllVisible(page);
-				MarkBufferDirty(buffer);
-			}
-
-			/* Done with heap page. */
-			UnlockReleaseBuffer(buffer);
-		}
+		Assert(DataChecksumsEnabled());
 	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 
 	/*
 	 * Even if we skipped the heap page update due to the LSN interlock, it's
@@ -7306,10 +7295,9 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
 	else
 	{
 		Relation	reln;
-		Buffer		vmbuffer = InvalidBuffer;
 
-		reln = CreateFakeRelcacheEntry(xlrec->node);
-		visibilitymap_pin(reln, xlrec->block, &vmbuffer);
+		reln = CreateFakeRelcacheEntry(rnode);
+		visibilitymap_pin(reln, blkno, &vmbuffer);
 
 		/*
 		 * Don't set the bit if replay has already passed this point.
@@ -7323,7 +7311,7 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
 		 * real harm is done; and the next VACUUM will fix it.
 		 */
 		if (lsn > PageGetLSN(BufferGetPage(vmbuffer)))
-			visibilitymap_set(reln, xlrec->block, InvalidBuffer, lsn, vmbuffer,
+			visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
 							  xlrec->cutoff_xid);
 
 		ReleaseBuffer(vmbuffer);
@@ -7351,41 +7339,29 @@ heap_xlog_freeze_page(XLogRecPtr lsn, XLogRecord *record)
 		ResolveRecoveryConflictWithSnapshot(cutoff_xid, xlrec->node);
 
 	/* If we have a full-page image, restore it and we're done */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
+	if (XLogReplayBuffer(0, xlrec->node, xlrec->block, &buffer) == BLK_NEEDS_REDO)
 	{
-		(void) RestoreBackupBlock(lsn, record, 0, false, false);
-		return;
-	}
-
-	buffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
-	if (!BufferIsValid(buffer))
-		return;
-
-	page = (Page) BufferGetPage(buffer);
+		page = BufferGetPage(buffer);
 
-	if (lsn <= PageGetLSN(page))
-	{
-		UnlockReleaseBuffer(buffer);
-		return;
-	}
+		/* now execute freeze plan for each frozen tuple */
+		for (ntup = 0; ntup < xlrec->ntuples; ntup++)
+		{
+			xl_heap_freeze_tuple *xlrec_tp;
+			ItemId		lp;
+			HeapTupleHeader tuple;
 
-	/* now execute freeze plan for each frozen tuple */
-	for (ntup = 0; ntup < xlrec->ntuples; ntup++)
-	{
-		xl_heap_freeze_tuple *xlrec_tp;
-		ItemId		lp;
-		HeapTupleHeader tuple;
+			xlrec_tp = &xlrec->tuples[ntup];
+			lp = PageGetItemId(page, xlrec_tp->offset);		/* offsets are one-based */
+			tuple = (HeapTupleHeader) PageGetItem(page, lp);
 
-		xlrec_tp = &xlrec->tuples[ntup];
-		lp = PageGetItemId(page, xlrec_tp->offset);		/* offsets are one-based */
-		tuple = (HeapTupleHeader) PageGetItem(page, lp);
+			heap_execute_freeze_tuple(tuple, xlrec_tp);
+		}
 
-		heap_execute_freeze_tuple(tuple, xlrec_tp);
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
 	}
-
-	PageSetLSN(page, lsn);
-	MarkBufferDirty(buffer);
-	UnlockReleaseBuffer(buffer);
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 }
 
 /*
@@ -7425,8 +7401,10 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
 	ItemId		lp = NULL;
 	HeapTupleHeader htup;
 	BlockNumber blkno;
+	RelFileNode	target_node;
 
 	blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));
+	target_node = xlrec->target.node;
 
 	/*
 	 * The visibility map may need to be fixed even if the heap page is
@@ -7434,7 +7412,7 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
 	 */
 	if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
 	{
-		Relation	reln = CreateFakeRelcacheEntry(xlrec->target.node);
+		Relation	reln = CreateFakeRelcacheEntry(target_node);
 		Buffer		vmbuffer = InvalidBuffer;
 
 		visibilitymap_pin(reln, blkno, &vmbuffer);
@@ -7444,51 +7422,40 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
 	}
 
 	/* If we have a full-page image, restore it and we're done */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
+	if (XLogReplayBuffer(0, target_node, blkno, &buffer) == BLK_NEEDS_REDO)
 	{
-		(void) RestoreBackupBlock(lsn, record, 0, false, false);
-		return;
-	}
-
-	buffer = XLogReadBuffer(xlrec->target.node, blkno, false);
-	if (!BufferIsValid(buffer))
-		return;
-	page = (Page) BufferGetPage(buffer);
+		page = (Page) BufferGetPage(buffer);
 
-	if (lsn <= PageGetLSN(page))	/* changes are applied */
-	{
-		UnlockReleaseBuffer(buffer);
-		return;
-	}
+		offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
+		if (PageGetMaxOffsetNumber(page) >= offnum)
+			lp = PageGetItemId(page, offnum);
 
-	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
-	if (PageGetMaxOffsetNumber(page) >= offnum)
-		lp = PageGetItemId(page, offnum);
+		if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
+			elog(PANIC, "heap_delete_redo: invalid lp");
 
-	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
-		elog(PANIC, "heap_delete_redo: invalid lp");
+		htup = (HeapTupleHeader) PageGetItem(page, lp);
 
-	htup = (HeapTupleHeader) PageGetItem(page, lp);
-
-	htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
-	htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
-	HeapTupleHeaderClearHotUpdated(htup);
-	fix_infomask_from_infobits(xlrec->infobits_set,
-							   &htup->t_infomask, &htup->t_infomask2);
-	HeapTupleHeaderSetXmax(htup, xlrec->xmax);
-	HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
+		htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
+		htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
+		HeapTupleHeaderClearHotUpdated(htup);
+		fix_infomask_from_infobits(xlrec->infobits_set,
+								   &htup->t_infomask, &htup->t_infomask2);
+		HeapTupleHeaderSetXmax(htup, xlrec->xmax);
+		HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
 
-	/* Mark the page as a candidate for pruning */
-	PageSetPrunable(page, record->xl_xid);
+		/* Mark the page as a candidate for pruning */
+		PageSetPrunable(page, record->xl_xid);
 
-	if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
-		PageClearAllVisible(page);
+		if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
+			PageClearAllVisible(page);
 
-	/* Make sure there is no forward chain link in t_ctid */
-	htup->t_ctid = xlrec->target.tid;
-	PageSetLSN(page, lsn);
-	MarkBufferDirty(buffer);
-	UnlockReleaseBuffer(buffer);
+		/* Make sure there is no forward chain link in t_ctid */
+		htup->t_ctid = xlrec->target.tid;
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
+	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 }
 
 static void
@@ -7506,9 +7473,12 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
 	HeapTupleHeader htup;
 	xl_heap_header xlhdr;
 	uint32		newlen;
-	Size		freespace;
+	Size		freespace = 0;
+	RelFileNode	target_node;
 	BlockNumber blkno;
+	XLogReplayResult rc;
 
+	target_node = xlrec->target.node;
 	blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));
 
 	/*
@@ -7517,7 +7487,7 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
 	 */
 	if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
 	{
-		Relation	reln = CreateFakeRelcacheEntry(xlrec->target.node);
+		Relation	reln = CreateFakeRelcacheEntry(target_node);
 		Buffer		vmbuffer = InvalidBuffer;
 
 		visibilitymap_pin(reln, blkno, &vmbuffer);
@@ -7527,81 +7497,70 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
 	}
 
 	/* If we have a full-page image, restore it and we're done */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-	{
-		(void) RestoreBackupBlock(lsn, record, 0, false, false);
-		return;
-	}
-
 	if (record->xl_info & XLOG_HEAP_INIT_PAGE)
 	{
-		buffer = XLogReadBuffer(xlrec->target.node, blkno, true);
-		Assert(BufferIsValid(buffer));
-		page = (Page) BufferGetPage(buffer);
+		rc = XLogReplayBufferExtended(0, target_node, MAIN_FORKNUM, blkno, RBM_ZERO, false, &buffer);
+		Assert(rc == BLK_NEEDS_REDO);
+		page = BufferGetPage(buffer);
 
 		PageInit(page, BufferGetPageSize(buffer), 0);
 	}
 	else
+		rc = XLogReplayBuffer(0, target_node, blkno, &buffer);
+
+	if (rc == BLK_NEEDS_REDO)
 	{
-		buffer = XLogReadBuffer(xlrec->target.node, blkno, false);
-		if (!BufferIsValid(buffer))
-			return;
-		page = (Page) BufferGetPage(buffer);
+		page = BufferGetPage(buffer);
 
-		if (lsn <= PageGetLSN(page))	/* changes are applied */
-		{
-			UnlockReleaseBuffer(buffer);
-			return;
-		}
-	}
+		offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
+		if (PageGetMaxOffsetNumber(page) + 1 < offnum)
+			elog(PANIC, "heap_insert_redo: invalid max offset number");
 
-	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
-	if (PageGetMaxOffsetNumber(page) + 1 < offnum)
-		elog(PANIC, "heap_insert_redo: invalid max offset number");
-
-	newlen = record->xl_len - SizeOfHeapInsert - SizeOfHeapHeader;
-	Assert(newlen <= MaxHeapTupleSize);
-	memcpy((char *) &xlhdr,
-		   (char *) xlrec + SizeOfHeapInsert,
-		   SizeOfHeapHeader);
-	htup = &tbuf.hdr;
-	MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
-	/* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
-	memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
-		   (char *) xlrec + SizeOfHeapInsert + SizeOfHeapHeader,
-		   newlen);
-	newlen += offsetof(HeapTupleHeaderData, t_bits);
-	htup->t_infomask2 = xlhdr.t_infomask2;
-	htup->t_infomask = xlhdr.t_infomask;
-	htup->t_hoff = xlhdr.t_hoff;
-	HeapTupleHeaderSetXmin(htup, record->xl_xid);
-	HeapTupleHeaderSetCmin(htup, FirstCommandId);
-	htup->t_ctid = xlrec->target.tid;
+		newlen = record->xl_len - SizeOfHeapInsert - SizeOfHeapHeader;
+		Assert(newlen <= MaxHeapTupleSize);
+		memcpy((char *) &xlhdr,
+			   (char *) xlrec + SizeOfHeapInsert,
+			   SizeOfHeapHeader);
+		htup = &tbuf.hdr;
+		MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
+		/* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
+		memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
+			   (char *) xlrec + SizeOfHeapInsert + SizeOfHeapHeader,
+			   newlen);
+		newlen += offsetof(HeapTupleHeaderData, t_bits);
+		htup->t_infomask2 = xlhdr.t_infomask2;
+		htup->t_infomask = xlhdr.t_infomask;
+		htup->t_hoff = xlhdr.t_hoff;
+		HeapTupleHeaderSetXmin(htup, record->xl_xid);
+		HeapTupleHeaderSetCmin(htup, FirstCommandId);
+		htup->t_ctid = xlrec->target.tid;
 
-	offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
-	if (offnum == InvalidOffsetNumber)
-		elog(PANIC, "heap_insert_redo: failed to add tuple");
+		offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
+		if (offnum == InvalidOffsetNumber)
+			elog(PANIC, "heap_insert_redo: failed to add tuple");
 
-	freespace = PageGetHeapFreeSpace(page);		/* needed to update FSM below */
+		freespace = PageGetHeapFreeSpace(page);		/* needed to update FSM below */
 
-	PageSetLSN(page, lsn);
+		PageSetLSN(page, lsn);
 
-	if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
-		PageClearAllVisible(page);
+		if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
+			PageClearAllVisible(page);
 
-	MarkBufferDirty(buffer);
-	UnlockReleaseBuffer(buffer);
+		MarkBufferDirty(buffer);
+	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 
 	/*
 	 * If the page is running low on free space, update the FSM as well.
 	 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
 	 * better than that without knowing the fill-factor for the table.
 	 *
-	 * XXX: We don't get here if the page was restored from full page image.
+	 * XXX: Don't do this if the page was restored from a full-page image.
 	 * We don't bother to update the FSM in that case, it doesn't need to be
 	 * totally accurate anyway.
 	 */
-	if (freespace < BLCKSZ / 5)
+	if (rc == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
 		XLogRecordPageWithFreeSpace(xlrec->target.node, blkno, freespace);
 }
 
@@ -7613,6 +7572,8 @@ heap_xlog_multi_insert(XLogRecPtr lsn, XLogRecord *record)
 {
 	char	   *recdata = XLogRecGetData(record);
 	xl_heap_multi_insert *xlrec;
+	RelFileNode rnode;
+	BlockNumber blkno;
 	Buffer		buffer;
 	Page		page;
 	struct
@@ -7622,10 +7583,10 @@ heap_xlog_multi_insert(XLogRecPtr lsn, XLogRecord *record)
 	}			tbuf;
 	HeapTupleHeader htup;
 	uint32		newlen;
-	Size		freespace;
-	BlockNumber blkno;
+	Size		freespace = 0;
 	int			i;
 	bool		isinit = (record->xl_info & XLOG_HEAP_INIT_PAGE) != 0;
+	XLogReplayResult rc;
 
 	/*
 	 * Insertion doesn't overwrite MVCC data, so no conflict processing is
@@ -7635,6 +7596,9 @@ heap_xlog_multi_insert(XLogRecPtr lsn, XLogRecord *record)
 	xlrec = (xl_heap_multi_insert *) recdata;
 	recdata += SizeOfHeapMultiInsert;
 
+	rnode = xlrec->node;
+	blkno = xlrec->blkno;
+
 	/*
 	 * If we're reinitializing the page, the tuples are stored in order from
 	 * FirstOffsetNumber. Otherwise there's an array of offsets in the WAL
@@ -7643,15 +7607,13 @@ heap_xlog_multi_insert(XLogRecPtr lsn, XLogRecord *record)
 	if (!isinit)
 		recdata += sizeof(OffsetNumber) * xlrec->ntuples;
 
-	blkno = xlrec->blkno;
-
 	/*
 	 * The visibility map may need to be fixed even if the heap page is
 	 * already up-to-date.
 	 */
 	if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
 	{
-		Relation	reln = CreateFakeRelcacheEntry(xlrec->node);
+		Relation	reln = CreateFakeRelcacheEntry(rnode);
 		Buffer		vmbuffer = InvalidBuffer;
 
 		visibilitymap_pin(reln, blkno, &vmbuffer);
@@ -7660,94 +7622,82 @@ heap_xlog_multi_insert(XLogRecPtr lsn, XLogRecord *record)
 		FreeFakeRelcacheEntry(reln);
 	}
 
-	/* If we have a full-page image, restore it and we're done */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-	{
-		(void) RestoreBackupBlock(lsn, record, 0, false, false);
-		return;
-	}
-
 	if (isinit)
 	{
-		buffer = XLogReadBuffer(xlrec->node, blkno, true);
-		Assert(BufferIsValid(buffer));
-		page = (Page) BufferGetPage(buffer);
+		rc = XLogReplayBufferExtended(0, rnode, MAIN_FORKNUM, blkno,
+									  RBM_ZERO, false, &buffer);
+		Assert(rc == BLK_NEEDS_REDO);
+		page = BufferGetPage(buffer);
 
 		PageInit(page, BufferGetPageSize(buffer), 0);
 	}
 	else
-	{
-		buffer = XLogReadBuffer(xlrec->node, blkno, false);
-		if (!BufferIsValid(buffer))
-			return;
-		page = (Page) BufferGetPage(buffer);
+		rc = XLogReplayBuffer(0, rnode, blkno, &buffer);
 
-		if (lsn <= PageGetLSN(page))	/* changes are applied */
-		{
-			UnlockReleaseBuffer(buffer);
-			return;
-		}
-	}
-
-	for (i = 0; i < xlrec->ntuples; i++)
+	if (rc == BLK_NEEDS_REDO)
 	{
-		OffsetNumber offnum;
-		xl_multi_insert_tuple *xlhdr;
+		page = BufferGetPage(buffer);
+		for (i = 0; i < xlrec->ntuples; i++)
+		{
+			OffsetNumber offnum;
+			xl_multi_insert_tuple *xlhdr;
 
-		if (isinit)
-			offnum = FirstOffsetNumber + i;
-		else
-			offnum = xlrec->offsets[i];
-		if (PageGetMaxOffsetNumber(page) + 1 < offnum)
-			elog(PANIC, "heap_multi_insert_redo: invalid max offset number");
+			if (isinit)
+				offnum = FirstOffsetNumber + i;
+			else
+				offnum = xlrec->offsets[i];
+			if (PageGetMaxOffsetNumber(page) + 1 < offnum)
+				elog(PANIC, "heap_multi_insert_redo: invalid max offset number");
+
+			xlhdr = (xl_multi_insert_tuple *) SHORTALIGN(recdata);
+			recdata = ((char *) xlhdr) + SizeOfMultiInsertTuple;
+
+			newlen = xlhdr->datalen;
+			Assert(newlen <= MaxHeapTupleSize);
+			htup = &tbuf.hdr;
+			MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
+			/* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
+			memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
+				   (char *) recdata,
+				   newlen);
+			recdata += newlen;
+
+			newlen += offsetof(HeapTupleHeaderData, t_bits);
+			htup->t_infomask2 = xlhdr->t_infomask2;
+			htup->t_infomask = xlhdr->t_infomask;
+			htup->t_hoff = xlhdr->t_hoff;
+			HeapTupleHeaderSetXmin(htup, record->xl_xid);
+			HeapTupleHeaderSetCmin(htup, FirstCommandId);
+			ItemPointerSetBlockNumber(&htup->t_ctid, blkno);
+			ItemPointerSetOffsetNumber(&htup->t_ctid, offnum);
+
+			offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
+			if (offnum == InvalidOffsetNumber)
+				elog(PANIC, "heap_multi_insert_redo: failed to add tuple");
+		}
 
-		xlhdr = (xl_multi_insert_tuple *) SHORTALIGN(recdata);
-		recdata = ((char *) xlhdr) + SizeOfMultiInsertTuple;
+		freespace = PageGetHeapFreeSpace(page);		/* needed to update FSM below */
 
-		newlen = xlhdr->datalen;
-		Assert(newlen <= MaxHeapTupleSize);
-		htup = &tbuf.hdr;
-		MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
-		/* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
-		memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
-			   (char *) recdata,
-			   newlen);
-		recdata += newlen;
+		PageSetLSN(page, lsn);
 
-		newlen += offsetof(HeapTupleHeaderData, t_bits);
-		htup->t_infomask2 = xlhdr->t_infomask2;
-		htup->t_infomask = xlhdr->t_infomask;
-		htup->t_hoff = xlhdr->t_hoff;
-		HeapTupleHeaderSetXmin(htup, record->xl_xid);
-		HeapTupleHeaderSetCmin(htup, FirstCommandId);
-		ItemPointerSetBlockNumber(&htup->t_ctid, blkno);
-		ItemPointerSetOffsetNumber(&htup->t_ctid, offnum);
+		if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
+			PageClearAllVisible(page);
 
-		offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
-		if (offnum == InvalidOffsetNumber)
-			elog(PANIC, "heap_multi_insert_redo: failed to add tuple");
+		MarkBufferDirty(buffer);
 	}
-
-	freespace = PageGetHeapFreeSpace(page);		/* needed to update FSM below */
-
-	PageSetLSN(page, lsn);
-
-	if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
-		PageClearAllVisible(page);
-
-	MarkBufferDirty(buffer);
-	UnlockReleaseBuffer(buffer);
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 
 	/*
 	 * If the page is running low on free space, update the FSM as well.
 	 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
 	 * better than that without knowing the fill-factor for the table.
 	 *
-	 * XXX: We don't get here if the page was restored from full page image.
+	 * XXX: Don't do this if the page was restored from a full-page image.
 	 * We don't bother to update the FSM in that case, it doesn't need to be
 	 * totally accurate anyway.
 	 */
-	if (freespace < BLCKSZ / 5)
+	if (rc == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
 		XLogRecordPageWithFreeSpace(xlrec->node, blkno, freespace);
 }
 
@@ -7758,8 +7708,9 @@ static void
 heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
 {
 	xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
-	bool		samepage = (ItemPointerGetBlockNumber(&(xlrec->newtid)) ==
-							ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+	RelFileNode	rnode;
+	BlockNumber	newblk;
+	BlockNumber	oldblk;
 	Buffer		obuffer,
 				nbuffer;
 	Page		page;
@@ -7778,24 +7729,29 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
 	}			tbuf;
 	xl_heap_header_len xlhdr;
 	uint32		newlen;
-	Size		freespace;
+	Size		freespace = 0;
+	XLogReplayResult old_replay;
+	XLogReplayResult new_replay;
 
 	/* initialize to keep the compiler quiet */
 	oldtup.t_data = NULL;
 	oldtup.t_len = 0;
 
+	rnode = xlrec->target.node;
+	newblk = ItemPointerGetBlockNumber(&xlrec->newtid);
+	oldblk = ItemPointerGetBlockNumber(&xlrec->target.tid);
+
 	/*
 	 * The visibility map may need to be fixed even if the heap page is
 	 * already up-to-date.
 	 */
 	if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
 	{
-		Relation	reln = CreateFakeRelcacheEntry(xlrec->target.node);
-		BlockNumber block = ItemPointerGetBlockNumber(&xlrec->target.tid);
+		Relation	reln = CreateFakeRelcacheEntry(rnode);
 		Buffer		vmbuffer = InvalidBuffer;
 
-		visibilitymap_pin(reln, block, &vmbuffer);
-		visibilitymap_clear(reln, block, vmbuffer);
+		visibilitymap_pin(reln, oldblk, &vmbuffer);
+		visibilitymap_clear(reln, oldblk, vmbuffer);
 		ReleaseBuffer(vmbuffer);
 		FreeFakeRelcacheEntry(reln);
 	}
@@ -7810,84 +7766,63 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
 	 * added the new tuple to the new page.
 	 */
 
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-	{
-		obuffer = RestoreBackupBlock(lsn, record, 0, false, true);
-		if (samepage)
-		{
-			/* backup block covered both changes, so we're done */
-			UnlockReleaseBuffer(obuffer);
-			return;
-		}
-		goto newt;
-	}
-
 	/* Deal with old tuple version */
-
-	obuffer = XLogReadBuffer(xlrec->target.node,
-							 ItemPointerGetBlockNumber(&(xlrec->target.tid)),
-							 false);
-	if (!BufferIsValid(obuffer))
-		goto newt;
-	page = (Page) BufferGetPage(obuffer);
-
-	if (lsn <= PageGetLSN(page))	/* changes are applied */
+	old_replay = XLogReplayBuffer(0, rnode, oldblk, &obuffer);
+	if (old_replay == BLK_NEEDS_REDO)
 	{
-		if (samepage)
-		{
-			UnlockReleaseBuffer(obuffer);
-			return;
-		}
-		goto newt;
-	}
-
-	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
-	if (PageGetMaxOffsetNumber(page) >= offnum)
-		lp = PageGetItemId(page, offnum);
+		page = (Page) BufferGetPage(obuffer);
 
-	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
-		elog(PANIC, "heap_update_redo: invalid lp");
+		offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
+		if (PageGetMaxOffsetNumber(page) >= offnum)
+			lp = PageGetItemId(page, offnum);
 
-	htup = (HeapTupleHeader) PageGetItem(page, lp);
+		if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
+			elog(PANIC, "heap_update_redo: invalid lp");
 
-	oldtup.t_data = htup;
-	oldtup.t_len = ItemIdGetLength(lp);
+		htup = (HeapTupleHeader) PageGetItem(page, lp);
 
-	htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
-	htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
-	if (hot_update)
-		HeapTupleHeaderSetHotUpdated(htup);
-	else
-		HeapTupleHeaderClearHotUpdated(htup);
-	fix_infomask_from_infobits(xlrec->old_infobits_set, &htup->t_infomask,
-							   &htup->t_infomask2);
-	HeapTupleHeaderSetXmax(htup, xlrec->old_xmax);
-	HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
-	/* Set forward chain link in t_ctid */
-	htup->t_ctid = xlrec->newtid;
+		oldtup.t_data = htup;
+		oldtup.t_len = ItemIdGetLength(lp);
 
-	/* Mark the page as a candidate for pruning */
-	PageSetPrunable(page, record->xl_xid);
+		htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
+		htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
+		if (hot_update)
+			HeapTupleHeaderSetHotUpdated(htup);
+		else
+			HeapTupleHeaderClearHotUpdated(htup);
+		fix_infomask_from_infobits(xlrec->old_infobits_set, &htup->t_infomask,
+								   &htup->t_infomask2);
+		HeapTupleHeaderSetXmax(htup, xlrec->old_xmax);
+		HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
+		/* Set forward chain link in t_ctid */
+		htup->t_ctid = xlrec->newtid;
+
+		/* Mark the page as a candidate for pruning */
+		PageSetPrunable(page, record->xl_xid);
+
+		if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
+			PageClearAllVisible(page);
 
-	if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
-		PageClearAllVisible(page);
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(obuffer);
+	}
 
-	/*
-	 * this test is ugly, but necessary to avoid thinking that insert change
-	 * is already applied
-	 */
-	if (samepage)
+	if (oldblk == newblk)
 	{
 		nbuffer = obuffer;
-		goto newsame;
+		new_replay = old_replay;
 	}
+	else if (record->xl_info & XLOG_HEAP_INIT_PAGE)
+	{
+		new_replay = XLogReplayBufferExtended(1, rnode, MAIN_FORKNUM, newblk,
+											  RBM_ZERO, false, &nbuffer);
+		Assert (new_replay == BLK_NEEDS_REDO);
+		page = (Page) BufferGetPage(nbuffer);
 
-	PageSetLSN(page, lsn);
-	MarkBufferDirty(obuffer);
-
-	/* Deal with new tuple */
-
-newt:;
+		PageInit(page, BufferGetPageSize(nbuffer), 0);
+	}
+	else
+		new_replay = XLogReplayBuffer(1, rnode, newblk, &nbuffer);
 
 	/*
 	 * The visibility map may need to be fixed even if the heap page is
@@ -7896,144 +7831,107 @@ newt:;
 	if (xlrec->flags & XLOG_HEAP_NEW_ALL_VISIBLE_CLEARED)
 	{
 		Relation	reln = CreateFakeRelcacheEntry(xlrec->target.node);
-		BlockNumber block = ItemPointerGetBlockNumber(&xlrec->newtid);
 		Buffer		vmbuffer = InvalidBuffer;
 
-		visibilitymap_pin(reln, block, &vmbuffer);
-		visibilitymap_clear(reln, block, vmbuffer);
+		visibilitymap_pin(reln, newblk, &vmbuffer);
+		visibilitymap_clear(reln, newblk, vmbuffer);
 		ReleaseBuffer(vmbuffer);
 		FreeFakeRelcacheEntry(reln);
 	}
 
-	if (record->xl_info & XLR_BKP_BLOCK(1))
-	{
-		(void) RestoreBackupBlock(lsn, record, 1, false, false);
-		if (BufferIsValid(obuffer))
-			UnlockReleaseBuffer(obuffer);
-		return;
-	}
-
-	if (record->xl_info & XLOG_HEAP_INIT_PAGE)
+	/* Deal with new tuple */
+	if (new_replay == BLK_NEEDS_REDO)
 	{
-		nbuffer = XLogReadBuffer(xlrec->target.node,
-								 ItemPointerGetBlockNumber(&(xlrec->newtid)),
-								 true);
-		Assert(BufferIsValid(nbuffer));
 		page = (Page) BufferGetPage(nbuffer);
 
-		PageInit(page, BufferGetPageSize(nbuffer), 0);
-	}
-	else
-	{
-		nbuffer = XLogReadBuffer(xlrec->target.node,
-								 ItemPointerGetBlockNumber(&(xlrec->newtid)),
-								 false);
-		if (!BufferIsValid(nbuffer))
+		offnum = ItemPointerGetOffsetNumber(&(xlrec->newtid));
+		if (PageGetMaxOffsetNumber(page) + 1 < offnum)
+			elog(PANIC, "heap_update_redo: invalid max offset number");
+
+		recdata = (char *) xlrec + SizeOfHeapUpdate;
+
+		if (xlrec->flags & XLOG_HEAP_PREFIX_FROM_OLD)
 		{
-			if (BufferIsValid(obuffer))
-				UnlockReleaseBuffer(obuffer);
-			return;
+			Assert(newblk == oldblk);
+			memcpy(&prefixlen, recdata, sizeof(uint16));
+			recdata += sizeof(uint16);
 		}
-		page = (Page) BufferGetPage(nbuffer);
-
-		if (lsn <= PageGetLSN(page))	/* changes are applied */
+		if (xlrec->flags & XLOG_HEAP_SUFFIX_FROM_OLD)
 		{
-			UnlockReleaseBuffer(nbuffer);
-			if (BufferIsValid(obuffer))
-				UnlockReleaseBuffer(obuffer);
-			return;
+			Assert(newblk == oldblk);
+			memcpy(&suffixlen, recdata, sizeof(uint16));
+			recdata += sizeof(uint16);
 		}
-	}
-
-newsame:;
 
-	offnum = ItemPointerGetOffsetNumber(&(xlrec->newtid));
-	if (PageGetMaxOffsetNumber(page) + 1 < offnum)
-		elog(PANIC, "heap_update_redo: invalid max offset number");
+		memcpy((char *) &xlhdr, recdata, SizeOfHeapHeaderLen);
+		recdata += SizeOfHeapHeaderLen;
 
-	recdata = (char *) xlrec + SizeOfHeapUpdate;
+		Assert(xlhdr.t_len + prefixlen + suffixlen <= MaxHeapTupleSize);
+		htup = &tbuf.hdr;
+		MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
 
-	if (xlrec->flags & XLOG_HEAP_PREFIX_FROM_OLD)
-	{
-		Assert(samepage);
-		memcpy(&prefixlen, recdata, sizeof(uint16));
-		recdata += sizeof(uint16);
-	}
-	if (xlrec->flags & XLOG_HEAP_SUFFIX_FROM_OLD)
-	{
-		Assert(samepage);
-		memcpy(&suffixlen, recdata, sizeof(uint16));
-		recdata += sizeof(uint16);
-	}
+		/*
+		 * Reconstruct the new tuple using the prefix and/or suffix from the old
+		 * tuple, and the data stored in the WAL record.
+		 */
+		newp = (char *) htup + offsetof(HeapTupleHeaderData, t_bits);
+		if (prefixlen > 0)
+		{
+			int			len;
+
+			/* copy bitmap [+ padding] [+ oid] from WAL record */
+			len = xlhdr.header.t_hoff - offsetof(HeapTupleHeaderData, t_bits);
+			memcpy(newp, recdata, len);
+			recdata += len;
+			newp += len;
+
+			/* copy prefix from old tuple */
+			memcpy(newp, (char *) oldtup.t_data + oldtup.t_data->t_hoff, prefixlen);
+			newp += prefixlen;
+
+			/* copy new tuple data from WAL record */
+			len = xlhdr.t_len - (xlhdr.header.t_hoff - offsetof(HeapTupleHeaderData, t_bits));
+			memcpy(newp, recdata, len);
+			recdata += len;
+			newp += len;
+		}
+		else
+		{
+			/* copy bitmap [+ padding] [+ oid] + data from record, all in one go */
+			memcpy(newp, recdata, xlhdr.t_len);
+			recdata += xlhdr.t_len;
+			newp += xlhdr.t_len;
+		}
+		/* copy suffix from old tuple */
+		if (suffixlen > 0)
+			memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);
 
-	memcpy((char *) &xlhdr, recdata, SizeOfHeapHeaderLen);
-	recdata += SizeOfHeapHeaderLen;
+		newlen = offsetof(HeapTupleHeaderData, t_bits) +xlhdr.t_len + prefixlen + suffixlen;
+		htup->t_infomask2 = xlhdr.header.t_infomask2;
+		htup->t_infomask = xlhdr.header.t_infomask;
+		htup->t_hoff = xlhdr.header.t_hoff;
 
-	Assert(xlhdr.t_len + prefixlen + suffixlen <= MaxHeapTupleSize);
-	htup = &tbuf.hdr;
-	MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
+		HeapTupleHeaderSetXmin(htup, record->xl_xid);
+		HeapTupleHeaderSetCmin(htup, FirstCommandId);
+		HeapTupleHeaderSetXmax(htup, xlrec->new_xmax);
+		/* Make sure there is no forward chain link in t_ctid */
+		htup->t_ctid = xlrec->newtid;
 
-	/*
-	 * Reconstruct the new tuple using the prefix and/or suffix from the old
-	 * tuple, and the data stored in the WAL record.
-	 */
-	newp = (char *) htup + offsetof(HeapTupleHeaderData, t_bits);
-	if (prefixlen > 0)
-	{
-		int			len;
+		offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
+		if (offnum == InvalidOffsetNumber)
+			elog(PANIC, "heap_update_redo: failed to add tuple");
 
-		/* copy bitmap [+ padding] [+ oid] from WAL record */
-		len = xlhdr.header.t_hoff - offsetof(HeapTupleHeaderData, t_bits);
-		memcpy(newp, recdata, len);
-		recdata += len;
-		newp += len;
+		if (xlrec->flags & XLOG_HEAP_NEW_ALL_VISIBLE_CLEARED)
+			PageClearAllVisible(page);
 
-		/* copy prefix from old tuple */
-		memcpy(newp, (char *) oldtup.t_data + oldtup.t_data->t_hoff, prefixlen);
-		newp += prefixlen;
+		freespace = PageGetHeapFreeSpace(page);		/* needed to update FSM below */
 
-		/* copy new tuple data from WAL record */
-		len = xlhdr.t_len - (xlhdr.header.t_hoff - offsetof(HeapTupleHeaderData, t_bits));
-		memcpy(newp, recdata, len);
-		recdata += len;
-		newp += len;
-	}
-	else
-	{
-		/* copy bitmap [+ padding] [+ oid] + data from record, all in one go */
-		memcpy(newp, recdata, xlhdr.t_len);
-		recdata += xlhdr.t_len;
-		newp += xlhdr.t_len;
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(nbuffer);
 	}
-	/* copy suffix from old tuple */
-	if (suffixlen > 0)
-		memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);
-
-	newlen = offsetof(HeapTupleHeaderData, t_bits) +xlhdr.t_len + prefixlen + suffixlen;
-	htup->t_infomask2 = xlhdr.header.t_infomask2;
-	htup->t_infomask = xlhdr.header.t_infomask;
-	htup->t_hoff = xlhdr.header.t_hoff;
-
-	HeapTupleHeaderSetXmin(htup, record->xl_xid);
-	HeapTupleHeaderSetCmin(htup, FirstCommandId);
-	HeapTupleHeaderSetXmax(htup, xlrec->new_xmax);
-	/* Make sure there is no forward chain link in t_ctid */
-	htup->t_ctid = xlrec->newtid;
-
-	offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
-	if (offnum == InvalidOffsetNumber)
-		elog(PANIC, "heap_update_redo: failed to add tuple");
-
-	if (xlrec->flags & XLOG_HEAP_NEW_ALL_VISIBLE_CLEARED)
-		PageClearAllVisible(page);
-
-	freespace = PageGetHeapFreeSpace(page);		/* needed to update FSM below */
-
-	PageSetLSN(page, lsn);
-	MarkBufferDirty(nbuffer);
-	UnlockReleaseBuffer(nbuffer);
-
-	if (BufferIsValid(obuffer) && obuffer != nbuffer)
+	if (BufferIsValid(nbuffer) && nbuffer != obuffer)
+		UnlockReleaseBuffer(nbuffer);
+	if (BufferIsValid(obuffer))
 		UnlockReleaseBuffer(obuffer);
 
 	/*
@@ -8047,11 +7945,11 @@ newsame:;
 	 * as it did before the update, assuming the new tuple is about the same
 	 * size as the old one.
 	 *
-	 * XXX: We don't get here if the page was restored from full page image.
+	 * XXX: Don't do this if the page was restored from a full-page image.
 	 * We don't bother to update the FSM in that case, it doesn't need to be
 	 * totally accurate anyway.
 	 */
-	if (!hot_update && freespace < BLCKSZ / 5)
+	if (new_replay == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
 		XLogRecordPageWithFreeSpace(xlrec->target.node,
 								 ItemPointerGetBlockNumber(&(xlrec->newtid)),
 									freespace);
@@ -8067,53 +7965,41 @@ heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record)
 	ItemId		lp = NULL;
 	HeapTupleHeader htup;
 
-	/* If we have a full-page image, restore it and we're done */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-	{
-		(void) RestoreBackupBlock(lsn, record, 0, false, false);
-		return;
-	}
-
-	buffer = XLogReadBuffer(xlrec->target.node,
-							ItemPointerGetBlockNumber(&(xlrec->target.tid)),
-							false);
-	if (!BufferIsValid(buffer))
-		return;
-	page = (Page) BufferGetPage(buffer);
-
-	if (lsn <= PageGetLSN(page))	/* changes are applied */
+	if (XLogReplayBuffer(0, xlrec->target.node,
+						 ItemPointerGetBlockNumber(&xlrec->target.tid),
+						 &buffer) == BLK_NEEDS_REDO)
 	{
-		UnlockReleaseBuffer(buffer);
-		return;
-	}
+		page = (Page) BufferGetPage(buffer);
 
-	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
-	if (PageGetMaxOffsetNumber(page) >= offnum)
-		lp = PageGetItemId(page, offnum);
+		offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
+		if (PageGetMaxOffsetNumber(page) >= offnum)
+			lp = PageGetItemId(page, offnum);
 
-	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
-		elog(PANIC, "heap_lock_redo: invalid lp");
+		if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
+			elog(PANIC, "heap_lock_redo: invalid lp");
 
-	htup = (HeapTupleHeader) PageGetItem(page, lp);
+		htup = (HeapTupleHeader) PageGetItem(page, lp);
 
-	fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
-							   &htup->t_infomask2);
+		fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
+								   &htup->t_infomask2);
 
-	/*
-	 * Clear relevant update flags, but only if the modified infomask says
-	 * there's no update.
-	 */
-	if (HEAP_XMAX_IS_LOCKED_ONLY(htup->t_infomask))
-	{
-		HeapTupleHeaderClearHotUpdated(htup);
-		/* Make sure there is no forward chain link in t_ctid */
-		htup->t_ctid = xlrec->target.tid;
+		/*
+		 * Clear relevant update flags, but only if the modified infomask says
+		 * there's no update.
+		 */
+		if (HEAP_XMAX_IS_LOCKED_ONLY(htup->t_infomask))
+		{
+			HeapTupleHeaderClearHotUpdated(htup);
+			/* Make sure there is no forward chain link in t_ctid */
+			htup->t_ctid = xlrec->target.tid;
+		}
+		HeapTupleHeaderSetXmax(htup, xlrec->locking_xid);
+		HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
 	}
-	HeapTupleHeaderSetXmax(htup, xlrec->locking_xid);
-	HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
-	PageSetLSN(page, lsn);
-	MarkBufferDirty(buffer);
-	UnlockReleaseBuffer(buffer);
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 }
 
 static void
@@ -8127,42 +8013,27 @@ heap_xlog_lock_updated(XLogRecPtr lsn, XLogRecord *record)
 	ItemId		lp = NULL;
 	HeapTupleHeader htup;
 
-	/* If we have a full-page image, restore it and we're done */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-	{
-		(void) RestoreBackupBlock(lsn, record, 0, false, false);
-		return;
-	}
-
-	buffer = XLogReadBuffer(xlrec->target.node,
-							ItemPointerGetBlockNumber(&(xlrec->target.tid)),
-							false);
-	if (!BufferIsValid(buffer))
-		return;
-	page = (Page) BufferGetPage(buffer);
-
-	if (lsn <= PageGetLSN(page))	/* changes are applied */
+	if (XLogReplayBuffer(0, xlrec->target.node, ItemPointerGetBlockNumber(&(xlrec->target.tid)), &buffer) == BLK_NEEDS_REDO)
 	{
-		UnlockReleaseBuffer(buffer);
-		return;
-	}
-
-	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
-	if (PageGetMaxOffsetNumber(page) >= offnum)
-		lp = PageGetItemId(page, offnum);
+		page = BufferGetPage(buffer);
+		offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
+		if (PageGetMaxOffsetNumber(page) >= offnum)
+			lp = PageGetItemId(page, offnum);
 
-	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
-		elog(PANIC, "heap_xlog_lock_updated: invalid lp");
+		if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
+			elog(PANIC, "heap_xlog_lock_updated: invalid lp");
 
-	htup = (HeapTupleHeader) PageGetItem(page, lp);
+		htup = (HeapTupleHeader) PageGetItem(page, lp);
 
-	fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
-							   &htup->t_infomask2);
-	HeapTupleHeaderSetXmax(htup, xlrec->xmax);
+		fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
+								   &htup->t_infomask2);
+		HeapTupleHeaderSetXmax(htup, xlrec->xmax);
 
-	PageSetLSN(page, lsn);
-	MarkBufferDirty(buffer);
-	UnlockReleaseBuffer(buffer);
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
+	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 }
 
 static void
@@ -8177,47 +8048,35 @@ heap_xlog_inplace(XLogRecPtr lsn, XLogRecord *record)
 	uint32		oldlen;
 	uint32		newlen;
 
-	/* If we have a full-page image, restore it and we're done */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
+	if (XLogReplayBuffer(0, xlrec->target.node,
+						 ItemPointerGetBlockNumber(&(xlrec->target.tid)),
+						 &buffer) == BLK_NEEDS_REDO)
 	{
-		(void) RestoreBackupBlock(lsn, record, 0, false, false);
-		return;
-	}
+		page = BufferGetPage(buffer);
 
-	buffer = XLogReadBuffer(xlrec->target.node,
-							ItemPointerGetBlockNumber(&(xlrec->target.tid)),
-							false);
-	if (!BufferIsValid(buffer))
-		return;
-	page = (Page) BufferGetPage(buffer);
+		offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
+		if (PageGetMaxOffsetNumber(page) >= offnum)
+			lp = PageGetItemId(page, offnum);
 
-	if (lsn <= PageGetLSN(page))	/* changes are applied */
-	{
-		UnlockReleaseBuffer(buffer);
-		return;
-	}
+		if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
+			elog(PANIC, "heap_inplace_redo: invalid lp");
 
-	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
-	if (PageGetMaxOffsetNumber(page) >= offnum)
-		lp = PageGetItemId(page, offnum);
+		htup = (HeapTupleHeader) PageGetItem(page, lp);
 
-	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
-		elog(PANIC, "heap_inplace_redo: invalid lp");
+		oldlen = ItemIdGetLength(lp) - htup->t_hoff;
+		newlen = record->xl_len - SizeOfHeapInplace;
+		if (oldlen != newlen)
+			elog(PANIC, "heap_inplace_redo: wrong tuple length");
 
-	htup = (HeapTupleHeader) PageGetItem(page, lp);
-
-	oldlen = ItemIdGetLength(lp) - htup->t_hoff;
-	newlen = record->xl_len - SizeOfHeapInplace;
-	if (oldlen != newlen)
-		elog(PANIC, "heap_inplace_redo: wrong tuple length");
-
-	memcpy((char *) htup + htup->t_hoff,
-		   (char *) xlrec + SizeOfHeapInplace,
-		   newlen);
+		memcpy((char *) htup + htup->t_hoff,
+			   (char *) xlrec + SizeOfHeapInplace,
+			   newlen);
 
-	PageSetLSN(page, lsn);
-	MarkBufferDirty(buffer);
-	UnlockReleaseBuffer(buffer);
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
+	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 }
 
 void
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index 5f9fc49..56f795e 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -116,27 +116,24 @@ _bt_restore_meta(RelFileNode rnode, XLogRecPtr lsn,
  */
 static void
 _bt_clear_incomplete_split(XLogRecPtr lsn, XLogRecord *record,
+						   int block_index,
 						   RelFileNode rnode, BlockNumber cblock)
 {
 	Buffer		buf;
 
-	buf = XLogReadBuffer(rnode, cblock, false);
-	if (BufferIsValid(buf))
+	if (XLogReplayBuffer(block_index, rnode, cblock, &buf) == BLK_NEEDS_REDO)
 	{
 		Page		page = (Page) BufferGetPage(buf);
+		BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page);
 
-		if (lsn > PageGetLSN(page))
-		{
-			BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page);
+		Assert((pageop->btpo_flags & BTP_INCOMPLETE_SPLIT) != 0);
+		pageop->btpo_flags &= ~BTP_INCOMPLETE_SPLIT;
 
-			Assert((pageop->btpo_flags & BTP_INCOMPLETE_SPLIT) != 0);
-			pageop->btpo_flags &= ~BTP_INCOMPLETE_SPLIT;
-
-			PageSetLSN(page, lsn);
-			MarkBufferDirty(buf);
-		}
-		UnlockReleaseBuffer(buf);
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buf);
 	}
+	if (BufferIsValid(buf))
+		UnlockReleaseBuffer(buf);
 }
 
 static void
@@ -184,39 +181,28 @@ btree_xlog_insert(bool isleaf, bool ismeta,
 	 */
 	if (!isleaf)
 	{
-		if (record->xl_info & XLR_BKP_BLOCK(0))
-			(void) RestoreBackupBlock(lsn, record, 0, false, false);
-		else
-			_bt_clear_incomplete_split(lsn, record, xlrec->target.node, cblkno);
+		_bt_clear_incomplete_split(lsn, record, 0, xlrec->target.node, cblkno);
 		main_blk_index = 1;
 	}
 	else
 		main_blk_index = 0;
 
-	if (record->xl_info & XLR_BKP_BLOCK(main_blk_index))
-		(void) RestoreBackupBlock(lsn, record, main_blk_index, false, false);
-	else
+	if (XLogReplayBuffer(main_blk_index, xlrec->target.node,
+						 ItemPointerGetBlockNumber(&(xlrec->target.tid)),
+						 &buffer) == BLK_NEEDS_REDO)
 	{
-		buffer = XLogReadBuffer(xlrec->target.node,
-							 ItemPointerGetBlockNumber(&(xlrec->target.tid)),
-								false);
-		if (BufferIsValid(buffer))
-		{
-			page = (Page) BufferGetPage(buffer);
+		page = BufferGetPage(buffer);
 
-			if (lsn > PageGetLSN(page))
-			{
-				if (PageAddItem(page, (Item) datapos, datalen,
-							ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
-								false, false) == InvalidOffsetNumber)
-					elog(PANIC, "btree_insert_redo: failed to add item");
+		if (PageAddItem(page, (Item) datapos, datalen,
+						ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+						false, false) == InvalidOffsetNumber)
+			elog(PANIC, "btree_insert_redo: failed to add item");
 
-				PageSetLSN(page, lsn);
-				MarkBufferDirty(buffer);
-			}
-			UnlockReleaseBuffer(buffer);
-		}
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
 	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 
 	/*
 	 * Note: in normal operation, we'd update the metapage while still holding
@@ -299,12 +285,7 @@ btree_xlog_split(bool onleft, bool isroot,
 	 * before locking the other pages)
 	 */
 	if (!isleaf)
-	{
-		if (record->xl_info & XLR_BKP_BLOCK(1))
-			(void) RestoreBackupBlock(lsn, record, 1, false, false);
-		else
-			_bt_clear_incomplete_split(lsn, record, xlrec->node, cblkno);
-	}
+		_bt_clear_incomplete_split(lsn, record, 1, xlrec->node, cblkno);
 
 	/* Reconstruct right (new) sibling page from scratch */
 	rbuf = XLogReadBuffer(xlrec->node, xlrec->rightsib, true);
@@ -340,87 +321,76 @@ btree_xlog_split(bool onleft, bool isroot,
 	/* don't release the buffer yet; we touch right page's first item below */
 
 	/* Now reconstruct left (original) sibling page */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-		lbuf = RestoreBackupBlock(lsn, record, 0, false, true);
-	else
+	if (XLogReplayBuffer(0, xlrec->node, xlrec->leftsib, &lbuf) == BLK_NEEDS_REDO)
 	{
-		lbuf = XLogReadBuffer(xlrec->node, xlrec->leftsib, false);
-
-		if (BufferIsValid(lbuf))
+		/*
+		 * To retain the same physical order of the tuples that they had,
+		 * we initialize a temporary empty page for the left page and add
+		 * all the items to that in item number order.  This mirrors how
+		 * _bt_split() works.  It's not strictly required to retain the
+		 * same physical order, as long as the items are in the correct
+		 * item number order, but it helps debugging.  See also
+		 * _bt_restore_page(), which does the same for the right page.
+		 */
+		Page		lpage = (Page) BufferGetPage(lbuf);
+		BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
+		OffsetNumber off;
+		Page		newlpage;
+		OffsetNumber leftoff;
+
+		newlpage = PageGetTempPageCopySpecial(lpage);
+
+		/* Set high key */
+		leftoff = P_HIKEY;
+		if (PageAddItem(newlpage, left_hikey, left_hikeysz,
+						P_HIKEY, false, false) == InvalidOffsetNumber)
+			elog(PANIC, "failed to add high key to left page after split");
+		leftoff = OffsetNumberNext(leftoff);
+
+		for (off = P_FIRSTDATAKEY(lopaque); off < xlrec->firstright; off++)
 		{
-			/*
-			 * To retain the same physical order of the tuples that they had,
-			 * we initialize a temporary empty page for the left page and add
-			 * all the items to that in item number order.  This mirrors how
-			 * _bt_split() works.  It's not strictly required to retain the
-			 * same physical order, as long as the items are in the correct
-			 * item number order, but it helps debugging.  See also
-			 * _bt_restore_page(), which does the same for the right page.
-			 */
-			Page		lpage = (Page) BufferGetPage(lbuf);
-			BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
+			ItemId		itemid;
+			Size		itemsz;
+			Item		item;
 
-			if (lsn > PageGetLSN(lpage))
+			/* add the new item if it was inserted on left page */
+			if (onleft && off == newitemoff)
 			{
-				OffsetNumber off;
-				Page		newlpage;
-				OffsetNumber leftoff;
-
-				newlpage = PageGetTempPageCopySpecial(lpage);
-
-				/* Set high key */
-				leftoff = P_HIKEY;
-				if (PageAddItem(newlpage, left_hikey, left_hikeysz,
-								P_HIKEY, false, false) == InvalidOffsetNumber)
-					elog(PANIC, "failed to add high key to left page after split");
+				if (PageAddItem(newlpage, newitem, newitemsz, leftoff,
+								false, false) == InvalidOffsetNumber)
+					elog(ERROR, "failed to add new item to left page after split");
 				leftoff = OffsetNumberNext(leftoff);
-
-				for (off = P_FIRSTDATAKEY(lopaque); off < xlrec->firstright; off++)
-				{
-					ItemId		itemid;
-					Size		itemsz;
-					Item		item;
-
-					/* add the new item if it was inserted on left page */
-					if (onleft && off == newitemoff)
-					{
-						if (PageAddItem(newlpage, newitem, newitemsz, leftoff,
-										false, false) == InvalidOffsetNumber)
-							elog(ERROR, "failed to add new item to left page after split");
-						leftoff = OffsetNumberNext(leftoff);
-					}
-
-					itemid = PageGetItemId(lpage, off);
-					itemsz = ItemIdGetLength(itemid);
-					item = PageGetItem(lpage, itemid);
-					if (PageAddItem(newlpage, item, itemsz, leftoff,
-									false, false) == InvalidOffsetNumber)
-						elog(ERROR, "failed to add old item to left page after split");
-					leftoff = OffsetNumberNext(leftoff);
-				}
-
-				/* cope with possibility that newitem goes at the end */
-				if (onleft && off == newitemoff)
-				{
-					if (PageAddItem(newlpage, newitem, newitemsz, leftoff,
-									false, false) == InvalidOffsetNumber)
-						elog(ERROR, "failed to add new item to left page after split");
-					leftoff = OffsetNumberNext(leftoff);
-				}
-
-				PageRestoreTempPage(newlpage, lpage);
-
-				/* Fix opaque fields */
-				lopaque->btpo_flags = BTP_INCOMPLETE_SPLIT;
-				if (isleaf)
-					lopaque->btpo_flags |= BTP_LEAF;
-				lopaque->btpo_next = xlrec->rightsib;
-				lopaque->btpo_cycleid = 0;
-
-				PageSetLSN(lpage, lsn);
-				MarkBufferDirty(lbuf);
 			}
+
+			itemid = PageGetItemId(lpage, off);
+			itemsz = ItemIdGetLength(itemid);
+			item = PageGetItem(lpage, itemid);
+			if (PageAddItem(newlpage, item, itemsz, leftoff,
+							false, false) == InvalidOffsetNumber)
+				elog(ERROR, "failed to add old item to left page after split");
+			leftoff = OffsetNumberNext(leftoff);
 		}
+
+		/* cope with possibility that newitem goes at the end */
+		if (onleft && off == newitemoff)
+		{
+			if (PageAddItem(newlpage, newitem, newitemsz, leftoff,
+							false, false) == InvalidOffsetNumber)
+				elog(ERROR, "failed to add new item to left page after split");
+			leftoff = OffsetNumberNext(leftoff);
+		}
+
+		PageRestoreTempPage(newlpage, lpage);
+
+		/* Fix opaque fields */
+		lopaque->btpo_flags = BTP_INCOMPLETE_SPLIT;
+		if (isleaf)
+			lopaque->btpo_flags |= BTP_LEAF;
+		lopaque->btpo_next = xlrec->rightsib;
+		lopaque->btpo_cycleid = 0;
+
+		PageSetLSN(lpage, lsn);
+		MarkBufferDirty(lbuf);
 	}
 
 	/* We no longer need the buffers */
@@ -443,31 +413,20 @@ btree_xlog_split(bool onleft, bool isroot,
 		 * whether this was a leaf or internal page.
 		 */
 		int			rnext_index = isleaf ? 1 : 2;
+		Buffer		buffer;
 
-		if (record->xl_info & XLR_BKP_BLOCK(rnext_index))
-			(void) RestoreBackupBlock(lsn, record, rnext_index, false, false);
-		else
+		if (XLogReplayBuffer(rnext_index, xlrec->node, xlrec->rnext, &buffer) == BLK_NEEDS_REDO)
 		{
-			Buffer		buffer;
-
-			buffer = XLogReadBuffer(xlrec->node, xlrec->rnext, false);
-
-			if (BufferIsValid(buffer))
-			{
-				Page		page = (Page) BufferGetPage(buffer);
-
-				if (lsn > PageGetLSN(page))
-				{
-					BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page);
+			Page		page = (Page) BufferGetPage(buffer);
+			BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page);
 
-					pageop->btpo_prev = xlrec->rightsib;
+			pageop->btpo_prev = xlrec->rightsib;
 
-					PageSetLSN(page, lsn);
-					MarkBufferDirty(buffer);
-				}
-				UnlockReleaseBuffer(buffer);
-			}
+			PageSetLSN(page, lsn);
+			MarkBufferDirty(buffer);
 		}
+		if (BufferIsValid(buffer))
+			UnlockReleaseBuffer(buffer);
 	}
 }
 
@@ -530,53 +489,38 @@ btree_xlog_vacuum(XLogRecPtr lsn, XLogRecord *record)
 	}
 
 	/*
-	 * If we have a full-page image, restore it (using a cleanup lock) and
-	 * we're done.
-	 */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-	{
-		(void) RestoreBackupBlock(lsn, record, 0, true, false);
-		return;
-	}
-
-	/*
 	 * Like in btvacuumpage(), we need to take a cleanup lock on every leaf
 	 * page. See nbtree/README for details.
 	 */
-	buffer = XLogReadBufferExtended(xlrec->node, MAIN_FORKNUM, xlrec->block, RBM_NORMAL);
-	if (!BufferIsValid(buffer))
-		return;
-	LockBufferForCleanup(buffer);
-	page = (Page) BufferGetPage(buffer);
-
-	if (lsn <= PageGetLSN(page))
+	if (XLogReplayBufferExtended(0, xlrec->node, MAIN_FORKNUM, xlrec->block,
+								 RBM_NORMAL, true, &buffer) == BLK_NEEDS_REDO)
 	{
-		UnlockReleaseBuffer(buffer);
-		return;
-	}
+		page = (Page) BufferGetPage(buffer);
 
-	if (record->xl_len > SizeOfBtreeVacuum)
-	{
-		OffsetNumber *unused;
-		OffsetNumber *unend;
+		if (record->xl_len > SizeOfBtreeVacuum)
+		{
+			OffsetNumber *unused;
+			OffsetNumber *unend;
 
-		unused = (OffsetNumber *) ((char *) xlrec + SizeOfBtreeVacuum);
-		unend = (OffsetNumber *) ((char *) xlrec + record->xl_len);
+			unused = (OffsetNumber *) ((char *) xlrec + SizeOfBtreeVacuum);
+			unend = (OffsetNumber *) ((char *) xlrec + record->xl_len);
 
-		if ((unend - unused) > 0)
-			PageIndexMultiDelete(page, unused, unend - unused);
-	}
+			if ((unend - unused) > 0)
+				PageIndexMultiDelete(page, unused, unend - unused);
+		}
 
-	/*
-	 * Mark the page as not containing any LP_DEAD items --- see comments in
-	 * _bt_delitems_vacuum().
-	 */
-	opaque = (BTPageOpaque) PageGetSpecialPointer(page);
-	opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
+		/*
+		 * Mark the page as not containing any LP_DEAD items --- see comments in
+		 * _bt_delitems_vacuum().
+		 */
+		opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+		opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
 
-	PageSetLSN(page, lsn);
-	MarkBufferDirty(buffer);
-	UnlockReleaseBuffer(buffer);
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
+	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 }
 
 /*
@@ -752,47 +696,35 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
 		ResolveRecoveryConflictWithSnapshot(latestRemovedXid, xlrec->node);
 	}
 
-	/* If we have a full-page image, restore it and we're done */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-	{
-		(void) RestoreBackupBlock(lsn, record, 0, false, false);
-		return;
-	}
-
 	/*
 	 * We don't need to take a cleanup lock to apply these changes. See
 	 * nbtree/README for details.
 	 */
-	buffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
-	if (!BufferIsValid(buffer))
-		return;
-	page = (Page) BufferGetPage(buffer);
-
-	if (lsn <= PageGetLSN(page))
+	if (XLogReplayBuffer(0, xlrec->node, xlrec->block, &buffer) == BLK_NEEDS_REDO)
 	{
-		UnlockReleaseBuffer(buffer);
-		return;
-	}
+		page = (Page) BufferGetPage(buffer);
 
-	if (record->xl_len > SizeOfBtreeDelete)
-	{
-		OffsetNumber *unused;
+		if (record->xl_len > SizeOfBtreeDelete)
+		{
+			OffsetNumber *unused;
 
-		unused = (OffsetNumber *) ((char *) xlrec + SizeOfBtreeDelete);
+			unused = (OffsetNumber *) ((char *) xlrec + SizeOfBtreeDelete);
 
-		PageIndexMultiDelete(page, unused, xlrec->nitems);
-	}
+			PageIndexMultiDelete(page, unused, xlrec->nitems);
+		}
 
-	/*
-	 * Mark the page as not containing any LP_DEAD items --- see comments in
-	 * _bt_delitems_delete().
-	 */
-	opaque = (BTPageOpaque) PageGetSpecialPointer(page);
-	opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
+		/*
+		 * Mark the page as not containing any LP_DEAD items --- see comments
+		 * in _bt_delitems_delete().
+		 */
+		opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+		opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
 
-	PageSetLSN(page, lsn);
-	MarkBufferDirty(buffer);
-	UnlockReleaseBuffer(buffer);
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
+	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 }
 
 static void
@@ -816,42 +748,35 @@ btree_xlog_mark_page_halfdead(uint8 info, XLogRecPtr lsn, XLogRecord *record)
 	 */
 
 	/* parent page */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-		(void) RestoreBackupBlock(lsn, record, 0, false, false);
-	else
+	if (XLogReplayBuffer(0, xlrec->target.node, parent, &buffer) == BLK_NEEDS_REDO)
 	{
-		buffer = XLogReadBuffer(xlrec->target.node, parent, false);
-		if (BufferIsValid(buffer))
-		{
-			page = (Page) BufferGetPage(buffer);
-			pageop = (BTPageOpaque) PageGetSpecialPointer(page);
-			if (lsn > PageGetLSN(page))
-			{
-				OffsetNumber poffset;
-				ItemId		itemid;
-				IndexTuple	itup;
-				OffsetNumber nextoffset;
-				BlockNumber rightsib;
-
-				poffset = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
-
-				nextoffset = OffsetNumberNext(poffset);
-				itemid = PageGetItemId(page, nextoffset);
-				itup = (IndexTuple) PageGetItem(page, itemid);
-				rightsib = ItemPointerGetBlockNumber(&itup->t_tid);
-
-				itemid = PageGetItemId(page, poffset);
-				itup = (IndexTuple) PageGetItem(page, itemid);
-				ItemPointerSet(&(itup->t_tid), rightsib, P_HIKEY);
-				nextoffset = OffsetNumberNext(poffset);
-				PageIndexTupleDelete(page, nextoffset);
-
-				PageSetLSN(page, lsn);
-				MarkBufferDirty(buffer);
-			}
-			UnlockReleaseBuffer(buffer);
-		}
+		OffsetNumber poffset;
+		ItemId		itemid;
+		IndexTuple	itup;
+		OffsetNumber nextoffset;
+		BlockNumber rightsib;
+
+		page = (Page) BufferGetPage(buffer);
+		pageop = (BTPageOpaque) PageGetSpecialPointer(page);
+
+		poffset = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
+
+		nextoffset = OffsetNumberNext(poffset);
+		itemid = PageGetItemId(page, nextoffset);
+		itup = (IndexTuple) PageGetItem(page, itemid);
+		rightsib = ItemPointerGetBlockNumber(&itup->t_tid);
+
+		itemid = PageGetItemId(page, poffset);
+		itup = (IndexTuple) PageGetItem(page, itemid);
+		ItemPointerSet(&(itup->t_tid), rightsib, P_HIKEY);
+		nextoffset = OffsetNumberNext(poffset);
+		PageIndexTupleDelete(page, nextoffset);
+
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
 	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 
 	/* Rewrite the leaf page as a halfdead page */
 	buffer = XLogReadBuffer(xlrec->target.node, xlrec->leafblk, true);
@@ -911,56 +836,32 @@ btree_xlog_unlink_page(uint8 info, XLogRecPtr lsn, XLogRecord *record)
 	 */
 
 	/* Fix left-link of right sibling */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-		(void) RestoreBackupBlock(lsn, record, 0, false, false);
-	else
+	if (XLogReplayBuffer(0, xlrec->node, rightsib, &buffer) == BLK_NEEDS_REDO)
 	{
-		buffer = XLogReadBuffer(xlrec->node, rightsib, false);
-		if (BufferIsValid(buffer))
-		{
-			page = (Page) BufferGetPage(buffer);
-			if (lsn <= PageGetLSN(page))
-			{
-				UnlockReleaseBuffer(buffer);
-			}
-			else
-			{
-				pageop = (BTPageOpaque) PageGetSpecialPointer(page);
-				pageop->btpo_prev = leftsib;
+		page = (Page) BufferGetPage(buffer);
+		pageop = (BTPageOpaque) PageGetSpecialPointer(page);
+		pageop->btpo_prev = leftsib;
 
-				PageSetLSN(page, lsn);
-				MarkBufferDirty(buffer);
-				UnlockReleaseBuffer(buffer);
-			}
-		}
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
 	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 
 	/* Fix right-link of left sibling, if any */
-	if (record->xl_info & XLR_BKP_BLOCK(1))
-		(void) RestoreBackupBlock(lsn, record, 1, false, false);
-	else
+	if (leftsib != P_NONE)
 	{
-		if (leftsib != P_NONE)
+		if (XLogReplayBuffer(1, xlrec->node, leftsib, &buffer) == BLK_NEEDS_REDO)
 		{
-			buffer = XLogReadBuffer(xlrec->node, leftsib, false);
-			if (BufferIsValid(buffer))
-			{
-				page = (Page) BufferGetPage(buffer);
-				if (lsn <= PageGetLSN(page))
-				{
-					UnlockReleaseBuffer(buffer);
-				}
-				else
-				{
-					pageop = (BTPageOpaque) PageGetSpecialPointer(page);
-					pageop->btpo_next = rightsib;
-
-					PageSetLSN(page, lsn);
-					MarkBufferDirty(buffer);
-					UnlockReleaseBuffer(buffer);
-				}
-			}
+			page = (Page) BufferGetPage(buffer);
+			pageop = (BTPageOpaque) PageGetSpecialPointer(page);
+			pageop->btpo_next = rightsib;
+
+			PageSetLSN(page, lsn);
+			MarkBufferDirty(buffer);
 		}
+		if (BufferIsValid(buffer))
+			UnlockReleaseBuffer(buffer);
 	}
 
 	/* Rewrite target page as empty deleted page */
@@ -1071,10 +972,7 @@ btree_xlog_newroot(XLogRecPtr lsn, XLogRecord *record)
 		Assert(ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY);
 
 		/* Clear the incomplete-split flag in left child */
-		if (record->xl_info & XLR_BKP_BLOCK(0))
-			(void) RestoreBackupBlock(lsn, record, 0, false, false);
-		else
-			_bt_clear_incomplete_split(lsn, record, xlrec->node, cblkno);
+		_bt_clear_incomplete_split(lsn, record, 0, xlrec->node, cblkno);
 	}
 
 	PageSetLSN(page, lsn);
diff --git a/src/backend/access/spgist/spgxlog.c b/src/backend/access/spgist/spgxlog.c
index e4f7fbb..4a47933 100644
--- a/src/backend/access/spgist/spgxlog.c
+++ b/src/backend/access/spgist/spgxlog.c
@@ -113,6 +113,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
 	SpGistLeafTupleData leafTupleHdr;
 	Buffer		buffer;
 	Page		page;
+	XLogReplayResult rc;
 
 	ptr += sizeof(spgxlogAddLeaf);
 	leafTuple = ptr;
@@ -124,82 +125,74 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
 	 * simultaneously; but in WAL replay it should be safe to update the leaf
 	 * page before updating the parent.
 	 */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-		(void) RestoreBackupBlock(lsn, record, 0, false, false);
+	if (xldata->newPage)
+	{
+		buffer = XLogReadBuffer(xldata->node, xldata->blknoLeaf, true);
+		SpGistInitBuffer(buffer,
+					 SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
+		rc = BLK_NEEDS_REDO;
+	}
 	else
+		rc = XLogReplayBuffer(0, xldata->node, xldata->blknoLeaf, &buffer);
+
+	if (rc == BLK_NEEDS_REDO)
 	{
-		buffer = XLogReadBuffer(xldata->node, xldata->blknoLeaf,
-								xldata->newPage);
-		if (BufferIsValid(buffer))
-		{
-			page = BufferGetPage(buffer);
+		page = BufferGetPage(buffer);
 
-			if (xldata->newPage)
-				SpGistInitBuffer(buffer,
-					 SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
+		/* insert new tuple */
+		if (xldata->offnumLeaf != xldata->offnumHeadLeaf)
+		{
+			/* normal cases, tuple was added by SpGistPageAddNewItem */
+			addOrReplaceTuple(page, (Item) leafTuple, leafTupleHdr.size,
+							  xldata->offnumLeaf);
 
-			if (lsn > PageGetLSN(page))
+			/* update head tuple's chain link if needed */
+			if (xldata->offnumHeadLeaf != InvalidOffsetNumber)
 			{
-				/* insert new tuple */
-				if (xldata->offnumLeaf != xldata->offnumHeadLeaf)
-				{
-					/* normal cases, tuple was added by SpGistPageAddNewItem */
-					addOrReplaceTuple(page, (Item) leafTuple, leafTupleHdr.size,
-									  xldata->offnumLeaf);
-
-					/* update head tuple's chain link if needed */
-					if (xldata->offnumHeadLeaf != InvalidOffsetNumber)
-					{
-						SpGistLeafTuple head;
-
-						head = (SpGistLeafTuple) PageGetItem(page,
-								PageGetItemId(page, xldata->offnumHeadLeaf));
-						Assert(head->nextOffset == leafTupleHdr.nextOffset);
-						head->nextOffset = xldata->offnumLeaf;
-					}
-				}
-				else
-				{
-					/* replacing a DEAD tuple */
-					PageIndexTupleDelete(page, xldata->offnumLeaf);
-					if (PageAddItem(page,
-									(Item) leafTuple, leafTupleHdr.size,
-					 xldata->offnumLeaf, false, false) != xldata->offnumLeaf)
-						elog(ERROR, "failed to add item of size %u to SPGiST index page",
-							 leafTupleHdr.size);
-				}
+				SpGistLeafTuple head;
 
-				PageSetLSN(page, lsn);
-				MarkBufferDirty(buffer);
+				head = (SpGistLeafTuple) PageGetItem(page,
+								PageGetItemId(page, xldata->offnumHeadLeaf));
+				Assert(head->nextOffset == leafTupleHdr.nextOffset);
+				head->nextOffset = xldata->offnumLeaf;
 			}
-			UnlockReleaseBuffer(buffer);
 		}
+		else
+		{
+			/* replacing a DEAD tuple */
+			PageIndexTupleDelete(page, xldata->offnumLeaf);
+			if (PageAddItem(page, (Item) leafTuple, leafTupleHdr.size,
+					 xldata->offnumLeaf, false, false) != xldata->offnumLeaf)
+				elog(ERROR, "failed to add item of size %u to SPGiST index page",
+					 leafTupleHdr.size);
+		}
+
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
 	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 
 	/* update parent downlink if necessary */
-	if (record->xl_info & XLR_BKP_BLOCK(1))
-		(void) RestoreBackupBlock(lsn, record, 1, false, false);
-	else if (xldata->blknoParent != InvalidBlockNumber)
+	if (xldata->blknoParent != InvalidBlockNumber)
 	{
-		buffer = XLogReadBuffer(xldata->node, xldata->blknoParent, false);
-		if (BufferIsValid(buffer))
+		if (XLogReplayBuffer(1, xldata->node, xldata->blknoParent, &buffer) == BLK_NEEDS_REDO)
 		{
+			SpGistInnerTuple tuple;
+
 			page = BufferGetPage(buffer);
-			if (lsn > PageGetLSN(page))
-			{
-				SpGistInnerTuple tuple;
 
-				tuple = (SpGistInnerTuple) PageGetItem(page,
+			tuple = (SpGistInnerTuple) PageGetItem(page,
 								  PageGetItemId(page, xldata->offnumParent));
 
-				spgUpdateNodeLink(tuple, xldata->nodeI,
-								  xldata->blknoLeaf, xldata->offnumLeaf);
+			spgUpdateNodeLink(tuple, xldata->nodeI,
+							  xldata->blknoLeaf, xldata->offnumLeaf);
 
-				PageSetLSN(page, lsn);
-				MarkBufferDirty(buffer);
-			}
-			UnlockReleaseBuffer(buffer);
+			PageSetLSN(page, lsn);
+			MarkBufferDirty(buffer);
 		}
+		if (BufferIsValid(buffer))
+			UnlockReleaseBuffer(buffer);
 	}
 }
 
@@ -214,6 +207,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
 	int			nInsert;
 	Buffer		buffer;
 	Page		page;
+	XLogReplayResult rc;
 
 	fillFakeState(&state, xldata->stateSrc);
 
@@ -234,98 +228,78 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
 	 */
 
 	/* Insert tuples on the dest page (do first, so redirect is valid) */
-	if (record->xl_info & XLR_BKP_BLOCK(1))
-		(void) RestoreBackupBlock(lsn, record, 1, false, false);
-	else
+	if (xldata->newPage)
 	{
-		buffer = XLogReadBuffer(xldata->node, xldata->blknoDst,
-								xldata->newPage);
-		if (BufferIsValid(buffer))
-		{
-			page = BufferGetPage(buffer);
-
-			if (xldata->newPage)
-				SpGistInitBuffer(buffer,
+		buffer = XLogReadBuffer(xldata->node, xldata->blknoDst, true);
+		SpGistInitBuffer(buffer,
 					 SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
+		rc = BLK_NEEDS_REDO;
+	}
+	else
+		rc = XLogReplayBuffer(1, xldata->node, xldata->blknoDst, &buffer);
+	if (rc == BLK_NEEDS_REDO)
+	{
+		int			i;
 
-			if (lsn > PageGetLSN(page))
-			{
-				int			i;
-
-				for (i = 0; i < nInsert; i++)
-				{
-					char	   *leafTuple;
-					SpGistLeafTupleData leafTupleHdr;
-
-					/*
-					 * the tuples are not aligned, so must copy to access
-					 * the size field.
-					 */
-					leafTuple = ptr;
-					memcpy(&leafTupleHdr, leafTuple,
-						   sizeof(SpGistLeafTupleData));
-
-					addOrReplaceTuple(page, (Item) leafTuple,
-									  leafTupleHdr.size, toInsert[i]);
-					ptr += leafTupleHdr.size;
-				}
+		page = BufferGetPage(buffer);
 
-				PageSetLSN(page, lsn);
-				MarkBufferDirty(buffer);
-			}
-			UnlockReleaseBuffer(buffer);
+		for (i = 0; i < nInsert; i++)
+		{
+			char	   *leafTuple;
+			SpGistLeafTupleData leafTupleHdr;
+
+			/*
+			 * the tuples are not aligned, so must copy to access
+			 * the size field.
+			 */
+			leafTuple = ptr;
+			memcpy(&leafTupleHdr, leafTuple, sizeof(SpGistLeafTupleData));
+
+			addOrReplaceTuple(page, (Item) leafTuple,
+							  leafTupleHdr.size, toInsert[i]);
+			ptr += leafTupleHdr.size;
 		}
+
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
 	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 
 	/* Delete tuples from the source page, inserting a redirection pointer */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-		(void) RestoreBackupBlock(lsn, record, 0, false, false);
-	else
+	if (XLogReplayBuffer(0, xldata->node, xldata->blknoSrc, &buffer) == BLK_NEEDS_REDO)
 	{
-		buffer = XLogReadBuffer(xldata->node, xldata->blknoSrc, false);
-		if (BufferIsValid(buffer))
-		{
-			page = BufferGetPage(buffer);
-			if (lsn > PageGetLSN(page))
-			{
-				spgPageIndexMultiDelete(&state, page, toDelete, xldata->nMoves,
+		page = BufferGetPage(buffer);
+		spgPageIndexMultiDelete(&state, page, toDelete, xldata->nMoves,
 						state.isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
-										SPGIST_PLACEHOLDER,
-										xldata->blknoDst,
-										toInsert[nInsert - 1]);
+								SPGIST_PLACEHOLDER,
+								xldata->blknoDst,
+								toInsert[nInsert - 1]);
 
-				PageSetLSN(page, lsn);
-				MarkBufferDirty(buffer);
-			}
-			UnlockReleaseBuffer(buffer);
-		}
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
 	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 
 	/* And update the parent downlink */
-	if (record->xl_info & XLR_BKP_BLOCK(2))
-		(void) RestoreBackupBlock(lsn, record, 2, false, false);
-	else
+	if (XLogReplayBuffer(2, xldata->node, xldata->blknoParent, &buffer) == BLK_NEEDS_REDO)
 	{
-		buffer = XLogReadBuffer(xldata->node, xldata->blknoParent, false);
-		if (BufferIsValid(buffer))
-		{
-			page = BufferGetPage(buffer);
-			if (lsn > PageGetLSN(page))
-			{
-				SpGistInnerTuple tuple;
+		SpGistInnerTuple tuple;
+
+		page = BufferGetPage(buffer);
 
-				tuple = (SpGistInnerTuple) PageGetItem(page,
+		tuple = (SpGistInnerTuple) PageGetItem(page,
 								  PageGetItemId(page, xldata->offnumParent));
 
-				spgUpdateNodeLink(tuple, xldata->nodeI,
-								  xldata->blknoDst, toInsert[nInsert - 1]);
+		spgUpdateNodeLink(tuple, xldata->nodeI,
+						  xldata->blknoDst, toInsert[nInsert - 1]);
 
-				PageSetLSN(page, lsn);
-				MarkBufferDirty(buffer);
-			}
-			UnlockReleaseBuffer(buffer);
-		}
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
 	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 }
 
 static void
@@ -339,6 +313,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
 	Buffer		buffer;
 	Page		page;
 	int			bbi;
+	XLogReplayResult rc;
 
 	ptr += sizeof(spgxlogAddNode);
 	innerTuple = ptr;
@@ -351,29 +326,20 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
 	{
 		/* update in place */
 		Assert(xldata->blknoParent == InvalidBlockNumber);
-		if (record->xl_info & XLR_BKP_BLOCK(0))
-			(void) RestoreBackupBlock(lsn, record, 0, false, false);
-		else
+		if (XLogReplayBuffer(0, xldata->node, xldata->blkno, &buffer) == BLK_NEEDS_REDO)
 		{
-			buffer = XLogReadBuffer(xldata->node, xldata->blkno, false);
-			if (BufferIsValid(buffer))
-			{
-				page = BufferGetPage(buffer);
-				if (lsn > PageGetLSN(page))
-				{
-					PageIndexTupleDelete(page, xldata->offnum);
-					if (PageAddItem(page, (Item) innerTuple, innerTupleHdr.size,
-									xldata->offnum,
-									false, false) != xldata->offnum)
-						elog(ERROR, "failed to add item of size %u to SPGiST index page",
-							 innerTupleHdr.size);
-
-					PageSetLSN(page, lsn);
-					MarkBufferDirty(buffer);
-				}
-				UnlockReleaseBuffer(buffer);
-			}
+			page = BufferGetPage(buffer);
+			PageIndexTupleDelete(page, xldata->offnum);
+			if (PageAddItem(page, (Item) innerTuple, innerTupleHdr.size,
+							xldata->offnum, false, false) != xldata->offnum)
+				elog(ERROR, "failed to add item of size %u to SPGiST index page",
+					 innerTupleHdr.size);
+
+			PageSetLSN(page, lsn);
+			MarkBufferDirty(buffer);
 		}
+		if (BufferIsValid(buffer))
+			UnlockReleaseBuffer(buffer);
 	}
 	else
 	{
@@ -390,90 +356,77 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
 		Assert(xldata->blkno != xldata->blknoNew);
 
 		/* Install new tuple first so redirect is valid */
-		if (record->xl_info & XLR_BKP_BLOCK(1))
-			(void) RestoreBackupBlock(lsn, record, 1, false, false);
+		if (xldata->newPage)
+		{
+			buffer = XLogReadBuffer(xldata->node, xldata->blknoNew, true);
+			/* AddNode is not used for nulls pages */
+			SpGistInitBuffer(buffer, 0);
+			rc = BLK_NEEDS_REDO;
+		}
 		else
+			rc = XLogReplayBuffer(1, xldata->node, xldata->blknoNew, &buffer);
+		if (rc == BLK_NEEDS_REDO)
 		{
-			buffer = XLogReadBuffer(xldata->node, xldata->blknoNew,
-									xldata->newPage);
-			if (BufferIsValid(buffer))
+			page = BufferGetPage(buffer);
+
+			addOrReplaceTuple(page, (Item) innerTuple,
+							  innerTupleHdr.size, xldata->offnumNew);
+
+			/*
+			 * If parent is in this same page, don't advance LSN;
+			 * doing so would fool us into not applying the parent
+			 * downlink update below.  We'll update the LSN when we
+			 * fix the parent downlink.
+			 */
+			if (xldata->blknoParent != xldata->blknoNew)
 			{
-				page = BufferGetPage(buffer);
-
-				/* AddNode is not used for nulls pages */
-				if (xldata->newPage)
-					SpGistInitBuffer(buffer, 0);
-
-				if (lsn > PageGetLSN(page))
-				{
-					addOrReplaceTuple(page, (Item) innerTuple,
-									  innerTupleHdr.size, xldata->offnumNew);
-
-					/*
-					 * If parent is in this same page, don't advance LSN;
-					 * doing so would fool us into not applying the parent
-					 * downlink update below.  We'll update the LSN when we
-					 * fix the parent downlink.
-					 */
-					if (xldata->blknoParent != xldata->blknoNew)
-					{
-						PageSetLSN(page, lsn);
-					}
-					MarkBufferDirty(buffer);
-				}
-				UnlockReleaseBuffer(buffer);
+				PageSetLSN(page, lsn);
 			}
+			MarkBufferDirty(buffer);
 		}
+		if (BufferIsValid(buffer))
+			UnlockReleaseBuffer(buffer);
 
 		/* Delete old tuple, replacing it with redirect or placeholder tuple */
-		if (record->xl_info & XLR_BKP_BLOCK(0))
-			(void) RestoreBackupBlock(lsn, record, 0, false, false);
-		else
+		if (XLogReplayBuffer(0, xldata->node, xldata->blkno, &buffer) == BLK_NEEDS_REDO)
 		{
-			buffer = XLogReadBuffer(xldata->node, xldata->blkno, false);
-			if (BufferIsValid(buffer))
+			SpGistDeadTuple dt;
+
+			page = BufferGetPage(buffer);
+
+			if (state.isBuild)
+				dt = spgFormDeadTuple(&state, SPGIST_PLACEHOLDER,
+									  InvalidBlockNumber,
+									  InvalidOffsetNumber);
+			else
+				dt = spgFormDeadTuple(&state, SPGIST_REDIRECT,
+									  xldata->blknoNew,
+									  xldata->offnumNew);
+
+			PageIndexTupleDelete(page, xldata->offnum);
+			if (PageAddItem(page, (Item) dt, dt->size, xldata->offnum,
+							false, false) != xldata->offnum)
+				elog(ERROR, "failed to add item of size %u to SPGiST index page",
+					 dt->size);
+
+			if (state.isBuild)
+				SpGistPageGetOpaque(page)->nPlaceholder++;
+			else
+				SpGistPageGetOpaque(page)->nRedirection++;
+
+			/*
+			 * If parent is in this same page, don't advance LSN; doing so
+			 * would fool us into not applying the parent downlink update
+			 * below.  We'll update the LSN when we fix the parent downlink.
+			 */
+			if (xldata->blknoParent != xldata->blkno)
 			{
-				page = BufferGetPage(buffer);
-				if (lsn > PageGetLSN(page))
-				{
-					SpGistDeadTuple dt;
-
-					if (state.isBuild)
-						dt = spgFormDeadTuple(&state, SPGIST_PLACEHOLDER,
-											  InvalidBlockNumber,
-											  InvalidOffsetNumber);
-					else
-						dt = spgFormDeadTuple(&state, SPGIST_REDIRECT,
-											  xldata->blknoNew,
-											  xldata->offnumNew);
-
-					PageIndexTupleDelete(page, xldata->offnum);
-					if (PageAddItem(page, (Item) dt, dt->size,
-									xldata->offnum,
-									false, false) != xldata->offnum)
-						elog(ERROR, "failed to add item of size %u to SPGiST index page",
-							 dt->size);
-
-					if (state.isBuild)
-						SpGistPageGetOpaque(page)->nPlaceholder++;
-					else
-						SpGistPageGetOpaque(page)->nRedirection++;
-
-					/*
-					 * If parent is in this same page, don't advance LSN;
-					 * doing so would fool us into not applying the parent
-					 * downlink update below.  We'll update the LSN when we
-					 * fix the parent downlink.
-					 */
-					if (xldata->blknoParent != xldata->blkno)
-					{
-						PageSetLSN(page, lsn);
-					}
-					MarkBufferDirty(buffer);
-				}
-				UnlockReleaseBuffer(buffer);
+				PageSetLSN(page, lsn);
 			}
+			MarkBufferDirty(buffer);
 		}
+		if (BufferIsValid(buffer))
+			UnlockReleaseBuffer(buffer);
 
 		/*
 		 * Update parent downlink.  Since parent could be in either of the
@@ -491,29 +444,31 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
 		{
 			if (bbi == 2)		/* else we already did it */
 				(void) RestoreBackupBlock(lsn, record, bbi, false, false);
+			rc = BLK_RESTORED;
+			buffer = InvalidBuffer;
 		}
 		else
 		{
-			buffer = XLogReadBuffer(xldata->node, xldata->blknoParent, false);
-			if (BufferIsValid(buffer))
-			{
-				page = BufferGetPage(buffer);
-				if (lsn > PageGetLSN(page))
-				{
-					SpGistInnerTuple innerTuple;
+			rc = XLogReplayBuffer(bbi, xldata->node, xldata->blknoParent, &buffer);
+			Assert(rc != BLK_RESTORED);
+		}
+		if (rc == BLK_NEEDS_REDO)
+		{
+			SpGistInnerTuple innerTuple;
+
+			page = BufferGetPage(buffer);
 
-					innerTuple = (SpGistInnerTuple) PageGetItem(page,
+			innerTuple = (SpGistInnerTuple) PageGetItem(page,
 								  PageGetItemId(page, xldata->offnumParent));
 
-					spgUpdateNodeLink(innerTuple, xldata->nodeI,
-									  xldata->blknoNew, xldata->offnumNew);
+			spgUpdateNodeLink(innerTuple, xldata->nodeI,
+							  xldata->blknoNew, xldata->offnumNew);
 
-					PageSetLSN(page, lsn);
-					MarkBufferDirty(buffer);
-				}
-				UnlockReleaseBuffer(buffer);
-			}
+			PageSetLSN(page, lsn);
+			MarkBufferDirty(buffer);
 		}
+		if (BufferIsValid(buffer))
+			UnlockReleaseBuffer(buffer);
 	}
 }
 
@@ -545,60 +500,53 @@ spgRedoSplitTuple(XLogRecPtr lsn, XLogRecord *record)
 	 */
 
 	/* insert postfix tuple first to avoid dangling link */
-	if (record->xl_info & XLR_BKP_BLOCK(1))
-		(void) RestoreBackupBlock(lsn, record, 1, false, false);
-	else if (xldata->blknoPostfix != xldata->blknoPrefix)
+	if (xldata->blknoPostfix != xldata->blknoPrefix)
 	{
-		buffer = XLogReadBuffer(xldata->node, xldata->blknoPostfix,
-								xldata->newPage);
-		if (BufferIsValid(buffer))
-		{
-			page = BufferGetPage(buffer);
+		XLogReplayResult rc;
 
+		if (xldata->newPage)
+		{
+			buffer = XLogReadBuffer(xldata->node, xldata->blknoPostfix, true);
 			/* SplitTuple is not used for nulls pages */
-			if (xldata->newPage)
-				SpGistInitBuffer(buffer, 0);
+			SpGistInitBuffer(buffer, 0);
+			rc = BLK_NEEDS_REDO;
+		}
+		else
+			rc = XLogReplayBuffer(1, xldata->node, xldata->blknoPostfix, &buffer);
 
-			if (lsn > PageGetLSN(page))
-			{
-				addOrReplaceTuple(page, (Item) postfixTuple,
-								  postfixTupleHdr.size, xldata->offnumPostfix);
+		if (rc == BLK_NEEDS_REDO)
+		{
+			page = BufferGetPage(buffer);
 
-				PageSetLSN(page, lsn);
-				MarkBufferDirty(buffer);
-			}
-			UnlockReleaseBuffer(buffer);
+			addOrReplaceTuple(page, (Item) postfixTuple,
+							  postfixTupleHdr.size, xldata->offnumPostfix);
+
+			PageSetLSN(page, lsn);
+			MarkBufferDirty(buffer);
 		}
+		if (BufferIsValid(buffer))
+			UnlockReleaseBuffer(buffer);
 	}
 
 	/* now handle the original page */
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-		(void) RestoreBackupBlock(lsn, record, 0, false, false);
-	else
+	if (XLogReplayBuffer(0, xldata->node, xldata->blknoPrefix, &buffer) == BLK_NEEDS_REDO)
 	{
-		buffer = XLogReadBuffer(xldata->node, xldata->blknoPrefix, false);
-		if (BufferIsValid(buffer))
-		{
-			page = BufferGetPage(buffer);
-			if (lsn > PageGetLSN(page))
-			{
-				PageIndexTupleDelete(page, xldata->offnumPrefix);
-				if (PageAddItem(page, (Item) prefixTuple, prefixTupleHdr.size,
+		page = BufferGetPage(buffer);
+		PageIndexTupleDelete(page, xldata->offnumPrefix);
+		if (PageAddItem(page, (Item) prefixTuple, prefixTupleHdr.size,
 				 xldata->offnumPrefix, false, false) != xldata->offnumPrefix)
-					elog(ERROR, "failed to add item of size %u to SPGiST index page",
-						 prefixTupleHdr.size);
+			elog(ERROR, "failed to add item of size %u to SPGiST index page",
+				 prefixTupleHdr.size);
 
-				if (xldata->blknoPostfix == xldata->blknoPrefix)
-					addOrReplaceTuple(page, (Item) postfixTuple,
-									  postfixTupleHdr.size,
-									  xldata->offnumPostfix);
+		if (xldata->blknoPostfix == xldata->blknoPrefix)
+			addOrReplaceTuple(page, (Item) postfixTuple, postfixTupleHdr.size,
+							  xldata->offnumPostfix);
 
-				PageSetLSN(page, lsn);
-				MarkBufferDirty(buffer);
-			}
-			UnlockReleaseBuffer(buffer);
-		}
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
 	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 }
 
 static void
@@ -616,9 +564,11 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
 	Buffer		destBuffer;
 	Page		srcPage;
 	Page		destPage;
+	Buffer		innerBuffer;
 	Page		page;
 	int			bbi;
 	int			i;
+	XLogReplayResult rc;
 
 	fillFakeState(&state, xldata->stateSrc);
 
@@ -668,46 +618,35 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
 		 * inserting leaf tuples and the new inner tuple, else the added
 		 * redirect tuple will be a dangling link.)
 		 */
-		if (record->xl_info & XLR_BKP_BLOCK(bbi))
+		if (XLogReplayBuffer(bbi, xldata->node, xldata->blknoSrc, &srcBuffer) == BLK_NEEDS_REDO)
 		{
-			srcBuffer = RestoreBackupBlock(lsn, record, bbi, false, true);
-			srcPage = NULL;		/* don't need to do any page updates */
+			srcPage = BufferGetPage(srcBuffer);
+
+			/*
+			 * We have it a bit easier here than in doPickSplit(), because we
+			 * know the inner tuple's location already, so we can inject the
+			 * correct redirection tuple now.
+			 */
+			if (!state.isBuild)
+				spgPageIndexMultiDelete(&state, srcPage,
+										toDelete, xldata->nDelete,
+										SPGIST_REDIRECT,
+										SPGIST_PLACEHOLDER,
+										xldata->blknoInner,
+										xldata->offnumInner);
+			else
+				spgPageIndexMultiDelete(&state, srcPage,
+										toDelete, xldata->nDelete,
+										SPGIST_PLACEHOLDER,
+										SPGIST_PLACEHOLDER,
+										InvalidBlockNumber,
+										InvalidOffsetNumber);
+
+			/* don't update LSN etc till we're done with it */
 		}
 		else
 		{
-			srcBuffer = XLogReadBuffer(xldata->node, xldata->blknoSrc, false);
-			if (BufferIsValid(srcBuffer))
-			{
-				srcPage = BufferGetPage(srcBuffer);
-				if (lsn > PageGetLSN(srcPage))
-				{
-					/*
-					 * We have it a bit easier here than in doPickSplit(),
-					 * because we know the inner tuple's location already, so
-					 * we can inject the correct redirection tuple now.
-					 */
-					if (!state.isBuild)
-						spgPageIndexMultiDelete(&state, srcPage,
-												toDelete, xldata->nDelete,
-												SPGIST_REDIRECT,
-												SPGIST_PLACEHOLDER,
-												xldata->blknoInner,
-												xldata->offnumInner);
-					else
-						spgPageIndexMultiDelete(&state, srcPage,
-												toDelete, xldata->nDelete,
-												SPGIST_PLACEHOLDER,
-												SPGIST_PLACEHOLDER,
-												InvalidBlockNumber,
-												InvalidOffsetNumber);
-
-					/* don't update LSN etc till we're done with it */
-				}
-				else
-					srcPage = NULL;		/* don't do any page updates */
-			}
-			else
-				srcPage = NULL;
+			srcPage = NULL;		/* don't do any page updates */
 		}
 		bbi++;
 	}
@@ -735,22 +674,13 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
 		 * We could probably release the page lock immediately in the
 		 * full-page-image case, but for safety let's hold it till later.
 		 */
-		if (record->xl_info & XLR_BKP_BLOCK(bbi))
+		if (XLogReplayBuffer(bbi, xldata->node, xldata->blknoDest, &destBuffer) == BLK_NEEDS_REDO)
 		{
-			destBuffer = RestoreBackupBlock(lsn, record, bbi, false, true);
-			destPage = NULL;	/* don't need to do any page updates */
+			destPage = (Page) BufferGetPage(destBuffer);
 		}
 		else
-		{
-			destBuffer = XLogReadBuffer(xldata->node, xldata->blknoDest, false);
-			if (BufferIsValid(destBuffer))
-			{
-				destPage = (Page) BufferGetPage(destBuffer);
-				if (lsn <= PageGetLSN(destPage))
-					destPage = NULL;	/* don't do any page updates */
-			}
-			else
-				destPage = NULL;
+		{
+			destPage = NULL;	/* don't do any page updates */
 		}
 		bbi++;
 	}
@@ -787,43 +717,40 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
 	}
 
 	/* restore new inner tuple */
-	if (record->xl_info & XLR_BKP_BLOCK(bbi))
-		(void) RestoreBackupBlock(lsn, record, bbi, false, false);
-	else
+	if (xldata->initInner)
 	{
-		Buffer		buffer = XLogReadBuffer(xldata->node, xldata->blknoInner,
-											xldata->initInner);
-
-		if (BufferIsValid(buffer))
-		{
-			page = BufferGetPage(buffer);
+		innerBuffer = XLogReadBuffer(xldata->node, xldata->blknoInner, true);
+		SpGistInitBuffer(innerBuffer,
+						 (xldata->storesNulls ? SPGIST_NULLS : 0));
+		rc = BLK_NEEDS_REDO;
+	}
+	else
+		rc = XLogReplayBuffer(bbi, xldata->node, xldata->blknoInner,
+							  &innerBuffer);
 
-			if (xldata->initInner)
-				SpGistInitBuffer(buffer,
-								 (xldata->storesNulls ? SPGIST_NULLS : 0));
+	if (rc == BLK_NEEDS_REDO)
+	{
+		page = BufferGetPage(innerBuffer);
 
-			if (lsn > PageGetLSN(page))
-			{
-				addOrReplaceTuple(page, (Item) innerTuple, innerTupleHdr.size,
-								  xldata->offnumInner);
+		addOrReplaceTuple(page, (Item) innerTuple, innerTupleHdr.size,
+						  xldata->offnumInner);
 
-				/* if inner is also parent, update link while we're here */
-				if (xldata->blknoInner == xldata->blknoParent)
-				{
-					SpGistInnerTuple parent;
+		/* if inner is also parent, update link while we're here */
+		if (xldata->blknoInner == xldata->blknoParent)
+		{
+			SpGistInnerTuple parent;
 
-					parent = (SpGistInnerTuple) PageGetItem(page,
+			parent = (SpGistInnerTuple) PageGetItem(page,
 								  PageGetItemId(page, xldata->offnumParent));
-					spgUpdateNodeLink(parent, xldata->nodeI,
-									xldata->blknoInner, xldata->offnumInner);
-				}
-
-				PageSetLSN(page, lsn);
-				MarkBufferDirty(buffer);
-			}
-			UnlockReleaseBuffer(buffer);
+			spgUpdateNodeLink(parent, xldata->nodeI,
+							  xldata->blknoInner, xldata->offnumInner);
 		}
+
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(innerBuffer);
 	}
+	if (BufferIsValid(innerBuffer))
+		UnlockReleaseBuffer(innerBuffer);
 	bbi++;
 
 	/*
@@ -843,31 +770,25 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
 	}
 	else if (xldata->blknoInner != xldata->blknoParent)
 	{
-		if (record->xl_info & XLR_BKP_BLOCK(bbi))
-			(void) RestoreBackupBlock(lsn, record, bbi, false, false);
-		else
-		{
-			Buffer		buffer = XLogReadBuffer(xldata->node, xldata->blknoParent, false);
+		Buffer		parentBuffer;
 
-			if (BufferIsValid(buffer))
-			{
-				page = BufferGetPage(buffer);
+		if (XLogReplayBuffer(bbi, xldata->node, xldata->blknoParent,
+							 &parentBuffer) == BLK_NEEDS_REDO)
+		{
+			SpGistInnerTuple parent;
 
-				if (lsn > PageGetLSN(page))
-				{
-					SpGistInnerTuple parent;
+			page = BufferGetPage(parentBuffer);
 
-					parent = (SpGistInnerTuple) PageGetItem(page,
+			parent = (SpGistInnerTuple) PageGetItem(page,
 								  PageGetItemId(page, xldata->offnumParent));
-					spgUpdateNodeLink(parent, xldata->nodeI,
-									xldata->blknoInner, xldata->offnumInner);
+			spgUpdateNodeLink(parent, xldata->nodeI,
+							  xldata->blknoInner, xldata->offnumInner);
 
-					PageSetLSN(page, lsn);
-					MarkBufferDirty(buffer);
-				}
-				UnlockReleaseBuffer(buffer);
-			}
+			PageSetLSN(page, lsn);
+			MarkBufferDirty(parentBuffer);
 		}
+		if (BufferIsValid(parentBuffer))
+			UnlockReleaseBuffer(parentBuffer);
 	}
 }
 
@@ -902,62 +823,55 @@ spgRedoVacuumLeaf(XLogRecPtr lsn, XLogRecord *record)
 	ptr += sizeof(OffsetNumber) * xldata->nChain;
 	chainDest = (OffsetNumber *) ptr;
 
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-		(void) RestoreBackupBlock(lsn, record, 0, false, false);
-	else
+	if (XLogReplayBuffer(0, xldata->node, xldata->blkno, &buffer) == BLK_NEEDS_REDO)
 	{
-		buffer = XLogReadBuffer(xldata->node, xldata->blkno, false);
-		if (BufferIsValid(buffer))
+		page = BufferGetPage(buffer);
+
+		spgPageIndexMultiDelete(&state, page,
+								toDead, xldata->nDead,
+								SPGIST_DEAD, SPGIST_DEAD,
+								InvalidBlockNumber,
+								InvalidOffsetNumber);
+
+		spgPageIndexMultiDelete(&state, page,
+								toPlaceholder, xldata->nPlaceholder,
+								SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
+								InvalidBlockNumber,
+								InvalidOffsetNumber);
+
+		/* see comments in vacuumLeafPage() */
+		for (i = 0; i < xldata->nMove; i++)
 		{
-			page = BufferGetPage(buffer);
-			if (lsn > PageGetLSN(page))
-			{
-				spgPageIndexMultiDelete(&state, page,
-										toDead, xldata->nDead,
-										SPGIST_DEAD, SPGIST_DEAD,
-										InvalidBlockNumber,
-										InvalidOffsetNumber);
+			ItemId		idSrc = PageGetItemId(page, moveSrc[i]);
+			ItemId		idDest = PageGetItemId(page, moveDest[i]);
+			ItemIdData	tmp;
 
-				spgPageIndexMultiDelete(&state, page,
-										toPlaceholder, xldata->nPlaceholder,
-									  SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
-										InvalidBlockNumber,
-										InvalidOffsetNumber);
+			tmp = *idSrc;
+			*idSrc = *idDest;
+			*idDest = tmp;
+		}
 
-				/* see comments in vacuumLeafPage() */
-				for (i = 0; i < xldata->nMove; i++)
-				{
-					ItemId		idSrc = PageGetItemId(page, moveSrc[i]);
-					ItemId		idDest = PageGetItemId(page, moveDest[i]);
-					ItemIdData	tmp;
-
-					tmp = *idSrc;
-					*idSrc = *idDest;
-					*idDest = tmp;
-				}
-
-				spgPageIndexMultiDelete(&state, page,
-										moveSrc, xldata->nMove,
-									  SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
-										InvalidBlockNumber,
-										InvalidOffsetNumber);
+		spgPageIndexMultiDelete(&state, page,
+								moveSrc, xldata->nMove,
+								SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
+								InvalidBlockNumber,
+								InvalidOffsetNumber);
 
-				for (i = 0; i < xldata->nChain; i++)
-				{
-					SpGistLeafTuple lt;
+		for (i = 0; i < xldata->nChain; i++)
+		{
+			SpGistLeafTuple lt;
 
-					lt = (SpGistLeafTuple) PageGetItem(page,
+			lt = (SpGistLeafTuple) PageGetItem(page,
 										   PageGetItemId(page, chainSrc[i]));
-					Assert(lt->tupstate == SPGIST_LIVE);
-					lt->nextOffset = chainDest[i];
-				}
-
-				PageSetLSN(page, lsn);
-				MarkBufferDirty(buffer);
-			}
-			UnlockReleaseBuffer(buffer);
+			Assert(lt->tupstate == SPGIST_LIVE);
+			lt->nextOffset = chainDest[i];
 		}
+
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
 	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 }
 
 static void
@@ -971,25 +885,18 @@ spgRedoVacuumRoot(XLogRecPtr lsn, XLogRecord *record)
 
 	toDelete = xldata->offsets;
 
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-		(void) RestoreBackupBlock(lsn, record, 0, false, false);
-	else
+	if (XLogReplayBuffer(0, xldata->node, xldata->blkno, &buffer) == BLK_NEEDS_REDO)
 	{
-		buffer = XLogReadBuffer(xldata->node, xldata->blkno, false);
-		if (BufferIsValid(buffer))
-		{
-			page = BufferGetPage(buffer);
-			if (lsn > PageGetLSN(page))
-			{
-				/* The tuple numbers are in order */
-				PageIndexMultiDelete(page, toDelete, xldata->nDelete);
+		page = BufferGetPage(buffer);
 
-				PageSetLSN(page, lsn);
-				MarkBufferDirty(buffer);
-			}
-			UnlockReleaseBuffer(buffer);
-		}
+		/* The tuple numbers are in order */
+		PageIndexMultiDelete(page, toDelete, xldata->nDelete);
+
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
 	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 }
 
 static void
@@ -999,7 +906,6 @@ spgRedoVacuumRedirect(XLogRecPtr lsn, XLogRecord *record)
 	spgxlogVacuumRedirect *xldata = (spgxlogVacuumRedirect *) ptr;
 	OffsetNumber *itemToPlaceholder;
 	Buffer		buffer;
-	Page		page;
 
 	itemToPlaceholder = xldata->offsets;
 
@@ -1014,64 +920,54 @@ spgRedoVacuumRedirect(XLogRecPtr lsn, XLogRecord *record)
 												xldata->node);
 	}
 
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-		(void) RestoreBackupBlock(lsn, record, 0, false, false);
-	else
+	if (XLogReplayBuffer(0, xldata->node, xldata->blkno, &buffer) == BLK_NEEDS_REDO)
 	{
-		buffer = XLogReadBuffer(xldata->node, xldata->blkno, false);
+		Page		page = BufferGetPage(buffer);
+		SpGistPageOpaque opaque = SpGistPageGetOpaque(page);
+		int			i;
 
-		if (BufferIsValid(buffer))
+		/* Convert redirect pointers to plain placeholders */
+		for (i = 0; i < xldata->nToPlaceholder; i++)
 		{
-			page = BufferGetPage(buffer);
-			if (lsn > PageGetLSN(page))
-			{
-				SpGistPageOpaque opaque = SpGistPageGetOpaque(page);
-				int			i;
+			SpGistDeadTuple dt;
 
-				/* Convert redirect pointers to plain placeholders */
-				for (i = 0; i < xldata->nToPlaceholder; i++)
-				{
-					SpGistDeadTuple dt;
-
-					dt = (SpGistDeadTuple) PageGetItem(page,
+			dt = (SpGistDeadTuple) PageGetItem(page,
 								  PageGetItemId(page, itemToPlaceholder[i]));
-					Assert(dt->tupstate == SPGIST_REDIRECT);
-					dt->tupstate = SPGIST_PLACEHOLDER;
-					ItemPointerSetInvalid(&dt->pointer);
-				}
-
-				Assert(opaque->nRedirection >= xldata->nToPlaceholder);
-				opaque->nRedirection -= xldata->nToPlaceholder;
-				opaque->nPlaceholder += xldata->nToPlaceholder;
-
-				/* Remove placeholder tuples at end of page */
-				if (xldata->firstPlaceholder != InvalidOffsetNumber)
-				{
-					int			max = PageGetMaxOffsetNumber(page);
-					OffsetNumber *toDelete;
+			Assert(dt->tupstate == SPGIST_REDIRECT);
+			dt->tupstate = SPGIST_PLACEHOLDER;
+			ItemPointerSetInvalid(&dt->pointer);
+		}
 
-					toDelete = palloc(sizeof(OffsetNumber) * max);
+		Assert(opaque->nRedirection >= xldata->nToPlaceholder);
+		opaque->nRedirection -= xldata->nToPlaceholder;
+		opaque->nPlaceholder += xldata->nToPlaceholder;
 
-					for (i = xldata->firstPlaceholder; i <= max; i++)
-						toDelete[i - xldata->firstPlaceholder] = i;
+		/* Remove placeholder tuples at end of page */
+		if (xldata->firstPlaceholder != InvalidOffsetNumber)
+		{
+			int			max = PageGetMaxOffsetNumber(page);
+			OffsetNumber *toDelete;
 
-					i = max - xldata->firstPlaceholder + 1;
-					Assert(opaque->nPlaceholder >= i);
-					opaque->nPlaceholder -= i;
+			toDelete = palloc(sizeof(OffsetNumber) * max);
 
-					/* The array is sorted, so can use PageIndexMultiDelete */
-					PageIndexMultiDelete(page, toDelete, i);
+			for (i = xldata->firstPlaceholder; i <= max; i++)
+				toDelete[i - xldata->firstPlaceholder] = i;
 
-					pfree(toDelete);
-				}
+			i = max - xldata->firstPlaceholder + 1;
+			Assert(opaque->nPlaceholder >= i);
+			opaque->nPlaceholder -= i;
 
-				PageSetLSN(page, lsn);
-				MarkBufferDirty(buffer);
-			}
+			/* The array is sorted, so can use PageIndexMultiDelete */
+			PageIndexMultiDelete(page, toDelete, i);
 
-			UnlockReleaseBuffer(buffer);
+			pfree(toDelete);
 		}
+
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
 	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 }
 
 void
diff --git a/src/backend/access/transam/README b/src/backend/access/transam/README
index f83526c..ed46a16 100644
--- a/src/backend/access/transam/README
+++ b/src/backend/access/transam/README
@@ -500,33 +500,28 @@ incrementally update the page, the rdata array *must* mention the buffer
 ID at least once; otherwise there is no defense against torn-page problems.
 The standard replay-routine pattern for this case is
 
-	if (record->xl_info & XLR_BKP_BLOCK(N))
+	if (XLogReplayBuffer(N, rnode, blkno, &buffer) == BLK_NEEDS_REDO)
 	{
-		/* apply the change from the full-page image */
-		(void) RestoreBackupBlock(lsn, record, N, false, false);
-		return;
-	}
+		page = (Page) BufferGetPage(buffer);
 
-	buffer = XLogReadBuffer(rnode, blkno, false);
-	if (!BufferIsValid(buffer))
-	{
-		/* page has been deleted, so we need do nothing */
-		return;
-	}
-	page = (Page) BufferGetPage(buffer);
+		... apply the change ...
 
-	if (XLByteLE(lsn, PageGetLSN(page)))
-	{
-		/* changes are already applied */
-		UnlockReleaseBuffer(buffer);
-		return;
+		PageSetLSN(page, lsn);
+		MarkBufferDirty(buffer);
 	}
+	if (BufferIsValid(buffer))
+		UnlockReleaseBuffer(buffer);
 
-	... apply the change ...
-
-	PageSetLSN(page, lsn);
-	MarkBufferDirty(buffer);
-	UnlockReleaseBuffer(buffer);
+XLogReplayBuffer checks what action needs to be taken to the page.  If
+the XLR_BKP_BLOCK(N) flag is set, it restores the full page image and returns
+BLK_RESTORED.  If there is no full page image, but the page cannot be found or if
+the change has already been replayed (i.e. the page's LSN >= the record we're
+replaying), it returns BLK_NOTFOUND or BLK_DONE, respectively.  Usually, the
+redo routine only needs to pay attention to the BLK_NEEDS_REDO return code, which
+means that the routine should apply the incremental change.  In any case, the
+caller is responsible for unlocking and releasing the buffer; note that
+XLogReplayBuffer returns the buffer locked even if no redo is required, unless
+the page does not exist.
 
 As noted above, for a multi-page update you need to be able to determine
 which XLR_BKP_BLOCK(N) flag applies to each page.  If a WAL record reflects
@@ -539,31 +534,8 @@ per the above discussion, fully-rewritable buffers shouldn't be mentioned in
 When replaying a WAL record that describes changes on multiple pages, you
 must be careful to lock the pages properly to prevent concurrent Hot Standby
 queries from seeing an inconsistent state.  If this requires that two
-or more buffer locks be held concurrently, the coding pattern shown above
-is too simplistic, since it assumes the routine can exit as soon as it's
-known the current page requires no modification.  Instead, you might have
-something like
-
-	if (record->xl_info & XLR_BKP_BLOCK(0))
-	{
-		/* apply the change from the full-page image */
-		buffer0 = RestoreBackupBlock(lsn, record, 0, false, true);
-	}
-	else
-	{
-		buffer0 = XLogReadBuffer(rnode, blkno, false);
-		if (BufferIsValid(buffer0))
-		{
-			... apply the change if not already done ...
-			MarkBufferDirty(buffer0);
-		}
-	}
-
-	... similarly apply the changes for remaining pages ...
-
-	/* and now we can release the lock on the first page */
-	if (BufferIsValid(buffer0))
-		UnlockReleaseBuffer(buffer0);
+or more buffer locks be held concurrently, you must lock the pages in
+appropriate order, and not release the locks until all the changes are done.
 
 Note that we must only use PageSetLSN/PageGetLSN() when we know the action
 is serialised. Only Startup process may modify data blocks during recovery,
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 34f2fc0..881652d 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -4039,7 +4039,7 @@ RestoreBackupBlock(XLogRecPtr lsn, XLogRecord *record, int block_index,
 	}
 
 	/* Caller specified a bogus block_index */
-	elog(ERROR, "failed to restore block_index %d", block_index);
+	elog(PANIC, "failed to restore block_index %d", block_index);
 	return InvalidBuffer;		/* keep compiler quiet */
 }
 
@@ -6844,6 +6844,8 @@ StartupXLOG(void)
 					RecordKnownAssignedTransactionIds(record->xl_xid);
 
 				/* Now apply the WAL record itself */
+				XLogRedoLSN = EndRecPtr;
+				XLogRedoRecord = record;
 				RmgrTable[record->xl_rmid].rm_redo(EndRecPtr, record);
 
 				/* Pop the error context stack */
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index b7829ff..4ef2ca8 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -25,6 +25,11 @@
 #include "utils/hsearch.h"
 #include "utils/rel.h"
 
+/*
+ * WAL record currently being replayed.
+ */
+XLogRecPtr XLogRedoLSN;
+XLogRecord *XLogRedoRecord;
 
 /*
  * During XLOG replay, we may see XLOG records for incremental updates of
@@ -242,6 +247,78 @@ XLogCheckInvalidPages(void)
 	invalid_page_tab = NULL;
 }
 
+
+/*
+ * XLogReplayBuffer
+ *		Read a page during XLOG replay
+ *
+ * Reads a block referenced by a WAL record into shared buffer cache, and
+ * determines what needs to be done to replay the changes to it. If the
+ * WAL record includes a full-page image of the page, it is restored.
+ *
+ * (Getting the buffer lock is not really necessary during single-process
+ * crash recovery, but some subroutines such as MarkBufferDirty will complain
+ * if we don't have the lock.  In hot standby mode it's definitely necessary.)
+ * Returns one of the following:
+ *
+ *	BLK_NEEDS_REDO	- block needs to be replayed
+ *	BLK_DONE		- block doesn't need replaying
+ *	BLK_RESTORED	- block was restored from a full-page image included in
+ *					  the record
+ *	BLK_NOTFOUND	- block was not found (because it was truncated away by
+ *					  an operation later in the WAL stream)
+ *
+ * On return, the buffer is locked in exclusive-mode, and returned in *buf.
+ * Note that the buffer is locked and returned even if it doesn't need
+ * replaying, unless the page does not exist (BLK_NOTFOUND), in which case
+ * *buf is set to InvalidBuffer.
+ */
+XLogReplayResult
+XLogReplayBuffer(int block_index, RelFileNode rnode, BlockNumber blkno, Buffer *buf)
+{
+	return XLogReplayBufferExtended(block_index,
+									rnode, MAIN_FORKNUM, blkno,
+									RBM_NORMAL, false, buf);
+}
+
+/*
+ * XLogReplayBufferExtended
+ *		Like XLogReplayBuffer, but with extra options.
+ *
+ * If mode is RBM_ZERO or RBM_ZERO_ON_ERROR, if the page doesn't exist, the
+ * relation is extended with all-zeroes pages up to the referenced block
+ * number. In RBM_ZERO mode, the return value is always BLK_NEEDS_REDO.
+ *
+ * If 'get_cleanup_lock' is true, a "cleanup lock" is acquired on the buffer
+ * using LockBufferForCleanup(), instead of a regular exclusive lock.
+ */
+XLogReplayResult
+XLogReplayBufferExtended(int block_index,
+						 RelFileNode rnode, ForkNumber forkno, BlockNumber blkno,
+						 ReadBufferMode mode, bool get_cleanup_lock,
+						 Buffer *buf)
+{
+	if (XLogRedoRecord->xl_info & XLR_BKP_BLOCK(block_index))
+	{
+		*buf = RestoreBackupBlock(XLogRedoLSN, XLogRedoRecord, block_index, get_cleanup_lock, true);
+		return BLK_RESTORED;
+	}
+	else
+	{
+		*buf = XLogReadBufferExtended(rnode, forkno, blkno, mode);
+		if (BufferIsValid(*buf))
+		{
+			LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);
+			if (XLogRedoLSN <= PageGetLSN(BufferGetPage(*buf)))
+				return BLK_DONE;
+			else
+				return BLK_NEEDS_REDO;
+		}
+		else
+			return BLK_NOTFOUND;
+	}
+}
+
 /*
  * XLogReadBuffer
  *		Read a page during XLOG replay.
diff --git a/src/include/access/xlogutils.h b/src/include/access/xlogutils.h
index 58f11d9..5fda2c6 100644
--- a/src/include/access/xlogutils.h
+++ b/src/include/access/xlogutils.h
@@ -1,7 +1,7 @@
 /*
  * xlogutils.h
  *
- * PostgreSQL transaction log manager utility routines
+ * Utilities for replaying WAL records.
  *
  * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
@@ -22,6 +22,27 @@ extern void XLogDropDatabase(Oid dbid);
 extern void XLogTruncateRelation(RelFileNode rnode, ForkNumber forkNum,
 					 BlockNumber nblocks);
 
+/* Result codes for XLogReplayBuffer[Extended] */
+typedef enum
+{
+	BLK_NEEDS_REDO,		/* block needs to be replayed */
+	BLK_DONE,			/* block was already replayed */
+	BLK_RESTORED,		/* block was restored from a full-page image */
+	BLK_NOTFOUND		/* block was not found (and hence does not need to be
+						 * replayed) */
+} XLogReplayResult;
+
+extern XLogRecPtr XLogRedoLSN;
+extern struct XLogRecord *XLogRedoRecord;
+
+extern XLogReplayResult XLogReplayBuffer(int block_index,
+				 RelFileNode rnode, BlockNumber blkno, Buffer *buf);
+extern XLogReplayResult XLogReplayBufferExtended(int block_index,
+						 RelFileNode rnode, ForkNumber forkno,
+						 BlockNumber blkno,
+						 ReadBufferMode mode, bool get_cleanup_lock,
+						 Buffer *buf);
+
 extern Buffer XLogReadBuffer(RelFileNode rnode, BlockNumber blkno, bool init);
 extern Buffer XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
 					   BlockNumber blkno, ReadBufferMode mode);
-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers

Reply via email to