diff --git a/src/backend/access/hash/hash_xlog.c b/src/backend/access/hash/hash_xlog.c
index f19f6fdfaf..a13314338a 100644
--- a/src/backend/access/hash/hash_xlog.c
+++ b/src/backend/access/hash/hash_xlog.c
@@ -558,7 +558,7 @@ hash_xlog_move_page_contents(XLogReaderState *record)
 				Size		itemsz;
 				OffsetNumber l;
 
-				itemsz = IndexTupleDSize(*itup);
+				itemsz = IndexTupleSize(itup);
 				itemsz = MAXALIGN(itemsz);
 
 				data += itemsz;
@@ -686,7 +686,7 @@ hash_xlog_squeeze_page(XLogReaderState *record)
 				Size		itemsz;
 				OffsetNumber l;
 
-				itemsz = IndexTupleDSize(*itup);
+				itemsz = IndexTupleSize(itup);
 				itemsz = MAXALIGN(itemsz);
 
 				data += itemsz;
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index dc08db97db..b90d4174fa 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -55,7 +55,7 @@ _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel)
 	hashkey = _hash_get_indextuple_hashkey(itup);
 
 	/* compute item size too */
-	itemsz = IndexTupleDSize(*itup);
+	itemsz = IndexTupleSize(itup);
 	itemsz = MAXALIGN(itemsz);	/* be safe, PageAddItem will do this but we
 								 * need to be consistent */
 
@@ -222,7 +222,7 @@ restart_insert:
 		XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD);
 
 		XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
-		XLogRegisterBufData(0, (char *) itup, IndexTupleDSize(*itup));
+		XLogRegisterBufData(0, (char *) itup, IndexTupleSize(itup));
 
 		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INSERT);
 
@@ -309,7 +309,7 @@ _hash_pgaddmultitup(Relation rel, Buffer buf, IndexTuple *itups,
 	{
 		Size		itemsize;
 
-		itemsize = IndexTupleDSize(*itups[i]);
+		itemsize = IndexTupleSize(itups[i]);
 		itemsize = MAXALIGN(itemsize);
 
 		/* Find where to insert the tuple (preserving page's hashkey ordering) */
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index c206e704d4..b5ec0f69d6 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -890,7 +890,7 @@ readpage:
 
 			itup = (IndexTuple) PageGetItem(rpage,
 											PageGetItemId(rpage, roffnum));
-			itemsz = IndexTupleDSize(*itup);
+			itemsz = IndexTupleSize(itup);
 			itemsz = MAXALIGN(itemsz);
 
 			/*
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index a50e35dfcb..51100e059b 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -1173,7 +1173,7 @@ _hash_splitbucket(Relation rel,
 				 * the current page in the new bucket, we must allocate a new
 				 * overflow page and place the tuple on that page instead.
 				 */
-				itemsz = IndexTupleDSize(*new_itup);
+				itemsz = IndexTupleSize(new_itup);
 				itemsz = MAXALIGN(itemsz);
 
 				if (PageGetFreeSpaceForMultipleTuples(npage, nitups + 1) < (all_tups_size + itemsz))
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 310589da4e..b39090aff1 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -558,7 +558,7 @@ _bt_findinsertloc(Relation rel,
 
 	lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
 
-	itemsz = IndexTupleDSize(*newtup);
+	itemsz = IndexTupleSize(newtup);
 	itemsz = MAXALIGN(itemsz);	/* be safe, PageAddItem will do this but we
 								 * need to be consistent */
 
@@ -755,7 +755,7 @@ _bt_insertonpg(Relation rel,
 		elog(ERROR, "cannot insert to incompletely split page %u",
 			 BufferGetBlockNumber(buf));
 
-	itemsz = IndexTupleDSize(*itup);
+	itemsz = IndexTupleSize(itup);
 	itemsz = MAXALIGN(itemsz);	/* be safe, PageAddItem will do this but we
 								 * need to be consistent */
 
@@ -914,7 +914,7 @@ _bt_insertonpg(Relation rel,
 									sizeof(IndexTupleData));
 			}
 			else
-				XLogRegisterBufData(0, (char *) itup, IndexTupleDSize(*itup));
+				XLogRegisterBufData(0, (char *) itup, IndexTupleSize(itup));
 
 			recptr = XLogInsert(RM_BTREE_ID, xlinfo);
 
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index bf6c03c7b2..b71529f124 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -468,7 +468,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
 	last_off = state->btps_lastoff;
 
 	pgspc = PageGetFreeSpace(npage);
-	itupsz = IndexTupleDSize(*itup);
+	itupsz = IndexTupleSize(itup);
 	itupsz = MAXALIGN(itupsz);
 
 	/*
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index 7250b4f0b8..afd9f824b4 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -53,7 +53,7 @@ _bt_restore_page(Page page, char *from, int len)
 	{
 		/* Need to copy tuple header due to alignment considerations */
 		memcpy(&itupdata, from, sizeof(IndexTupleData));
-		itemsz = IndexTupleDSize(itupdata);
+		itemsz = IndexTupleSize(&itupdata);
 		itemsz = MAXALIGN(itemsz);
 
 		items[i] = (Item) from;
@@ -282,7 +282,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
 		if (onleft)
 		{
 			newitem = (Item) datapos;
-			newitemsz = MAXALIGN(IndexTupleSize(newitem));
+			newitemsz = MAXALIGN(IndexTupleSize((IndexTuple) newitem));
 			datapos += newitemsz;
 			datalen -= newitemsz;
 		}
@@ -291,7 +291,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
 		if (!isleaf)
 		{
 			left_hikey = (Item) datapos;
-			left_hikeysz = MAXALIGN(IndexTupleSize(left_hikey));
+			left_hikeysz = MAXALIGN(IndexTupleSize((IndexTuple) left_hikey));
 			datapos += left_hikeysz;
 			datalen -= left_hikeysz;
 		}
diff --git a/src/include/access/itup.h b/src/include/access/itup.h
index c178ae91a9..4b53d32ee5 100644
--- a/src/include/access/itup.h
+++ b/src/include/access/itup.h
@@ -67,8 +67,7 @@ typedef IndexAttributeBitMapData * IndexAttributeBitMap;
 #define INDEX_VAR_MASK	0x4000
 #define INDEX_NULL_MASK 0x8000
 
-#define IndexTupleSize(itup)		((Size) (((IndexTuple) (itup))->t_info & INDEX_SIZE_MASK))
-#define IndexTupleDSize(itup)		((Size) ((itup).t_info & INDEX_SIZE_MASK))
+#define IndexTupleSize(itup)		((Size) ((itup)->t_info & INDEX_SIZE_MASK))
 #define IndexTupleHasNulls(itup)	((((IndexTuple) (itup))->t_info & INDEX_NULL_MASK))
 #define IndexTupleHasVarwidths(itup) ((((IndexTuple) (itup))->t_info & INDEX_VAR_MASK))
 

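For reference, a minimal standalone sketch of what the two macros compute, using simplified stand-ins for the definitions in access/itup.h (t_tid and the varwidth flag are omitted here); the removed IndexTupleDSize is reproduced only for comparison, to show that it applied the same INDEX_SIZE_MASK to t_info, just on a struct value instead of a pointer, which is why every caller had to spell it IndexTupleDSize(*itup):

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the definitions in access/itup.h. */
typedef size_t Size;

typedef struct IndexTupleData
{
	unsigned short t_info;		/* tuple size in the low bits, flags in the high bits */
} IndexTupleData;

typedef IndexTupleData *IndexTuple;

#define INDEX_SIZE_MASK 0x1FFF
#define INDEX_NULL_MASK 0x8000

/* Surviving form: takes a pointer, like IndexTupleHasNulls/HasVarwidths. */
#define IndexTupleSize(itup)	((Size) ((itup)->t_info & INDEX_SIZE_MASK))

/* Removed form, kept here only for comparison: same mask, struct by value. */
#define IndexTupleDSize(itup)	((Size) ((itup).t_info & INDEX_SIZE_MASK))

int
main(void)
{
	IndexTupleData tupdata = {.t_info = INDEX_NULL_MASK | 24};	/* 24-byte tuple with nulls */
	IndexTuple	itup = &tupdata;

	printf("IndexTupleSize(itup):   %zu\n", IndexTupleSize(itup));
	printf("IndexTupleDSize(*itup): %zu\n", IndexTupleDSize(*itup));
	return 0;
}

Both lines print 24. After the patch only the pointer-taking spelling remains, so call sites such as _hash_doinsert and _bt_buildadd drop the dereference and pass the IndexTuple directly; the two btree_xlog_split call sites that hold an Item gain an explicit (IndexTuple) cast because the rewritten macro no longer casts its argument itself.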