diff --git a/src/backend/access/hash/hash_xlog.c b/src/backend/access/hash/hash_xlog.c
index 8647e8c..f823612 100644
--- a/src/backend/access/hash/hash_xlog.c
+++ b/src/backend/access/hash/hash_xlog.c
@@ -109,7 +109,7 @@ hash_xlog_insert(XLogReaderState *record)
 
 	if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
 	{
-		Size		datalen;
+		size_t		datalen;
 		char	   *datapos = XLogRecGetBlockData(record, 0, &datalen);
 
 		page = BufferGetPage(buffer);
@@ -161,7 +161,7 @@ hash_xlog_add_ovfl_page(XLogReaderState *record)
 	HashPageOpaque ovflopaque;
 	uint32	   *num_bucket;
 	char	   *data;
-	Size datalen PG_USED_FOR_ASSERTS_ONLY;
+	size_t datalen PG_USED_FOR_ASSERTS_ONLY;
 	bool		new_bmpage = false;
 
 	XLogRecGetBlockTag(record, 0, NULL, NULL, &rightblk);
@@ -293,7 +293,7 @@ hash_xlog_split_allocate_page(XLogReaderState *record)
 	Buffer		oldbuf;
 	Buffer		newbuf;
 	Buffer		metabuf;
-	Size datalen PG_USED_FOR_ASSERTS_ONLY;
+	size_t datalen PG_USED_FOR_ASSERTS_ONLY;
 	char	   *data;
 	XLogRedoAction action;
 
@@ -512,7 +512,7 @@ hash_xlog_move_page_contents(XLogReaderState *record)
 		Page		writepage;
 		char	   *begin;
 		char	   *data;
-		Size		datalen;
+		size_t		datalen;
 		uint16		ninserted = 0;
 
 		data = begin = XLogRecGetBlockData(record, 1, &datalen);
@@ -528,7 +528,7 @@ hash_xlog_move_page_contents(XLogReaderState *record)
 			while (data - begin < datalen)
 			{
 				IndexTuple	itup = (IndexTuple) data;
-				Size		itemsz;
+				size_t		itemsz;
 				OffsetNumber l;
 
 				itemsz = IndexTupleDSize(*itup);
@@ -559,7 +559,7 @@ hash_xlog_move_page_contents(XLogReaderState *record)
 	{
 		Page		page;
 		char	   *ptr;
-		Size		len;
+		size_t		len;
 
 		ptr = XLogRecGetBlockData(record, 2, &len);
 
@@ -640,7 +640,7 @@ hash_xlog_squeeze_page(XLogReaderState *record)
 		Page		writepage;
 		char	   *begin;
 		char	   *data;
-		Size		datalen;
+		size_t		datalen;
 		uint16		ninserted = 0;
 
 		data = begin = XLogRecGetBlockData(record, 1, &datalen);
@@ -656,7 +656,7 @@ hash_xlog_squeeze_page(XLogReaderState *record)
 			while (data - begin < datalen)
 			{
 				IndexTuple	itup = (IndexTuple) data;
-				Size		itemsz;
+				size_t		itemsz;
 				OffsetNumber l;
 
 				itemsz = IndexTupleDSize(*itup);
@@ -761,7 +761,7 @@ hash_xlog_squeeze_page(XLogReaderState *record)
 		uint32	   *freep = NULL;
 		char	   *data;
 		uint32	   *bitmap_page_bit;
-		Size		datalen;
+		size_t		datalen;
 
 		freep = HashPageGetBitmap(mappage);
 
@@ -787,7 +787,7 @@ hash_xlog_squeeze_page(XLogReaderState *record)
 			Page		page;
 			char	   *data;
 			uint32	   *firstfree_ovflpage;
-			Size		datalen;
+			size_t		datalen;
 
 			data = XLogRecGetBlockData(record, 6, &datalen);
 			firstfree_ovflpage = (uint32 *) data;
@@ -841,7 +841,7 @@ hash_xlog_delete(XLogReaderState *record)
 	if (action == BLK_NEEDS_REDO)
 	{
 		char	   *ptr;
-		Size		len;
+		size_t		len;
 
 		ptr = XLogRecGetBlockData(record, 1, &len);
 
@@ -945,7 +945,7 @@ hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record)
 	TransactionId	latestRemovedXid = InvalidTransactionId;
 	int		i;
 	char *ptr;
-	Size len;
+	size_t len;
 
 	xlrec = (xl_hash_vacuum_one_page *) XLogRecGetData(record);
 
@@ -1108,7 +1108,7 @@ hash_xlog_vacuum_one_page(XLogReaderState *record)
 	if (action == BLK_NEEDS_REDO)
 	{
 		char *ptr;
-		Size len;
+		size_t len;
 
 		ptr = XLogRecGetBlockData(record, 0, &len);
 
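
These hash_xlog.c hunks are purely mechanical: in PostgreSQL's c.h, Size is
declared as a typedef of size_t, so the substitution changes spelling, not
representation, and passing &datalen (now a size_t *) to functions still
declared with Size * parameters, such as XLogRecGetBlockData(), remains
well-typed.  A minimal standalone sketch of that equivalence:

    #include <stddef.h>

    typedef size_t Size;            /* as in src/include/c.h */

    static void get_len(Size *len) { *len = 42; }

    int
    main(void)
    {
        size_t  datalen;            /* the spelling this patch prefers */

        get_len(&datalen);          /* Size * and size_t * are the same type */
        return (int) datalen - 42;
    }
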
diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c
index 20b4362..ffa1c01 100644
--- a/src/backend/replication/logical/launcher.c
+++ b/src/backend/replication/logical/launcher.c
@@ -461,10 +461,10 @@ logicalrep_worker_sigterm(SIGNAL_ARGS)
  * ApplyLauncherShmemSize
  *		Compute space needed for replication launcher shared memory
  */
-Size
+size_t
 ApplyLauncherShmemSize(void)
 {
-	Size		size;
+	size_t		size;
 
 	/*
 	 * Need the fixed struct and the array of LogicalRepWorker.
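
The hunk shows only the declaration and local; the body follows the usual
shared-memory sizing pattern (MAXALIGN the fixed struct, then add one slot
per worker).  A hedged sketch of that pattern: LogicalRepCtxStruct is an
assumed name here, while add_size()/mul_size() are the standard
overflow-checked helpers from storage/shmem.h:

    size_t
    ApplyLauncherShmemSize(void)
    {
        size_t      size;

        /* Fixed control struct, maximally aligned (struct name assumed). */
        size = MAXALIGN(sizeof(LogicalRepCtxStruct));
        /* One LogicalRepWorker slot per allowed worker. */
        size = add_size(size,
                        mul_size(max_logical_replication_workers,
                                 sizeof(LogicalRepWorker)));
        return size;
    }
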
diff --git a/src/backend/utils/misc/backend_random.c b/src/backend/utils/misc/backend_random.c
index dcc2363..08dd45b 100644
--- a/src/backend/utils/misc/backend_random.c
+++ b/src/backend/utils/misc/backend_random.c
@@ -37,7 +37,7 @@
 
 #ifdef HAVE_STRONG_RANDOM
 
-Size
+size_t
 BackendRandomShmemSize(void)
 {
 	return 0;
@@ -73,7 +73,7 @@ typedef struct
 
 static BackendRandomShmemStruct *BackendRandomShmem;
 
-Size
+size_t
 BackendRandomShmemSize(void)
 {
 	return sizeof(BackendRandomShmemStruct);
diff --git a/src/backend/utils/mmgr/dsa.c b/src/backend/utils/mmgr/dsa.c
index 49e68b4..5f36392 100644
--- a/src/backend/utils/mmgr/dsa.c
+++ b/src/backend/utils/mmgr/dsa.c
@@ -65,7 +65,7 @@
  * double this size, and so on.  Larger segments may be created if necessary
  * to satisfy large requests.
  */
-#define DSA_INITIAL_SEGMENT_SIZE ((Size) (1 * 1024 * 1024))
+#define DSA_INITIAL_SEGMENT_SIZE ((size_t) (1 * 1024 * 1024))
 
 /*
  * How many segments to create before we double the segment size.  If this is
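
Per the comment above, segment sizes grow geometrically; the arithmetic
appears later in make_new_segment().  A self-contained sketch of the sizing
rule for the Nth segment (the DSA_NUM_SEGMENTS_AT_EACH_SIZE value below is
illustrative only, and the real code additionally clamps the result to
DSA_MAX_SEGMENT_SIZE and the area's configured limit):

    #include <stddef.h>

    #define DSA_INITIAL_SEGMENT_SIZE ((size_t) (1 * 1024 * 1024))
    #define DSA_NUM_SEGMENTS_AT_EACH_SIZE 4     /* illustrative value */

    /* 1MB, 1MB, 1MB, 1MB, 2MB, 2MB, 2MB, 2MB, 4MB, ... */
    static size_t
    nth_segment_size(size_t new_index)
    {
        return DSA_INITIAL_SEGMENT_SIZE *
            ((size_t) 1 << (new_index / DSA_NUM_SEGMENTS_AT_EACH_SIZE));
    }
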
@@ -98,7 +98,7 @@
 #define DSA_OFFSET_BITMASK (((dsa_pointer) 1 << DSA_OFFSET_WIDTH) - 1)
 
 /* The maximum size of a DSM segment. */
-#define DSA_MAX_SEGMENT_SIZE ((Size) 1 << DSA_OFFSET_WIDTH)
+#define DSA_MAX_SEGMENT_SIZE ((size_t) 1 << DSA_OFFSET_WIDTH)
 
 /* Number of pages (see FPM_PAGE_SIZE) per regular superblock. */
 #define DSA_PAGES_PER_SUPERBLOCK		16
@@ -121,7 +121,7 @@
 #define DSA_EXTRACT_OFFSET(dp) ((dp) & DSA_OFFSET_BITMASK)
 
 /* The type used for index segment indexes (zero based). */
-typedef Size dsa_segment_index;
+typedef size_t dsa_segment_index;
 
 /* Sentinel value for dsa_segment_index indicating 'none' or 'end'. */
 #define DSA_SEGMENT_INDEX_NONE (~(dsa_segment_index)0)
@@ -153,9 +153,9 @@ typedef struct
 	/* Sanity check magic value. */
 	uint32		magic;
 	/* Total number of pages in this segment (excluding metadata area). */
-	Size		usable_pages;
+	size_t		usable_pages;
 	/* Total size of this segment in bytes. */
-	Size		size;
+	size_t		size;
 
 	/*
 	 * Index of the segment that precedes this one in the same segment bin, or
@@ -169,7 +169,7 @@ typedef struct
 	 */
 	dsa_segment_index next;
 	/* The index of the bin that contains this segment. */
-	Size		bin;
+	size_t		bin;
 
 	/*
 	 * A flag raised to indicate that this segment is being returned to the
@@ -197,7 +197,7 @@ typedef struct
 	dsa_pointer prevspan;		/* Previous span. */
 	dsa_pointer nextspan;		/* Next span. */
 	dsa_pointer start;			/* Starting address. */
-	Size		npages;			/* Length of span in pages. */
+	size_t		npages;			/* Length of span in pages. */
 	uint16		size_class;		/* Size class. */
 	uint16		ninitialized;	/* Maximum number of objects ever allocated. */
 	uint16		nallocatable;	/* Number of objects currently allocatable. */
@@ -308,9 +308,9 @@ typedef struct
 	/* The object pools for each size class. */
 	dsa_area_pool pools[DSA_NUM_SIZE_CLASSES];
 	/* The total size of all active segments. */
-	Size		total_segment_size;
+	size_t		total_segment_size;
 	/* The maximum total size of backing storage we are allowed. */
-	Size		max_total_segment_size;
+	size_t		max_total_segment_size;
 	/* Highest used segment index in the history of this area. */
 	dsa_segment_index high_segment_index;
 	/* The reference count for this area. */
@@ -318,7 +318,7 @@ typedef struct
 	/* A flag indicating that this area has been pinned. */
 	bool		pinned;
 	/* The number of times that segments have been freed. */
-	Size		freed_segment_counter;
+	size_t		freed_segment_counter;
 	/* The LWLock tranche ID. */
 	int			lwlock_tranche_id;
 	/* The general lock (protects everything except object pools). */
@@ -371,7 +371,7 @@ struct dsa_area
 	dsa_segment_index high_segment_index;
 
 	/* The last observed freed_segment_counter. */
-	Size		freed_segment_counter;
+	size_t		freed_segment_counter;
 };
 
 #define DSA_SPAN_NOTHING_FREE	((uint16) -1)
@@ -382,7 +382,7 @@ struct dsa_area
 	(segment_map_ptr - &area->segment_maps[0])
 
 static void init_span(dsa_area *area, dsa_pointer span_pointer,
-		  dsa_area_pool *pool, dsa_pointer start, Size npages,
+		  dsa_area_pool *pool, dsa_pointer start, size_t npages,
 		  uint16 size_class);
 static bool transfer_first_span(dsa_area *area, dsa_area_pool *pool,
 					int fromclass, int toclass);
@@ -396,8 +396,8 @@ static void unlink_span(dsa_area *area, dsa_area_span *span);
 static void add_span_to_fullness_class(dsa_area *area, dsa_area_span *span,
 						   dsa_pointer span_pointer, int fclass);
 static void unlink_segment(dsa_area *area, dsa_segment_map *segment_map);
-static dsa_segment_map *get_best_segment(dsa_area *area, Size npages);
-static dsa_segment_map *make_new_segment(dsa_area *area, Size requested_pages);
+static dsa_segment_map *get_best_segment(dsa_area *area, size_t npages);
+static dsa_segment_map *make_new_segment(dsa_area *area, size_t requested_pages);
 static dsa_area *create_internal(void *place, size_t size,
 				int tranche_id,
 				dsm_handle control_handle,
@@ -661,7 +661,7 @@ dsa_pin_mapping(dsa_area *area)
  * flags.
  */
 dsa_pointer
-dsa_allocate_extended(dsa_area *area, Size size, int flags)
+dsa_allocate_extended(dsa_area *area, size_t size, int flags)
 {
 	uint16		size_class;
 	dsa_pointer start_pointer;
@@ -684,8 +684,8 @@ dsa_allocate_extended(dsa_area *area, Size size, int flags)
 	 */
 	if (size > dsa_size_classes[lengthof(dsa_size_classes) - 1])
 	{
-		Size		npages = fpm_size_to_pages(size);
-		Size		first_page;
+		size_t		npages = fpm_size_to_pages(size);
+		size_t		first_page;
 		dsa_pointer span_pointer;
 		dsa_area_pool *pool = &area->control->pools[DSA_SCLASS_SPAN_LARGE];
 
@@ -817,7 +817,7 @@ dsa_free(dsa_area *area, dsa_pointer dp)
 	dsa_area_span *span;
 	char	   *superblock;
 	char	   *object;
-	Size		size;
+	size_t		size;
 	int			size_class;
 
 	/* Make sure we don't have a stale segment in the slot 'dp' refers to. */
@@ -924,7 +924,7 @@ void *
 dsa_get_address(dsa_area *area, dsa_pointer dp)
 {
 	dsa_segment_index index;
-	Size		offset;
+	size_t		offset;
 
 	/* Convert InvalidDsaPointer to NULL. */
 	if (!DsaPointerIsValid(dp))
@@ -997,7 +997,7 @@ dsa_unpin(dsa_area *area)
  * backends that have attached to them.
  */
 void
-dsa_set_size_limit(dsa_area *area, Size limit)
+dsa_set_size_limit(dsa_area *area, size_t limit)
 {
 	LWLockAcquire(DSA_AREA_LOCK(area), LW_EXCLUSIVE);
 	area->control->max_total_segment_size = limit;
@@ -1056,7 +1056,7 @@ dsa_trim(dsa_area *area)
 void
 dsa_dump(dsa_area *area)
 {
-	Size		i,
+	size_t		i,
 				j;
 
 	/*
@@ -1156,10 +1156,10 @@ dsa_dump(dsa_area *area)
  * Return the smallest size that you can successfully provide to
  * dsa_create_in_place.
  */
-Size
+size_t
 dsa_minimum_size(void)
 {
-	Size		size;
+	size_t		size;
 	int			pages = 0;
 
 	size = MAXALIGN(sizeof(dsa_area_control)) +
@@ -1187,9 +1187,9 @@ create_internal(void *place, size_t size,
 	dsa_area_control *control;
 	dsa_area   *area;
 	dsa_segment_map *segment_map;
-	Size		usable_pages;
-	Size		total_pages;
-	Size		metadata_bytes;
+	size_t		usable_pages;
+	size_t		total_pages;
+	size_t		metadata_bytes;
 	int			i;
 
 	/* Sanity check on the space we have to work in. */
@@ -1222,7 +1222,7 @@ create_internal(void *place, size_t size,
 	control->segment_header.freed = false;
 	control->segment_header.size = DSA_INITIAL_SEGMENT_SIZE;
 	control->handle = control_handle;
-	control->max_total_segment_size = (Size) -1;
+	control->max_total_segment_size = (size_t) -1;
 	control->total_segment_size = size;
 	memset(&control->segment_handles[0], 0,
 		   sizeof(dsm_handle) * DSA_MAX_SEGMENTS);
@@ -1326,11 +1326,11 @@ attach_internal(void *place, dsm_segment *segment, dsa_handle handle)
 static void
 init_span(dsa_area *area,
 		  dsa_pointer span_pointer,
-		  dsa_area_pool *pool, dsa_pointer start, Size npages,
+		  dsa_area_pool *pool, dsa_pointer start, size_t npages,
 		  uint16 size_class)
 {
 	dsa_area_span *span = dsa_get_address(area, span_pointer);
-	Size		obsize = dsa_size_classes[size_class];
+	size_t		obsize = dsa_size_classes[size_class];
 
 	/*
 	 * The per-pool lock must be held because we manipulate the span list for
@@ -1426,7 +1426,7 @@ alloc_object(dsa_area *area, int size_class)
 	dsa_pointer block;
 	dsa_pointer result;
 	char	   *object;
-	Size		size;
+	size_t		size;
 
 	/*
 	 * Even though ensure_active_superblock can in turn call alloc_object if
@@ -1512,12 +1512,12 @@ ensure_active_superblock(dsa_area *area, dsa_area_pool *pool,
 {
 	dsa_pointer span_pointer;
 	dsa_pointer start_pointer;
-	Size		obsize = dsa_size_classes[size_class];
-	Size		nmax;
+	size_t		obsize = dsa_size_classes[size_class];
+	size_t		nmax;
 	int			fclass;
-	Size		npages = 1;
-	Size		first_page;
-	Size		i;
+	size_t		npages = 1;
+	size_t		first_page;
+	size_t		i;
 	dsa_segment_map *segment_map;
 
 	Assert(LWLockHeldByMe(DSA_SCLASS_LOCK(area, size_class)));
@@ -1930,9 +1930,9 @@ unlink_segment(dsa_area *area, dsa_segment_map *segment_map)
  * pages map.
  */
 static dsa_segment_map *
-get_best_segment(dsa_area *area, Size npages)
+get_best_segment(dsa_area *area, size_t npages)
 {
-	Size		bin;
+	size_t		bin;
 
 	Assert(LWLockHeldByMe(DSA_AREA_LOCK(area)));
 
@@ -1948,7 +1948,7 @@ get_best_segment(dsa_area *area, Size npages)
 		 * The minimum contiguous size that any segment in this bin should
 		 * have.  We'll re-bin if we see segments with fewer.
 		 */
-		Size		threshold = (Size) 1 << (bin - 1);
+		size_t		threshold = (size_t) 1 << (bin - 1);
 		dsa_segment_index segment_index;
 
 		/* Search this bin for a segment with enough contiguous space. */
@@ -1957,7 +1957,7 @@ get_best_segment(dsa_area *area, Size npages)
 		{
 			dsa_segment_map *segment_map;
 			dsa_segment_index next_segment_index;
-			Size		contiguous_pages;
+			size_t		contiguous_pages;
 
 			segment_map = get_segment_by_index(area, segment_index);
 			next_segment_index = segment_map->header->next;
@@ -1973,7 +1973,7 @@ get_best_segment(dsa_area *area, Size npages)
 			/* Re-bin it if it's no longer in the appropriate bin. */
 			if (contiguous_pages < threshold)
 			{
-				Size		new_bin;
+				size_t		new_bin;
 
 				new_bin = contiguous_pages_to_segment_bin(contiguous_pages);
 
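
The threshold arithmetic above implies the bin invariant: bin b holds
segments whose largest contiguous run is at least 1 << (b - 1) pages.  An
illustrative binning function consistent with that invariant follows; the
function of the same name in dsa.c may differ in detail (e.g. clamping to
the number of bins):

    /* bin 0: no pages; bin b (b > 0): at least 1 << (b - 1) pages free. */
    static size_t
    contiguous_pages_to_segment_bin(size_t n)
    {
        size_t      bin = 0;

        while (n != 0)
        {
            ++bin;                  /* bin = floor(log2(n)) + 1 for n > 0 */
            n >>= 1;
        }
        return bin;
    }
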
@@ -2021,13 +2021,13 @@ get_best_segment(dsa_area *area, Size npages)
  * segments would be exceeded.
  */
 static dsa_segment_map *
-make_new_segment(dsa_area *area, Size requested_pages)
+make_new_segment(dsa_area *area, size_t requested_pages)
 {
 	dsa_segment_index new_index;
-	Size		metadata_bytes;
-	Size		total_size;
-	Size		total_pages;
-	Size		usable_pages;
+	size_t		metadata_bytes;
+	size_t		total_size;
+	size_t		total_pages;
+	size_t		usable_pages;
 	dsa_segment_map *segment_map;
 	dsm_segment *segment;
 
@@ -2065,7 +2065,7 @@ make_new_segment(dsa_area *area, Size requested_pages)
 	 * pages we can fit.
 	 */
 	total_size = DSA_INITIAL_SEGMENT_SIZE *
-		((Size) 1 << (new_index / DSA_NUM_SEGMENTS_AT_EACH_SIZE));
+		((size_t) 1 << (new_index / DSA_NUM_SEGMENTS_AT_EACH_SIZE));
 	total_size = Min(total_size, DSA_MAX_SEGMENT_SIZE);
 	total_size = Min(total_size,
 					 area->control->max_total_segment_size -
@@ -2192,7 +2192,7 @@ make_new_segment(dsa_area *area, Size requested_pages)
 static void
 check_for_freed_segments(dsa_area *area)
 {
-	Size		freed_segment_counter;
+	size_t		freed_segment_counter;
 
 	/*
 	 * Any other process that has freed a segment has incremented
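
freed_segment_counter is a generation counter: the shared copy in
dsa_area_control advances each time any backend frees a segment, and each
backend compares its cached copy before trusting its local segment maps.  A
schematic of the pattern, with the locking and memory-ordering details of
the real function elided:

    /* Schematic only; synchronization details assumed away. */
    static void
    check_for_freed_segments_sketch(dsa_area *area)
    {
        size_t      freed_segment_counter;

        freed_segment_counter = area->control->freed_segment_counter;
        if (area->freed_segment_counter != freed_segment_counter)
        {
            /* A segment was freed since we last looked: unmap any cached
             * segment whose header is marked freed, then catch up. */
            area->freed_segment_counter = freed_segment_counter;
        }
    }
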
diff --git a/src/backend/utils/mmgr/freepage.c b/src/backend/utils/mmgr/freepage.c
index 2cd7581..9a96cb8 100644
--- a/src/backend/utils/mmgr/freepage.c
+++ b/src/backend/utils/mmgr/freepage.c
@@ -68,7 +68,7 @@
 struct FreePageSpanLeader
 {
 	int			magic;			/* always FREE_PAGE_SPAN_LEADER_MAGIC */
-	Size		npages;			/* number of pages in span */
+	size_t		npages;			/* number of pages in span */
 	RelptrFreePageSpanLeader prev;
 	RelptrFreePageSpanLeader next;
 };
@@ -78,22 +78,22 @@ typedef struct FreePageBtreeHeader
 {
 	int			magic;			/* FREE_PAGE_LEAF_MAGIC or
 								 * FREE_PAGE_INTERNAL_MAGIC */
-	Size		nused;			/* number of items used */
+	size_t		nused;			/* number of items used */
 	RelptrFreePageBtree parent; /* uplink */
 } FreePageBtreeHeader;
 
 /* Internal key; points to next level of btree. */
 typedef struct FreePageBtreeInternalKey
 {
-	Size		first_page;		/* low bound for keys on child page */
+	size_t		first_page;		/* low bound for keys on child page */
 	RelptrFreePageBtree child;	/* downlink */
 } FreePageBtreeInternalKey;
 
 /* Leaf key; no payload data. */
 typedef struct FreePageBtreeLeafKey
 {
-	Size		first_page;		/* first page in span */
-	Size		npages;			/* number of pages in span */
+	size_t		first_page;		/* first page in span */
+	size_t		npages;			/* number of pages in span */
 } FreePageBtreeLeafKey;
 
 /* Work out how many keys will fit on a page. */
@@ -119,7 +119,7 @@ struct FreePageBtree
 typedef struct FreePageBtreeSearchResult
 {
 	FreePageBtree *page;
-	Size		index;
+	size_t		index;
 	bool		found;
 	unsigned	split_pages;
 } FreePageBtreeSearchResult;
@@ -127,45 +127,45 @@ typedef struct FreePageBtreeSearchResult
 /* Helper functions */
 static void FreePageBtreeAdjustAncestorKeys(FreePageManager *fpm,
 								FreePageBtree *btp);
-static Size FreePageBtreeCleanup(FreePageManager *fpm);
+static size_t FreePageBtreeCleanup(FreePageManager *fpm);
 static FreePageBtree *FreePageBtreeFindLeftSibling(char *base,
 							 FreePageBtree *btp);
 static FreePageBtree *FreePageBtreeFindRightSibling(char *base,
 							  FreePageBtree *btp);
-static Size FreePageBtreeFirstKey(FreePageBtree *btp);
+static size_t FreePageBtreeFirstKey(FreePageBtree *btp);
 static FreePageBtree *FreePageBtreeGetRecycled(FreePageManager *fpm);
 static void FreePageBtreeInsertInternal(char *base, FreePageBtree *btp,
-						  Size index, Size first_page, FreePageBtree *child);
-static void FreePageBtreeInsertLeaf(FreePageBtree *btp, Size index,
-						Size first_page, Size npages);
-static void FreePageBtreeRecycle(FreePageManager *fpm, Size pageno);
+						  size_t index, size_t first_page, FreePageBtree *child);
+static void FreePageBtreeInsertLeaf(FreePageBtree *btp, size_t index,
+						size_t first_page, size_t npages);
+static void FreePageBtreeRecycle(FreePageManager *fpm, size_t pageno);
 static void FreePageBtreeRemove(FreePageManager *fpm, FreePageBtree *btp,
-					Size index);
+					size_t index);
 static void FreePageBtreeRemovePage(FreePageManager *fpm, FreePageBtree *btp);
-static void FreePageBtreeSearch(FreePageManager *fpm, Size first_page,
+static void FreePageBtreeSearch(FreePageManager *fpm, size_t first_page,
 					FreePageBtreeSearchResult *result);
-static Size FreePageBtreeSearchInternal(FreePageBtree *btp, Size first_page);
-static Size FreePageBtreeSearchLeaf(FreePageBtree *btp, Size first_page);
+static size_t FreePageBtreeSearchInternal(FreePageBtree *btp, size_t first_page);
+static size_t FreePageBtreeSearchLeaf(FreePageBtree *btp, size_t first_page);
 static FreePageBtree *FreePageBtreeSplitPage(FreePageManager *fpm,
 					   FreePageBtree *btp);
 static void FreePageBtreeUpdateParentPointers(char *base, FreePageBtree *btp);
 static void FreePageManagerDumpBtree(FreePageManager *fpm, FreePageBtree *btp,
 						 FreePageBtree *parent, int level, StringInfo buf);
 static void FreePageManagerDumpSpans(FreePageManager *fpm,
-						 FreePageSpanLeader *span, Size expected_pages,
+						 FreePageSpanLeader *span, size_t expected_pages,
 						 StringInfo buf);
-static bool FreePageManagerGetInternal(FreePageManager *fpm, Size npages,
-						   Size *first_page);
-static Size FreePageManagerPutInternal(FreePageManager *fpm, Size first_page,
-						   Size npages, bool soft);
-static void FreePagePopSpanLeader(FreePageManager *fpm, Size pageno);
-static void FreePagePushSpanLeader(FreePageManager *fpm, Size first_page,
-					   Size npages);
-static Size FreePageManagerLargestContiguous(FreePageManager *fpm);
+static bool FreePageManagerGetInternal(FreePageManager *fpm, size_t npages,
+						   size_t *first_page);
+static size_t FreePageManagerPutInternal(FreePageManager *fpm, size_t first_page,
+						   size_t npages, bool soft);
+static void FreePagePopSpanLeader(FreePageManager *fpm, size_t pageno);
+static void FreePagePushSpanLeader(FreePageManager *fpm, size_t first_page,
+					   size_t npages);
+static size_t FreePageManagerLargestContiguous(FreePageManager *fpm);
 static void FreePageManagerUpdateLargest(FreePageManager *fpm);
 
 #if FPM_EXTRA_ASSERTS
-static Size sum_free_pages(FreePageManager *fpm);
+static size_t sum_free_pages(FreePageManager *fpm);
 #endif
 
 /*
@@ -182,7 +182,7 @@ static Size sum_free_pages(FreePageManager *fpm);
 void
 FreePageManagerInitialize(FreePageManager *fpm, char *base)
 {
-	Size		f;
+	size_t		f;
 
 	relptr_store(base, fpm->self, fpm);
 	relptr_store(base, fpm->btree_root, (FreePageBtree *) NULL);
@@ -207,10 +207,10 @@ FreePageManagerInitialize(FreePageManager *fpm, char *base)
  * if true, the first page of the allocation is stored in *first_page.
  */
 bool
-FreePageManagerGet(FreePageManager *fpm, Size npages, Size *first_page)
+FreePageManagerGet(FreePageManager *fpm, size_t npages, size_t *first_page)
 {
 	bool		result;
-	Size		contiguous_pages;
+	size_t		contiguous_pages;
 
 	result = FreePageManagerGetInternal(fpm, npages, first_page);
 
@@ -249,7 +249,7 @@ FreePageManagerGet(FreePageManager *fpm, Size npages, Size *first_page)
 
 #ifdef FPM_EXTRA_ASSERTS
 static void
-sum_free_pages_recurse(FreePageManager *fpm, FreePageBtree *btp, Size *sum)
+sum_free_pages_recurse(FreePageManager *fpm, FreePageBtree *btp, size_t *sum)
 {
 	char	   *base = fpm_segment_base(fpm);
 
@@ -258,7 +258,7 @@ sum_free_pages_recurse(FreePageManager *fpm, FreePageBtree *btp, Size *sum)
 	++*sum;
 	if (btp->hdr.magic == FREE_PAGE_INTERNAL_MAGIC)
 	{
-		Size		index;
+		size_t		index;
 
 
 		for (index = 0; index < btp->hdr.nused; ++index)
@@ -270,12 +270,12 @@ sum_free_pages_recurse(FreePageManager *fpm, FreePageBtree *btp, Size *sum)
 		}
 	}
 }
-static Size
+static size_t
 sum_free_pages(FreePageManager *fpm)
 {
 	FreePageSpanLeader *recycle;
 	char	   *base = fpm_segment_base(fpm);
-	Size		sum = 0;
+	size_t		sum = 0;
 	int			list;
 
 	/* Count the spans by scanning the freelists. */
@@ -320,11 +320,11 @@ sum_free_pages(FreePageManager *fpm)
  * Compute the size of the largest run of pages that the user could
  * successfully get.
  */
-static Size
+static size_t
 FreePageManagerLargestContiguous(FreePageManager *fpm)
 {
 	char	   *base;
-	Size		largest;
+	size_t		largest;
 
 	base = fpm_segment_base(fpm);
 	largest = 0;
@@ -342,7 +342,7 @@ FreePageManagerLargestContiguous(FreePageManager *fpm)
 	}
 	else
 	{
-		Size		f = FPM_NUM_FREELISTS - 1;
+		size_t		f = FPM_NUM_FREELISTS - 1;
 
 		do
 		{
@@ -376,9 +376,9 @@ FreePageManagerUpdateLargest(FreePageManager *fpm)
  * Transfer a run of pages to the free page manager.
  */
 void
-FreePageManagerPut(FreePageManager *fpm, Size first_page, Size npages)
+FreePageManagerPut(FreePageManager *fpm, size_t first_page, size_t npages)
 {
-	Size		contiguous_pages;
+	size_t		contiguous_pages;
 
 	Assert(npages > 0);
 
@@ -392,7 +392,7 @@ FreePageManagerPut(FreePageManager *fpm, Size first_page, Size npages)
 	 */
 	if (contiguous_pages > npages)
 	{
-		Size		cleanup_contiguous_pages;
+		size_t		cleanup_contiguous_pages;
 
 		cleanup_contiguous_pages = FreePageBtreeCleanup(fpm);
 		if (cleanup_contiguous_pages > contiguous_pages)
@@ -427,7 +427,7 @@ FreePageManagerDump(FreePageManager *fpm)
 	StringInfoData buf;
 	FreePageSpanLeader *recycle;
 	bool		dumped_any_freelist = false;
-	Size		f;
+	size_t		f;
 
 	/* Initialize output buffer. */
 	initStringInfo(&buf);
@@ -501,7 +501,7 @@ static void
 FreePageBtreeAdjustAncestorKeys(FreePageManager *fpm, FreePageBtree *btp)
 {
 	char	   *base = fpm_segment_base(fpm);
-	Size		first_page;
+	size_t		first_page;
 	FreePageBtree *parent;
 	FreePageBtree *child;
 
@@ -523,7 +523,7 @@ FreePageBtreeAdjustAncestorKeys(FreePageManager *fpm, FreePageBtree *btp)
 	/* Loop until we find an ancestor that does not require adjustment. */
 	for (;;)
 	{
-		Size		s;
+		size_t		s;
 
 		parent = relptr_access(base, child->hdr.parent);
 		if (parent == NULL)
@@ -576,11 +576,11 @@ FreePageBtreeAdjustAncestorKeys(FreePageManager *fpm, FreePageBtree *btp)
  * Attempt to reclaim space from the free-page btree.  The return value is
  * the largest range of contiguous pages created by the cleanup operation.
  */
-static Size
+static size_t
 FreePageBtreeCleanup(FreePageManager *fpm)
 {
 	char	   *base = fpm_segment_base(fpm);
-	Size		max_contiguous_pages = 0;
+	size_t		max_contiguous_pages = 0;
 
 	/* Attempt to shrink the depth of the btree. */
 	while (!relptr_is_null(fpm->btree_root))
@@ -615,8 +615,8 @@ FreePageBtreeCleanup(FreePageManager *fpm)
 		else if (root->hdr.nused == 2 &&
 				 root->hdr.magic == FREE_PAGE_LEAF_MAGIC)
 		{
-			Size		end_of_first;
-			Size		start_of_second;
+			size_t		end_of_first;
+			size_t		start_of_second;
 
 			end_of_first = root->u.leaf_key[0].first_page +
 				root->u.leaf_key[0].npages;
@@ -624,7 +624,7 @@ FreePageBtreeCleanup(FreePageManager *fpm)
 
 			if (end_of_first + 1 == start_of_second)
 			{
-				Size		root_page = fpm_pointer_to_page(base, root);
+				size_t		root_page = fpm_pointer_to_page(base, root);
 
 				if (end_of_first == root_page)
 				{
@@ -666,8 +666,8 @@ FreePageBtreeCleanup(FreePageManager *fpm)
 	while (fpm->btree_recycle_count > 0)
 	{
 		FreePageBtree *btp;
-		Size		first_page;
-		Size		contiguous_pages;
+		size_t		first_page;
+		size_t		contiguous_pages;
 
 		btp = FreePageBtreeGetRecycled(fpm);
 		first_page = fpm_pointer_to_page(base, btp);
@@ -696,7 +696,7 @@ FreePageBtreeConsolidate(FreePageManager *fpm, FreePageBtree *btp)
 {
 	char	   *base = fpm_segment_base(fpm);
 	FreePageBtree *np;
-	Size		max;
+	size_t		max;
 
 	/*
 	 * We only try to consolidate pages that are less than a third full. We
@@ -779,8 +779,8 @@ FreePageBtreeFindLeftSibling(char *base, FreePageBtree *btp)
 	/* Move up until we can move left. */
 	for (;;)
 	{
-		Size		first_page;
-		Size		index;
+		size_t		first_page;
+		size_t		index;
 
 		first_page = FreePageBtreeFirstKey(p);
 		p = relptr_access(base, p->hdr.parent);
@@ -824,8 +824,8 @@ FreePageBtreeFindRightSibling(char *base, FreePageBtree *btp)
 	/* Move up until we can move right. */
 	for (;;)
 	{
-		Size		first_page;
-		Size		index;
+		size_t		first_page;
+		size_t		index;
 
 		first_page = FreePageBtreeFirstKey(p);
 		p = relptr_access(base, p->hdr.parent);
@@ -859,7 +859,7 @@ FreePageBtreeFindRightSibling(char *base, FreePageBtree *btp)
 /*
  * Get the first key on a btree page.
  */
-static Size
+static size_t
 FreePageBtreeFirstKey(FreePageBtree *btp)
 {
 	Assert(btp->hdr.nused > 0);
@@ -897,8 +897,8 @@ FreePageBtreeGetRecycled(FreePageManager *fpm)
  * Insert an item into an internal page.
  */
 static void
-FreePageBtreeInsertInternal(char *base, FreePageBtree *btp, Size index,
-							Size first_page, FreePageBtree *child)
+FreePageBtreeInsertInternal(char *base, FreePageBtree *btp, size_t index,
+							size_t first_page, FreePageBtree *child)
 {
 	Assert(btp->hdr.magic == FREE_PAGE_INTERNAL_MAGIC);
 	Assert(btp->hdr.nused <= FPM_ITEMS_PER_INTERNAL_PAGE);
@@ -914,8 +914,8 @@ FreePageBtreeInsertInternal(char *base, FreePageBtree *btp, Size index,
  * Insert an item into a leaf page.
  */
 static void
-FreePageBtreeInsertLeaf(FreePageBtree *btp, Size index, Size first_page,
-						Size npages)
+FreePageBtreeInsertLeaf(FreePageBtree *btp, size_t index, size_t first_page,
+						size_t npages)
 {
 	Assert(btp->hdr.magic == FREE_PAGE_LEAF_MAGIC);
 	Assert(btp->hdr.nused <= FPM_ITEMS_PER_LEAF_PAGE);
@@ -931,7 +931,7 @@ FreePageBtreeInsertLeaf(FreePageBtree *btp, Size index, Size first_page,
  * Put a page on the btree recycle list.
  */
 static void
-FreePageBtreeRecycle(FreePageManager *fpm, Size pageno)
+FreePageBtreeRecycle(FreePageManager *fpm, size_t pageno)
 {
 	char	   *base = fpm_segment_base(fpm);
 	FreePageSpanLeader *head = relptr_access(base, fpm->btree_recycle);
@@ -952,7 +952,7 @@ FreePageBtreeRecycle(FreePageManager *fpm, Size pageno)
  * Remove an item from the btree at the given position on the given page.
  */
 static void
-FreePageBtreeRemove(FreePageManager *fpm, FreePageBtree *btp, Size index)
+FreePageBtreeRemove(FreePageManager *fpm, FreePageBtree *btp, size_t index)
 {
 	Assert(btp->hdr.magic == FREE_PAGE_LEAF_MAGIC);
 	Assert(index < btp->hdr.nused);
@@ -988,8 +988,8 @@ FreePageBtreeRemovePage(FreePageManager *fpm, FreePageBtree *btp)
 {
 	char	   *base = fpm_segment_base(fpm);
 	FreePageBtree *parent;
-	Size		index;
-	Size		first_page;
+	size_t		index;
+	size_t		first_page;
 
 	for (;;)
 	{
@@ -1061,12 +1061,12 @@ FreePageBtreeRemovePage(FreePageManager *fpm, FreePageBtree *btp)
  * undefined on return.
  */
 static void
-FreePageBtreeSearch(FreePageManager *fpm, Size first_page,
+FreePageBtreeSearch(FreePageManager *fpm, size_t first_page,
 					FreePageBtreeSearchResult *result)
 {
 	char	   *base = fpm_segment_base(fpm);
 	FreePageBtree *btp = relptr_access(base, fpm->btree_root);
-	Size		index;
+	size_t		index;
 
 	result->split_pages = 1;
 
@@ -1136,19 +1136,19 @@ FreePageBtreeSearch(FreePageManager *fpm, Size first_page,
  * page number.  Returns the index of that key, or one greater than the number
  * of keys on the page if none.
  */
-static Size
-FreePageBtreeSearchInternal(FreePageBtree *btp, Size first_page)
+static size_t
+FreePageBtreeSearchInternal(FreePageBtree *btp, size_t first_page)
 {
-	Size		low = 0;
-	Size		high = btp->hdr.nused;
+	size_t		low = 0;
+	size_t		high = btp->hdr.nused;
 
 	Assert(btp->hdr.magic == FREE_PAGE_INTERNAL_MAGIC);
 	Assert(high > 0 && high <= FPM_ITEMS_PER_INTERNAL_PAGE);
 
 	while (low < high)
 	{
-		Size		mid = (low + high) / 2;
-		Size		val = btp->u.internal_key[mid].first_page;
+		size_t		mid = (low + high) / 2;
+		size_t		val = btp->u.internal_key[mid].first_page;
 
 		if (first_page == val)
 			return mid;
@@ -1166,19 +1166,19 @@ FreePageBtreeSearchInternal(FreePageBtree *btp, Size first_page)
  * page number.  Returns the index of that key, or one greater than the number
  * of keys on the page if none.
  */
-static Size
-FreePageBtreeSearchLeaf(FreePageBtree *btp, Size first_page)
+static size_t
+FreePageBtreeSearchLeaf(FreePageBtree *btp, size_t first_page)
 {
-	Size		low = 0;
-	Size		high = btp->hdr.nused;
+	size_t		low = 0;
+	size_t		high = btp->hdr.nused;
 
 	Assert(btp->hdr.magic == FREE_PAGE_LEAF_MAGIC);
 	Assert(high > 0 && high <= FPM_ITEMS_PER_LEAF_PAGE);
 
 	while (low < high)
 	{
-		Size		mid = (low + high) / 2;
-		Size		val = btp->u.leaf_key[mid].first_page;
+		size_t		mid = (low + high) / 2;
+		size_t		val = btp->u.leaf_key[mid].first_page;
 
 		if (first_page == val)
 			return mid;
@@ -1231,7 +1231,7 @@ FreePageBtreeSplitPage(FreePageManager *fpm, FreePageBtree *btp)
 static void
 FreePageBtreeUpdateParentPointers(char *base, FreePageBtree *btp)
 {
-	Size		i;
+	size_t		i;
 
 	Assert(btp->hdr.magic == FREE_PAGE_INTERNAL_MAGIC);
 	for (i = 0; i < btp->hdr.nused; ++i)
@@ -1251,8 +1251,8 @@ FreePageManagerDumpBtree(FreePageManager *fpm, FreePageBtree *btp,
 						 FreePageBtree *parent, int level, StringInfo buf)
 {
 	char	   *base = fpm_segment_base(fpm);
-	Size		pageno = fpm_pointer_to_page(base, btp);
-	Size		index;
+	size_t		pageno = fpm_pointer_to_page(base, btp);
+	size_t		index;
 	FreePageBtree *check_parent;
 
 	check_stack_depth();
@@ -1294,7 +1294,7 @@ FreePageManagerDumpBtree(FreePageManager *fpm, FreePageBtree *btp,
  */
 static void
 FreePageManagerDumpSpans(FreePageManager *fpm, FreePageSpanLeader *span,
-						 Size expected_pages, StringInfo buf)
+						 size_t expected_pages, StringInfo buf)
 {
 	char	   *base = fpm_segment_base(fpm);
 
@@ -1316,15 +1316,15 @@ FreePageManagerDumpSpans(FreePageManager *fpm, FreePageSpanLeader *span,
  * page manager.
  */
 static bool
-FreePageManagerGetInternal(FreePageManager *fpm, Size npages, Size *first_page)
+FreePageManagerGetInternal(FreePageManager *fpm, size_t npages, size_t *first_page)
 {
 	char	   *base = fpm_segment_base(fpm);
 	FreePageSpanLeader *victim = NULL;
 	FreePageSpanLeader *prev;
 	FreePageSpanLeader *next;
 	FreePageBtreeSearchResult result;
-	Size		victim_page = 0;	/* placate compiler */
-	Size		f;
+	size_t		victim_page = 0;	/* placate compiler */
+	size_t		f;
 
 	/*
 	 * Search for a free span.
@@ -1474,8 +1474,8 @@ FreePageManagerGetInternal(FreePageManager *fpm, Size npages, Size *first_page)
  * true if the btree allocated pages for internal purposes, which might
  * invalidate the current largest run requiring it to be recomputed.
  */
-static Size
-FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,
+static size_t
+FreePageManagerPutInternal(FreePageManager *fpm, size_t first_page, size_t npages,
 						   bool soft)
 {
 	char	   *base = fpm_segment_base(fpm);
@@ -1483,7 +1483,7 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,
 	FreePageBtreeLeafKey *prevkey = NULL;
 	FreePageBtreeLeafKey *nextkey = NULL;
 	FreePageBtree *np;
-	Size		nindex;
+	size_t		nindex;
 
 	Assert(npages > 0);
 
@@ -1521,7 +1521,7 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,
 		else
 		{
 			/* Not contiguous; we need to initialize the btree. */
-			Size		root_page;
+			size_t		root_page;
 			FreePageBtree *root;
 
 			if (!relptr_is_null(fpm->btree_recycle))
@@ -1586,7 +1586,7 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,
 	if (prevkey != NULL && prevkey->first_page + prevkey->npages >= first_page)
 	{
 		bool		remove_next = false;
-		Size		result;
+		size_t		result;
 
 		Assert(prevkey->first_page + prevkey->npages == first_page);
 		prevkey->npages = (first_page - prevkey->first_page) + npages;
@@ -1627,7 +1627,7 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,
 	/* Consolidate with the next entry if possible. */
 	if (nextkey != NULL && first_page + npages >= nextkey->first_page)
 	{
-		Size		newpages;
+		size_t		newpages;
 
 		/* Compute new size for span. */
 		Assert(first_page + npages == nextkey->first_page);
@@ -1669,9 +1669,9 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,
 		/* Check whether we need to allocate more btree pages to split. */
 		if (result.split_pages > fpm->btree_recycle_count)
 		{
-			Size		pages_needed;
-			Size		recycle_page;
-			Size		i;
+			size_t		pages_needed;
+			size_t		recycle_page;
+			size_t		i;
 
 			/*
 			 * Allocate the required number of pages and split each one in
@@ -1715,7 +1715,7 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,
 		{
 			FreePageBtree *split_target = result.page;
 			FreePageBtree *child = NULL;
-			Size		key = first_page;
+			size_t		key = first_page;
 
 			for (;;)
 			{
@@ -1739,7 +1739,7 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,
 				 */
 				if (child == NULL)
 				{
-					Size		index;
+					size_t		index;
 					FreePageBtree *insert_into;
 
 					insert_into = key < newsibling->u.leaf_key[0].first_page ?
@@ -1751,7 +1751,7 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,
 				}
 				else
 				{
-					Size		index;
+					size_t		index;
 					FreePageBtree *insert_into;
 
 					insert_into =
@@ -1795,7 +1795,7 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,
 				key = newsibling->u.internal_key[0].first_page;
 				if (parent->hdr.nused < FPM_ITEMS_PER_INTERNAL_PAGE)
 				{
-					Size		index;
+					size_t		index;
 
 					index = FreePageBtreeSearchInternal(parent, key);
 					FreePageBtreeInsertInternal(base, parent, index,
@@ -1840,7 +1840,7 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,
  * because we're changing the size of the span, or because we're allocating it.
  */
 static void
-FreePagePopSpanLeader(FreePageManager *fpm, Size pageno)
+FreePagePopSpanLeader(FreePageManager *fpm, size_t pageno)
 {
 	char	   *base = fpm_segment_base(fpm);
 	FreePageSpanLeader *span;
@@ -1857,7 +1857,7 @@ FreePagePopSpanLeader(FreePageManager *fpm, Size pageno)
 		relptr_copy(prev->next, span->next);
 	else
 	{
-		Size		f = Min(span->npages, FPM_NUM_FREELISTS) - 1;
+		size_t		f = Min(span->npages, FPM_NUM_FREELISTS) - 1;
 
 		Assert(fpm->freelist[f].relptr_off == pageno * FPM_PAGE_SIZE);
 		relptr_copy(fpm->freelist[f], span->next);
@@ -1868,10 +1868,10 @@ FreePagePopSpanLeader(FreePageManager *fpm, Size pageno)
  * Initialize a new FreePageSpanLeader and put it on the appropriate free list.
  */
 static void
-FreePagePushSpanLeader(FreePageManager *fpm, Size first_page, Size npages)
+FreePagePushSpanLeader(FreePageManager *fpm, size_t first_page, size_t npages)
 {
 	char	   *base = fpm_segment_base(fpm);
-	Size		f = Min(npages, FPM_NUM_FREELISTS) - 1;
+	size_t		f = Min(npages, FPM_NUM_FREELISTS) - 1;
 	FreePageSpanLeader *head = relptr_access(base, fpm->freelist[f]);
 	FreePageSpanLeader *span;
 
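
The freelist index used in FreePagePushSpanLeader() and
FreePagePopSpanLeader(), f = Min(npages, FPM_NUM_FREELISTS) - 1, encodes a
bucketed-by-size design; a worked example using names from this file:

    /* npages == 1                     -> freelist[0]   (exact size)
     * npages == 2                     -> freelist[1]   (exact size)
     * npages == FPM_NUM_FREELISTS - 1 -> freelist[FPM_NUM_FREELISTS - 2]
     * npages >= FPM_NUM_FREELISTS     -> freelist[FPM_NUM_FREELISTS - 1],
     * shared by all larger spans, which is why FreePageManagerGetInternal()
     * must search that final list rather than just take its head. */
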
diff --git a/src/backend/utils/mmgr/slab.c b/src/backend/utils/mmgr/slab.c
index d6be4fe..6f9faac 100644
--- a/src/backend/utils/mmgr/slab.c
+++ b/src/backend/utils/mmgr/slab.c
@@ -64,9 +64,9 @@ typedef struct SlabContext
 {
 	MemoryContextData header;	/* Standard memory-context fields */
 	/* Allocation parameters for this context: */
-	Size		chunkSize;		/* chunk size */
-	Size		fullChunkSize;	/* chunk size including header and alignment */
-	Size		blockSize;		/* block size */
+	size_t		chunkSize;		/* chunk size */
+	size_t		fullChunkSize;	/* chunk size including header and alignment */
+	size_t		blockSize;		/* block size */
 	int			chunksPerBlock; /* number of chunks per block */
 	int			minFreeChunks;	/* min number of free chunks in any block */
 	int			nblocks;		/* number of blocks allocated */
@@ -117,13 +117,13 @@ typedef struct SlabChunk
 /*
  * These functions implement the MemoryContext API for Slab contexts.
  */
-static void *SlabAlloc(MemoryContext context, Size size);
+static void *SlabAlloc(MemoryContext context, size_t size);
 static void SlabFree(MemoryContext context, void *pointer);
-static void *SlabRealloc(MemoryContext context, void *pointer, Size size);
+static void *SlabRealloc(MemoryContext context, void *pointer, size_t size);
 static void SlabInit(MemoryContext context);
 static void SlabReset(MemoryContext context);
 static void SlabDelete(MemoryContext context);
-static Size SlabGetChunkSpace(MemoryContext context, void *pointer);
+static size_t SlabGetChunkSpace(MemoryContext context, void *pointer);
 static bool SlabIsEmpty(MemoryContext context);
 static void SlabStats(MemoryContext context, int level, bool print,
 		  MemoryContextCounters *totals);
@@ -182,12 +182,12 @@ static MemoryContextMethods SlabMethods = {
 MemoryContext
 SlabContextCreate(MemoryContext parent,
 				  const char *name,
-				  Size blockSize,
-				  Size chunkSize)
+				  size_t blockSize,
+				  size_t chunkSize)
 {
 	int			chunksPerBlock;
-	Size		fullChunkSize;
-	Size		freelistSize;
+	size_t		fullChunkSize;
+	size_t		freelistSize;
 	SlabContext *slab;
 
 	StaticAssertStmt(offsetof(SlabChunk, slab) +sizeof(MemoryContext) ==
@@ -315,7 +315,7 @@ SlabDelete(MemoryContext context)
  *		request could not be completed; memory is added to the slab.
  */
 static void *
-SlabAlloc(MemoryContext context, Size size)
+SlabAlloc(MemoryContext context, size_t size)
 {
 	SlabContext *slab = castNode(SlabContext, context);
 	SlabBlock  *block;
@@ -559,7 +559,7 @@ SlabFree(MemoryContext context, void *pointer)
  * realloc is usually used to enlarge the chunk.
  */
 static void *
-SlabRealloc(MemoryContext context, void *pointer, Size size)
+SlabRealloc(MemoryContext context, void *pointer, size_t size)
 {
 	SlabContext *slab = castNode(SlabContext, context);
 
@@ -578,7 +578,7 @@ SlabRealloc(MemoryContext context, void *pointer, Size size)
  *		Given a currently-allocated chunk, determine the total space
  *		it occupies (including all memory-allocation overhead).
  */
-static Size
+static size_t
 SlabGetChunkSpace(MemoryContext context, void *pointer)
 {
 	SlabContext *slab = castNode(SlabContext, context);
@@ -615,10 +615,10 @@ SlabStats(MemoryContext context, int level, bool print,
 		  MemoryContextCounters *totals)
 {
 	SlabContext *slab = castNode(SlabContext, context);
-	Size		nblocks = 0;
-	Size		freechunks = 0;
-	Size		totalspace = 0;
-	Size		freespace = 0;
+	size_t		nblocks = 0;
+	size_t		freechunks = 0;
+	size_t		totalspace = 0;
+	size_t		freespace = 0;
 	int			i;
 
 	Assert(slab);
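
SlabContextCreate() derives chunksPerBlock from the two caller-supplied
sizes.  A hedged sketch of that arithmetic; the exact header and freelist
accounting in the real function is more involved, and the MAXALIGN
placement here is an assumption:

    static int
    slab_chunks_per_block(size_t blockSize, size_t chunkSize)
    {
        /* Each chunk carries a SlabChunk header and is MAXALIGNed. */
        size_t      fullChunkSize = sizeof(SlabChunk) + MAXALIGN(chunkSize);

        /* Whatever fits in the block after the per-block header. */
        return (int) ((blockSize - sizeof(SlabBlock)) / fullChunkSize);
    }
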
diff --git a/src/include/lib/simplehash.h b/src/include/lib/simplehash.h
index 6c6c3ee..a937f01 100644
--- a/src/include/lib/simplehash.h
+++ b/src/include/lib/simplehash.h
@@ -220,7 +220,7 @@ SH_COMPUTE_PARAMETERS(SH_TYPE *tb, uint32 newsize)
 
 	/*
 	 * Verify allocation of ->data is possible on platform, without
-	 * overflowing Size.
+	 * overflowing size_t.
 	 */
 	if ((((uint64) sizeof(SH_ELEMENT_TYPE)) * size) >= MaxAllocHugeSize)
 		elog(ERROR, "hash table too large");
@@ -293,14 +293,14 @@ SH_ENTRY_HASH(SH_TYPE *tb, SH_ELEMENT_TYPE * entry)
 }
 
 /* default memory allocator function */
-static inline void *SH_ALLOCATE(SH_TYPE *type, Size size);
+static inline void *SH_ALLOCATE(SH_TYPE *type, size_t size);
 static inline void SH_FREE(SH_TYPE *type, void *pointer);
 
 #ifndef SH_USE_NONDEFAULT_ALLOCATOR
 
 /* default memory allocator function */
 static inline void *
-SH_ALLOCATE(SH_TYPE *type, Size size)
+SH_ALLOCATE(SH_TYPE *type, size_t size)
 {
 	return MemoryContextAllocExtended(type->ctx, size,
 									  MCXT_ALLOC_HUGE | MCXT_ALLOC_ZERO);
diff --git a/src/include/replication/logicallauncher.h b/src/include/replication/logicallauncher.h
index cfe3db1..e2aff3a 100644
--- a/src/include/replication/logicallauncher.h
+++ b/src/include/replication/logicallauncher.h
@@ -17,7 +17,7 @@ extern int max_logical_replication_workers;
 extern void ApplyLauncherRegister(void);
 extern void ApplyLauncherMain(Datum main_arg);
 
-extern Size ApplyLauncherShmemSize(void);
+extern size_t ApplyLauncherShmemSize(void);
 extern void ApplyLauncherShmemInit(void);
 
 extern void ApplyLauncherWakeup(void);
diff --git a/src/include/utils/backend_random.h b/src/include/utils/backend_random.h
index 31602f2..aadeba0 100644
--- a/src/include/utils/backend_random.h
+++ b/src/include/utils/backend_random.h
@@ -12,7 +12,7 @@
 #ifndef BACKEND_RANDOM_H
 #define BACKEND_RANDOM_H
 
-extern Size BackendRandomShmemSize(void);
+extern size_t BackendRandomShmemSize(void);
 extern void BackendRandomShmemInit(void);
 extern bool pg_backend_random(char *dst, int len);
 
diff --git a/src/include/utils/dsa.h b/src/include/utils/dsa.h
index f084443..a40a8ee 100644
--- a/src/include/utils/dsa.h
+++ b/src/include/utils/dsa.h
@@ -22,7 +22,7 @@ struct dsa_area;
 typedef struct dsa_area dsa_area;
 
 /*
- * If this system only uses a 32-bit value for Size, then use the 32-bit
+ * If this system only uses a 32-bit value for size_t, then use the 32-bit
  * implementation of DSA.  This limits the amount of DSA that can be created
  * to something significantly less than the entire 4GB address space because
  * the DSA pointer must encode both a segment identifier and an offset, but
@@ -102,7 +102,7 @@ typedef dsm_handle dsa_handle;
 extern void dsa_startup(void);
 
 extern dsa_area *dsa_create(int tranche_id);
-extern dsa_area *dsa_create_in_place(void *place, Size size,
+extern dsa_area *dsa_create_in_place(void *place, size_t size,
 					int tranche_id, dsm_segment *segment);
 extern dsa_area *dsa_attach(dsa_handle handle);
 extern dsa_area *dsa_attach_in_place(void *place, dsm_segment *segment);
@@ -113,10 +113,10 @@ extern void dsa_pin_mapping(dsa_area *area);
 extern void dsa_detach(dsa_area *area);
 extern void dsa_pin(dsa_area *area);
 extern void dsa_unpin(dsa_area *area);
-extern void dsa_set_size_limit(dsa_area *area, Size limit);
-extern Size dsa_minimum_size(void);
+extern void dsa_set_size_limit(dsa_area *area, size_t limit);
+extern size_t dsa_minimum_size(void);
 extern dsa_handle dsa_get_handle(dsa_area *area);
-extern dsa_pointer dsa_allocate_extended(dsa_area *area, Size size, int flags);
+extern dsa_pointer dsa_allocate_extended(dsa_area *area, size_t size, int flags);
 extern void dsa_free(dsa_area *area, dsa_pointer dp);
 extern void *dsa_get_address(dsa_area *area, dsa_pointer dp);
 extern void dsa_trim(dsa_area *area);
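
A hedged usage sketch of the API declared in this header; MY_TRANCHE_ID is
a placeholder and error handling is omitted:

    dsa_area   *area = dsa_create(MY_TRANCHE_ID);   /* placeholder ID */
    dsa_pointer dp = dsa_allocate_extended(area, 1024, 0);
    char       *p = dsa_get_address(area, dp);      /* backend-local address */

    memset(p, 0, 1024);
    dsa_free(area, dp);             /* release the allocation */
    dsa_detach(area);
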
diff --git a/src/include/utils/freepage.h b/src/include/utils/freepage.h
index 78caa53..7ce8a0f 100644
--- a/src/include/utils/freepage.h
+++ b/src/include/utils/freepage.h
@@ -52,23 +52,23 @@ struct FreePageManager
 	RelptrFreePageSpanLeader btree_recycle;
 	unsigned	btree_depth;
 	unsigned	btree_recycle_count;
-	Size		singleton_first_page;
-	Size		singleton_npages;
-	Size		contiguous_pages;
+	size_t		singleton_first_page;
+	size_t		singleton_npages;
+	size_t		contiguous_pages;
 	bool		contiguous_pages_dirty;
 	RelptrFreePageSpanLeader freelist[FPM_NUM_FREELISTS];
 #ifdef FPM_EXTRA_ASSERTS
 	/* For debugging only, pages put minus pages gotten. */
-	Size		free_pages;
+	size_t		free_pages;
 #endif
 };
 
-/* Macros to convert between page numbers (expressed as Size) and pointers. */
+/* Macros to convert between page numbers (expressed as size_t) and pointers. */
 #define fpm_page_to_pointer(base, page) \
-	(AssertVariableIsOfTypeMacro(page, Size), \
+	(AssertVariableIsOfTypeMacro(page, size_t), \
 	 (base) + FPM_PAGE_SIZE * (page))
 #define fpm_pointer_to_page(base, ptr)		\
-	(((Size) (((char *) (ptr)) - (base))) / FPM_PAGE_SIZE)
+	(((size_t) (((char *) (ptr)) - (base))) / FPM_PAGE_SIZE)
 
 /* Macro to convert an allocation size to a number of pages. */
 #define fpm_size_to_pages(sz) \
@@ -76,7 +76,7 @@ struct FreePageManager
 
 /* Macros to check alignment of absolute and relative pointers. */
 #define fpm_pointer_is_page_aligned(base, ptr)		\
-	(((Size) (((char *) (ptr)) - (base))) % FPM_PAGE_SIZE == 0)
+	(((size_t) (((char *) (ptr)) - (base))) % FPM_PAGE_SIZE == 0)
 #define fpm_relptr_is_page_aligned(base, relptr)		\
 	((relptr).relptr_off % FPM_PAGE_SIZE == 0)
 
@@ -90,10 +90,10 @@ struct FreePageManager
 
 /* Functions to manipulate the free page map. */
 extern void FreePageManagerInitialize(FreePageManager *fpm, char *base);
-extern bool FreePageManagerGet(FreePageManager *fpm, Size npages,
-				   Size *first_page);
-extern void FreePageManagerPut(FreePageManager *fpm, Size first_page,
-				   Size npages);
+extern bool FreePageManagerGet(FreePageManager *fpm, size_t npages,
+				   size_t *first_page);
+extern void FreePageManagerPut(FreePageManager *fpm, size_t first_page,
+				   size_t npages);
 extern char *FreePageManagerDump(FreePageManager *fpm);
 
 #endif   /* FREEPAGE_H */
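
A hedged usage sketch of this API; where the manager lives inside the
segment (here, at 'base' itself) is an assumption of the example:

    FreePageManager *fpm = (FreePageManager *) base;
    size_t      first_page;

    FreePageManagerInitialize(fpm, base);
    FreePageManagerPut(fpm, 10, 100);   /* pages [10, 110) become free */
    if (FreePageManagerGet(fpm, 16, &first_page))
    {
        /* 16 contiguous pages acquired, starting at first_page */
    }
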
diff --git a/src/include/utils/relptr.h b/src/include/utils/relptr.h
index 1e5e622..322607a 100644
--- a/src/include/utils/relptr.h
+++ b/src/include/utils/relptr.h
@@ -22,11 +22,11 @@
  * The idea here is that you declare a relative pointer as relptr(type)
  * and then use relptr_access to dereference it and relptr_store to change
  * it.  The use of a union here is a hack, because what's stored in the
- * relptr is always a Size, never an actual pointer.  But including a pointer
+ * relptr is always a size_t, never an actual pointer.  But including a pointer
  * in the union allows us to use stupid macro tricks to provide some measure
  * of type-safety.
  */
-#define relptr(type)	 union { type *relptr_type; Size relptr_off; }
+#define relptr(type)	 union { type *relptr_type; size_t relptr_off; }
 
 /*
  * pgindent gets confused by declarations that use "relptr(type)" directly,

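
A hedged sketch of the relptr idiom as used throughout freepage.c and
dsa.c; MyNode is a made-up type (real code avoids writing relptr(type) in
declarations directly, per the pgindent note above):

    typedef struct MyNode MyNode;
    typedef relptr(MyNode) RelptrMyNode;

    struct MyNode
    {
        RelptrMyNode next;      /* stays valid at any mapping address */
    };

    static void
    example(char *base, MyNode *a, MyNode *b)
    {
        MyNode     *again;

        relptr_store(base, a->next, b);             /* a->next = b */
        again = relptr_access(base, a->next);       /* back to a pointer */
        Assert(again == b);
    }
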