diff --git a/contrib/pg_visibility/expected/pg_visibility.out b/contrib/pg_visibility/expected/pg_visibility.out
index f0dcb897c4..6ac3e525eb 100644
--- a/contrib/pg_visibility/expected/pg_visibility.out
+++ b/contrib/pg_visibility/expected/pg_visibility.out
@@ -131,6 +131,69 @@ select pg_truncate_visibility_map('test_partition');
  
 (1 row)
 
+-- test copy freeze
+create table copyfreeze (a int, b char(1500));
+-- load all rows via COPY FREEZE and ensure that all pages are set all-visible
+-- and all-frozen.
+begin;
+truncate copyfreeze;
+copy copyfreeze from stdin freeze;
+commit;
+select * from pg_visibility_map('copyfreeze');
+ blkno | all_visible | all_frozen 
+-------+-------------+------------
+     0 | t           | t
+     1 | t           | t
+     2 | t           | t
+(3 rows)
+
+select * from pg_check_frozen('copyfreeze');
+ t_ctid 
+--------
+(0 rows)
+
+-- load half the rows via regular COPY and rest via COPY FREEZE. The pages
+-- which are touched by regular COPY must not be set all-visible/all-frozen. On
+-- the other hand, pages allocated by COPY FREEZE should be marked
+-- all-frozen/all-visible.
+begin;
+truncate copyfreeze;
+copy copyfreeze from stdin;
+copy copyfreeze from stdin freeze;
+commit;
+select * from pg_visibility_map('copyfreeze');
+ blkno | all_visible | all_frozen 
+-------+-------------+------------
+     0 | f           | f
+     1 | f           | f
+     2 | t           | t
+(3 rows)
+
+select * from pg_check_frozen('copyfreeze');
+ t_ctid 
+--------
+(0 rows)
+
+-- Try a mix of regular COPY and COPY FREEZE.
+begin;
+truncate copyfreeze;
+copy copyfreeze from stdin freeze;
+copy copyfreeze from stdin;
+copy copyfreeze from stdin freeze;
+commit;
+select * from pg_visibility_map('copyfreeze');
+ blkno | all_visible | all_frozen 
+-------+-------------+------------
+     0 | t           | t
+     1 | f           | f
+     2 | t           | t
+(3 rows)
+
+select * from pg_check_frozen('copyfreeze');
+ t_ctid 
+--------
+(0 rows)
+
 -- cleanup
 drop table test_partitioned;
 drop view test_view;
@@ -140,3 +203,4 @@ drop server dummy_server;
 drop foreign data wrapper dummy;
 drop materialized view matview_visibility_test;
 drop table regular_table;
+drop table copyfreeze;
diff --git a/contrib/pg_visibility/sql/pg_visibility.sql b/contrib/pg_visibility/sql/pg_visibility.sql
index c2a7f1d9e4..01a65fdab4 100644
--- a/contrib/pg_visibility/sql/pg_visibility.sql
+++ b/contrib/pg_visibility/sql/pg_visibility.sql
@@ -72,6 +72,82 @@ select count(*) > 0 from pg_visibility_map_summary('test_partition');
 select * from pg_check_frozen('test_partition'); -- hopefully none
 select pg_truncate_visibility_map('test_partition');
 
+-- test copy freeze
+create table copyfreeze (a int, b char(1500));
+
+-- load all rows via COPY FREEZE and ensure that all pages are set all-visible
+-- and all-frozen.
+begin;
+truncate copyfreeze;
+copy copyfreeze from stdin freeze;
+1	'1'
+2	'2'
+3	'3'
+4	'4'
+5	'5'
+6	'6'
+7	'7'
+8	'8'
+9	'9'
+10	'10'
+11	'11'
+12	'12'
+\.
+commit;
+select * from pg_visibility_map('copyfreeze');
+select * from pg_check_frozen('copyfreeze');
+
+-- load half the rows via regular COPY and rest via COPY FREEZE. The pages
+-- which are touched by regular COPY must not be set all-visible/all-frozen. On
+-- the other hand, pages allocated by COPY FREEZE should be marked
+-- all-frozen/all-visible.
+begin;
+truncate copyfreeze;
+copy copyfreeze from stdin;
+1	'1'
+2	'2'
+3	'3'
+4	'4'
+5	'5'
+6	'6'
+\.
+copy copyfreeze from stdin freeze;
+7	'7'
+8	'8'
+9	'9'
+10	'10'
+11	'11'
+12	'12'
+\.
+commit;
+select * from pg_visibility_map('copyfreeze');
+select * from pg_check_frozen('copyfreeze');
+
+-- Try a mix of regular COPY and COPY FREEZE.
+begin;
+truncate copyfreeze;
+copy copyfreeze from stdin freeze;
+1	'1'
+2	'2'
+3	'3'
+4	'4'
+5	'5'
+\.
+copy copyfreeze from stdin;
+6	'6'
+\.
+copy copyfreeze from stdin freeze;
+7	'7'
+8	'8'
+9	'9'
+10	'10'
+11	'11'
+12	'12'
+\.
+commit;
+select * from pg_visibility_map('copyfreeze');
+select * from pg_check_frozen('copyfreeze');
+
 -- cleanup
 drop table test_partitioned;
 drop view test_view;
@@ -81,3 +157,4 @@ drop server dummy_server;
 drop foreign data wrapper dummy;
 drop materialized view matview_visibility_test;
 drop table regular_table;
+drop table copyfreeze;
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 137cc9257d..2ba9778995 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -1834,6 +1834,16 @@ ReleaseBulkInsertStatePin(BulkInsertState bistate)
 	bistate->current_buf = InvalidBuffer;
 }
 
+/*
+ * CheckAndSetAllVisibleBulkInsertState - check whether the buffer currently
+ * held in bistate is all-visible and all-frozen, and mark it so if it is.
+ */
+void
+CheckAndSetAllVisibleBulkInsertState(Relation relation, BulkInsertState bistate)
+{
+	if (bistate->current_buf != InvalidBuffer)
+		CheckAndSetPageAllVisible(relation, bistate->current_buf, NULL);
+}
 
 /*
  *	heap_insert		- insert tuple into a heap
@@ -8763,3 +8773,159 @@ heap_mask(char *pagedata, BlockNumber blkno)
 		}
 	}
 }
+
+/*
+ * Check if all tuples in this page are frozen and visible, without doing any
+ * extensive checks.
+ *
+ * The only use of this function is when we are doing heap insertions in
+ * HEAP_INSERT_FROZEN mode (e.g. COPY FREEZE). Since the relation is not
+ * visible to any other transactions when running in this mode, we usually
+ * expect the pages to contain only tuples with frozen-xmin and invalid-xmax.
+ * But in order to guard against the case when our own transaction may have
+ * inserted regular tuples, which are not marked frozen, in the table, and for
+ * an abundance of caution, we still do this check.
+ *
+ * While we could do more elaborate tests like heap_page_is_all_visible does,
+ * we deliberately try to keep this simple.
+ */
+static bool
+CheckPageIsAllFrozen(Relation relation, Buffer buf)
+{
+	Page		page = BufferGetPage(buf);
+	BlockNumber blockno = BufferGetBlockNumber(buf);
+	OffsetNumber offnum,
+				maxoff;
+
+	/*
+	 * This is a stripped down version of the line pointer scan in
+	 * lazy_scan_heap(). So if you change anything here, also check that code.
+	 */
+	maxoff = PageGetMaxOffsetNumber(page);
+	for (offnum = FirstOffsetNumber;
+		 offnum <= maxoff;
+		 offnum = OffsetNumberNext(offnum))
+	{
+		ItemId		itemid;
+		HeapTupleData tuple;
+
+		itemid = PageGetItemId(page, offnum);
+
+		/* Unused or redirect line pointers are of no interest */
+		if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
+			continue;
+
+		ItemPointerSet(&(tuple.t_self), blockno, offnum);
+
+		/*
+		 * Dead line pointers can have index pointers pointing to them. So
+		 * they can't be treated as visible
+		 */
+		if (ItemIdIsDead(itemid))
+			return false;
+
+		Assert(ItemIdIsNormal(itemid));
+
+		tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
+		tuple.t_len = ItemIdGetLength(itemid);
+		tuple.t_tableOid = RelationGetRelid(relation);
+
+		/*
+		 * If xmin is not frozen, then something other than the backend that is
+		 * inserting tuples in HEAP_INSERT_FROZEN mode has inserted tuples in
+		 * this page. Don't mark such a page all-visible and frozen.
+		 */
+		if (!HeapTupleHeaderXminFrozen(tuple.t_data))
+			return false;
+
+		/*
+		 * Similarly, if xmax is set, be paranoid and don't mark the page as
+		 * all-visible and frozen.
+		 */
+		if (HeapTupleHeaderGetRawXmax(tuple.t_data) != InvalidTransactionId)
+			return false;
+	}							/* scan along page */
+
+	return true;
+}
+
+/*
+ * If we are inserting frozen tuples, then check if the given page
+ * has all frozen tuples and mark the page as all-visible and frozen.
+ *
+ * The caller may pass a valid (pinned) vmbuffer, in which case a valid,
+ * possibly different, vmbuffer is returned. Otherwise no vmbuffer is returned.
+ */
+void
+CheckAndSetPageAllVisible(Relation relation, Buffer buffer, Buffer *vmbuffer)
+{
+	BlockNumber targetBlock;
+	Page		page;
+	Buffer		myvmbuffer = InvalidBuffer;
+
+	/* Nothing to do if we're passed an Invalid buffer */
+	if (!BufferIsValid(buffer))
+		return;
+
+	targetBlock = BufferGetBlockNumber(buffer);
+	page = BufferGetPage(buffer);
+
+	/*
+	 * Use the passed-in vmbuffer, if available. Otherwise obtain a pin on the
+	 * required visibility map page before locking the heap page.
+	 */
+	if (vmbuffer && BufferIsValid(*vmbuffer))
+		myvmbuffer = *vmbuffer;
+	else
+		visibilitymap_pin(relation, targetBlock, &myvmbuffer);
+
+	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+
+	/*
+	 * While we are holding the lock on the page, check if all tuples
+	 * in the page are marked frozen at insertion. We can safely mark
+	 * such page all-visible and set visibility map bits too.
+	 */
+	if (CheckPageIsAllFrozen(relation, buffer))
+	{
+		PageSetAllVisible(page);
+		MarkBufferDirty(buffer);
+	}
+
+	/*
+	 * All the changes to the heap page have been done. If the all-visible
+	 * flag is now set, also set the VM all-visible bit (and, if possible, the
+	 * all-frozen bit) unless this has already been done previously.
+	 *
+	 * Note: This portion of the code resembles what we do in
+	 * vacuumlazy.c.
+	 */
+	if (PageIsAllVisible(page))
+	{
+		uint8		vm_status = visibilitymap_get_status(relation,
+				targetBlock, &myvmbuffer);
+		uint8		flags = 0;
+
+		/* Collect the VM bits (all-visible, all-frozen) that still need setting */
+		if ((vm_status & VISIBILITYMAP_ALL_VISIBLE) == 0)
+			flags |= VISIBILITYMAP_ALL_VISIBLE;
+		if ((vm_status & VISIBILITYMAP_ALL_FROZEN) == 0)
+			flags |= VISIBILITYMAP_ALL_FROZEN;
+
+		Assert(BufferIsValid(myvmbuffer));
+		if (flags != 0)
+			visibilitymap_set(relation, targetBlock, buffer, InvalidXLogRecPtr,
+					myvmbuffer, InvalidTransactionId, flags);
+	}
+
+	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+
+	/*
+	 * If the caller passed a valid vmbuffer, return (potentially different)
+	 * vmbuffer. Otherwise just release the vmbuffer that we pinned.
+	 */
+	if (vmbuffer && BufferIsValid(*vmbuffer))
+		*vmbuffer = myvmbuffer;
+	else if (BufferIsValid(myvmbuffer))
+		ReleaseBuffer(myvmbuffer);
+}
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 69a7a23874..28619b7616 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -415,7 +415,15 @@ loop:
 		{
 			/* easy case */
 			buffer = ReadBufferBI(relation, targetBlock, RBM_NORMAL, bistate);
-			if (PageIsAllVisible(BufferGetPage(buffer)))
+
+			/*
+			 * Obtain a pin on the visibility map buffer if the page is known
+			 * to be all-visible or we are running in HEAP_INSERT_FROZEN mode.
+			 * In the latter case, we may decide to mark the page all-visible
+			 * before switching to a new page, hence obtain the pin in advance.
+			 */
+			if (PageIsAllVisible(BufferGetPage(buffer)) ||
+				options & HEAP_INSERT_FROZEN)
 				visibilitymap_pin(relation, targetBlock, vmbuffer);
 			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 		}
@@ -516,13 +524,18 @@ loop:
 		 * code above.
 		 */
 		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
-		if (otherBuffer == InvalidBuffer)
-			ReleaseBuffer(buffer);
-		else if (otherBlock != targetBlock)
-		{
+		if (otherBuffer != InvalidBuffer && otherBlock != targetBlock)
 			LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
+
+		/*
+		 * If we're inserting frozen tuples, then check if the current page is
+		 * completely frozen and set the visibility bit.
+		 */
+		if (options & HEAP_INSERT_FROZEN)
+			CheckAndSetPageAllVisible(relation, buffer, vmbuffer);
+
+		if ((otherBuffer == InvalidBuffer) || (otherBlock != targetBlock))
 			ReleaseBuffer(buffer);
-		}
 
 		/* Without FSM, always fall out of the loop and extend */
 		if (!use_fsm)
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 705df8900b..6dfbf993d5 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -2832,6 +2832,15 @@ CopyFrom(CopyState cstate)
 					!has_instead_insert_row_trig &&
 					resultRelInfo->ri_FdwRoutine == NULL;
 
+				/*
+				 * Note: As of PG12, COPY FREEZE is not supported on
+				 * partitioned tables. Nevertheless, have this check in place so
+				 * that we do the right thing if it ever gets supported.
+				 */
+				if (hi_options & HEAP_INSERT_FROZEN)
+					CheckAndSetAllVisibleBulkInsertState(resultRelInfo->ri_RelationDesc,
+							bistate);
+
 				/*
 				 * We'd better make the bulk insert mechanism gets a new
 				 * buffer when the partition being inserted into changes.
@@ -3046,6 +3055,15 @@ CopyFrom(CopyState cstate)
 								firstBufferedLineNo);
 	}
 
+	/*
+	 * If we are inserting frozen tuples, check if the last page used can also
+	 * be marked as all-visible and all-frozen. This ensures that a table can
+	 * be fully frozen when the data is loaded.
+	 */
+	if (hi_options & HEAP_INSERT_FROZEN)
+		CheckAndSetAllVisibleBulkInsertState(resultRelInfo->ri_RelationDesc,
+				bistate);
+
 	/* Done, clean up */
 	error_context_stack = errcallback.previous;
 
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 3773a4df85..bf32ce295e 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -140,6 +140,10 @@ extern void setLastTid(const ItemPointer tid);
 extern BulkInsertState GetBulkInsertState(void);
 extern void FreeBulkInsertState(BulkInsertState);
 extern void ReleaseBulkInsertStatePin(BulkInsertState bistate);
+extern void CheckAndSetAllVisibleBulkInsertState(Relation relation,
+					BulkInsertState bistate);
+extern void CheckAndSetPageAllVisible(Relation relation,
+					Buffer buffer, Buffer *vmbuffer);
 
 extern void heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 			int options, BulkInsertState bistate);
