Yet another version of my GTT patch, addressing issues reported by 曾文旌 (义从) <wenjing....@alibaba-inc.com>:
* Bug in TRUNCATE is fixed.
* ON COMMIT DELETE ROWS option is supported (a brief usage sketch is included below).
* ALTER TABLE is correctly handled.
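
For reference, a minimal usage sketch of the feature as it looks with this patch
applied (table and column names are invented for illustration): the table definition
is shared by all backends while the data is private to each backend, the ON COMMIT
clause is remembered in the on_commit_delete_rows reloption, TRUNCATE performs an
immediate, non-rollbackable truncation, and ALTER TABLE is rejected while other
backends are using the table.

    CREATE GLOBAL TEMPORARY TABLE gtt_example (id serial, payload text)
        ON COMMIT DELETE ROWS;

    BEGIN;
    INSERT INTO gtt_example (payload) VALUES ('visible only in this backend');
    SELECT count(*) FROM gtt_example;   -- 1
    COMMIT;                             -- ON COMMIT DELETE ROWS empties the table

    TRUNCATE gtt_example;               -- immediate, non-rollbackable truncation
    ALTER TABLE gtt_example ADD COLUMN note text;  -- fails while other backends use the table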

--
Konstantin Knizhnik
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company

diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index ae7b729..485c068 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -672,7 +672,7 @@ brinbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 	/*
 	 * We expect to be called exactly once for any index relation.
 	 */
-	if (RelationGetNumberOfBlocks(index) != 0)
+	if (RelationGetNumberOfBlocks(index) != 0 && index->rd_rel->relpersistence != RELPERSISTENCE_SESSION)
 		elog(ERROR, "index \"%s\" already contains data",
 			 RelationGetRelationName(index));
 
@@ -681,9 +681,17 @@ brinbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 	 * whole relation will be rolled back.
 	 */
 
-	meta = ReadBuffer(index, P_NEW);
-	Assert(BufferGetBlockNumber(meta) == BRIN_METAPAGE_BLKNO);
-	LockBuffer(meta, BUFFER_LOCK_EXCLUSIVE);
+	if (index->rd_rel->relpersistence != RELPERSISTENCE_SESSION)
+	{
+		meta = ReadBuffer(index, P_NEW);
+		Assert(BufferGetBlockNumber(meta) == BRIN_METAPAGE_BLKNO);
+		LockBuffer(meta, BUFFER_LOCK_EXCLUSIVE);
+	}
+	else
+	{
+		meta = ReadBuffer(index, BRIN_METAPAGE_BLKNO);
+		LockBuffer(meta, BUFFER_LOCK_SHARE);
+	}
 
 	brin_metapage_init(BufferGetPage(meta), BrinGetPagesPerRange(index),
 					   BRIN_CURRENT_VERSION);
diff --git a/src/backend/access/brin/brin_revmap.c b/src/backend/access/brin/brin_revmap.c
index 647350c..62e5212 100644
--- a/src/backend/access/brin/brin_revmap.c
+++ b/src/backend/access/brin/brin_revmap.c
@@ -25,6 +25,7 @@
 #include "access/brin_revmap.h"
 #include "access/brin_tuple.h"
 #include "access/brin_xlog.h"
+#include "access/brin.h"
 #include "access/rmgr.h"
 #include "access/xloginsert.h"
 #include "miscadmin.h"
@@ -79,6 +80,13 @@ brinRevmapInitialize(Relation idxrel, BlockNumber *pagesPerRange,
 	meta = ReadBuffer(idxrel, BRIN_METAPAGE_BLKNO);
 	LockBuffer(meta, BUFFER_LOCK_SHARE);
 	page = BufferGetPage(meta);
+
+	if (GlobalTempRelationPageIsNotInitialized(idxrel, page))
+	{
+		Relation heap = RelationIdGetRelation(idxrel->rd_index->indrelid);
+		brinbuild(heap, idxrel, BuildIndexInfo(idxrel));
+		RelationClose(heap);
+	}
 	TestForOldSnapshot(snapshot, idxrel, page);
 	metadata = (BrinMetaPageData *) PageGetContents(page);
 
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index b5072c0..650f31a 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -158,6 +158,19 @@ static relopt_bool boolRelOpts[] =
 		},
 		true
 	},
+	/*
+	 * For global temp tables only;
+	 * ShareUpdateExclusiveLock is used to ensure safety
+	 */
+	{
+		{
+			"on_commit_delete_rows",
+			"ON COMMIT DELETE ROWS option for global temp tables",
+			RELOPT_KIND_HEAP | RELOPT_KIND_PARTITIONED,
+			ShareUpdateExclusiveLock
+		},
+		false
+	},
 	/* list terminator */
 	{{NULL}}
 };
@@ -1478,6 +1491,8 @@ default_reloptions(Datum reloptions, bool validate, relopt_kind kind)
 	StdRdOptions *rdopts;
 	int			numoptions;
 	static const relopt_parse_elt tab[] = {
+		{"on_commit_delete_rows", RELOPT_TYPE_BOOL,
+		offsetof(StdRdOptions, on_commit_delete_rows)},
 		{"fillfactor", RELOPT_TYPE_INT, offsetof(StdRdOptions, fillfactor)},
 		{"autovacuum_enabled", RELOPT_TYPE_BOOL,
 		offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, enabled)},
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 439a91b..51fe102 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -241,6 +241,13 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
 	metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
 	metapage = BufferGetPage(metabuffer);
 
+	if (GlobalTempRelationPageIsNotInitialized(index, metapage))
+	{
+		Relation heap = RelationIdGetRelation(index->rd_index->indrelid);
+		ginbuild(heap, index, BuildIndexInfo(index));
+		RelationClose(heap);
+	}
+
 	/*
 	 * An insertion to the pending list could logically belong anywhere in the
 	 * tree, so it conflicts with all serializable scans.  All scans acquire a
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index b18ae2b..975c186 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -1759,7 +1759,8 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 				match;
 	int			i;
 	pendingPosition pos;
-	Buffer		metabuffer = ReadBuffer(scan->indexRelation, GIN_METAPAGE_BLKNO);
+	Relation    index = scan->indexRelation;
+	Buffer		metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
 	Page		page;
 	BlockNumber blkno;
 
@@ -1769,11 +1770,19 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 	 * Acquire predicate lock on the metapage, to conflict with any fastupdate
 	 * insertions.
 	 */
-	PredicateLockPage(scan->indexRelation, GIN_METAPAGE_BLKNO, scan->xs_snapshot);
+	PredicateLockPage(index, GIN_METAPAGE_BLKNO, scan->xs_snapshot);
 
 	LockBuffer(metabuffer, GIN_SHARE);
 	page = BufferGetPage(metabuffer);
-	TestForOldSnapshot(scan->xs_snapshot, scan->indexRelation, page);
+	TestForOldSnapshot(scan->xs_snapshot, index, page);
+
+	if (GlobalTempRelationPageIsNotInitialized(index, page))
+	{
+		Relation heap = RelationIdGetRelation(index->rd_index->indrelid);
+		ginbuild(heap, index, BuildIndexInfo(index));
+		RelationClose(heap);
+		/* fall through: a freshly built index has an empty pending list */
+	}
 	blkno = GinPageGetMeta(page)->head;
 
 	/*
@@ -1784,10 +1793,10 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 	{
 		/* No pending list, so proceed with normal scan */
 		UnlockReleaseBuffer(metabuffer);
-		return;
+		return true;
 	}
 
-	pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
+	pos.pendingBuffer = ReadBuffer(index, blkno);
 	LockBuffer(pos.pendingBuffer, GIN_SHARE);
 	pos.firstOffset = FirstOffsetNumber;
 	UnlockReleaseBuffer(metabuffer);
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 55eab14..d6739f3 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -328,7 +328,7 @@ ginbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 	MemoryContext oldCtx;
 	OffsetNumber attnum;
 
-	if (RelationGetNumberOfBlocks(index) != 0)
+	if (RelationGetNumberOfBlocks(index) != 0 && index->rd_rel->relpersistence != RELPERSISTENCE_SESSION)
 		elog(ERROR, "index \"%s\" already contains data",
 			 RelationGetRelationName(index));
 
@@ -337,7 +337,15 @@ ginbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 	memset(&buildstate.buildStats, 0, sizeof(GinStatsData));
 
 	/* initialize the meta page */
-	MetaBuffer = GinNewBuffer(index);
+	if (index->rd_rel->relpersistence == RELPERSISTENCE_SESSION)
+	{
+		MetaBuffer = ReadBuffer(index, 0);
+		LockBuffer(MetaBuffer, GIN_SHARE);
+	}
+	else
+	{
+		MetaBuffer = GinNewBuffer(index);
+	}
 
 	/* initialize the root page */
 	RootBuffer = GinNewBuffer(index);
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 0cc8791..bcde5ea 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -16,6 +16,7 @@
 
 #include "access/gist_private.h"
 #include "access/gistscan.h"
+#include "catalog/index.h"
 #include "catalog/pg_collation.h"
 #include "miscadmin.h"
 #include "storage/lmgr.h"
@@ -677,7 +678,10 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace,
 		if (!xlocked)
 		{
 			LockBuffer(stack->buffer, GIST_SHARE);
-			gistcheckpage(state.r, stack->buffer);
+			if (stack->blkno == GIST_ROOT_BLKNO && GlobalTempRelationPageIsNotInitialized(state.r, BufferGetPage(stack->buffer)))
+				gistbuild(heapRel, r, BuildIndexInfo(r));
+			else
+				gistcheckpage(state.r, stack->buffer);
 		}
 
 		stack->page = (Page) BufferGetPage(stack->buffer);
diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c
index 2f4543d..8d194c8 100644
--- a/src/backend/access/gist/gistbuild.c
+++ b/src/backend/access/gist/gistbuild.c
@@ -156,7 +156,7 @@ gistbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 	 * We expect to be called exactly once for any index relation. If that's
 	 * not the case, big trouble's what we have.
 	 */
-	if (RelationGetNumberOfBlocks(index) != 0)
+	if (RelationGetNumberOfBlocks(index) != 0 && index->rd_rel->relpersistence != RELPERSISTENCE_SESSION)
 		elog(ERROR, "index \"%s\" already contains data",
 			 RelationGetRelationName(index));
 
@@ -171,8 +171,16 @@ gistbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 	buildstate.giststate->tempCxt = createTempGistContext();
 
 	/* initialize the root page */
-	buffer = gistNewBuffer(index);
-	Assert(BufferGetBlockNumber(buffer) == GIST_ROOT_BLKNO);
+	if (index->rd_rel->relpersistence != RELPERSISTENCE_SESSION)
+	{
+		buffer = gistNewBuffer(index);
+		Assert(BufferGetBlockNumber(buffer) == GIST_ROOT_BLKNO);
+	}
+	else
+	{
+		buffer = ReadBuffer(index, GIST_ROOT_BLKNO);
+		LockBuffer(buffer, GIST_SHARE);
+	}
 	page = BufferGetPage(buffer);
 
 	START_CRIT_SECTION();
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index 22d790d..5560a41 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -17,8 +17,10 @@
 #include "access/genam.h"
 #include "access/gist_private.h"
 #include "access/relscan.h"
+#include "catalog/index.h"
 #include "miscadmin.h"
 #include "storage/lmgr.h"
+#include "storage/freespace.h"
 #include "storage/predicate.h"
 #include "pgstat.h"
 #include "lib/pairingheap.h"
@@ -344,7 +346,10 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem,
 	buffer = ReadBuffer(scan->indexRelation, pageItem->blkno);
 	LockBuffer(buffer, GIST_SHARE);
 	PredicateLockPage(r, BufferGetBlockNumber(buffer), scan->xs_snapshot);
-	gistcheckpage(scan->indexRelation, buffer);
+	if (pageItem->blkno == GIST_ROOT_BLKNO && GlobalTempRelationPageIsNotInitialized(r, BufferGetPage(buffer)))
+		gistbuild(scan->heapRelation, r, BuildIndexInfo(r));
+	else
+		gistcheckpage(scan->indexRelation, buffer);
 	page = BufferGetPage(buffer);
 	TestForOldSnapshot(scan->xs_snapshot, r, page);
 	opaque = GistPageGetOpaque(page);
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index 45804d7..50b306a 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -1028,7 +1028,7 @@ gistGetFakeLSN(Relation rel)
 {
 	static XLogRecPtr counter = FirstNormalUnloggedLSN;
 
-	if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
+	if (RelationHasSessionScope(rel))
 	{
 		/*
 		 * Temporary relations are only accessible in our session, so a simple
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 5cc30da..1b228db 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -119,7 +119,7 @@ hashbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 	 * We expect to be called exactly once for any index relation. If that's
 	 * not the case, big trouble's what we have.
 	 */
-	if (RelationGetNumberOfBlocks(index) != 0)
+	if (RelationGetNumberOfBlocks(index) != 0 && index->rd_rel->relpersistence != RELPERSISTENCE_SESSION)
 		elog(ERROR, "index \"%s\" already contains data",
 			 RelationGetRelationName(index));
 
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 838ee68..00ba123 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -75,13 +75,22 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
 
 	buf = ReadBuffer(rel, blkno);
 
-	if (access != HASH_NOLOCK)
-		LockBuffer(buf, access);
-
 	/* ref count and lock type are correct */
 
-	_hash_checkpage(rel, buf, flags);
-
+	if (blkno == HASH_METAPAGE && GlobalTempRelationPageIsNotInitialized(rel, BufferGetPage(buf)))
+	{
+		Relation heap = RelationIdGetRelation(rel->rd_index->indrelid);
+		hashbuild(heap, rel, BuildIndexInfo(rel));
+		RelationClose(heap);
+		if (access != HASH_NOLOCK)
+			LockBuffer(buf, access);
+	}
+	else
+	{
+		if (access != HASH_NOLOCK)
+			LockBuffer(buf, access);
+		_hash_checkpage(rel, buf, flags);
+	}
 	return buf;
 }
 
@@ -339,7 +348,7 @@ _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
 	bool		use_wal;
 
 	/* safety check */
-	if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
+	if (rel->rd_rel->relpersistence != RELPERSISTENCE_SESSION && RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
 		elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
 			 RelationGetRelationName(rel));
 
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index 2dd8821..92df373 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -673,6 +673,7 @@ heapam_relation_copy_data(Relation rel, const RelFileNode *newrnode)
 			 * init fork of an unlogged relation.
 			 */
 			if (rel->rd_rel->relpersistence == RELPERSISTENCE_PERMANENT ||
+				rel->rd_rel->relpersistence == RELPERSISTENCE_SESSION ||
 				(rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
 				 forkNum == INIT_FORKNUM))
 				log_smgrcreate(newrnode, forkNum);
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 268f869..eff9e10 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -27,8 +27,10 @@
 #include "access/transam.h"
 #include "access/xlog.h"
 #include "access/xloginsert.h"
+#include "catalog/index.h"
 #include "miscadmin.h"
 #include "storage/indexfsm.h"
+#include "storage/buf_internals.h"
 #include "storage/lmgr.h"
 #include "storage/predicate.h"
 #include "utils/snapmgr.h"
@@ -762,8 +764,22 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
 	{
 		/* Read an existing block of the relation */
 		buf = ReadBuffer(rel, blkno);
-		LockBuffer(buf, access);
-		_bt_checkpage(rel, buf);
+		/* A session temporary relation may not yet be initialized for this backend. */
+		if (blkno == BTREE_METAPAGE && GlobalTempRelationPageIsNotInitialized(rel, BufferGetPage(buf)))
+		{
+			Relation heap = RelationIdGetRelation(rel->rd_index->indrelid);
+			ReleaseBuffer(buf);
+			DropRelFileNodeLocalBuffers(rel->rd_node, MAIN_FORKNUM, blkno);
+			btbuild(heap, rel, BuildIndexInfo(rel));
+			RelationClose(heap);
+			buf = ReadBuffer(rel, blkno);
+			LockBuffer(buf, access);
+		}
+		else
+		{
+			LockBuffer(buf, access);
+			_bt_checkpage(rel, buf);
+		}
 	}
 	else
 	{
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index ab19692..227bc19 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -330,7 +330,7 @@ btbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 	 * We expect to be called exactly once for any index relation. If that's
 	 * not the case, big trouble's what we have.
 	 */
-	if (RelationGetNumberOfBlocks(index) != 0)
+	if (RelationGetNumberOfBlocks(index) != 0 && index->rd_rel->relpersistence != RELPERSISTENCE_SESSION)
 		elog(ERROR, "index \"%s\" already contains data",
 			 RelationGetRelationName(index));
 
diff --git a/src/backend/access/spgist/spginsert.c b/src/backend/access/spgist/spginsert.c
index b40bd44..f44bec7 100644
--- a/src/backend/access/spgist/spginsert.c
+++ b/src/backend/access/spgist/spginsert.c
@@ -81,21 +81,32 @@ spgbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 				rootbuffer,
 				nullbuffer;
 
-	if (RelationGetNumberOfBlocks(index) != 0)
-		elog(ERROR, "index \"%s\" already contains data",
-			 RelationGetRelationName(index));
-
-	/*
-	 * Initialize the meta page and root pages
-	 */
-	metabuffer = SpGistNewBuffer(index);
-	rootbuffer = SpGistNewBuffer(index);
-	nullbuffer = SpGistNewBuffer(index);
-
-	Assert(BufferGetBlockNumber(metabuffer) == SPGIST_METAPAGE_BLKNO);
-	Assert(BufferGetBlockNumber(rootbuffer) == SPGIST_ROOT_BLKNO);
-	Assert(BufferGetBlockNumber(nullbuffer) == SPGIST_NULL_BLKNO);
-
+	if (index->rd_rel->relpersistence != RELPERSISTENCE_SESSION)
+	{
+		if (RelationGetNumberOfBlocks(index) != 0)
+			elog(ERROR, "index \"%s\" already contains data",
+				 RelationGetRelationName(index));
+
+		/*
+		 * Initialize the meta page and root pages
+		 */
+		metabuffer = SpGistNewBuffer(index);
+		rootbuffer = SpGistNewBuffer(index);
+		nullbuffer = SpGistNewBuffer(index);
+
+		Assert(BufferGetBlockNumber(metabuffer) == SPGIST_METAPAGE_BLKNO);
+		Assert(BufferGetBlockNumber(rootbuffer) == SPGIST_ROOT_BLKNO);
+		Assert(BufferGetBlockNumber(nullbuffer) == SPGIST_NULL_BLKNO);
+	}
+	else
+	{
+		metabuffer = ReadBuffer(index, SPGIST_METAPAGE_BLKNO);
+		rootbuffer = ReadBuffer(index, SPGIST_ROOT_BLKNO);
+		nullbuffer = ReadBuffer(index, SPGIST_NULL_BLKNO);
+		LockBuffer(metabuffer, BUFFER_LOCK_SHARE);
+		LockBuffer(rootbuffer, BUFFER_LOCK_SHARE);
+		LockBuffer(nullbuffer, BUFFER_LOCK_SHARE);
+	}
 	START_CRIT_SECTION();
 
 	SpGistInitMetapage(BufferGetPage(metabuffer));
diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c
index 45472db..ea15964 100644
--- a/src/backend/access/spgist/spgutils.c
+++ b/src/backend/access/spgist/spgutils.c
@@ -21,6 +21,7 @@
 #include "access/spgist_private.h"
 #include "access/transam.h"
 #include "access/xact.h"
+#include "catalog/index.h"
 #include "catalog/pg_amop.h"
 #include "storage/bufmgr.h"
 #include "storage/indexfsm.h"
@@ -106,6 +107,7 @@ spgGetCache(Relation index)
 		spgConfigIn in;
 		FmgrInfo   *procinfo;
 		Buffer		metabuffer;
+		Page        metapage;
 		SpGistMetaPageData *metadata;
 
 		cache = MemoryContextAllocZero(index->rd_indexcxt,
@@ -155,12 +157,21 @@ spgGetCache(Relation index)
 		metabuffer = ReadBuffer(index, SPGIST_METAPAGE_BLKNO);
 		LockBuffer(metabuffer, BUFFER_LOCK_SHARE);
 
-		metadata = SpGistPageGetMeta(BufferGetPage(metabuffer));
+		metapage = BufferGetPage(metabuffer);
+		metadata = SpGistPageGetMeta(metapage);
 
 		if (metadata->magicNumber != SPGIST_MAGIC_NUMBER)
-			elog(ERROR, "index \"%s\" is not an SP-GiST index",
-				 RelationGetRelationName(index));
-
+		{
+			if (GlobalTempRelationPageIsNotInitialized(index, metapage))
+			{
+				Relation heap = RelationIdGetRelation(index->rd_index->indrelid);
+				spgbuild(heap, index, BuildIndexInfo(index));
+				RelationClose(heap);
+			}
+			else
+				elog(ERROR, "index \"%s\" is not an SP-GiST index",
+					 RelationGetRelationName(index));
+		}
 		cache->lastUsedPages = metadata->lastUsedPages;
 
 		UnlockReleaseBuffer(metabuffer);
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index 1af31c2..e60bdb7 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -402,6 +402,9 @@ GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence)
 		case RELPERSISTENCE_TEMP:
 			backend = BackendIdForTempRelations();
 			break;
+		case RELPERSISTENCE_SESSION:
+			backend = BackendIdForSessionRelations();
+			break;
 		case RELPERSISTENCE_UNLOGGED:
 		case RELPERSISTENCE_PERMANENT:
 			backend = InvalidBackendId;
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index f6c31cc..d943b57 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -3652,7 +3652,7 @@ reindex_relation(Oid relid, int flags, int options)
 		if (flags & REINDEX_REL_FORCE_INDEXES_UNLOGGED)
 			persistence = RELPERSISTENCE_UNLOGGED;
 		else if (flags & REINDEX_REL_FORCE_INDEXES_PERMANENT)
-			persistence = RELPERSISTENCE_PERMANENT;
+			persistence = rel->rd_rel->relpersistence == RELPERSISTENCE_SESSION ? RELPERSISTENCE_SESSION : RELPERSISTENCE_PERMANENT;
 		else
 			persistence = rel->rd_rel->relpersistence;
 
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index 625af8d..1e192fa 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -93,6 +93,10 @@ RelationCreateStorage(RelFileNode rnode, char relpersistence)
 			backend = InvalidBackendId;
 			needs_wal = false;
 			break;
+		case RELPERSISTENCE_SESSION:
+			backend = BackendIdForSessionRelations();
+			needs_wal = false;
+			break;
 		case RELPERSISTENCE_PERMANENT:
 			backend = InvalidBackendId;
 			needs_wal = true;
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 7accb95..9f2ea48 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -102,7 +102,7 @@ static int	acquire_inherited_sample_rows(Relation onerel, int elevel,
 										  HeapTuple *rows, int targrows,
 										  double *totalrows, double *totaldeadrows);
 static void update_attstats(Oid relid, bool inh,
-							int natts, VacAttrStats **vacattrstats);
+							int natts, VacAttrStats **vacattrstats, bool is_global_temp);
 static Datum std_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
 static Datum ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
 
@@ -318,6 +318,7 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
 	Oid			save_userid;
 	int			save_sec_context;
 	int			save_nestlevel;
+	bool        is_global_temp = onerel->rd_rel->relpersistence == RELPERSISTENCE_SESSION;
 
 	if (inh)
 		ereport(elevel,
@@ -575,14 +576,14 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
 		 * pg_statistic for columns we didn't process, we leave them alone.)
 		 */
 		update_attstats(RelationGetRelid(onerel), inh,
-						attr_cnt, vacattrstats);
+						attr_cnt, vacattrstats, is_global_temp);
 
 		for (ind = 0; ind < nindexes; ind++)
 		{
 			AnlIndexData *thisdata = &indexdata[ind];
 
 			update_attstats(RelationGetRelid(Irel[ind]), false,
-							thisdata->attr_cnt, thisdata->vacattrstats);
+							thisdata->attr_cnt, thisdata->vacattrstats, is_global_temp);
 		}
 
 		/*
@@ -1425,7 +1426,7 @@ acquire_inherited_sample_rows(Relation onerel, int elevel,
  *		by taking a self-exclusive lock on the relation in analyze_rel().
  */
 static void
-update_attstats(Oid relid, bool inh, int natts, VacAttrStats **vacattrstats)
+update_attstats(Oid relid, bool inh, int natts, VacAttrStats **vacattrstats, bool is_global_temp)
 {
 	Relation	sd;
 	int			attno;
@@ -1527,30 +1528,42 @@ update_attstats(Oid relid, bool inh, int natts, VacAttrStats **vacattrstats)
 			}
 		}
 
-		/* Is there already a pg_statistic tuple for this attribute? */
-		oldtup = SearchSysCache3(STATRELATTINH,
-								 ObjectIdGetDatum(relid),
-								 Int16GetDatum(stats->attr->attnum),
-								 BoolGetDatum(inh));
-
-		if (HeapTupleIsValid(oldtup))
+		if (is_global_temp)
 		{
-			/* Yes, replace it */
-			stup = heap_modify_tuple(oldtup,
-									 RelationGetDescr(sd),
-									 values,
-									 nulls,
-									 replaces);
-			ReleaseSysCache(oldtup);
-			CatalogTupleUpdate(sd, &stup->t_self, stup);
+			stup = heap_form_tuple(RelationGetDescr(sd), values, nulls);
+			InsertSysCache(STATRELATTINH,
+						   ObjectIdGetDatum(relid),
+						   Int16GetDatum(stats->attr->attnum),
+						   BoolGetDatum(inh),
+						   0,
+						   stup);
 		}
 		else
 		{
-			/* No, insert new tuple */
-			stup = heap_form_tuple(RelationGetDescr(sd), values, nulls);
-			CatalogTupleInsert(sd, stup);
-		}
+			/* Is there already a pg_statistic tuple for this attribute? */
+			oldtup = SearchSysCache3(STATRELATTINH,
+									 ObjectIdGetDatum(relid),
+									 Int16GetDatum(stats->attr->attnum),
+									 BoolGetDatum(inh));
 
+			if (HeapTupleIsValid(oldtup))
+			{
+				/* Yes, replace it */
+				stup = heap_modify_tuple(oldtup,
+										 RelationGetDescr(sd),
+										 values,
+										 nulls,
+										 replaces);
+				ReleaseSysCache(oldtup);
+				CatalogTupleUpdate(sd, &stup->t_self, stup);
+			}
+			else
+			{
+				/* No, insert new tuple */
+				stup = heap_form_tuple(RelationGetDescr(sd), values, nulls);
+				CatalogTupleInsert(sd, stup);
+			}
+		}
 		heap_freetuple(stup);
 	}
 
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index a23128d..5d131a7 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -1400,7 +1400,7 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap,
 	 */
 	if (newrelpersistence == RELPERSISTENCE_UNLOGGED)
 		reindex_flags |= REINDEX_REL_FORCE_INDEXES_UNLOGGED;
-	else if (newrelpersistence == RELPERSISTENCE_PERMANENT)
+	else if (newrelpersistence != RELPERSISTENCE_TEMP)
 		reindex_flags |= REINDEX_REL_FORCE_INDEXES_PERMANENT;
 
 	/* Report that we are now reindexing relations */
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index a13322b..be661a4 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -94,7 +94,7 @@ static HTAB *seqhashtab = NULL; /* hash table for SeqTable items */
  */
 static SeqTableData *last_used_seq = NULL;
 
-static void fill_seq_with_data(Relation rel, HeapTuple tuple);
+static void fill_seq_with_data(Relation rel, HeapTuple tuple, Buffer buf);
 static Relation lock_and_open_sequence(SeqTable seq);
 static void create_seq_hashtable(void);
 static void init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel);
@@ -222,7 +222,7 @@ DefineSequence(ParseState *pstate, CreateSeqStmt *seq)
 
 	/* now initialize the sequence's data */
 	tuple = heap_form_tuple(tupDesc, value, null);
-	fill_seq_with_data(rel, tuple);
+	fill_seq_with_data(rel, tuple, InvalidBuffer);
 
 	/* process OWNED BY if given */
 	if (owned_by)
@@ -327,7 +327,7 @@ ResetSequence(Oid seq_relid)
 	/*
 	 * Insert the modified tuple into the new storage file.
 	 */
-	fill_seq_with_data(seq_rel, tuple);
+	fill_seq_with_data(seq_rel, tuple, InvalidBuffer);
 
 	/* Clear local cache so that we don't think we have cached numbers */
 	/* Note that we do not change the currval() state */
@@ -340,18 +340,21 @@ ResetSequence(Oid seq_relid)
  * Initialize a sequence's relation with the specified tuple as content
  */
 static void
-fill_seq_with_data(Relation rel, HeapTuple tuple)
+fill_seq_with_data(Relation rel, HeapTuple tuple, Buffer buf)
 {
-	Buffer		buf;
 	Page		page;
 	sequence_magic *sm;
 	OffsetNumber offnum;
+	bool lockBuffer = false;
 
 	/* Initialize first page of relation with special magic number */
 
-	buf = ReadBuffer(rel, P_NEW);
-	Assert(BufferGetBlockNumber(buf) == 0);
-
+	if (buf == InvalidBuffer)
+	{
+		buf = ReadBuffer(rel, P_NEW);
+		Assert(BufferGetBlockNumber(buf) == 0);
+		lockBuffer = true;
+	}
 	page = BufferGetPage(buf);
 
 	PageInit(page, BufferGetPageSize(buf), sizeof(sequence_magic));
@@ -360,7 +363,8 @@ fill_seq_with_data(Relation rel, HeapTuple tuple)
 
 	/* Now insert sequence tuple */
 
-	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+	if (lockBuffer)
+		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
 
 	/*
 	 * Since VACUUM does not process sequences, we have to force the tuple to
@@ -410,7 +414,8 @@ fill_seq_with_data(Relation rel, HeapTuple tuple)
 
 	END_CRIT_SECTION();
 
-	UnlockReleaseBuffer(buf);
+	if (lockBuffer)
+		UnlockReleaseBuffer(buf);
 }
 
 /*
@@ -502,7 +507,7 @@ AlterSequence(ParseState *pstate, AlterSeqStmt *stmt)
 		/*
 		 * Insert the modified tuple into the new storage file.
 		 */
-		fill_seq_with_data(seqrel, newdatatuple);
+		fill_seq_with_data(seqrel, newdatatuple, InvalidBuffer);
 	}
 
 	/* process OWNED BY if given */
@@ -1178,6 +1183,17 @@ read_seq_tuple(Relation rel, Buffer *buf, HeapTuple seqdatatuple)
 	LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);
 
 	page = BufferGetPage(*buf);
+	if (GlobalTempRelationPageIsNotInitialized(rel, page))
+	{
+		/* Initialize sequence for global temporary tables */
+		Datum		value[SEQ_COL_LASTCOL] = {0};
+		bool		null[SEQ_COL_LASTCOL] = {false};
+		HeapTuple tuple;
+		value[SEQ_COL_LASTVAL-1] = Int64GetDatumFast(1); /* start sequence with 1 */
+		tuple = heap_form_tuple(RelationGetDescr(rel), value, null);
+		fill_seq_with_data(rel, tuple, *buf);
+	}
+
 	sm = (sequence_magic *) PageGetSpecialPointer(page);
 
 	if (sm->magic != SEQ_MAGIC)
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 8d25d14..21d5a30 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -12,6 +12,9 @@
  *
  *-------------------------------------------------------------------------
  */
+#include <sys/stat.h>
+#include <unistd.h>
+
 #include "postgres.h"
 
 #include "access/genam.h"
@@ -533,6 +536,23 @@ static List *GetParentedForeignKeyRefs(Relation partition);
 static void ATDetachCheckNoForeignKeyRefs(Relation partition);
 
 
+static bool
+has_oncommit_option(List *options)
+{
+	ListCell   *listptr;
+
+	foreach(listptr, options)
+	{
+		DefElem    *def = (DefElem *) lfirst(listptr);
+
+		if (pg_strcasecmp(def->defname, "on_commit_delete_rows") == 0)
+			return true;
+	}
+
+	return false;
+}
+
+
 /* ----------------------------------------------------------------
  *		DefineRelation
  *				Creates a new relation.
@@ -576,6 +596,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
 	LOCKMODE	parentLockmode;
 	const char *accessMethod = NULL;
 	Oid			accessMethodId = InvalidOid;
+	bool		has_oncommit_clause = false;
 
 	/*
 	 * Truncate relname to appropriate length (probably a waste of time, as
@@ -587,7 +608,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
 	 * Check consistency of arguments
 	 */
 	if (stmt->oncommit != ONCOMMIT_NOOP
-		&& stmt->relation->relpersistence != RELPERSISTENCE_TEMP)
+		&& !IsLocalRelpersistence(stmt->relation->relpersistence))
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
 				 errmsg("ON COMMIT can only be used on temporary tables")));
@@ -613,17 +634,6 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
 		RangeVarGetAndCheckCreationNamespace(stmt->relation, NoLock, NULL);
 
 	/*
-	 * Security check: disallow creating temp tables from security-restricted
-	 * code.  This is needed because calling code might not expect untrusted
-	 * tables to appear in pg_temp at the front of its search path.
-	 */
-	if (stmt->relation->relpersistence == RELPERSISTENCE_TEMP
-		&& InSecurityRestrictedOperation())
-		ereport(ERROR,
-				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-				 errmsg("cannot create temporary table within security-restricted operation")));
-
-	/*
 	 * Determine the lockmode to use when scanning parents.  A self-exclusive
 	 * lock is needed here.
 	 *
@@ -718,6 +728,38 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
 	/*
 	 * Parse and validate reloptions, if any.
 	 */
+	/* Global temp tables: translate the ON COMMIT clause into the on_commit_delete_rows reloption */
+	has_oncommit_clause = has_oncommit_option(stmt->options);
+	if (stmt->relation->relpersistence == RELPERSISTENCE_SESSION)
+	{
+		if (has_oncommit_clause)
+		{
+			if (stmt->oncommit != ONCOMMIT_NOOP)
+				elog(ERROR, "cannot define global temp table with both ON COMMIT and WITH clauses");
+		}
+		else if (stmt->oncommit != ONCOMMIT_NOOP)
+		{
+			DefElem *opt = makeNode(DefElem);
+
+			opt->type = T_DefElem;
+			opt->defnamespace = NULL;
+			opt->defname = "on_commit_delete_rows";
+			opt->defaction = DEFELEM_UNSPEC;
+
+			/* use reloptions to remember on commit clause */
+			if (stmt->oncommit == ONCOMMIT_DELETE_ROWS)
+				opt->arg  = (Node *)makeString("true");
+			else if (stmt->oncommit == ONCOMMIT_PRESERVE_ROWS)
+				opt->arg  = (Node *)makeString("false");
+			else
+				elog(ERROR, "global temp table does not support ON COMMIT DROP clause");
+
+			stmt->options = lappend(stmt->options, opt);
+		}
+	}
+	else if (has_oncommit_clause)
+		elog(ERROR, "regular table cannot specify on_commit_delete_rows");
+
 	reloptions = transformRelOptions((Datum) 0, stmt->options, NULL, validnsps,
 									 true, false);
 
@@ -1772,7 +1814,8 @@ ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged,
 		 * table or the current physical file to be thrown away anyway.
 		 */
 		if (rel->rd_createSubid == mySubid ||
-			rel->rd_newRelfilenodeSubid == mySubid)
+			rel->rd_newRelfilenodeSubid == mySubid ||
+			rel->rd_rel->relpersistence == RELPERSISTENCE_SESSION)
 		{
 			/* Immediate, non-rollbackable truncation is OK */
 			heap_truncate_one_rel(rel);
@@ -3449,6 +3492,26 @@ AlterTableLookupRelation(AlterTableStmt *stmt, LOCKMODE lockmode)
 									(void *) stmt);
 }
 
+
+static bool
+CheckGlobalTempTableNotInUse(Relation rel)
+{
+	int id;
+	for (id = 1; id <= MaxBackends; id++)
+	{
+		if (id != MyBackendId)
+		{
+			struct stat fst;
+			char* path = relpathbackend(rel->rd_node, id, MAIN_FORKNUM);
+			int rc = stat(path, &fst);
+			pfree(path);
+			if (rc == 0 && fst.st_size != 0)
+				return false;
+		}
+	}
+	return true;
+}
+
 /*
  * AlterTable
  *		Execute ALTER TABLE, which can be a list of subcommands
@@ -3500,6 +3563,9 @@ AlterTable(Oid relid, LOCKMODE lockmode, AlterTableStmt *stmt)
 	rel = relation_open(relid, NoLock);
 
 	CheckTableNotInUse(rel, "ALTER TABLE");
+	if (rel->rd_rel->relpersistence == RELPERSISTENCE_SESSION
+		&& !CheckGlobalTempTableNotInUse(rel))
+		elog(ERROR, "global temp table used by active backends cannot be altered");
 
 	ATController(stmt, rel, stmt->cmds, stmt->relation->inh, lockmode);
 }
@@ -7708,6 +7774,12 @@ ATAddForeignKeyConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
 						(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
 						 errmsg("constraints on unlogged tables may reference only permanent or unlogged tables")));
 			break;
+		case RELPERSISTENCE_SESSION:
+			if (pkrel->rd_rel->relpersistence != RELPERSISTENCE_SESSION)
+				ereport(ERROR,
+						(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
+						 errmsg("constraints on session tables may reference only session tables")));
+			break;
 		case RELPERSISTENCE_TEMP:
 			if (pkrel->rd_rel->relpersistence != RELPERSISTENCE_TEMP)
 				ereport(ERROR,
@@ -14140,6 +14212,13 @@ ATPrepChangePersistence(Relation rel, bool toLogged)
 							RelationGetRelationName(rel)),
 					 errtable(rel)));
 			break;
+		case RELPERSISTENCE_SESSION:
+			ereport(ERROR,
+					(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
+					 errmsg("cannot change logged status of session table \"%s\"",
+							RelationGetRelationName(rel)),
+					 errtable(rel)));
+			break;
 		case RELPERSISTENCE_PERMANENT:
 			if (toLogged)
 				/* nothing to do */
@@ -14627,14 +14706,7 @@ PreCommit_on_commit_actions(void)
 				/* Do nothing (there shouldn't be such entries, actually) */
 				break;
 			case ONCOMMIT_DELETE_ROWS:
-
-				/*
-				 * If this transaction hasn't accessed any temporary
-				 * relations, we can skip truncating ON COMMIT DELETE ROWS
-				 * tables, as they must still be empty.
-				 */
-				if ((MyXactFlags & XACT_FLAGS_ACCESSEDTEMPNAMESPACE))
-					oids_to_truncate = lappend_oid(oids_to_truncate, oc->relid);
+				oids_to_truncate = lappend_oid(oids_to_truncate, oc->relid);
 				break;
 			case ONCOMMIT_DROP:
 				oids_to_drop = lappend_oid(oids_to_drop, oc->relid);
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index db3a68a..60212b0 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -48,6 +48,7 @@
 #include "partitioning/partprune.h"
 #include "rewrite/rewriteManip.h"
 #include "utils/lsyscache.h"
+#include "utils/rel.h"
 
 
 /* results of subquery_is_pushdown_safe */
@@ -618,7 +619,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
 			 * the rest of the necessary infrastructure right now anyway.  So
 			 * for now, bail out if we see a temporary table.
 			 */
-			if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP)
+			if (IsLocalRelpersistence(get_rel_persistence(rte->relid)))
 				return;
 
 			/*
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 17c5f08..7c83e7b 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -6307,7 +6307,7 @@ plan_create_index_workers(Oid tableOid, Oid indexOid)
 	 * Furthermore, any index predicate or index expressions must be parallel
 	 * safe.
 	 */
-	if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
+	if (RelationHasSessionScope(heap) ||
 		!is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
 		!is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
 	{
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 3f67aaf..565c868 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -3266,20 +3266,11 @@ OptTemp:	TEMPORARY					{ $$ = RELPERSISTENCE_TEMP; }
 			| TEMP						{ $$ = RELPERSISTENCE_TEMP; }
 			| LOCAL TEMPORARY			{ $$ = RELPERSISTENCE_TEMP; }
 			| LOCAL TEMP				{ $$ = RELPERSISTENCE_TEMP; }
-			| GLOBAL TEMPORARY
-				{
-					ereport(WARNING,
-							(errmsg("GLOBAL is deprecated in temporary table creation"),
-							 parser_errposition(@1)));
-					$$ = RELPERSISTENCE_TEMP;
-				}
-			| GLOBAL TEMP
-				{
-					ereport(WARNING,
-							(errmsg("GLOBAL is deprecated in temporary table creation"),
-							 parser_errposition(@1)));
-					$$ = RELPERSISTENCE_TEMP;
-				}
+			| GLOBAL TEMPORARY          { $$ = RELPERSISTENCE_SESSION; }
+			| GLOBAL TEMP               { $$ = RELPERSISTENCE_SESSION; }
+			| SESSION                   { $$ = RELPERSISTENCE_SESSION; }
+			| SESSION TEMPORARY         { $$ = RELPERSISTENCE_SESSION; }
+			| SESSION TEMP              { $$ = RELPERSISTENCE_SESSION; }
 			| UNLOGGED					{ $$ = RELPERSISTENCE_UNLOGGED; }
 			| /*EMPTY*/					{ $$ = RELPERSISTENCE_PERMANENT; }
 		;
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index ee47547..ea7fe4c 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -437,6 +437,14 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
 	seqstmt->options = seqoptions;
 
 	/*
+	 * Why not always use the persistence of the parent table?
+	 * Because although unlogged sequences are prohibited,
+	 * unlogged tables with SERIAL columns are accepted!
+	 */
+	if (cxt->relation->relpersistence != RELPERSISTENCE_UNLOGGED)
+		seqstmt->sequence->relpersistence = cxt->relation->relpersistence;
+
+	/*
 	 * If a sequence data type was specified, add it to the options.  Prepend
 	 * to the list rather than append; in case a user supplied their own AS
 	 * clause, the "redundant options" error will point to their occurrence,
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index c1dd816..dcfc134 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -2157,7 +2157,7 @@ do_autovacuum(void)
 		/*
 		 * We cannot safely process other backends' temp tables, so skip 'em.
 		 */
-		if (classForm->relpersistence == RELPERSISTENCE_TEMP)
+		if (IsLocalRelpersistence(classForm->relpersistence))
 			continue;
 
 		relid = classForm->oid;
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 483f705..1129dc3 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -2933,7 +2933,7 @@ DropRelFileNodeBuffers(RelFileNodeBackend rnode, ForkNumber *forkNum,
 	/* If it's a local relation, it's localbuf.c's problem. */
 	if (RelFileNodeBackendIsTemp(rnode))
 	{
-		if (rnode.backend == MyBackendId)
+		if (GetRelationBackendId(rnode.backend) == MyBackendId)
 		{
 			for (j = 0; j < nforks; j++)
 				DropRelFileNodeLocalBuffers(rnode.node, forkNum[j],
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index 07f3c93..8cf06f6 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -28,17 +28,20 @@
 #include "miscadmin.h"
 #include "access/xlogutils.h"
 #include "access/xlog.h"
+#include "commands/tablecmds.h"
 #include "commands/tablespace.h"
 #include "pgstat.h"
 #include "postmaster/bgwriter.h"
 #include "storage/fd.h"
 #include "storage/bufmgr.h"
+#include "storage/ipc.h"
 #include "storage/md.h"
 #include "storage/relfilenode.h"
 #include "storage/smgr.h"
 #include "storage/sync.h"
 #include "utils/hsearch.h"
 #include "utils/memutils.h"
+#include "utils/rel.h"
 #include "pg_trace.h"
 
 /*
@@ -87,6 +90,19 @@ typedef struct _MdfdVec
 
 static MemoryContext MdCxt;		/* context for all MdfdVec objects */
 
+/*
+ * Structure used to track session relations created by this backend.
+ * The data of these relations must be deleted on backend exit.
+ */
+typedef struct SessionRelation
+{
+	RelFileNodeBackend rnode;
+	ForkNumber forknum;
+	struct SessionRelation* next;
+} SessionRelation;
+
+
+static SessionRelation* SessionRelations;
 
 /* Populate a file tag describing an md.c segment file. */
 #define INIT_MD_FILETAG(a,xx_rnode,xx_forknum,xx_segno) \
@@ -152,6 +168,60 @@ mdinit(void)
 								  ALLOCSET_DEFAULT_SIZES);
 }
 
+
+/*
+ * Delete the files of all session relations registered by this backend.
+ * This function is called on backend exit.
+ */
+static void
+TruncateSessionRelations(int code, Datum arg)
+{
+	SessionRelation* rel;
+	for (rel = SessionRelations; rel != NULL; rel = rel->next)
+	{
+		/* Delete relation files */
+		mdunlink(rel->rnode, rel->forknum, false);
+	}
+}
+
+/*
+ * Maintain information about session relations accessed by this backend.
+ * This list is needed to perform cleanup on backend exit.
+ * Session relation is linked in this list when this relation is created or opened and file doesn't exist.
+ * Such procedure guarantee that each relation is linked into list only once.
+ */
+static void
+RegisterSessionRelation(SMgrRelation reln, ForkNumber forknum)
+{
+	SessionRelation* rel = (SessionRelation*)MemoryContextAlloc(TopMemoryContext, sizeof(SessionRelation));
+
+	/*
+	 * Perform session relation cleanup on backend exit. We use a shared-memory exit hook
+	 * because cleanup must be performed before the backend is disconnected from shared memory.
+	 */
+	if (SessionRelations == NULL)
+		on_shmem_exit(TruncateSessionRelations, 0);
+
+	rel->rnode = reln->smgr_rnode;
+	rel->forknum = forknum;
+	rel->next = SessionRelations;
+	SessionRelations = rel;
+}
+
+static void
+RegisterOnCommitAction(SMgrRelation reln, ForkNumber forknum)
+{
+	if (reln->smgr_owner && forknum == MAIN_FORKNUM)
+	{
+		Relation rel = (Relation)((char*)reln->smgr_owner - offsetof(RelationData, rd_smgr));
+		if (rel->rd_options
+			&& ((StdRdOptions *)rel->rd_options)->on_commit_delete_rows)
+		{
+			register_on_commit_action(rel->rd_id, ONCOMMIT_DELETE_ROWS);
+		}
+	}
+}
+
 /*
  *	mdexists() -- Does the physical file exist?
  *
@@ -218,6 +288,8 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
 					 errmsg("could not create file \"%s\": %m", path)));
 		}
 	}
+	if (RelFileNodeBackendIsGlobalTemp(reln->smgr_rnode))
+		RegisterSessionRelation(reln, forkNum);
 
 	pfree(path);
 
@@ -465,6 +537,21 @@ mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior)
 
 	if (fd < 0)
 	{
+		/*
+		 * When accessing a session relation, this backend may not yet have any file for it.
+		 * If so, create the file and register the session relation for cleanup on backend exit.
+		 */
+		if (RelFileNodeBackendIsGlobalTemp(reln->smgr_rnode))
+		{
+			fd = PathNameOpenFile(path, O_RDWR | PG_BINARY | O_CREAT);
+			if (fd >= 0)
+			{
+				RegisterSessionRelation(reln, forknum);
+				if (!(behavior & EXTENSION_RETURN_NULL))
+					RegisterOnCommitAction(reln, forknum);
+				goto NewSegment;
+			}
+		}
 		if ((behavior & EXTENSION_RETURN_NULL) &&
 			FILE_POSSIBLY_DELETED(errno))
 		{
@@ -476,6 +563,7 @@ mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior)
 				 errmsg("could not open file \"%s\": %m", path)));
 	}
 
+  NewSegment:
 	pfree(path);
 
 	_fdvec_resize(reln, forknum, 1);
@@ -652,8 +740,13 @@ mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
 		 * complaining.  This allows, for example, the case of trying to
 		 * update a block that was later truncated away.
 		 */
-		if (zero_damaged_pages || InRecovery)
+		if (zero_damaged_pages || InRecovery || RelFileNodeBackendIsGlobalTemp(reln->smgr_rnode))
+		{
 			MemSet(buffer, 0, BLCKSZ);
+			/* For a session relation we need to write the zeroed page so that subsequent mdnblocks() calls return the correct result */
+			if (RelFileNodeBackendIsGlobalTemp(reln->smgr_rnode))
+				mdwrite(reln, forknum, blocknum, buffer, true);
+		}
 		else
 			ereport(ERROR,
 					(errcode(ERRCODE_DATA_CORRUPTED),
@@ -743,7 +836,8 @@ mdnblocks(SMgrRelation reln, ForkNumber forknum)
 	BlockNumber segno = 0;
 
 	/* mdopen has opened the first segment */
-	Assert(reln->md_num_open_segs[forknum] > 0);
+	if (reln->md_num_open_segs[forknum] == 0)
+		return 0;
 
 	/*
 	 * Start from the last open segments, to avoid redundant seeks.  We have
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index a87e721..2401361 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -994,6 +994,9 @@ pg_relation_filepath(PG_FUNCTION_ARGS)
 	/* Determine owning backend. */
 	switch (relform->relpersistence)
 	{
+		case RELPERSISTENCE_SESSION:
+			backend = BackendIdForSessionRelations();
+			break;
 		case RELPERSISTENCE_UNLOGGED:
 		case RELPERSISTENCE_PERMANENT:
 			backend = InvalidBackendId;
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index c3e7d94..6d86c28 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -1191,6 +1191,111 @@ SearchCatCache4(CatCache *cache,
 	return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
 }
 
+
+void InsertCatCache(CatCache *cache,
+					Datum v1, Datum v2, Datum v3, Datum v4,
+					HeapTuple tuple)
+{
+	Datum		arguments[CATCACHE_MAXKEYS];
+	uint32		hashValue;
+	Index		hashIndex;
+	CatCTup    *ct;
+	dlist_iter	iter;
+	dlist_head *bucket;
+	int         nkeys = cache->cc_nkeys;
+	MemoryContext oldcxt;
+	int         i;
+
+	/*
+	 * one-time startup overhead for each cache
+	 */
+	if (unlikely(cache->cc_tupdesc == NULL))
+		CatalogCacheInitializeCache(cache);
+
+	/* Initialize local parameter array */
+	arguments[0] = v1;
+	arguments[1] = v2;
+	arguments[2] = v3;
+	arguments[3] = v4;
+	/*
+	 * find the hash bucket in which to look for the tuple
+	 */
+	hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
+	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
+
+	/*
+	 * scan the hash bucket until we find a match or exhaust our tuples
+	 *
+	 * Note: it's okay to use dlist_foreach here, even though we modify the
+	 * dlist within the loop, because we don't continue the loop afterwards.
+	 */
+	bucket = &cache->cc_bucket[hashIndex];
+	dlist_foreach(iter, bucket)
+	{
+		ct = dlist_container(CatCTup, cache_elem, iter.cur);
+
+		if (ct->dead)
+			continue;			/* ignore dead entries */
+
+		if (ct->hash_value != hashValue)
+			continue;			/* quickly skip entry if wrong hash val */
+
+		if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
+			continue;
+
+		/*
+		 * If the cached tuple has the same length, overwrite it in place;
+		 * otherwise remove the stale entry and insert a fresh one below.
+		 */
+		if (ct->tuple.t_len == tuple->t_len)
+		{
+			memcpy((char *) ct->tuple.t_data,
+				   (const char *) tuple->t_data,
+				   tuple->t_len);
+			return;
+		}
+		dlist_delete(&ct->cache_elem);
+		pfree(ct);
+		cache->cc_ntup -= 1;
+		CacheHdr->ch_ntup -= 1;
+		break;
+	}
+	/* Allocate memory for CatCTup and the cached tuple in one go */
+	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+
+	ct = (CatCTup *) palloc(sizeof(CatCTup) +
+							MAXIMUM_ALIGNOF + tuple->t_len);
+	ct->tuple.t_len = tuple->t_len;
+	ct->tuple.t_self = tuple->t_self;
+	ct->tuple.t_tableOid = tuple->t_tableOid;
+	ct->tuple.t_data = (HeapTupleHeader)
+		MAXALIGN(((char *) ct) + sizeof(CatCTup));
+	/* copy tuple contents */
+	memcpy((char *) ct->tuple.t_data,
+		   (const char *) tuple->t_data,
+		   tuple->t_len);
+	ct->ct_magic = CT_MAGIC;
+	ct->my_cache = cache;
+	ct->c_list = NULL;
+	ct->refcount = 1;			/* pinned */
+	ct->dead = false;
+	ct->negative = false;
+	ct->hash_value = hashValue;
+	dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
+	memcpy(ct->keys, arguments, nkeys*sizeof(Datum));
+
+	cache->cc_ntup++;
+	CacheHdr->ch_ntup++;
+	MemoryContextSwitchTo(oldcxt);
+
+	/*
+	 * If the hash table has become too full, enlarge the buckets array. Quite
+	 * arbitrarily, we enlarge when fill factor > 2.
+	 */
+	if (cache->cc_ntup > cache->cc_nbuckets * 2)
+		RehashCatCache(cache);
+}
+
 /*
  * Work-horse for SearchCatCache/SearchCatCacheN.
  */
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 585dcee..ce8852c 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -1098,6 +1098,10 @@ RelationBuildDesc(Oid targetRelId, bool insertIt)
 	relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
 	switch (relation->rd_rel->relpersistence)
 	{
+		case RELPERSISTENCE_SESSION:
+			relation->rd_backend = BackendIdForSessionRelations();
+			relation->rd_islocaltemp = false;
+			break;
 		case RELPERSISTENCE_UNLOGGED:
 		case RELPERSISTENCE_PERMANENT:
 			relation->rd_backend = InvalidBackendId;
@@ -3301,6 +3305,10 @@ RelationBuildLocalRelation(const char *relname,
 	rel->rd_rel->relpersistence = relpersistence;
 	switch (relpersistence)
 	{
+		case RELPERSISTENCE_SESSION:
+			rel->rd_backend = BackendIdForSessionRelations();
+			rel->rd_islocaltemp = false;
+			break;
 		case RELPERSISTENCE_UNLOGGED:
 		case RELPERSISTENCE_PERMANENT:
 			rel->rd_backend = InvalidBackendId;
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index 16297a5..e7a4d3c 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -1164,6 +1164,16 @@ SearchSysCache4(int cacheId,
 	return SearchCatCache4(SysCache[cacheId], key1, key2, key3, key4);
 }
 
+void
+InsertSysCache(int cacheId,
+			   Datum key1, Datum key2, Datum key3, Datum key4,
+			   HeapTuple value)
+{
+	Assert(cacheId >= 0 && cacheId < SysCacheSize &&
+		   PointerIsValid(SysCache[cacheId]));
+	InsertCatCache(SysCache[cacheId], key1, key2, key3, key4, value);
+}
+
 /*
  * ReleaseSysCache
  *		Release previously grabbed reference count on a tuple
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index bf69adc..fa7479c 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -15637,8 +15637,8 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
 											 tbinfo->dobj.catId.oid, false);
 
 		appendPQExpBuffer(q, "CREATE %s%s %s",
-						  tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED ?
-						  "UNLOGGED " : "",
+						  tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED ? "UNLOGGED "
+						  : tbinfo->relpersistence == RELPERSISTENCE_SESSION ? "SESSION " : "",
 						  reltypename,
 						  qualrelname);
 
diff --git a/src/common/relpath.c b/src/common/relpath.c
index 62b9553..cef99d2 100644
--- a/src/common/relpath.c
+++ b/src/common/relpath.c
@@ -166,7 +166,18 @@ GetRelationPath(Oid dbNode, Oid spcNode, Oid relNode,
 		}
 		else
 		{
-			if (forkNumber != MAIN_FORKNUM)
+			/*
+			 * Session relations are distinguished from local temp relations by adding
+			 * the SessionRelFirstBackendId offset to backendId.
+			 * There is no need to separate them at the file system level, so just subtract SessionRelFirstBackendId
+			 * to avoid overly long file names.
+			 * Segments of session relations have the same prefix (t%d_) as local temporary relations
+			 * so that they can be cleaned up in the same way as local temporary relation files.
+			 */
+			 if (backendId >= SessionRelFirstBackendId)
+				 backendId -= SessionRelFirstBackendId;
+
+			 if (forkNumber != MAIN_FORKNUM)
 				path = psprintf("base/%u/t%d_%u_%s",
 								dbNode, backendId, relNode,
 								forkNames[forkNumber]);
diff --git a/src/include/catalog/pg_class.h b/src/include/catalog/pg_class.h
index 090b6ba..6a39663 100644
--- a/src/include/catalog/pg_class.h
+++ b/src/include/catalog/pg_class.h
@@ -165,6 +165,7 @@ typedef FormData_pg_class *Form_pg_class;
 #define		  RELPERSISTENCE_PERMANENT	'p' /* regular table */
 #define		  RELPERSISTENCE_UNLOGGED	'u' /* unlogged permanent table */
 #define		  RELPERSISTENCE_TEMP		't' /* temporary table */
+#define		  RELPERSISTENCE_SESSION	's' /* session table */
 
 /* default selection for replica identity (primary key or nothing) */
 #define		  REPLICA_IDENTITY_DEFAULT	'd'
diff --git a/src/include/storage/backendid.h b/src/include/storage/backendid.h
index 70ef8eb..11b4b89 100644
--- a/src/include/storage/backendid.h
+++ b/src/include/storage/backendid.h
@@ -22,6 +22,13 @@ typedef int BackendId;			/* unique currently active backend identifier */
 
 #define InvalidBackendId		(-1)
 
+/*
+ * We need to distinguish local and global temporary relations by RelFileNodeBackend.
+ * The least invasive change is to add some special bias value to the backend id (since
+ * the maximal number of backends is limited by MaxBackends).
+ */
+#define SessionRelFirstBackendId (0x40000000)
+
 extern PGDLLIMPORT BackendId MyBackendId;	/* backend id of this backend */
 
 /* backend id of our parallel session leader, or InvalidBackendId if none */
@@ -34,4 +41,12 @@ extern PGDLLIMPORT BackendId ParallelMasterBackendId;
 #define BackendIdForTempRelations() \
 	(ParallelMasterBackendId == InvalidBackendId ? MyBackendId : ParallelMasterBackendId)
 
+
+#define BackendIdForSessionRelations() \
+	(BackendIdForTempRelations() + SessionRelFirstBackendId)
+
+#define IsSessionRelationBackendId(id) ((id) >= SessionRelFirstBackendId)
+
+#define GetRelationBackendId(id) ((id) & ~SessionRelFirstBackendId)
+
 #endif							/* BACKENDID_H */
diff --git a/src/include/storage/bufpage.h b/src/include/storage/bufpage.h
index 4ef6d8d..bac7a31 100644
--- a/src/include/storage/bufpage.h
+++ b/src/include/storage/bufpage.h
@@ -229,6 +229,13 @@ typedef PageHeaderData *PageHeader;
 #define PageIsNew(page) (((PageHeader) (page))->pd_upper == 0)
 
 /*
+ * Check whether a page of a global temporary relation has not yet been initialized in this backend
+ */
+#define GlobalTempRelationPageIsNotInitialized(rel, page) \
+	((rel)->rd_rel->relpersistence == RELPERSISTENCE_SESSION && PageIsNew(page))
+
+
+/*
  * PageGetItemId
  *		Returns an item identifier of a page.
  */
diff --git a/src/include/storage/relfilenode.h b/src/include/storage/relfilenode.h
index 586500a..20aec72 100644
--- a/src/include/storage/relfilenode.h
+++ b/src/include/storage/relfilenode.h
@@ -75,10 +75,25 @@ typedef struct RelFileNodeBackend
 	BackendId	backend;
 } RelFileNodeBackend;
 
+/*
+ * Check whether it is a local or global temporary relation, whose data belongs to only one backend.
+ */
 #define RelFileNodeBackendIsTemp(rnode) \
 	((rnode).backend != InvalidBackendId)
 
 /*
+ * Check whether it is a global temporary relation, whose metadata is shared by all sessions
+ * but whose data is private to the current session.
+ */
+#define RelFileNodeBackendIsGlobalTemp(rnode) IsSessionRelationBackendId((rnode).backend)
+
+/*
+ * Check whether it is a local temporary relation, which exists only in this backend.
+ */
+#define RelFileNodeBackendIsLocalTemp(rnode) \
+	(RelFileNodeBackendIsTemp(rnode) && !RelFileNodeBackendIsGlobalTemp(rnode))
+
+/*
  * Note: RelFileNodeEquals and RelFileNodeBackendEquals compare relNode first
  * since that is most likely to be different in two unequal RelFileNodes.  It
  * is probably redundant to compare spcNode if the other fields are found equal,
diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h
index ff1faba..31f615d 100644
--- a/src/include/utils/catcache.h
+++ b/src/include/utils/catcache.h
@@ -228,4 +228,8 @@ extern void PrepareToInvalidateCacheTuple(Relation relation,
 extern void PrintCatCacheLeakWarning(HeapTuple tuple);
 extern void PrintCatCacheListLeakWarning(CatCList *list);
 
+extern void InsertCatCache(CatCache *cache,
+						   Datum v1, Datum v2, Datum v3, Datum v4,
+						   HeapTuple tuple);
+
 #endif							/* CATCACHE_H */
diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h
index a5cf804..a30137f 100644
--- a/src/include/utils/rel.h
+++ b/src/include/utils/rel.h
@@ -272,6 +272,7 @@ typedef struct StdRdOptions
 	int			parallel_workers;	/* max number of parallel workers */
 	bool		vacuum_index_cleanup;	/* enables index vacuuming and cleanup */
 	bool		vacuum_truncate;	/* enables vacuum to truncate a relation */
+	bool		on_commit_delete_rows;	/* ON COMMIT DELETE ROWS for global temp tables */
 } StdRdOptions;
 
 #define HEAP_MIN_FILLFACTOR			10
@@ -327,6 +328,18 @@ typedef struct StdRdOptions
 	((relation)->rd_options ? \
 	 ((StdRdOptions *) (relation)->rd_options)->parallel_workers : (defaultpw))
 
+/*
+ * Relation persistence is either TEMP or SESSION
+ */
+#define IsLocalRelpersistence(relpersistence) \
+	((relpersistence) == RELPERSISTENCE_TEMP || (relpersistence) == RELPERSISTENCE_SESSION)
+
+/*
+ * Relation is either a global or a local temp table
+ */
+#define RelationHasSessionScope(relation) \
+	IsLocalRelpersistence(((relation)->rd_rel->relpersistence))
+
 /* ViewOptions->check_option values */
 typedef enum ViewOptCheckOption
 {
@@ -335,6 +348,7 @@ typedef enum ViewOptCheckOption
 	VIEW_OPTION_CHECK_OPTION_CASCADED
 } ViewOptCheckOption;
 
+
 /*
  * ViewOptions
  *		Contents of rd_options for views
@@ -526,7 +540,7 @@ typedef struct ViewOptions
  *		True if relation's pages are stored in local buffers.
  */
 #define RelationUsesLocalBuffers(relation) \
-	((relation)->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
+	RelationHasSessionScope(relation)
 
 /*
  * RELATION_IS_LOCAL
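
(The net effect of the rel.h changes is that buffer management treats both kinds of temporary relations alike: RelationUsesLocalBuffers() is now true for RELPERSISTENCE_TEMP and for the new RELPERSISTENCE_SESSION, so global temp data goes through backend-local buffers just like ordinary temp data. A minimal sketch of that predicate follows; the letter used for RELPERSISTENCE_SESSION is an assumption, the other three are the existing pg_class codes.)

#include <assert.h>

#define RELPERSISTENCE_PERMANENT	'p'
#define RELPERSISTENCE_UNLOGGED		'u'
#define RELPERSISTENCE_TEMP			't'
#define RELPERSISTENCE_SESSION		's'		/* assumed code for the sketch */

/*
 * Mirrors IsLocalRelpersistence()/RelationUsesLocalBuffers(): session-scoped
 * relations (local or global temp) live in backend-local buffers.
 */
static int
uses_local_buffers(char relpersistence)
{
	return relpersistence == RELPERSISTENCE_TEMP ||
		   relpersistence == RELPERSISTENCE_SESSION;
}

int
main(void)
{
	assert(uses_local_buffers(RELPERSISTENCE_TEMP));
	assert(uses_local_buffers(RELPERSISTENCE_SESSION));
	assert(!uses_local_buffers(RELPERSISTENCE_PERMANENT));
	assert(!uses_local_buffers(RELPERSISTENCE_UNLOGGED));
	return 0;
}
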
diff --git a/src/include/utils/syscache.h b/src/include/utils/syscache.h
index 918765c..5b1598b 100644
--- a/src/include/utils/syscache.h
+++ b/src/include/utils/syscache.h
@@ -216,4 +216,8 @@ extern bool RelationSupportsSysCache(Oid relid);
 
 #define ReleaseSysCacheList(x)	ReleaseCatCacheList(x)
 
+
+extern void InsertSysCache(int cacheId,
+						   Datum v1, Datum v2, Datum v3, Datum v4,
+						   HeapTuple tuple);
 #endif							/* SYSCACHE_H */
diff --git a/src/test/isolation/expected/inherit-global-temp.out b/src/test/isolation/expected/inherit-global-temp.out
new file mode 100644
index 0000000..6114f8c
--- /dev/null
+++ b/src/test/isolation/expected/inherit-global-temp.out
@@ -0,0 +1,218 @@
+Parsed test spec with 2 sessions
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s1_select_p s1_select_c s2_select_p s2_select_c
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s1_select_p: SELECT a FROM inh_global_parent;
+a              
+
+1              
+2              
+3              
+4              
+step s1_select_c: SELECT a FROM inh_global_temp_child_s1;
+a              
+
+3              
+4              
+step s2_select_p: SELECT a FROM inh_global_parent;
+a              
+
+1              
+2              
+5              
+6              
+step s2_select_c: SELECT a FROM inh_global_temp_child_s2;
+a              
+
+5              
+6              
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s1_update_p s1_update_c s1_select_p s1_select_c s2_select_p s2_select_c
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s1_update_p: UPDATE inh_global_parent SET a = 11 WHERE a = 1;
+step s1_update_c: UPDATE inh_global_parent SET a = 13 WHERE a IN (3, 5);
+step s1_select_p: SELECT a FROM inh_global_parent;
+a              
+
+2              
+11             
+4              
+13             
+step s1_select_c: SELECT a FROM inh_global_temp_child_s1;
+a              
+
+4              
+13             
+step s2_select_p: SELECT a FROM inh_global_parent;
+a              
+
+2              
+11             
+5              
+6              
+step s2_select_c: SELECT a FROM inh_global_temp_child_s2;
+a              
+
+5              
+6              
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s2_update_c s1_select_p s1_select_c s2_select_p s2_select_c
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s2_update_c: UPDATE inh_global_parent SET a = 15 WHERE a IN (3, 5);
+step s1_select_p: SELECT a FROM inh_global_parent;
+a              
+
+1              
+2              
+3              
+4              
+step s1_select_c: SELECT a FROM inh_global_temp_child_s1;
+a              
+
+3              
+4              
+step s2_select_p: SELECT a FROM inh_global_parent;
+a              
+
+1              
+2              
+6              
+15             
+step s2_select_c: SELECT a FROM inh_global_temp_child_s2;
+a              
+
+6              
+15             
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s1_delete_p s1_delete_c s1_select_p s1_select_c s2_select_p s2_select_c
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s1_delete_p: DELETE FROM inh_global_parent WHERE a = 2;
+step s1_delete_c: DELETE FROM inh_global_parent WHERE a IN (4, 6);
+step s1_select_p: SELECT a FROM inh_global_parent;
+a              
+
+1              
+3              
+step s1_select_c: SELECT a FROM inh_global_temp_child_s1;
+a              
+
+3              
+step s2_select_p: SELECT a FROM inh_global_parent;
+a              
+
+1              
+5              
+6              
+step s2_select_c: SELECT a FROM inh_global_temp_child_s2;
+a              
+
+5              
+6              
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s2_delete_c s1_select_p s1_select_c s2_select_p s2_select_c
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s2_delete_c: DELETE FROM inh_global_parent WHERE a IN (4, 6);
+step s1_select_p: SELECT a FROM inh_global_parent;
+a              
+
+1              
+2              
+3              
+4              
+step s1_select_c: SELECT a FROM inh_global_temp_child_s1;
+a              
+
+3              
+4              
+step s2_select_p: SELECT a FROM inh_global_parent;
+a              
+
+1              
+2              
+5              
+step s2_select_c: SELECT a FROM inh_global_temp_child_s2;
+a              
+
+5              
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s1_truncate_p s1_select_p s1_select_c s2_select_p s2_select_c
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s1_truncate_p: TRUNCATE inh_global_parent;
+step s1_select_p: SELECT a FROM inh_global_parent;
+a              
+
+step s1_select_c: SELECT a FROM inh_global_temp_child_s1;
+a              
+
+step s2_select_p: SELECT a FROM inh_global_parent;
+a              
+
+5              
+6              
+step s2_select_c: SELECT a FROM inh_global_temp_child_s2;
+a              
+
+5              
+6              
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s2_truncate_p s1_select_p s1_select_c s2_select_p s2_select_c
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s2_truncate_p: TRUNCATE inh_global_parent;
+step s1_select_p: SELECT a FROM inh_global_parent;
+a              
+
+3              
+4              
+step s1_select_c: SELECT a FROM inh_global_temp_child_s1;
+a              
+
+3              
+4              
+step s2_select_p: SELECT a FROM inh_global_parent;
+a              
+
+step s2_select_c: SELECT a FROM inh_global_temp_child_s2;
+a              
+
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s1_begin s1_truncate_p s2_select_p s1_commit
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s1_begin: BEGIN;
+step s1_truncate_p: TRUNCATE inh_global_parent;
+step s2_select_p: SELECT a FROM inh_global_parent; <waiting ...>
+step s1_commit: COMMIT;
+step s2_select_p: <... completed>
+a              
+
+5              
+6              
+
+starting permutation: s1_insert_p s1_insert_c s2_insert_c s1_begin s1_truncate_p s2_select_c s1_commit
+step s1_insert_p: INSERT INTO inh_global_parent VALUES (1), (2);
+step s1_insert_c: INSERT INTO inh_global_temp_child_s1 VALUES (3), (4);
+step s2_insert_c: INSERT INTO inh_global_temp_child_s2 VALUES (5), (6);
+step s1_begin: BEGIN;
+step s1_truncate_p: TRUNCATE inh_global_parent;
+step s2_select_c: SELECT a FROM inh_global_temp_child_s2; <waiting ...>
+step s1_commit: COMMIT;
+step s2_select_c: <... completed>
+a              
+
+5              
+6              
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index a2fa192..ef7aa85 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -88,3 +88,4 @@ test: plpgsql-toast
 test: truncate-conflict
 test: serializable-parallel
 test: serializable-parallel-2
+test: inherit-global-temp
diff --git a/src/test/isolation/specs/inherit-global-temp.spec b/src/test/isolation/specs/inherit-global-temp.spec
new file mode 100644
index 0000000..5e95dd6
--- /dev/null
+++ b/src/test/isolation/specs/inherit-global-temp.spec
@@ -0,0 +1,73 @@
+# This is a copy of the inherit-temp test with minor changes for global temporary tables.
+#
+
+setup
+{
+  CREATE TABLE inh_global_parent (a int);
+}
+
+teardown
+{
+  DROP TABLE inh_global_parent;
+}
+
+# Session 1 executes actions which act directly on both the parent and
+# its child.  Abbreviation "c" is used for queries working on the child
+# and "p" on the parent.
+session "s1"
+setup
+{
+  CREATE GLOBAL TEMPORARY TABLE inh_global_temp_child_s1 () INHERITS (inh_global_parent);
+}
+step "s1_begin" { BEGIN; }
+step "s1_truncate_p" { TRUNCATE inh_global_parent; }
+step "s1_select_p" { SELECT a FROM inh_global_parent; }
+step "s1_select_c" { SELECT a FROM inh_global_temp_child_s1; }
+step "s1_insert_p" { INSERT INTO inh_global_parent VALUES (1), (2); }
+step "s1_insert_c" { INSERT INTO inh_global_temp_child_s1 VALUES (3), (4); }
+step "s1_update_p" { UPDATE inh_global_parent SET a = 11 WHERE a = 1; }
+step "s1_update_c" { UPDATE inh_global_parent SET a = 13 WHERE a IN (3, 5); }
+step "s1_delete_p" { DELETE FROM inh_global_parent WHERE a = 2; }
+step "s1_delete_c" { DELETE FROM inh_global_parent WHERE a IN (4, 6); }
+step "s1_commit" { COMMIT; }
+teardown
+{
+  DROP TABLE inh_global_temp_child_s1;
+}
+
+# Session 2 executes actions on the parent which act only on the child.
+session "s2"
+setup
+{
+  CREATE GLOBAL TEMPORARY TABLE inh_global_temp_child_s2 () INHERITS (inh_global_parent);
+}
+step "s2_truncate_p" { TRUNCATE inh_global_parent; }
+step "s2_select_p" { SELECT a FROM inh_global_parent; }
+step "s2_select_c" { SELECT a FROM inh_global_temp_child_s2; }
+step "s2_insert_c" { INSERT INTO inh_global_temp_child_s2 VALUES (5), (6); }
+step "s2_update_c" { UPDATE inh_global_parent SET a = 15 WHERE a IN (3, 5); }
+step "s2_delete_c" { DELETE FROM inh_global_parent WHERE a IN (4, 6); }
+teardown
+{
+  DROP TABLE inh_global_temp_child_s2;
+}
+
+# Check INSERT behavior across sessions
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s1_select_p" "s1_select_c" "s2_select_p" "s2_select_c"
+
+# Check UPDATE behavior across sessions
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s1_update_p" "s1_update_c" "s1_select_p" "s1_select_c" "s2_select_p" "s2_select_c"
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s2_update_c" "s1_select_p" "s1_select_c" "s2_select_p" "s2_select_c"
+
+# Check DELETE behavior across sessions
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s1_delete_p" "s1_delete_c" "s1_select_p" "s1_select_c" "s2_select_p" "s2_select_c"
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s2_delete_c" "s1_select_p" "s1_select_c" "s2_select_p" "s2_select_c"
+
+# Check TRUNCATE behavior across sessions
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s1_truncate_p" "s1_select_p" "s1_select_c" "s2_select_p" "s2_select_c"
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s2_truncate_p" "s1_select_p" "s1_select_c" "s2_select_p" "s2_select_c"
+
+# TRUNCATE on the parent tree also locks the global temporary children of other
+# sessions, so both selects below block until the truncating transaction commits.
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s1_begin" "s1_truncate_p" "s2_select_p" "s1_commit"
+permutation "s1_insert_p" "s1_insert_c" "s2_insert_c" "s1_begin" "s1_truncate_p" "s2_select_c" "s1_commit"
diff --git a/src/test/regress/expected/global_temp.out b/src/test/regress/expected/global_temp.out
new file mode 100644
index 0000000..ae1adb6
--- /dev/null
+++ b/src/test/regress/expected/global_temp.out
@@ -0,0 +1,247 @@
+--
+-- GLOBAL TEMP
+-- Test global temp relations
+--
+-- Test ON COMMIT DELETE ROWS
+CREATE GLOBAL TEMP TABLE global_temptest(col int) ON COMMIT DELETE ROWS;
+BEGIN;
+INSERT INTO global_temptest VALUES (1);
+INSERT INTO global_temptest VALUES (2);
+SELECT * FROM global_temptest;
+ col 
+-----
+   1
+   2
+(2 rows)
+
+COMMIT;
+SELECT * FROM global_temptest;
+ col 
+-----
+(0 rows)
+
+DROP TABLE global_temptest;
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temptest(col) ON COMMIT DELETE ROWS AS SELECT 1;
+SELECT * FROM global_temptest;
+ col 
+-----
+   1
+(1 row)
+
+COMMIT;
+SELECT * FROM global_temptest;
+ col 
+-----
+(0 rows)
+
+DROP TABLE global_temptest;
+-- Test foreign keys
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temptest1(col int PRIMARY KEY);
+CREATE GLOBAL TEMP TABLE global_temptest2(col int REFERENCES global_temptest1)
+  ON COMMIT DELETE ROWS;
+INSERT INTO global_temptest1 VALUES (1);
+INSERT INTO global_temptest2 VALUES (1);
+COMMIT;
+SELECT * FROM global_temptest1;
+ col 
+-----
+   1
+(1 row)
+
+SELECT * FROM global_temptest2;
+ col 
+-----
+(0 rows)
+
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temptest3(col int PRIMARY KEY) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE global_temptest4(col int REFERENCES global_temptest3);
+COMMIT;
+ERROR:  unsupported ON COMMIT and foreign key combination
+DETAIL:  Table "global_temptest4" references "global_temptest3", but they do not have the same ON COMMIT setting.
+-- For partitioned temp tables, ON COMMIT actions ignore storage-less
+-- partitioned tables.
+BEGIN;
+CREATE GLOBAL TEMP TABLE temp_parted_oncommit (a int)
+  PARTITION BY LIST (a) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE temp_parted_oncommit_1
+  PARTITION OF temp_parted_oncommit
+  FOR VALUES IN (1) ON COMMIT DELETE ROWS;
+INSERT INTO temp_parted_oncommit VALUES (1);
+COMMIT;
+-- partitions are emptied by the previous commit
+SELECT * FROM temp_parted_oncommit;
+ a 
+---
+(0 rows)
+
+DROP TABLE temp_parted_oncommit;
+-- Using ON COMMIT DELETE on a partitioned table does not remove
+-- all rows if partitions preserve their data.
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temp_parted_oncommit_test (a int)
+  PARTITION BY LIST (a) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE global_temp_parted_oncommit_test1
+  PARTITION OF global_temp_parted_oncommit_test
+  FOR VALUES IN (1) ON COMMIT PRESERVE ROWS;
+INSERT INTO global_temp_parted_oncommit_test VALUES (1);
+COMMIT;
+-- Data from the remaining partition is still here as its rows are
+-- preserved.
+SELECT * FROM global_temp_parted_oncommit_test;
+ a 
+---
+ 1
+(1 row)
+
+-- two relations remain in this case.
+SELECT relname FROM pg_class WHERE relname LIKE 'global_temp_parted_oncommit_test%';
+              relname              
+-----------------------------------
+ global_temp_parted_oncommit_test
+ global_temp_parted_oncommit_test1
+(2 rows)
+
+DROP TABLE global_temp_parted_oncommit_test;
+-- Check ON COMMIT actions with inheritance trees.  Data on the parent is
+-- removed at commit, while the child keeps its rows.
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temp_inh_oncommit_test (a int) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE global_temp_inh_oncommit_test1 ()
+  INHERITS(global_temp_inh_oncommit_test) ON COMMIT PRESERVE ROWS;
+INSERT INTO global_temp_inh_oncommit_test1 VALUES (1);
+INSERT INTO global_temp_inh_oncommit_test VALUES (1);
+COMMIT;
+SELECT * FROM global_temp_inh_oncommit_test;
+ a 
+---
+ 1
+(1 row)
+
+-- two relations remain
+SELECT relname FROM pg_class WHERE relname LIKE 'global_temp_inh_oncommit_test%';
+            relname             
+--------------------------------
+ global_temp_inh_oncommit_test
+ global_temp_inh_oncommit_test1
+(2 rows)
+
+DROP TABLE global_temp_inh_oncommit_test1;
+DROP TABLE global_temp_inh_oncommit_test;
+-- Global temp table cannot inherit from temporary relation
+BEGIN;
+CREATE TEMP TABLE global_temp_table (a int) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE global_temp_table1 ()
+  INHERITS(global_temp_table) ON COMMIT PRESERVE ROWS;
+ERROR:  cannot inherit from temporary relation "global_temp_table"
+ROLLBACK;
+-- Temp table can inherit from global temporary relation
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temp_table (a int) ON COMMIT DELETE ROWS;
+CREATE TEMP TABLE temp_table1 ()
+  INHERITS(global_temp_table) ON COMMIT PRESERVE ROWS;
+CREATE TEMP TABLE temp_table2 ()
+  INHERITS(global_temp_table) ON COMMIT DELETE ROWS;
+INSERT INTO temp_table2 VALUES (2);
+INSERT INTO temp_table1 VALUES (1);
+INSERT INTO global_temp_table VALUES (0);
+SELECT * FROM global_temp_table;
+ a 
+---
+ 0
+ 1
+ 2
+(3 rows)
+
+COMMIT;
+SELECT * FROM global_temp_table;
+ a 
+---
+ 1
+(1 row)
+
+DROP TABLE temp_table2;
+DROP TABLE temp_table1;
+DROP TABLE global_temp_table;
+-- Global temp table can inherit from normal relation
+BEGIN;
+CREATE TABLE normal_table (a int);
+CREATE GLOBAL TEMP TABLE temp_table1 ()
+  INHERITS(normal_table) ON COMMIT PRESERVE ROWS;
+CREATE GLOBAL TEMP TABLE temp_table2 ()
+  INHERITS(normal_table) ON COMMIT DELETE ROWS;
+INSERT INTO temp_table2 VALUES (2);
+INSERT INTO temp_table1 VALUES (1);
+INSERT INTO normal_table VALUES (0);
+SELECT * FROM normal_table;
+ a 
+---
+ 0
+ 1
+ 2
+(3 rows)
+
+COMMIT;
+SELECT * FROM normal_table;
+ a 
+---
+ 0
+ 1
+(2 rows)
+
+DROP TABLE temp_table2;
+DROP TABLE temp_table1;
+DROP TABLE normal_table;
+-- Check SERIAL and BIGSERIAL pseudo-types
+CREATE GLOBAL TEMP TABLE global_temp_table ( aid BIGSERIAL, bid SERIAL );
+CREATE SEQUENCE test_sequence;
+INSERT INTO global_temp_table DEFAULT VALUES;
+INSERT INTO global_temp_table DEFAULT VALUES;
+INSERT INTO global_temp_table DEFAULT VALUES;
+SELECT * FROM global_temp_table;
+ aid | bid 
+-----+-----
+   1 |   1
+   2 |   2
+   3 |   3
+(3 rows)
+
+SELECT NEXTVAL( 'test_sequence' );
+ nextval 
+---------
+       1
+(1 row)
+
+\c
+SELECT * FROM global_temp_table;
+ aid | bid 
+-----+-----
+(0 rows)
+
+SELECT NEXTVAL( 'test_sequence' );
+ nextval 
+---------
+       2
+(1 row)
+
+INSERT INTO global_temp_table DEFAULT VALUES;
+INSERT INTO global_temp_table DEFAULT VALUES;
+INSERT INTO global_temp_table DEFAULT VALUES;
+SELECT * FROM global_temp_table;
+ aid | bid 
+-----+-----
+   1 |   1
+   2 |   2
+   3 |   3
+(3 rows)
+
+SELECT NEXTVAL( 'test_sequence' );
+ nextval 
+---------
+       3
+(1 row)
+
+DROP TABLE global_temp_table;
+DROP SEQUENCE test_sequence;
diff --git a/src/test/regress/expected/session_table.out b/src/test/regress/expected/session_table.out
new file mode 100644
index 0000000..1b9b3f4
--- /dev/null
+++ b/src/test/regress/expected/session_table.out
@@ -0,0 +1,64 @@
+create session table my_private_table(x integer primary key, y integer);
+insert into my_private_table values (generate_series(1,10000), generate_series(1,10000));
+select count(*) from my_private_table;
+ count 
+-------
+ 10000
+(1 row)
+
+\c
+select count(*) from my_private_table;
+ count 
+-------
+     0
+(1 row)
+
+select * from my_private_table where x=10001;
+ x | y 
+---+---
+(0 rows)
+
+insert into my_private_table values (generate_series(1,100000), generate_series(1,100000));
+create index on my_private_table(y);
+select * from my_private_table where x=10001;
+   x   |   y   
+-------+-------
+ 10001 | 10001
+(1 row)
+
+select * from my_private_table where y=10001;
+   x   |   y   
+-------+-------
+ 10001 | 10001
+(1 row)
+
+select count(*) from my_private_table;
+ count  
+--------
+ 100000
+(1 row)
+
+\c
+select * from my_private_table where x=100001;
+ x | y 
+---+---
+(0 rows)
+
+select * from my_private_table order by y desc limit 1;
+ x | y 
+---+---
+(0 rows)
+
+insert into my_private_table values (generate_series(1,100000), generate_series(1,100000));
+select * from my_private_table where x=100001;
+ x | y 
+---+---
+(0 rows)
+
+select * from my_private_table order by y desc limit 1;
+   x    |   y    
+--------+--------
+ 100000 | 100000
+(1 row)
+
+drop table my_private_table;
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index fc0f141..507cf7d 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -107,7 +107,7 @@ test: json jsonb json_encoding jsonpath jsonpath_encoding jsonb_jsonpath
 # NB: temp.sql does a reconnect which transiently uses 2 connections,
 # so keep this parallel group to at most 19 tests
 # ----------
-test: plancache limit plpgsql copy2 temp domain rangefuncs prepare conversion truncate alter_table sequence polymorphism rowtypes returning largeobject with xml
+test: plancache limit plpgsql copy2 temp global_temp session_table domain rangefuncs prepare conversion truncate alter_table sequence polymorphism rowtypes returning largeobject with xml
 
 # ----------
 # Another group of parallel tests
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index 68ac56a..3890777 100644
--- a/src/test/regress/serial_schedule
+++ b/src/test/regress/serial_schedule
@@ -172,6 +172,8 @@ test: limit
 test: plpgsql
 test: copy2
 test: temp
+test: global_temp
+test: session_table
 test: domain
 test: rangefuncs
 test: prepare
diff --git a/src/test/regress/sql/global_temp.sql b/src/test/regress/sql/global_temp.sql
new file mode 100644
index 0000000..3058b9b
--- /dev/null
+++ b/src/test/regress/sql/global_temp.sql
@@ -0,0 +1,151 @@
+--
+-- GLOBAL TEMP
+-- Test global temp relations
+--
+
+-- Test ON COMMIT DELETE ROWS
+
+CREATE GLOBAL TEMP TABLE global_temptest(col int) ON COMMIT DELETE ROWS;
+
+BEGIN;
+INSERT INTO global_temptest VALUES (1);
+INSERT INTO global_temptest VALUES (2);
+
+SELECT * FROM global_temptest;
+COMMIT;
+
+SELECT * FROM global_temptest;
+
+DROP TABLE global_temptest;
+
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temptest(col) ON COMMIT DELETE ROWS AS SELECT 1;
+
+SELECT * FROM global_temptest;
+COMMIT;
+
+SELECT * FROM global_temptest;
+
+DROP TABLE global_temptest;
+
+-- Test foreign keys
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temptest1(col int PRIMARY KEY);
+CREATE GLOBAL TEMP TABLE global_temptest2(col int REFERENCES global_temptest1)
+  ON COMMIT DELETE ROWS;
+INSERT INTO global_temptest1 VALUES (1);
+INSERT INTO global_temptest2 VALUES (1);
+COMMIT;
+SELECT * FROM global_temptest1;
+SELECT * FROM global_temptest2;
+
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temptest3(col int PRIMARY KEY) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE global_temptest4(col int REFERENCES global_temptest3);
+COMMIT;
+
+-- For partitioned temp tables, ON COMMIT actions ignore storage-less
+-- partitioned tables.
+BEGIN;
+CREATE GLOBAL TEMP TABLE temp_parted_oncommit (a int)
+  PARTITION BY LIST (a) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE temp_parted_oncommit_1
+  PARTITION OF temp_parted_oncommit
+  FOR VALUES IN (1) ON COMMIT DELETE ROWS;
+INSERT INTO temp_parted_oncommit VALUES (1);
+COMMIT;
+-- partitions are emptied by the previous commit
+SELECT * FROM temp_parted_oncommit;
+DROP TABLE temp_parted_oncommit;
+
+-- Using ON COMMIT DELETE on a partitioned table does not remove
+-- all rows if partitions preserve their data.
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temp_parted_oncommit_test (a int)
+  PARTITION BY LIST (a) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE global_temp_parted_oncommit_test1
+  PARTITION OF global_temp_parted_oncommit_test
+  FOR VALUES IN (1) ON COMMIT PRESERVE ROWS;
+INSERT INTO global_temp_parted_oncommit_test VALUES (1);
+COMMIT;
+-- Data from the remaining partition is still here as its rows are
+-- preserved.
+SELECT * FROM global_temp_parted_oncommit_test;
+-- two relations remain in this case.
+SELECT relname FROM pg_class WHERE relname LIKE 'global_temp_parted_oncommit_test%';
+DROP TABLE global_temp_parted_oncommit_test;
+
+-- Check ON COMMIT actions with inheritance trees.  Data on the parent is
+-- removed at commit, while the child keeps its rows.
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temp_inh_oncommit_test (a int) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE global_temp_inh_oncommit_test1 ()
+  INHERITS(global_temp_inh_oncommit_test) ON COMMIT PRESERVE ROWS;
+INSERT INTO global_temp_inh_oncommit_test1 VALUES (1);
+INSERT INTO global_temp_inh_oncommit_test VALUES (1);
+COMMIT;
+SELECT * FROM global_temp_inh_oncommit_test;
+-- two relations remain
+SELECT relname FROM pg_class WHERE relname LIKE 'global_temp_inh_oncommit_test%';
+DROP TABLE global_temp_inh_oncommit_test1;
+DROP TABLE global_temp_inh_oncommit_test;
+
+-- Global temp table cannot inherit from temporary relation
+BEGIN;
+CREATE TEMP TABLE global_temp_table (a int) ON COMMIT DELETE ROWS;
+CREATE GLOBAL TEMP TABLE global_temp_table1 ()
+  INHERITS(global_temp_table) ON COMMIT PRESERVE ROWS;
+ROLLBACK;
+
+-- Temp table can inherit from global temporary relation
+BEGIN;
+CREATE GLOBAL TEMP TABLE global_temp_table (a int) ON COMMIT DELETE ROWS;
+CREATE TEMP TABLE temp_table1 ()
+  INHERITS(global_temp_table) ON COMMIT PRESERVE ROWS;
+CREATE TEMP TABLE temp_table2 ()
+  INHERITS(global_temp_table) ON COMMIT DELETE ROWS;
+INSERT INTO temp_table2 VALUES (2);
+INSERT INTO temp_table1 VALUES (1);
+INSERT INTO global_temp_table VALUES (0);
+SELECT * FROM global_temp_table;
+COMMIT;
+SELECT * FROM global_temp_table;
+DROP TABLE temp_table2;
+DROP TABLE temp_table1;
+DROP TABLE global_temp_table;
+
+-- Global temp table can inherit from normal relation
+BEGIN;
+CREATE TABLE normal_table (a int);
+CREATE GLOBAL TEMP TABLE temp_table1 ()
+  INHERITS(normal_table) ON COMMIT PRESERVE ROWS;
+CREATE GLOBAL TEMP TABLE temp_table2 ()
+  INHERITS(normal_table) ON COMMIT DELETE ROWS;
+INSERT INTO temp_table2 VALUES (2);
+INSERT INTO temp_table1 VALUES (1);
+INSERT INTO normal_table VALUES (0);
+SELECT * FROM normal_table;
+COMMIT;
+SELECT * FROM normal_table;
+DROP TABLE temp_table2;
+DROP TABLE temp_table1;
+DROP TABLE normal_table;
+
+-- Check SERIAL and BIGSERIAL pseudo-types
+CREATE GLOBAL TEMP TABLE global_temp_table ( aid BIGSERIAL, bid SERIAL );
+CREATE SEQUENCE test_sequence;
+INSERT INTO global_temp_table DEFAULT VALUES;
+INSERT INTO global_temp_table DEFAULT VALUES;
+INSERT INTO global_temp_table DEFAULT VALUES;
+SELECT * FROM global_temp_table;
+SELECT NEXTVAL( 'test_sequence' );
+\c
+SELECT * FROM global_temp_table;
+SELECT NEXTVAL( 'test_sequence' );
+INSERT INTO global_temp_table DEFAULT VALUES;
+INSERT INTO global_temp_table DEFAULT VALUES;
+INSERT INTO global_temp_table DEFAULT VALUES;
+SELECT * FROM global_temp_table;
+SELECT NEXTVAL( 'test_sequence' );
+DROP TABLE global_temp_table;
+DROP SEQUENCE test_sequence;
diff --git a/src/test/regress/sql/session_table.sql b/src/test/regress/sql/session_table.sql
new file mode 100644
index 0000000..c6663dc
--- /dev/null
+++ b/src/test/regress/sql/session_table.sql
@@ -0,0 +1,18 @@
+create session table my_private_table(x integer primary key, y integer);
+insert into my_private_table values (generate_series(1,10000), generate_series(1,10000));
+select count(*) from my_private_table;
+\c
+select count(*) from my_private_table;
+select * from my_private_table where x=10001;
+insert into my_private_table values (generate_series(1,100000), generate_series(1,100000));
+create index on my_private_table(y);
+select * from my_private_table where x=10001;
+select * from my_private_table where y=10001;
+select count(*) from my_private_table;
+\c
+select * from my_private_table where x=100001;
+select * from my_private_table order by y desc limit 1;
+insert into my_private_table values (generate_series(1,100000), generate_series(1,100000));
+select * from my_private_table where x=100001;
+select * from my_private_table order by y desc limit 1;
+drop table my_private_table;
