On Tue, Nov 24, 2020 at 4:43 PM Hou, Zhijie <houzj.f...@cn.fujitsu.com> wrote:
>
> I'm very interested in this feature,
> and I'm looking at the patch. Here are some comments.
>

Thanks for the review.

>
> How about the following style:
>
>                 if (TupIsNull(outerTupleSlot))
>                         break;
>
>                 (void) node->ps.dest->receiveSlot(outerTupleSlot, node->ps.dest);
>                 node->ps.state->es_processed++;
>
> Which looks cleaner.
>

Done.

>
> The check can be replaced by ISCTAS(into).
>

Done.

>
> 'inerst' looks like a typo (insert).
>

Corrected.

>
> The code here calls strlen(intoclausestr) two times.
> Checking the existing code in ExecInitParallelPlan,
> it stores the strlen result in a variable.
>
> So how about the following style:
>
>         intoclause_len = strlen(intoclausestr);
>         ...
>         /* Store serialized intoclause. */
>         intoclause_space = shm_toc_allocate(pcxt->toc, intoclause_len + 1);
>         memcpy(intoclause_space, intoclausestr, intoclause_len + 1);
>         shm_toc_insert(pcxt->toc, PARALLEL_KEY_INTO_CLAUSE, intoclause_space);
>

Done.
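
For reference, the hunk in the attached v5 patch now looks like this
(in ExecInitParallelPlan, execParallel.c):

        intoclause_len = strlen(intoclausestr);
        ...
        char *intoclause_space = shm_toc_allocate(pcxt->toc,
                                                  intoclause_len + 1);
        memcpy(intoclause_space, intoclausestr, intoclause_len + 1);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_INTO_CLAUSE, intoclause_space);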

>
> The two checks on intoclausestr can be combined like:
>
> if (intoclausestr != NULL)
> {
> ...
> }
> else
> {
> ...
> }
>

Done.

Attaching v5 patch. Please consider it for further review.

With Regards,
Bharath Rupireddy.
EnterpriseDB: http://www.enterprisedb.com
From 21fbbb8d4297e6daa7a7ec696a36327f592089bd Mon Sep 17 00:00:00 2001
From: Bharath Rupireddy <bharath.rupireddy@enterprisedb.com>
Date: Wed, 25 Nov 2020 04:44:29 +0530
Subject: [PATCH v5] Parallel Inserts in CREATE TABLE AS

The idea of this patch is to allow the leader and each worker to
insert tuples in parallel if the SELECT part of the CTAS is
parallelizable.

The design:
In createas.c, let the planner know that the SELECT is from CTAS
so that it can set the number of tuples transferred from the
workers to the Gather node to 0. With this change, the planner is
more likely to choose a parallel plan. After planning, check in
createas.c whether the upper plan node is Gather and, if so, mark
a parallelism flag in the CTAS dest receiver. Pass the into
clause, object id, and command id from the leader to the workers,
so that each worker can create its own CTAS dest receiver. The
leader inserts its share of tuples if instructed to do so, and so
do the workers. Each worker atomically writes its number of
inserted tuples into a shared memory variable; the leader combines
this with its own count and reports the total to the client.
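
To illustrate, here is a sketch of the expected behavior, taken from
the regression tests added below (worker counts depend on settings
such as max_parallel_workers_per_gather):

    -- With parallel inserts, the Gather node returns no rows to the
    -- leader; the workers (and the leader, if it participates) write
    -- their shares of tuples directly into the new table.
    explain (costs off, analyze on, timing off, summary off)
    create table parallel_write as select length(stringu1) from tenk1;
    --  Gather (actual rows=0 loops=1)
    --    ->  Create parallel_write
    --      Workers Planned: 4
    --      Workers Launched: 4
    --      ->  Parallel Seq Scan on tenk1 (actual rows=2000 loops=5)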
---
 src/backend/access/heap/heapam.c             |  11 -
 src/backend/access/transam/xact.c            |  30 +-
 src/backend/commands/createas.c              | 313 ++++++++++++-------
 src/backend/commands/explain.c               |  36 +++
 src/backend/executor/execMain.c              |  18 ++
 src/backend/executor/execParallel.c          |  68 +++-
 src/backend/executor/nodeGather.c            |  99 +++++-
 src/backend/optimizer/path/costsize.c        |  12 +-
 src/include/access/xact.h                    |   1 +
 src/include/commands/createas.h              |  22 ++
 src/include/executor/execParallel.h          |   2 +
 src/include/nodes/execnodes.h                |   5 +
 src/include/nodes/parsenodes.h               |   1 +
 src/test/regress/expected/write_parallel.out | 143 +++++++++
 src/test/regress/sql/write_parallel.sql      |  65 ++++
 15 files changed, 688 insertions(+), 138 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 1b2f70499e..3045c0f046 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -2043,17 +2043,6 @@ static HeapTuple
 heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
 					CommandId cid, int options)
 {
-	/*
-	 * To allow parallel inserts, we need to ensure that they are safe to be
-	 * performed in workers. We have the infrastructure to allow parallel
-	 * inserts in general except for the cases where inserts generate a new
-	 * CommandId (eg. inserts into a table having a foreign key column).
-	 */
-	if (IsParallelWorker())
-		ereport(ERROR,
-				(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
-				 errmsg("cannot insert tuples in a parallel worker")));
-
 	tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
 	tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
 	tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 9cd0b7c11b..db6eedd635 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -763,18 +763,34 @@ GetCurrentCommandId(bool used)
 	/* this is global to a transaction, not subtransaction-local */
 	if (used)
 	{
-		/*
-		 * Forbid setting currentCommandIdUsed in a parallel worker, because
-		 * we have no provision for communicating this back to the leader.  We
-		 * could relax this restriction when currentCommandIdUsed was already
-		 * true at the start of the parallel operation.
-		 */
-		Assert(!IsParallelWorker());
+		/*
+		 * This is a temporary hack for the common parallel insert cases,
+		 * i.e. INSERT INTO ... SELECT, CTAS, COPY FROM; to be changed later.
+		 * In a parallel worker, allow setting currentCommandIdUsed only if
+		 * it was already set to true at the start of the parallel operation
+		 * (by way of SetCurrentCommandIdUsedForWorker()). We have to do this
+		 * because GetCurrentCommandId(true) may be called from anywhere
+		 * within a parallel worker, especially for parallel inserts.
+		 */
+		Assert(!(IsParallelWorker() && !currentCommandIdUsed));
 		currentCommandIdUsed = true;
 	}
 	return currentCommandId;
 }
 
+/*
+ *	SetCurrentCommandIdUsedForWorker
+ *
+ * For a parallel worker, record that the currentCommandId has been used. This
+ * must only be called at the start of a parallel operation.
+ */
+void
+SetCurrentCommandIdUsedForWorker(void)
+{
+	Assert(IsParallelWorker() && !currentCommandIdUsed && currentCommandId != InvalidCommandId);
+	currentCommandIdUsed = true;
+}
+
 /*
  *	SetParallelStartTimestamps
  *
diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c
index 6bf6c5a310..afab904c6b 100644
--- a/src/backend/commands/createas.c
+++ b/src/backend/commands/createas.c
@@ -51,18 +51,6 @@
 #include "utils/rls.h"
 #include "utils/snapmgr.h"
 
-typedef struct
-{
-	DestReceiver pub;			/* publicly-known function pointers */
-	IntoClause *into;			/* target relation specification */
-	/* These fields are filled by intorel_startup: */
-	Relation	rel;			/* relation to write to */
-	ObjectAddress reladdr;		/* address of rel, for ExecCreateTableAs */
-	CommandId	output_cid;		/* cmin to insert in output tuples */
-	int			ti_options;		/* table_tuple_insert performance options */
-	BulkInsertState bistate;	/* bulk insert state */
-} DR_intorel;
-
 /* utility functions for CTAS definition creation */
 static ObjectAddress create_ctas_internal(List *attrList, IntoClause *into);
 static ObjectAddress create_ctas_nodata(List *tlist, IntoClause *into);
@@ -328,10 +316,27 @@ ExecCreateTableAs(ParseState *pstate, CreateTableAsStmt *stmt,
 		query = linitial_node(Query, rewritten);
 		Assert(query->commandType == CMD_SELECT);
 
+		/*
+		 * Let the planner know that the SELECT query is for CTAS. This is
+		 * used to set the tuple transfer cost from workers to the Gather
+		 * node to zero (in case parallelism kicks in for the SELECT part of
+		 * the CTAS), as each worker will insert its share of tuples in parallel.
+		 */
+		if (IsParallelInsertInCTASAllowed(into, NULL))
+			query->isForCTAS = true;
+
 		/* plan the query */
 		plan = pg_plan_query(query, pstate->p_sourcetext,
 							 CURSOR_OPT_PARALLEL_OK, params);
 
+		/*
+		 * The SELECT part of the CTAS is parallelizable, so we can make
+		 * each parallel worker insert the tuples resulting from its share
+		 * of the plan execution into the target table.
+		 */
+		if (IsParallelInsertInCTASAllowed(into, plan))
+			((DR_intorel *) dest)->is_parallel = true;
+
 		/*
 		 * Use a snapshot with an updated command ID to ensure this query sees
 		 * results of any previously executed queries.  (This could only
@@ -418,6 +423,9 @@ CreateIntoRelDestReceiver(IntoClause *intoClause)
 	self->pub.rDestroy = intorel_destroy;
 	self->pub.mydest = DestIntoRel;
 	self->into = intoClause;
+	self->is_parallel = false;
+	self->is_parallel_worker = false;
+	self->object_id = InvalidOid;
 	/* other private fields will be set during intorel_startup */
 
 	return (DestReceiver *) self;
@@ -430,121 +438,169 @@ static void
 intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
 {
 	DR_intorel *myState = (DR_intorel *) self;
-	IntoClause *into = myState->into;
-	bool		is_matview;
-	List	   *attrList;
 	ObjectAddress intoRelationAddr;
 	Relation	intoRelationDesc;
-	ListCell   *lc;
-	int			attnum;
-
-	Assert(into != NULL);		/* else somebody forgot to set it */
 
-	/* This code supports both CREATE TABLE AS and CREATE MATERIALIZED VIEW */
-	is_matview = (into->viewQuery != NULL);
+	if (myState->is_parallel_worker)
+	{
+		/* In the worker */
+		intoRelationDesc = table_open(myState->object_id, AccessExclusiveLock);
+		myState->rel = intoRelationDesc;
+		myState->reladdr = InvalidObjectAddress;
+		myState->ti_options = 0;
+		myState->bistate = GetBulkInsertState();
 
-	/*
-	 * Build column definitions using "pre-cooked" type and collation info. If
-	 * a column name list was specified in CREATE TABLE AS, override the
-	 * column names derived from the query.  (Too few column names are OK, too
-	 * many are not.)
-	 */
-	attrList = NIL;
-	lc = list_head(into->colNames);
-	for (attnum = 0; attnum < typeinfo->natts; attnum++)
+		/*
+		 * Right after the table is created in the leader, the command id is
+		 * incremented (in create_ctas_internal()). The new command id is
+		 * marked as used in intorel_startup(), and then parallel mode is
+		 * entered. The command id and transaction id are serialized into the
+		 * parallel DSM; they are then available to all parallel workers. All
+		 * the workers need to mark the command id as used before insertion.
+		 */
+		SetCurrentCommandIdUsedForWorker();
+		myState->output_cid = GetCurrentCommandId(false);
+	}
+	else
 	{
-		Form_pg_attribute attribute = TupleDescAttr(typeinfo, attnum);
-		ColumnDef  *col;
-		char	   *colname;
+		IntoClause *into = myState->into;
+		bool		is_matview;
+		List	   *attrList;
+		ListCell   *lc;
+		int			attnum;
 
-		if (lc)
+		Assert(into != NULL);		/* else somebody forgot to set it */
+
+		/*
+		 * This code supports both CREATE TABLE AS and CREATE MATERIALIZED
+		 * VIEW.
+		 */
+		is_matview = (into->viewQuery != NULL);
+
+		/*
+		 * Build column definitions using "pre-cooked" type and collation info.
+		 * If a column name list was specified in CREATE TABLE AS, override the
+		 * column names derived from the query.  (Too few column names are OK,
+		 * too many are not.)
+		 */
+		attrList = NIL;
+		lc = list_head(into->colNames);
+		for (attnum = 0; attnum < typeinfo->natts; attnum++)
 		{
-			colname = strVal(lfirst(lc));
-			lc = lnext(into->colNames, lc);
+			Form_pg_attribute attribute = TupleDescAttr(typeinfo, attnum);
+			ColumnDef  *col;
+			char	   *colname;
+
+			if (lc)
+			{
+				colname = strVal(lfirst(lc));
+				lc = lnext(into->colNames, lc);
+			}
+			else
+				colname = NameStr(attribute->attname);
+
+			col = makeColumnDef(colname,
+								attribute->atttypid,
+								attribute->atttypmod,
+								attribute->attcollation);
+
+			/*
+			 * It's possible that the column is of a collatable type but the
+			 * collation could not be resolved, so double-check.  (We must
+			 * check this here because DefineRelation would adopt the type's
+			 * default collation rather than complaining.)
+			 */
+			if (!OidIsValid(col->collOid) &&
+				type_is_collatable(col->typeName->typeOid))
+				ereport(ERROR,
+						(errcode(ERRCODE_INDETERMINATE_COLLATION),
+						errmsg("no collation was derived for column \"%s\" with collatable type %s",
+								col->colname,
+								format_type_be(col->typeName->typeOid)),
+						errhint("Use the COLLATE clause to set the collation explicitly.")));
+
+			attrList = lappend(attrList, col);
 		}
-		else
-			colname = NameStr(attribute->attname);
 
-		col = makeColumnDef(colname,
-							attribute->atttypid,
-							attribute->atttypmod,
-							attribute->attcollation);
+		if (lc != NULL)
+			ereport(ERROR,
+					(errcode(ERRCODE_SYNTAX_ERROR),
+					errmsg("too many column names were specified")));
 
 		/*
-		 * It's possible that the column is of a collatable type but the
-		 * collation could not be resolved, so double-check.  (We must check
-		 * this here because DefineRelation would adopt the type's default
-		 * collation rather than complaining.)
+		 * Actually create the target table
 		 */
-		if (!OidIsValid(col->collOid) &&
-			type_is_collatable(col->typeName->typeOid))
-			ereport(ERROR,
-					(errcode(ERRCODE_INDETERMINATE_COLLATION),
-					 errmsg("no collation was derived for column \"%s\" with collatable type %s",
-							col->colname,
-							format_type_be(col->typeName->typeOid)),
-					 errhint("Use the COLLATE clause to set the collation explicitly.")));
+		intoRelationAddr = create_ctas_internal(attrList, into);
 
-		attrList = lappend(attrList, col);
-	}
+		/*
+		 * Finally we can open the target table
+		 */
+		intoRelationDesc = table_open(intoRelationAddr.objectId, AccessExclusiveLock);
 
-	if (lc != NULL)
-		ereport(ERROR,
-				(errcode(ERRCODE_SYNTAX_ERROR),
-				 errmsg("too many column names were specified")));
+		/*
+		 * Make sure the constructed table does not have RLS enabled.
+		 *
+		 * check_enable_rls() will ereport(ERROR) itself if the user has
+		 * requested something invalid, and otherwise will return RLS_ENABLED
+		 * if RLS should be enabled here.  We don't actually support that
+		 * currently, so throw our own ereport(ERROR) if that happens.
+		 */
+		if (check_enable_rls(intoRelationAddr.objectId, InvalidOid, false) == RLS_ENABLED)
+			ereport(ERROR,
+					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+					errmsg("policies not yet implemented for this command")));
 
-	/*
-	 * Actually create the target table
-	 */
-	intoRelationAddr = create_ctas_internal(attrList, into);
+		/*
+		 * Tentatively mark the target as populated, if it's a matview and
+		 * we're going to fill it; otherwise, no change needed.
+		 */
+		if (is_matview && !into->skipData)
+			SetMatViewPopulatedState(intoRelationDesc, true);
 
-	/*
-	 * Finally we can open the target table
-	 */
-	intoRelationDesc = table_open(intoRelationAddr.objectId, AccessExclusiveLock);
+		/*
+		 * Fill private fields of myState for use by later routines
+		 */
+		myState->rel = intoRelationDesc;
+		myState->reladdr = intoRelationAddr;
+		myState->output_cid = GetCurrentCommandId(true);
+		myState->ti_options = TABLE_INSERT_SKIP_FSM;
 
-	/*
-	 * Make sure the constructed table does not have RLS enabled.
-	 *
-	 * check_enable_rls() will ereport(ERROR) itself if the user has requested
-	 * something invalid, and otherwise will return RLS_ENABLED if RLS should
-	 * be enabled here.  We don't actually support that currently, so throw
-	 * our own ereport(ERROR) if that happens.
-	 */
-	if (check_enable_rls(intoRelationAddr.objectId, InvalidOid, false) == RLS_ENABLED)
-		ereport(ERROR,
-				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-				 errmsg("policies not yet implemented for this command")));
+		/*
+		 * If WITH NO DATA is specified, there is no need to set up the state
+		 * for bulk inserts as there are no tuples to insert.
+		 */
+		if (!into->skipData)
+			myState->bistate = GetBulkInsertState();
+		else
+			myState->bistate = NULL;
 
-	/*
-	 * Tentatively mark the target as populated, if it's a matview and we're
-	 * going to fill it; otherwise, no change needed.
-	 */
-	if (is_matview && !into->skipData)
-		SetMatViewPopulatedState(intoRelationDesc, true);
+		if (myState->is_parallel)
+		{
+			myState->object_id = intoRelationAddr.objectId;
 
-	/*
-	 * Fill private fields of myState for use by later routines
-	 */
-	myState->rel = intoRelationDesc;
-	myState->reladdr = intoRelationAddr;
-	myState->output_cid = GetCurrentCommandId(true);
-	myState->ti_options = TABLE_INSERT_SKIP_FSM;
+			/*
+			 * Do not skip the FSM while inserting tuples in parallel mode.
+			 * While extending the relation, instead of blocking on a page
+			 * when another worker is inserting into it, a worker can check
+			 * the FSM for another page that can accommodate the tuples.
+			 * This is a major benefit for parallel inserts.
+			 */
+			myState->ti_options = 0;
 
-	/*
-	 * If WITH NO DATA is specified, there is no need to set up the state for
-	 * bulk inserts as there are no tuples to insert.
-	 */
-	if (!into->skipData)
-		myState->bistate = GetBulkInsertState();
-	else
-		myState->bistate = NULL;
+			/*
+			 * Mark rd_createSubid invalid; otherwise, the workers are not
+			 * allowed to extend the table.
+			 */
+			myState->rel->rd_createSubid = InvalidSubTransactionId;
+		}
 
-	/*
-	 * Valid smgr_targblock implies something already wrote to the relation.
-	 * This may be harmless, but this function hasn't planned for it.
-	 */
-	Assert(RelationGetTargetBlock(intoRelationDesc) == InvalidBlockNumber);
+		/*
+		 * Valid smgr_targblock implies something already wrote to the
+		 * relation. This may be harmless, but this function hasn't planned for
+		 * it.
+		 */
+		Assert(RelationGetTargetBlock(intoRelationDesc) == InvalidBlockNumber);
+	}
 }
 
 /*
@@ -606,3 +662,49 @@ intorel_destroy(DestReceiver *self)
 {
 	pfree(self);
 }
+
+/*
+ * IsParallelInsertInCTASAllowed --- determine whether or not parallel
+ * insertion is possible.
+ */
+bool
+IsParallelInsertInCTASAllowed(IntoClause *into, PlannedStmt *plannedstmt)
+{
+	bool allowed = false;
+
+	if (ISCTAS(into))
+	{
+		if (into->rel != NULL &&
+			into->rel->relpersistence != RELPERSISTENCE_TEMP)
+			allowed = true;
+
+		if (plannedstmt != NULL && allowed)
+		{
+			/*
+			 * We allow parallel inserts by the workers only if the upper node
+			 * is Gather. We cannot let workers do parallel inserts when a
+			 * GatherMerge node is involved, as the leader backend performs
+			 * the final phase (merging the results from the workers).
+			 */
+			if (plannedstmt->parallelModeNeeded &&
+				plannedstmt->planTree != NULL &&
+				IsA(plannedstmt->planTree, Gather) &&
+				plannedstmt->planTree->lefttree != NULL &&
+				plannedstmt->planTree->lefttree->parallel_aware &&
+				plannedstmt->planTree->lefttree->parallel_safe)
+			{
+				/*
+				 * Since no rows are transferred from workers to the Gather
+				 * node, set plan_rows to 0 so that it shows up correctly in
+				 * EXPLAIN output. Note that this is already accounted for in
+				 * the cost calculations in cost_gather().
+				 */
+				plannedstmt->planTree->plan_rows = 0;
+			}
+			else
+				allowed = false;
+		}
+	}
+
+	return allowed;
+}
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 43f9b01e83..bb01c8fc53 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -371,6 +371,15 @@ ExplainOneQuery(Query *query, int cursorOptions,
 		return;
 	}
 
+	/*
+	 * Let the planner know that the SELECT query is for CTAS. This is used
+	 * to set the tuple transfer cost from workers to the Gather node to zero
+	 * (in case parallelism kicks in for the SELECT part of the CTAS), as
+	 * each worker will insert its share of tuples in parallel.
+	 */
+	if (IsParallelInsertInCTASAllowed(into, NULL))
+		query->isForCTAS = true;
+
 	/* if an advisor plugin is present, let it manage things */
 	if (ExplainOneQuery_hook)
 		(*ExplainOneQuery_hook) (query, cursorOptions, into, es,
@@ -536,7 +545,17 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
 	 * AS, we'd better use the appropriate tuple receiver.
 	 */
 	if (into)
+	{
 		dest = CreateIntoRelDestReceiver(into);
+
+		/*
+		 * The SELECT part of the CTAS is parallelizable, so we can make
+		 * each parallel worker insert the tuples resulting from its share
+		 * of the plan execution into the target table.
+		 */
+		if (IsParallelInsertInCTASAllowed(into, plannedstmt))
+			((DR_intorel *) dest)->is_parallel = true;
+	}
 	else
 		dest = None_Receiver;
 
@@ -1753,6 +1772,23 @@ ExplainNode(PlanState *planstate, List *ancestors,
 			{
 				Gather	   *gather = (Gather *) plan;
 
+				if (IsA(planstate, GatherState) &&
+					planstate->intoclause != NULL &&
+					IsA(planstate->intoclause, IntoClause) &&
+					planstate->dest != NULL &&
+					planstate->dest->mydest == DestIntoRel &&
+					((DR_intorel *) planstate->dest)->is_parallel &&
+					planstate->intoclause->rel != NULL &&
+					planstate->intoclause->rel->relname != NULL)
+				{
+					ExplainIndentText(es);
+					appendStringInfoString(es->str, "->  ");
+					appendStringInfoString(es->str, "Create ");
+					appendStringInfo(es->str, "%s\n", planstate->intoclause->rel->relname);
+					es->indent++;
+					ExplainIndentText(es);
+				}
+
 				show_scan_qual(plan->qual, "Filter", planstate, ancestors, es);
 				if (plan->qual)
 					show_instrumentation_count("Rows Removed by Filter", 1,
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 7179f589f9..e4efa3ac76 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -45,6 +45,7 @@
 #include "access/xact.h"
 #include "catalog/namespace.h"
 #include "catalog/pg_publication.h"
+#include "commands/createas.h"
 #include "commands/matview.h"
 #include "commands/trigger.h"
 #include "executor/execdebug.h"
@@ -352,6 +353,23 @@ standard_ExecutorRun(QueryDesc *queryDesc,
 	if (sendTuples)
 		dest->rStartup(dest, operation, queryDesc->tupDesc);
 
+	/*
+	 * For parallelizing inserts in CTAS, i.e. making each parallel worker
+	 * insert the tuples, we must pass information such as the into clause
+	 * (for each worker to build its own dest receiver) and the object id
+	 * (for each worker to open the table).
+	 */
+	if (queryDesc->plannedstmt->parallelModeNeeded &&
+		dest != NULL &&
+		dest->mydest == DestIntoRel &&
+		((DR_intorel *) dest)->is_parallel &&
+		!((DR_intorel *) dest)->is_parallel_worker)
+	{
+		queryDesc->planstate->intoclause = ((DR_intorel *) dest)->into;
+		queryDesc->planstate->objectid = ((DR_intorel *) dest)->object_id;
+		queryDesc->planstate->dest = dest;
+	}
+
 	/*
 	 * run plan
 	 */
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index befde52691..442c633232 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -23,6 +23,7 @@
 
 #include "postgres.h"
 
+#include "commands/createas.h"
 #include "executor/execParallel.h"
 #include "executor/executor.h"
 #include "executor/nodeAgg.h"
@@ -65,6 +66,7 @@
 #define PARALLEL_KEY_QUERY_TEXT		UINT64CONST(0xE000000000000008)
 #define PARALLEL_KEY_JIT_INSTRUMENTATION UINT64CONST(0xE000000000000009)
 #define PARALLEL_KEY_WAL_USAGE			UINT64CONST(0xE00000000000000A)
+#define PARALLEL_KEY_INTO_CLAUSE		UINT64CONST(0xE00000000000000B)
 
 #define PARALLEL_TUPLE_QUEUE_SIZE		65536
 
@@ -77,6 +79,9 @@ typedef struct FixedParallelExecutorState
 	dsa_pointer param_exec;
 	int			eflags;
 	int			jit_flags;
+	Oid			objectid;		/* object id for workers to open the relation */
+	/* number of tuples inserted by all the workers */
+	pg_atomic_uint64	processed;
 } FixedParallelExecutorState;
 
 /*
@@ -600,6 +605,8 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate,
 	Size		dsa_minsize = dsa_minimum_size();
 	char	   *query_string;
 	int			query_len;
+	char	   *intoclausestr = NULL;
+	int			intoclause_len = 0;
 
 	/*
 	 * Force any initplan outputs that we're going to pass to workers to be
@@ -712,6 +719,15 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate,
 	shm_toc_estimate_chunk(&pcxt->estimator, dsa_minsize);
 	shm_toc_estimate_keys(&pcxt->estimator, 1);
 
+	/* Estimate space for into clause for CTAS. */
+	if (ISCTAS(planstate->intoclause))
+	{
+		intoclausestr = nodeToString(planstate->intoclause);
+		intoclause_len = strlen(intoclausestr);
+		shm_toc_estimate_chunk(&pcxt->estimator, intoclause_len + 1);
+		shm_toc_estimate_keys(&pcxt->estimator, 1);
+	}
+
 	/* Everyone's had a chance to ask for space, so now create the DSM. */
 	InitializeParallelDSM(pcxt);
 
@@ -729,6 +745,15 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate,
 	fpes->param_exec = InvalidDsaPointer;
 	fpes->eflags = estate->es_top_eflags;
 	fpes->jit_flags = estate->es_jit_flags;
+	pg_atomic_init_u64(&fpes->processed, 0);
+	pei->processed = &fpes->processed;
+
+	if (intoclausestr != NULL &&
+		OidIsValid(planstate->objectid))
+		fpes->objectid = planstate->objectid;
+	else
+		fpes->objectid = InvalidOid;
+
 	shm_toc_insert(pcxt->toc, PARALLEL_KEY_EXECUTOR_FIXED, fpes);
 
 	/* Store query string */
@@ -758,8 +783,18 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate,
 	shm_toc_insert(pcxt->toc, PARALLEL_KEY_WAL_USAGE, walusage_space);
 	pei->wal_usage = walusage_space;
 
-	/* Set up the tuple queues that the workers will write into. */
-	pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false);
+	if (intoclausestr != NULL)
+	{
+		char *intoclause_space = shm_toc_allocate(pcxt->toc,
+												  intoclause_len + 1);
+		memcpy(intoclause_space, intoclausestr, intoclause_len + 1);
+		shm_toc_insert(pcxt->toc, PARALLEL_KEY_INTO_CLAUSE, intoclause_space);
+	}
+	else
+	{
+		/* Set up the tuple queues that the workers will write into. */
+		pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false);
+	}
 
 	/* We don't need the TupleQueueReaders yet, though. */
 	pei->reader = NULL;
@@ -1387,12 +1422,30 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
 	void	   *area_space;
 	dsa_area   *area;
 	ParallelWorkerContext pwcxt;
+	char	   *intoclausestr = NULL;
+	IntoClause *intoclause = NULL;
 
 	/* Get fixed-size state. */
 	fpes = shm_toc_lookup(toc, PARALLEL_KEY_EXECUTOR_FIXED, false);
 
-	/* Set up DestReceiver, SharedExecutorInstrumentation, and QueryDesc. */
-	receiver = ExecParallelGetReceiver(seg, toc);
+	intoclausestr = shm_toc_lookup(toc, PARALLEL_KEY_INTO_CLAUSE, true);
+	if (intoclausestr != NULL)
+	{
+		/*
+		 * If the worker is for parallel insert in CTAS, then use the proper
+		 * dest receiver.
+		 */
+		intoclause = (IntoClause *) stringToNode(intoclausestr);
+		receiver = CreateIntoRelDestReceiver(intoclause);
+		((DR_intorel *) receiver)->is_parallel_worker = true;
+		((DR_intorel *) receiver)->object_id = fpes->objectid;
+	}
+	else
+	{
+		/* Set up DestReceiver, SharedExecutorInstrumentation, and QueryDesc. */
+		receiver = ExecParallelGetReceiver(seg, toc);
+	}
+
 	instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION, true);
 	if (instrumentation != NULL)
 		instrument_options = instrumentation->instrument_options;
@@ -1471,6 +1524,13 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
 			queryDesc->estate->es_jit->instr;
 	}
 
+	/*
+	 * Write out the number of tuples this worker has inserted. The leader
+	 * will use it to report the total to the client.
+	 */
+	if (intoclausestr != NULL)
+		pg_atomic_add_fetch_u64(&fpes->processed, queryDesc->estate->es_processed);
+
 	/* Must do this after capturing instrumentation. */
 	ExecutorEnd(queryDesc);
 
diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c
index a01b46af14..93d0d95704 100644
--- a/src/backend/executor/nodeGather.c
+++ b/src/backend/executor/nodeGather.c
@@ -32,6 +32,7 @@
 
 #include "access/relscan.h"
 #include "access/xact.h"
+#include "commands/createas.h"
 #include "executor/execdebug.h"
 #include "executor/execParallel.h"
 #include "executor/nodeGather.h"
@@ -48,7 +49,7 @@ static TupleTableSlot *ExecGather(PlanState *pstate);
 static TupleTableSlot *gather_getnext(GatherState *gatherstate);
 static MinimalTuple gather_readnext(GatherState *gatherstate);
 static void ExecShutdownGatherWorkers(GatherState *node);
-
+static void ExecParallelInsertInCTAS(GatherState *node);
 
 /* ----------------------------------------------------------------
  *		ExecInitGather
@@ -131,6 +132,67 @@ ExecInitGather(Gather *node, EState *estate, int eflags)
 	return gatherstate;
 }
 
+/* ----------------------------------------------------------------
+ *		ExecParallelInsertInCTAS(node)
+ *
+ *		Facilitates parallel inserts by the parallel workers and/or
+ *		the leader for CREATE TABLE AS.
+ * ----------------------------------------------------------------
+ */
+static void
+ExecParallelInsertInCTAS(GatherState *node)
+{
+	/* Enable leader to insert in case no parallel workers were launched. */
+	if (node->nworkers_launched == 0 &&
+		!node->need_to_scan_locally)
+		node->need_to_scan_locally = true;
+	/*
+	 * By now, the parallel workers (if any were launched) would have started
+	 * their work i.e. inserting into the target table. If the leader is
+	 * chosen to participate in the parallel inserts in CTAS, it finishes its
+	 * share before waiting for the parallel workers to finish.
+	 */
+	if (node->need_to_scan_locally &&
+		node->ps.dest != NULL &&
+		node->ps.dest->mydest == DestIntoRel)
+	{
+		EState	   *estate = node->ps.state;
+		TupleTableSlot *outerTupleSlot;
+
+		for (;;)
+		{
+			/* Install our DSA area while executing the plan. */
+			estate->es_query_dsa =
+					node->pei ? node->pei->area : NULL;
+
+			outerTupleSlot = ExecProcNode(node->ps.lefttree);
+
+			estate->es_query_dsa = NULL;
+
+			if (TupIsNull(outerTupleSlot))
+				break;
+
+			(void) node->ps.dest->receiveSlot(outerTupleSlot, node->ps.dest);
+
+			node->ps.state->es_processed++;
+		}
+
+		node->need_to_scan_locally = false;
+	}
+
+	/* Wait for the parallel workers to finish. */
+	if (node->nworkers_launched > 0)
+	{
+		ExecShutdownGatherWorkers(node);
+
+		/*
+		 * Add the tuples inserted by all workers to the tuples inserted by
+		 * the leader (if any). This total will be reported to the client.
+		 */
+		node->ps.state->es_processed += pg_atomic_read_u64(node->pei->processed);
+	}
+}
+
 /* ----------------------------------------------------------------
  *		ExecGather(node)
  *
@@ -166,6 +228,16 @@ ExecGather(PlanState *pstate)
 		{
 			ParallelContext *pcxt;
 
+			/*
+			 * Stash the information that needs to be passed to the workers
+			 * for parallel inserts in CTAS.
+			 */
+			if (ISCTAS(node->ps.intoclause))
+			{
+				node->ps.lefttree->intoclause = node->ps.intoclause;
+				node->ps.lefttree->objectid = node->ps.objectid;
+			}
+
 			/* Initialize, or re-initialize, shared state needed by workers. */
 			if (!node->pei)
 				node->pei = ExecInitParallelPlan(node->ps.lefttree,
@@ -190,13 +262,16 @@ ExecGather(PlanState *pstate)
 			/* Set up tuple queue readers to read the results. */
 			if (pcxt->nworkers_launched > 0)
 			{
-				ExecParallelCreateReaders(node->pei);
-				/* Make a working array showing the active readers */
-				node->nreaders = pcxt->nworkers_launched;
-				node->reader = (TupleQueueReader **)
-					palloc(node->nreaders * sizeof(TupleQueueReader *));
-				memcpy(node->reader, node->pei->reader,
-					   node->nreaders * sizeof(TupleQueueReader *));
+				if (!ISCTAS(node->ps.intoclause))
+				{
+					ExecParallelCreateReaders(node->pei);
+					/* Make a working array showing the active readers */
+					node->nreaders = pcxt->nworkers_launched;
+					node->reader = (TupleQueueReader **)
+						palloc(node->nreaders * sizeof(TupleQueueReader *));
+					memcpy(node->reader, node->pei->reader,
+						node->nreaders * sizeof(TupleQueueReader *));
+				}
 			}
 			else
 			{
@@ -208,7 +283,8 @@ ExecGather(PlanState *pstate)
 		}
 
 		/* Run plan locally if no workers or enabled and not single-copy. */
-		node->need_to_scan_locally = (node->nreaders == 0)
+		node->need_to_scan_locally = (node->nreaders == 0 &&
+			!ISCTAS(node->ps.intoclause))
 			|| (!gather->single_copy && parallel_leader_participation);
 		node->initialized = true;
 	}
@@ -220,6 +296,11 @@ ExecGather(PlanState *pstate)
 	econtext = node->ps.ps_ExprContext;
 	ResetExprContext(econtext);
 
+	if (ISCTAS(node->ps.intoclause))
+	{
+		ExecParallelInsertInCTAS(node);
+		return NULL;
+	}
 	/*
 	 * Get next tuple, either from one of our workers, or by running the plan
 	 * ourselves.
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 22d6935824..4f03db31c7 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -393,7 +393,17 @@ cost_gather(GatherPath *path, PlannerInfo *root,
 
 	/* Parallel setup and communication cost. */
 	startup_cost += parallel_setup_cost;
-	run_cost += parallel_tuple_cost * path->path.rows;
+
+	/*
+	 * Set the number of tuples transferred from workers to the Gather node
+	 * to zero, as each worker inserts, in parallel, the tuples resulting
+	 * from its chunk of the plan execution. This change may make the
+	 * parallel plan the cheapest among all plans and influence the planner
+	 * to choose it.
+	 */
+	if (!(root->parse->isForCTAS &&
+		root->query_level == 1))
+		run_cost += parallel_tuple_cost * path->path.rows;
 
 	path->path.startup_cost = startup_cost;
 	path->path.total_cost = (startup_cost + run_cost);
diff --git a/src/include/access/xact.h b/src/include/access/xact.h
index 7320de345c..5beae6c617 100644
--- a/src/include/access/xact.h
+++ b/src/include/access/xact.h
@@ -389,6 +389,7 @@ extern FullTransactionId GetCurrentFullTransactionIdIfAny(void);
 extern void MarkCurrentTransactionIdLoggedIfAny(void);
 extern bool SubTransactionIsActive(SubTransactionId subxid);
 extern CommandId GetCurrentCommandId(bool used);
+extern void SetCurrentCommandIdUsedForWorker(void);
 extern void SetParallelStartTimestamps(TimestampTz xact_ts, TimestampTz stmt_ts);
 extern TimestampTz GetCurrentTransactionStartTimestamp(void);
 extern TimestampTz GetCurrentStatementStartTimestamp(void);
diff --git a/src/include/commands/createas.h b/src/include/commands/createas.h
index 7629230254..9271e84e4d 100644
--- a/src/include/commands/createas.h
+++ b/src/include/commands/createas.h
@@ -14,12 +14,31 @@
 #ifndef CREATEAS_H
 #define CREATEAS_H
 
+#include "access/heapam.h"
 #include "catalog/objectaddress.h"
 #include "nodes/params.h"
+#include "nodes/plannodes.h"
 #include "parser/parse_node.h"
 #include "tcop/dest.h"
 #include "utils/queryenvironment.h"
 
+typedef struct
+{
+	DestReceiver pub;			/* publicly-known function pointers */
+	IntoClause *into;			/* target relation specification */
+	/* These fields are filled by intorel_startup: */
+	Relation	rel;			/* relation to write to */
+	ObjectAddress reladdr;		/* address of rel, for ExecCreateTableAs */
+	CommandId	output_cid;		/* cmin to insert in output tuples */
+	int			ti_options;		/* table_tuple_insert performance options */
+	BulkInsertState bistate;	/* bulk insert state */
+	bool		is_parallel;	/* is parallelism to be considered? */
+	bool		is_parallel_worker; /* true for parallel worker */
+	/* Used for table open by parallel worker */
+	Oid			object_id;
+} DR_intorel;
+
+#define ISCTAS(intoclause) ((intoclause) != NULL && IsA((intoclause), IntoClause))
 
 extern ObjectAddress ExecCreateTableAs(ParseState *pstate, CreateTableAsStmt *stmt,
 									   ParamListInfo params, QueryEnvironment *queryEnv,
@@ -29,4 +48,7 @@ extern int	GetIntoRelEFlags(IntoClause *intoClause);
 
 extern DestReceiver *CreateIntoRelDestReceiver(IntoClause *intoClause);
 
+extern bool IsParallelInsertInCTASAllowed(IntoClause *intoClause,
+										  PlannedStmt *plannedstmt);
+
 #endif							/* CREATEAS_H */
diff --git a/src/include/executor/execParallel.h b/src/include/executor/execParallel.h
index 5a39a5b29c..e475fbdd35 100644
--- a/src/include/executor/execParallel.h
+++ b/src/include/executor/execParallel.h
@@ -35,6 +35,8 @@ typedef struct ParallelExecutorInfo
 	/* These two arrays have pcxt->nworkers_launched entries: */
 	shm_mq_handle **tqueue;		/* tuple queues for worker output */
 	struct TupleQueueReader **reader;	/* tuple reader/writer support */
+	/* Number of tuples inserted by all workers */
+	volatile pg_atomic_uint64	*processed;
 } ParallelExecutorInfo;
 
 extern ParallelExecutorInfo *ExecInitParallelPlan(PlanState *planstate,
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 61ba4c3666..5277d66150 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -23,6 +23,7 @@
 #include "nodes/tidbitmap.h"
 #include "partitioning/partdefs.h"
 #include "storage/condition_variable.h"
+#include "tcop/dest.h"
 #include "utils/hsearch.h"
 #include "utils/queryenvironment.h"
 #include "utils/reltrigger.h"
@@ -1009,6 +1010,10 @@ typedef struct PlanState
 	bool		outeropsset;
 	bool		inneropsset;
 	bool		resultopsset;
+	/* Parallel inserts in CTAS related info follows. */
+	IntoClause	*intoclause;
+	Oid			objectid;
+	DestReceiver *dest;
 } PlanState;
 
 /* ----------------
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index d1f9ef29ca..65c393743c 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -180,6 +180,7 @@ typedef struct Query
 	 */
 	int			stmt_location;	/* start location, or -1 if unknown */
 	int			stmt_len;		/* length in bytes; 0 means "rest of string" */
+	bool		isForCTAS;		/* is the SELECT query for CREATE TABLE AS? */
 } Query;
 
 
diff --git a/src/test/regress/expected/write_parallel.out b/src/test/regress/expected/write_parallel.out
index 0c4da2591a..11ef18b8a4 100644
--- a/src/test/regress/expected/write_parallel.out
+++ b/src/test/regress/expected/write_parallel.out
@@ -76,4 +76,147 @@ explain (costs off) create table parallel_write as execute prep_stmt;
 
 create table parallel_write as execute prep_stmt;
 drop table parallel_write;
+--
+-- Test parallel inserts in create table as/select into/create materialized view
+--
+-- parallel inserts must occur
+explain (costs off, analyze on, timing off, summary off)
+create table parallel_write as select length(stringu1) from tenk1;
+                          QUERY PLAN                           
+---------------------------------------------------------------
+ Gather (actual rows=0 loops=1)
+   ->  Create parallel_write
+     Workers Planned: 4
+     Workers Launched: 4
+     ->  Parallel Seq Scan on tenk1 (actual rows=2000 loops=5)
+(5 rows)
+
+drop table parallel_write;
+-- parallel inserts must not occur
+explain (costs off, analyze on, timing off, summary off)
+create temporary table parallel_write as select length(stringu1) from tenk1;
+                         QUERY PLAN                          
+-------------------------------------------------------------
+ Gather (actual rows=10000 loops=1)
+   Workers Planned: 4
+   Workers Launched: 4
+   ->  Parallel Seq Scan on tenk1 (actual rows=2000 loops=5)
+(4 rows)
+
+drop table parallel_write;
+-- parallel inserts must occur
+explain (costs off, analyze on, timing off, summary off)
+create unlogged table parallel_write as select length(stringu1) from tenk1;
+                          QUERY PLAN                           
+---------------------------------------------------------------
+ Gather (actual rows=0 loops=1)
+   ->  Create parallel_write
+     Workers Planned: 4
+     Workers Launched: 4
+     ->  Parallel Seq Scan on tenk1 (actual rows=2000 loops=5)
+(5 rows)
+
+drop table parallel_write;
+-- parallel inserts must occur
+explain (costs off, analyze on, timing off, summary off)
+select length(stringu1) into parallel_write from tenk1;
+                          QUERY PLAN                           
+---------------------------------------------------------------
+ Gather (actual rows=0 loops=1)
+   ->  Create parallel_write
+     Workers Planned: 4
+     Workers Launched: 4
+     ->  Parallel Seq Scan on tenk1 (actual rows=2000 loops=5)
+(5 rows)
+
+drop table parallel_write;
+-- parallel inserts must not occur
+explain (costs off, analyze on, timing off, summary off)
+select length(stringu1) into temporary parallel_write from tenk1;
+                         QUERY PLAN                          
+-------------------------------------------------------------
+ Gather (actual rows=10000 loops=1)
+   Workers Planned: 4
+   Workers Launched: 4
+   ->  Parallel Seq Scan on tenk1 (actual rows=2000 loops=5)
+(4 rows)
+
+drop table parallel_write;
+-- parallel inserts must occur
+explain (costs off, analyze on, timing off, summary off)
+select length(stringu1) into unlogged parallel_write from tenk1;
+                          QUERY PLAN                           
+---------------------------------------------------------------
+ Gather (actual rows=0 loops=1)
+   ->  Create parallel_write
+     Workers Planned: 4
+     Workers Launched: 4
+     ->  Parallel Seq Scan on tenk1 (actual rows=2000 loops=5)
+(5 rows)
+
+drop table parallel_write;
+-- parallel inserts must not occur
+explain (costs off, analyze on, timing off, summary off)
+create table parallel_write as select length(stringu1) from tenk1 for update;
+                     QUERY PLAN                      
+-----------------------------------------------------
+ LockRows (actual rows=10000 loops=1)
+   ->  Seq Scan on tenk1 (actual rows=10000 loops=1)
+(2 rows)
+
+drop table parallel_write;
+-- parallel inserts must occur
+explain (costs off, analyze on, timing off, summary off)
+create materialized view parallel_mat_view as
+select length(stringu1) from tenk1;
+                          QUERY PLAN                           
+---------------------------------------------------------------
+ Gather (actual rows=0 loops=1)
+   ->  Create parallel_mat_view
+     Workers Planned: 4
+     Workers Launched: 4
+     ->  Parallel Seq Scan on tenk1 (actual rows=2000 loops=5)
+(5 rows)
+
+drop materialized view parallel_mat_view;
+-- parallel inserts must occur
+prepare parallel_write_prep as select length(stringu1) from tenk1;
+explain (costs off, analyze on, timing off, summary off)
+create table parallel_write as execute parallel_write_prep;
+                          QUERY PLAN                           
+---------------------------------------------------------------
+ Gather (actual rows=0 loops=1)
+   ->  Create parallel_write
+     Workers Planned: 4
+     Workers Launched: 4
+     ->  Parallel Seq Scan on tenk1 (actual rows=2000 loops=5)
+(5 rows)
+
+deallocate parallel_write_prep;
+drop table parallel_write;
+-- parallel inserts must occur
+explain (costs off, analyze on, timing off, summary off)
+create table parallel_write as select now(), four from tenk1;
+                          QUERY PLAN                           
+---------------------------------------------------------------
+ Gather (actual rows=0 loops=1)
+   ->  Create parallel_write
+     Workers Planned: 4
+     Workers Launched: 4
+     ->  Parallel Seq Scan on tenk1 (actual rows=2000 loops=5)
+(5 rows)
+
+drop table parallel_write;
+-- parallel inserts must not occur
+create sequence parallel_write_sequence;
+explain (costs off, analyze on, timing off, summary off)
+create table parallel_write as
+select nextval('parallel_write_sequence'), four from tenk1;
+                  QUERY PLAN                   
+-----------------------------------------------
+ Seq Scan on tenk1 (actual rows=10000 loops=1)
+(1 row)
+
+drop table parallel_write;
+drop sequence parallel_write_sequence;
 rollback;
diff --git a/src/test/regress/sql/write_parallel.sql b/src/test/regress/sql/write_parallel.sql
index 78b479cedf..dd4233b399 100644
--- a/src/test/regress/sql/write_parallel.sql
+++ b/src/test/regress/sql/write_parallel.sql
@@ -39,4 +39,69 @@ explain (costs off) create table parallel_write as execute prep_stmt;
 create table parallel_write as execute prep_stmt;
 drop table parallel_write;
 
+--
+-- Test parallel inserts in create table as/select into/create materialized view
+--
+
+-- parallel inserts must occur
+explain (costs off, analyze on, timing off, summary off)
+create table parallel_write as select length(stringu1) from tenk1;
+drop table parallel_write;
+
+-- parallel inserts must not occur
+explain (costs off, analyze on, timing off, summary off)
+create temporary table parallel_write as select length(stringu1) from tenk1;
+drop table parallel_write;
+
+-- parallel inserts must occur
+explain (costs off, analyze on, timing off, summary off)
+create unlogged table parallel_write as select length(stringu1) from tenk1;
+drop table parallel_write;
+
+-- parallel inserts must occur
+explain (costs off, analyze on, timing off, summary off)
+select length(stringu1) into parallel_write from tenk1;
+drop table parallel_write;
+
+-- parallel inserts must not occur
+explain (costs off, analyze on, timing off, summary off)
+select length(stringu1) into temporary parallel_write from tenk1;
+drop table parallel_write;
+
+-- parallel inserts must occur
+explain (costs off, analyze on, timing off, summary off)
+select length(stringu1) into unlogged parallel_write from tenk1;
+drop table parallel_write;
+
+-- parallel inserts must not occur
+explain (costs off, analyze on, timing off, summary off)
+create table parallel_write as select length(stringu1) from tenk1 for update;
+drop table parallel_write;
+
+-- parallel inserts must occur
+explain (costs off, analyze on, timing off, summary off)
+create materialized view parallel_mat_view as
+select length(stringu1) from tenk1;
+drop materialized view parallel_mat_view;
+
+-- parallel inserts must occur
+prepare parallel_write_prep as select length(stringu1) from tenk1;
+explain (costs off, analyze on, timing off, summary off)
+create table parallel_write as execute parallel_write_prep;
+deallocate parallel_write_prep;
+drop table parallel_write;
+
+-- parallel inserts must occur
+explain (costs off, analyze on, timing off, summary off)
+create table parallel_write as select now(), four from tenk1;
+drop table parallel_write;
+
+-- parallel inserts must not occur
+create sequence parallel_write_sequence;
+explain (costs off, analyze on, timing off, summary off)
+create table parallel_write as
+select nextval('parallel_write_sequence'), four from tenk1;
+drop table parallel_write;
+drop sequence parallel_write_sequence;
+
 rollback;
-- 
2.25.1
