diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c
index a01b46a..ff08f37 100644
--- a/src/backend/executor/nodeGather.c
+++ b/src/backend/executor/nodeGather.c
@@ -166,6 +166,23 @@ ExecGather(PlanState *pstate)
 		{
 			ParallelContext *pcxt;
 
+			/*
+			 * We need to avoid an attempt on INSERT to assign a
+			 * FullTransactionId whilst in parallel mode (which is in
+			 * effect due to the underlying parallel query) - so the
+			 * FullTransactionId is assigned here. Parallel mode must
+			 * be temporarily escaped in order for this to be possible.
+			 * The FullTransactionId will be included in the transaction
+			 * state that is serialized in the parallel DSM.
+			 */
+			if (estate->es_plannedstmt->commandType == CMD_INSERT)
+			{
+				Assert(IsInParallelMode());
+				ExitParallelMode();
+				GetCurrentFullTransactionId();
+				EnterParallelMode();
+			}
+
 			/* Initialize, or re-initialize, shared state needed by workers. */
 			if (!node->pei)
 				node->pei = ExecInitParallelPlan(node->ps.lefttree,
diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c
index 4712934..cc197dd 100644
--- a/src/backend/executor/nodeGatherMerge.c
+++ b/src/backend/executor/nodeGatherMerge.c
@@ -1,4 +1,4 @@
-/*-------------------------------------------------------------------------
+/*-------------------------------------------------------------------------
  *
  * nodeGatherMerge.c
  *		Scan a plan in multiple workers, and do order-preserving merge.
@@ -210,6 +210,21 @@ ExecGatherMerge(PlanState *pstate)
 		{
 			ParallelContext *pcxt;
 
+			if (estate->es_plannedstmt->commandType == CMD_INSERT)
+			{
+				/*
+				 * We need to avoid an attempt on INSERT to assign a
+				 * FullTransactionId whilst in parallel mode (which is in
+				 * effect due to the underlying parallel query) - so the
+				 * FullTransactionId is assigned here. Parallel mode must
+				 * be temporarily escaped in order for this to be possible.
+				 */
+				Assert(IsInParallelMode());
+				ExitParallelMode();
+				GetCurrentFullTransactionId();
+				EnterParallelMode();
+			}
+
 			/* Initialize, or re-initialize, shared state needed by workers. */
 			if (!node->pei)
 				node->pei = ExecInitParallelPlan(node->ps.lefttree,
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index f331f82..fd5b47a 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -337,7 +337,7 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
 	 */
 	if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
 		IsUnderPostmaster &&
-		parse->commandType == CMD_SELECT &&
+		(parse->commandType == CMD_SELECT || parse->commandType == CMD_INSERT) &&
 		!parse->hasModifyingCTE &&
 		max_parallel_workers_per_gather > 0 &&
 		!IsParallelWorker())
@@ -371,6 +371,7 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
 	 * parallel-unsafe, or else the query planner itself has a bug.
 	 */
 	glob->parallelModeNeeded = glob->parallelModeOK &&
+		(parse->commandType == CMD_SELECT) &&
 		(force_parallel_mode != FORCE_PARALLEL_OFF);
 
 	/* Determine what fraction of the plan is likely to be scanned */
@@ -425,7 +426,7 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
 	 * Optionally add a Gather node for testing purposes, provided this is
 	 * actually a safe thing to do.
 	 */
-	if (force_parallel_mode != FORCE_PARALLEL_OFF && top_plan->parallel_safe)
+	if (force_parallel_mode != FORCE_PARALLEL_OFF && parse->commandType == CMD_SELECT && top_plan->parallel_safe)
 	{
 		Gather	   *gather = makeNode(Gather);
 
