diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml
index 340c961..43f5081 100644
--- a/doc/src/sgml/ddl.sgml
+++ b/doc/src/sgml/ddl.sgml
@@ -2986,6 +2986,11 @@ VALUES ('Albany', NULL, NULL, 'NY');
     foreign table partitions.
    </para>
 
+   <para>
+    Updating the partition key of a row might cause it to be moved into a
+    different partition, one whose partition constraint the new row satisfies.
+   </para>
+
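+   <para>
+    For example, assuming a simple range-partitioned table (the names below
+    are illustrative only), an <command>UPDATE</> of the partition key moves
+    the row to the partition that accepts the new value:
+<programlisting>
+CREATE TABLE tbl (key int) PARTITION BY RANGE (key);
+CREATE TABLE tbl_1_10  PARTITION OF tbl FOR VALUES FROM (1) TO (10);
+CREATE TABLE tbl_10_20 PARTITION OF tbl FOR VALUES FROM (10) TO (20);
+
+INSERT INTO tbl VALUES (5);             -- row is routed into tbl_1_10
+UPDATE tbl SET key = 15 WHERE key = 5;  -- row is moved into tbl_10_20
+</programlisting>
+   </para>
+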
    <sect3 id="ddl-partitioning-declarative-example">
     <title>Example</title>
 
@@ -3278,9 +3283,20 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02
 
      <listitem>
       <para>
-       An <command>UPDATE</> that causes a row to move from one partition to
-       another fails, because the new value of the row fails to satisfy the
-       implicit partition constraint of the original partition.
+       When an <command>UPDATE</> causes a row to move from one partition to
+       another, there is a chance that a concurrent <command>UPDATE</> or
+       <command>DELETE</> will miss this row. Suppose that, while the row is
+       being moved, it is still visible to the concurrent session, which is
+       about to <command>UPDATE</> or <command>DELETE</> this same row. That
+       DML operation can silently skip the row if the first session deletes
+       it from the partition as part of its <command>UPDATE</> row movement.
+       In such a case, the concurrent <command>UPDATE</>/<command>DELETE</>,
+       being unaware of the row movement, concludes that the row has simply
+       been deleted, so there is nothing to be done for it. In the usual
+       case, where the table is not partitioned or where there is no row
+       movement, the second session would instead have identified the newly
+       updated row and carried out the <command>UPDATE</>/<command>DELETE</>
+       on that new row version.
       </para>
      </listitem>
 
diff --git a/doc/src/sgml/ref/update.sgml b/doc/src/sgml/ref/update.sgml
index 8a1619f..28cfc1a 100644
--- a/doc/src/sgml/ref/update.sgml
+++ b/doc/src/sgml/ref/update.sgml
@@ -282,10 +282,17 @@ UPDATE <replaceable class="parameter">count</replaceable>
 
   <para>
    In the case of a partitioned table, updating a row might cause it to no
-   longer satisfy the partition constraint.  Since there is no provision to
-   move the row to the partition appropriate to the new value of its
-   partitioning key, an error will occur in this case.  This can also happen
-   when updating a partition directly.
+   longer satisfy the partition constraint of the containing partition. In
+   that case, if there is some other partition in the partition tree whose
+   partition constraint this row does satisfy, the row is moved to that
+   partition. If there is no such partition, an error occurs. The error also
+   occurs when updating a partition directly. Behind the scenes, the row
+   movement is actually a <command>DELETE</> and <command>INSERT</>
+   operation. However, a concurrent <command>UPDATE</> or
+   <command>DELETE</> on the same row may miss this row. For details, see
+   <xref linkend="ddl-partitioning-declarative-limitations">.
   </para>
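+
+  <para>
+   For example (an illustrative sketch; the table and column names are
+   hypothetical), updating a leaf partition directly with a value that falls
+   outside that partition's bounds raises an error rather than moving the row:
+<screen>
+UPDATE tbl_1_10 SET key = 15 WHERE key = 5;
+ERROR:  new row for relation "tbl_1_10" violates partition constraint
+DETAIL:  Failing row contains (15).
+</screen>
+  </para>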
  </refsect1>
 
diff --git a/doc/src/sgml/trigger.sgml b/doc/src/sgml/trigger.sgml
index 8f724c8..b0ed167 100644
--- a/doc/src/sgml/trigger.sgml
+++ b/doc/src/sgml/trigger.sgml
@@ -151,6 +151,29 @@
    </para>
 
    <para>
+    If an <command>UPDATE</command> on a partitioned table causes a row to
+    move to another partition, it will be performed as a
+    <command>DELETE</command> from the original partition followed by
+    <command>INSERT</command> into the new partition. In this case, all
+    row-level <literal>BEFORE</> <command>UPDATE</command> triggers and all
+    row-level <literal>BEFORE</> <command>DELETE</command> triggers are fired
+    on the original partition. Then all row-level <literal>BEFORE</>
+    <command>INSERT</command> triggers are fired on the destination partition.
+    Be aware that these triggers can produce surprising outcomes when they
+    affect the row being moved. As far as <literal>AFTER ROW</>
+    triggers are concerned, <literal>AFTER</> <command>DELETE</command> and
+    <literal>AFTER</> <command>INSERT</command> triggers are applied, but
+    <literal>AFTER</> <command>UPDATE</command> triggers are not, because the
+    <command>UPDATE</command> has been converted into a
+    <command>DELETE</command> and an <command>INSERT</command>. As far as
+    statement-level triggers are concerned, no <command>DELETE</command> or
+    <command>INSERT</command> triggers are fired even if row movement occurs;
+    only the <command>UPDATE</command> triggers defined on the table named in
+    the <command>UPDATE</command> statement are fired.
+   </para>
+
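+   <para>
+    As an illustration (a sketch using hypothetical table and trigger names),
+    suppose <structname>tbl</structname> is a table partitioned by range on
+    column <structfield>key</structfield>, with leaf partitions
+    <structname>tbl_1_10</structname> and <structname>tbl_10_20</structname>
+    that each carry the same row-level <literal>BEFORE</> trigger:
+<programlisting>
+CREATE FUNCTION log_trig() RETURNS trigger AS $$
+BEGIN
+  RAISE NOTICE '% % on %', TG_WHEN, TG_OP, TG_TABLE_NAME;
+  RETURN COALESCE(NEW, OLD);    -- let the operation proceed
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE TRIGGER t_1_10 BEFORE INSERT OR UPDATE OR DELETE ON tbl_1_10
+    FOR EACH ROW EXECUTE PROCEDURE log_trig();
+CREATE TRIGGER t_10_20 BEFORE INSERT OR UPDATE OR DELETE ON tbl_10_20
+    FOR EACH ROW EXECUTE PROCEDURE log_trig();
+</programlisting>
+    An <command>UPDATE</command> of <structname>tbl</structname> that moves a
+    row from <structname>tbl_1_10</structname> to
+    <structname>tbl_10_20</structname> would then produce notices in this
+    order:
+<screen>
+NOTICE:  BEFORE UPDATE on tbl_1_10
+NOTICE:  BEFORE DELETE on tbl_1_10
+NOTICE:  BEFORE INSERT on tbl_10_20
+</screen>
+   </para>
+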
+   <para>
     Trigger functions invoked by per-statement triggers should always
     return <symbol>NULL</symbol>. Trigger functions invoked by per-row
     triggers can return a table row (a value of
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 8c58808..c1ccdc5 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -2651,7 +2651,7 @@ CopyFrom(CopyState cstate)
 				/* Check the constraints of the tuple */
 				if (cstate->rel->rd_att->constr ||
 					resultRelInfo->ri_PartitionCheck)
-					ExecConstraints(resultRelInfo, slot, oldslot, estate);
+					ExecConstraints(resultRelInfo, slot, oldslot, estate, true);
 
 				if (useHeapMultiInsert)
 				{
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 920b120..d4ba965 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -1783,7 +1783,7 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
  *
  * Note: This is called *iff* resultRelInfo is the main target table.
  */
-static bool
+bool
 ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
 				   EState *estate)
 {
@@ -1820,8 +1820,8 @@ ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
 /*
  * ExecConstraints - check constraints of the tuple in 'slot'
  *
- * This checks the traditional NOT NULL and check constraints, as well as
- * the partition constraint, if any.
+ * This checks the traditional NOT NULL and check constraints, as well as
+ * the partition constraint, if requested.
  *
  * Note: 'slot' contains the tuple to check the constraints of, which may
  * have been converted from the original input tuple after tuple routing,
@@ -1831,7 +1831,7 @@ ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
 void
 ExecConstraints(ResultRelInfo *resultRelInfo,
 				TupleTableSlot *slot, TupleTableSlot *orig_slot,
-				EState *estate)
+				EState *estate, bool check_partition_constraint)
 {
 	Relation	rel = resultRelInfo->ri_RelationDesc;
 	TupleDesc	tupdesc = RelationGetDescr(rel);
@@ -1918,33 +1918,51 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
 		}
 	}
 
-	if (resultRelInfo->ri_PartitionCheck &&
+	if (check_partition_constraint && resultRelInfo->ri_PartitionCheck &&
 		!ExecPartitionCheck(resultRelInfo, slot, estate))
-	{
-		char	   *val_desc;
-		Relation	orig_rel = rel;
+		ExecPartitionCheckEmitError(resultRelInfo, orig_slot, estate);
+}
 
-		/* See the comment above. */
-		if (resultRelInfo->ri_PartitionRoot)
-		{
-			rel = resultRelInfo->ri_PartitionRoot;
-			tupdesc = RelationGetDescr(rel);
-		}
+/*
+ * ExecPartitionCheckEmitError - Form and emit an error message after a failed
+ * partition constraint check.
+ *
+ * 'orig_slot' contains the original tuple to be shown in the error message.
+ */
+void
+ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo,
+							TupleTableSlot *orig_slot,
+							EState *estate)
+{
+	Relation	rel = resultRelInfo->ri_RelationDesc;
+	Relation	orig_rel = rel;
+	TupleDesc	tupdesc = RelationGetDescr(rel);
+	char	   *val_desc;
+	Bitmapset  *modifiedCols;
+	Bitmapset  *insertedCols;
+	Bitmapset  *updatedCols;
 
-		insertedCols = GetInsertedColumns(resultRelInfo, estate);
-		updatedCols = GetUpdatedColumns(resultRelInfo, estate);
-		modifiedCols = bms_union(insertedCols, updatedCols);
-		val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
-												 orig_slot,
-												 tupdesc,
-												 modifiedCols,
-												 64);
-		ereport(ERROR,
-				(errcode(ERRCODE_CHECK_VIOLATION),
-		  errmsg("new row for relation \"%s\" violates partition constraint",
-				 RelationGetRelationName(orig_rel)),
-			val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
+	/* See the comments in ExecConstraints. */
+	if (resultRelInfo->ri_PartitionRoot)
+	{
+		rel = resultRelInfo->ri_PartitionRoot;
+		tupdesc = RelationGetDescr(rel);
 	}
+
+	insertedCols = GetInsertedColumns(resultRelInfo, estate);
+	updatedCols = GetUpdatedColumns(resultRelInfo, estate);
+	modifiedCols = bms_union(insertedCols, updatedCols);
+	val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
+											 orig_slot,
+											 tupdesc,
+											 modifiedCols,
+											 64);
+	ereport(ERROR,
+			(errcode(ERRCODE_CHECK_VIOLATION),
+	  errmsg("new row for relation \"%s\" violates partition constraint",
+			 RelationGetRelationName(orig_rel)),
+		val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
 }
 
 /*
diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c
index f20d728..2f76140 100644
--- a/src/backend/executor/execReplication.c
+++ b/src/backend/executor/execReplication.c
@@ -389,7 +389,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
 
 		/* Check the constraints of the tuple */
 		if (rel->rd_att->constr)
-			ExecConstraints(resultRelInfo, slot, slot, estate);
+			ExecConstraints(resultRelInfo, slot, slot, estate, true);
 
 		/* Store the slot into tuple that we can inspect. */
 		tuple = ExecMaterializeSlot(slot);
@@ -448,7 +448,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
 
 		/* Check the constraints of the tuple */
 		if (rel->rd_att->constr)
-			ExecConstraints(resultRelInfo, slot, slot, estate);
+			ExecConstraints(resultRelInfo, slot, slot, estate, true);
 
 		/* Store the slot into tuple that we can write. */
 		tuple = ExecMaterializeSlot(slot);
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 0b524e0..64e40fe 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -62,7 +62,10 @@ static bool ExecOnConflictUpdate(ModifyTableState *mtstate,
 					 EState *estate,
 					 bool canSetTag,
 					 TupleTableSlot **returning);
-
+static void ExecInitPartitionWithCheckOptions(ModifyTableState *mtstate,
+											  Relation root_rel);
+static void ExecInitPartitionReturningProjection(ModifyTableState *mtstate,
+												 Relation root_rel);
+
 /*
  * Verify that the tuples to be produced by INSERT or UPDATE match the
  * target relation's rowtype
@@ -435,7 +438,7 @@ ExecInsert(ModifyTableState *mtstate,
 		 * Check the constraints of the tuple
 		 */
 		if (resultRelationDesc->rd_att->constr || resultRelInfo->ri_PartitionCheck)
-			ExecConstraints(resultRelInfo, slot, oldslot, estate);
+			ExecConstraints(resultRelInfo, slot, oldslot, estate, true);
 
 		if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
 		{
@@ -625,6 +628,8 @@ ExecDelete(ItemPointer tupleid,
 		   TupleTableSlot *planSlot,
 		   EPQState *epqstate,
 		   EState *estate,
+		   bool   *concurrently_deleted,
+		   bool process_returning,
 		   bool canSetTag)
 {
 	ResultRelInfo *resultRelInfo;
@@ -633,6 +638,9 @@ ExecDelete(ItemPointer tupleid,
 	HeapUpdateFailureData hufd;
 	TupleTableSlot *slot = NULL;
 
+	if (concurrently_deleted)
+		*concurrently_deleted = false;
+
 	/*
 	 * get information on the (current) result relation
 	 */
@@ -776,6 +784,8 @@ ldelete:;
 					}
 				}
 				/* tuple already deleted; nothing to do */
+				if (concurrently_deleted)
+					*concurrently_deleted = true;
 				return NULL;
 
 			default:
@@ -799,8 +809,8 @@ ldelete:;
 	/* AFTER ROW DELETE Triggers */
 	ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple);
 
-	/* Process RETURNING if present */
-	if (resultRelInfo->ri_projectReturning)
+	/* Process RETURNING if present and if requested */
+	if (process_returning && resultRelInfo->ri_projectReturning)
 	{
 		/*
 		 * We have to put the target tuple into a slot, which means first we
@@ -878,7 +888,8 @@ ldelete:;
  * ----------------------------------------------------------------
  */
 static TupleTableSlot *
-ExecUpdate(ItemPointer tupleid,
+ExecUpdate(ModifyTableState *mtstate,
+		   ItemPointer tupleid,
 		   HeapTuple oldtuple,
 		   TupleTableSlot *slot,
 		   TupleTableSlot *planSlot,
@@ -988,12 +999,90 @@ lreplace:;
 								 resultRelInfo, slot, estate);
 
 		/*
+		 * If a partition check fails, try to move the row into the right
+		 * partition.
+		 */
+		if (resultRelInfo->ri_PartitionCheck &&
+			!ExecPartitionCheck(resultRelInfo, slot, estate))
+		{
+			bool	is_partitioned_table = true;
+
+			if (mtstate->mt_partition_dispatch_info == NULL)
+			{
+				ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
+				Relation root_rel;
+
+				/*
+				 * If this is a partitioned table, we need to open the root
+				 * table, whose RT index is at the head of partitioned_rels.
+				 */
+				if (node->partitioned_rels)
+				{
+					Index	root_rti;
+					Oid		root_oid;
+
+					root_rti = linitial_int(node->partitioned_rels);
+					root_oid = getrelid(root_rti, estate->es_range_table);
+					root_rel = heap_open(root_oid, NoLock);	/* locked by InitPlan */
+				}
+				else /* this may be a leaf partition */
+					root_rel = mtstate->resultRelInfo->ri_RelationDesc;
+
+				is_partitioned_table =
+					root_rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
+
+				if (is_partitioned_table)
+					ExecSetupPartitionTupleRouting(
+										root_rel,
+										&mtstate->mt_partition_dispatch_info,
+										&mtstate->mt_partitions,
+										&mtstate->mt_partition_tupconv_maps,
+										&mtstate->mt_partition_tuple_slot,
+										&mtstate->mt_num_dispatch,
+										&mtstate->mt_num_partitions);
+
+				/* Build WITH CHECK OPTION constraints for leaf partitions */
+				ExecInitPartitionWithCheckOptions(mtstate, root_rel);
+
+				/* Build a projection for each leaf partition rel. */
+				ExecInitPartitionReturningProjection(mtstate, root_rel);
+
+				/* Close the root partitioned rel if we opened it above. */
+				if (root_rel != mtstate->resultRelInfo->ri_RelationDesc)
+					heap_close(root_rel, NoLock);
+			}
+
+			if (is_partitioned_table)
+			{
+				bool	concurrently_deleted;
+
+				/*
+				 * Skip RETURNING processing for DELETE. We want to return rows
+				 * from INSERT.
+				 */
+				ExecDelete(tupleid, oldtuple, planSlot, epqstate, estate,
+						   &concurrently_deleted, false, false);
+
+				if (concurrently_deleted)
+					return NULL;
+
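+				/*
+				 * ExecInsert() will re-route the tuple to the correct
+				 * partition and fire the INSERT triggers there.
+				 */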
+				return ExecInsert(mtstate, slot, planSlot, NULL,
+									  ONCONFLICT_NONE, estate, canSetTag);
+			}
+
+			/* It's not a partitioned table after all; error out. */
+			ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
+		}
+
+		/*
 		 * Check the constraints of the tuple.  Note that we pass the same
 		 * slot for the orig_slot argument, because unlike ExecInsert(), no
 		 * tuple-routing is performed here, hence the slot remains unchanged.
+		 * We have already checked partition constraints above, so skip them
+		 * below.
 		 */
-		if (resultRelationDesc->rd_att->constr || resultRelInfo->ri_PartitionCheck)
-			ExecConstraints(resultRelInfo, slot, slot, estate);
+		if (resultRelationDesc->rd_att->constr)
+			ExecConstraints(resultRelInfo, slot, slot, estate, false);
 
 		/*
 		 * replace the heap tuple
@@ -1313,7 +1402,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
 	 */
 
 	/* Execute UPDATE with projection */
-	*returning = ExecUpdate(&tuple.t_self, NULL,
+	*returning = ExecUpdate(mtstate, &tuple.t_self, NULL,
 							mtstate->mt_conflproj, planSlot,
 							&mtstate->mt_epqstate, mtstate->ps.state,
 							canSetTag);
@@ -1583,12 +1672,13 @@ ExecModifyTable(ModifyTableState *node)
 								  estate, node->canSetTag);
 				break;
 			case CMD_UPDATE:
-				slot = ExecUpdate(tupleid, oldtuple, slot, planSlot,
+				slot = ExecUpdate(node, tupleid, oldtuple, slot, planSlot,
 								&node->mt_epqstate, estate, node->canSetTag);
 				break;
 			case CMD_DELETE:
 				slot = ExecDelete(tupleid, oldtuple, planSlot,
-								&node->mt_epqstate, estate, node->canSetTag);
+								&node->mt_epqstate, estate,
+								NULL, true, node->canSetTag);
 				break;
 			default:
 				elog(ERROR, "unknown operation");
@@ -1790,44 +1880,10 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 	}
 
 	/*
-	 * Build WITH CHECK OPTION constraints for each leaf partition rel.
-	 * Note that we didn't build the withCheckOptionList for each partition
-	 * within the planner, but simple translation of the varattnos for each
-	 * partition will suffice.  This only occurs for the INSERT case;
-	 * UPDATE/DELETE cases are handled above.
+	 * Build WITH CHECK OPTION constraints for each leaf partition rel. This
+	 * only occurs for the INSERT case; UPDATE/DELETE are handled above.
 	 */
-	if (node->withCheckOptionLists != NIL && mtstate->mt_num_partitions > 0)
-	{
-		List		*wcoList;
-
-		Assert(operation == CMD_INSERT);
-		resultRelInfo = mtstate->mt_partitions;
-		wcoList = linitial(node->withCheckOptionLists);
-		for (i = 0; i < mtstate->mt_num_partitions; i++)
-		{
-			Relation	partrel = resultRelInfo->ri_RelationDesc;
-			List	   *mapped_wcoList;
-			List	   *wcoExprs = NIL;
-			ListCell   *ll;
-
-			/* varno = node->nominalRelation */
-			mapped_wcoList = map_partition_varattnos(wcoList,
-													 node->nominalRelation,
-													 partrel, rel);
-			foreach(ll, mapped_wcoList)
-			{
-				WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
-				ExprState  *wcoExpr = ExecInitQual((List *) wco->qual,
-												   mtstate->mt_plans[i]);
-
-				wcoExprs = lappend(wcoExprs, wcoExpr);
-			}
-
-			resultRelInfo->ri_WithCheckOptions = mapped_wcoList;
-			resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
-			resultRelInfo++;
-		}
-	}
+	ExecInitPartitionWithCheckOptions(mtstate, rel);
 
 	/*
 	 * Initialize RETURNING projections if needed.
@@ -1836,7 +1892,6 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 	{
 		TupleTableSlot *slot;
 		ExprContext *econtext;
-		List	   *returningList;
 
 		/*
 		 * Initialize result tuple slot and assign its rowtype using the first
@@ -1870,28 +1925,10 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 		}
 
 		/*
-		 * Build a projection for each leaf partition rel.  Note that we
-		 * didn't build the returningList for each partition within the
-		 * planner, but simple translation of the varattnos for each partition
-		 * will suffice.  This only occurs for the INSERT case; UPDATE/DELETE
-		 * are handled above.
+		 * Build a projection for each leaf partition rel. This only occurs for
+		 * the INSERT case; UPDATE/DELETE are handled above.
 		 */
-		resultRelInfo = mtstate->mt_partitions;
-		returningList = linitial(node->returningLists);
-		for (i = 0; i < mtstate->mt_num_partitions; i++)
-		{
-			Relation	partrel = resultRelInfo->ri_RelationDesc;
-			List	   *rlist;
-
-			/* varno = node->nominalRelation */
-			rlist = map_partition_varattnos(returningList,
-											node->nominalRelation,
-											partrel, rel);
-			resultRelInfo->ri_projectReturning =
-				ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
-									 resultRelInfo->ri_RelationDesc->rd_att);
-			resultRelInfo++;
-		}
+		ExecInitPartitionReturningProjection(mtstate, rel);
 	}
 	else
 	{
@@ -2118,6 +2155,104 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 }
 
 /* ----------------------------------------------------------------
+ *		ExecInitPartitionWithCheckOptions
+ *
+ * Build WITH CHECK OPTION constraints for each leaf partition rel.
+ * Note that we don't build the withCheckOptionList for each partition
+ * within the planner, but simple translation of the varattnos for each
+ * partition suffices. This only occurs for the INSERT case; UPDATE/DELETE
+ * cases are handled separately.
+ * ----------------------------------------------------------------
+ */
+static void
+ExecInitPartitionWithCheckOptions(ModifyTableState *mtstate, Relation root_rel)
+{
+	ResultRelInfo  *resultRelInfo = mtstate->mt_partitions;
+	ModifyTable	   *node = (ModifyTable *) mtstate->ps.plan;
+	List		*wcoList;
+	int			i;
+
+	if (node->withCheckOptionLists == NIL || mtstate->mt_num_partitions == 0)
+		return;
+
+	wcoList = linitial(node->withCheckOptionLists);
+	for (i = 0; i < mtstate->mt_num_partitions; i++)
+	{
+		Relation	partrel = resultRelInfo->ri_RelationDesc;
+		List	   *mapped_wcoList;
+		List	   *wcoExprs = NIL;
+		ListCell   *ll;
+
+		/* varno = node->nominalRelation */
+		mapped_wcoList = map_partition_varattnos(wcoList,
+												 node->nominalRelation,
+												 partrel, root_rel);
+		foreach(ll, mapped_wcoList)
+		{
+			WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
+			ExprState  *wcoExpr = ExecInitQual((List *) wco->qual,
+										   mtstate->mt_plans[i]);
+
+			wcoExprs = lappend(wcoExprs, wcoExpr);
+		}
+
+		resultRelInfo->ri_WithCheckOptions = mapped_wcoList;
+		resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
+		resultRelInfo++;
+	}
+}
+
+/* ----------------------------------------------------------------
+ *		ExecInitPartitionReturningProjection
+ *
+ * Initialize stuff required to handle RETURNING for leaf partitions.
+ * We don't build the returningList for each partition within the planner, but
+ * simple translation of the varattnos for each partition suffices.  This
+ * actually is helpful only for INSERT case; UPDATE/DELETE are handled
+ * differently.
+ * ----------------------------------------------------------------
+ */
+static void
+ExecInitPartitionReturningProjection(ModifyTableState *mtstate, Relation root_rel)
+{
+	ResultRelInfo  *resultRelInfo = mtstate->mt_partitions;
+	ModifyTable	   *node = (ModifyTable *) mtstate->ps.plan;
+	TupleTableSlot *returning_slot = mtstate->ps.ps_ResultTupleSlot;
+	List		   *returningList;
+	int				i;
+
+	/*
+	 * If there is no returning clause, or if we have already initialized the
+	 * returning projection info, there is nothing to be done.
+	 */
+	if (node->returningLists == NIL ||
+		(resultRelInfo && resultRelInfo->ri_projectReturning != NULL) ||
+		mtstate->mt_num_partitions == 0)
+		return;
+
+	returningList = linitial(node->returningLists);
+	for (i = 0; i < mtstate->mt_num_partitions; i++)
+	{
+		Relation	partrel = resultRelInfo->ri_RelationDesc;
+		List	   *rlist;
+
+		/* varno = node->nominalRelation */
+		rlist = map_partition_varattnos(returningList,
+										node->nominalRelation,
+										partrel, root_rel);
+		resultRelInfo->ri_projectReturning =
+			ExecBuildProjectionInfo(rlist,
+									mtstate->ps.ps_ExprContext,
+									returning_slot,
+									&mtstate->ps,
+									resultRelInfo->ri_RelationDesc->rd_att);
+		resultRelInfo++;
+	}
+}
+
+/* ----------------------------------------------------------------
  *		ExecEndModifyTable
  *
  *		Shuts down the plan.
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index d3849b9..102fc97 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -187,7 +187,9 @@ extern ResultRelInfo *ExecGetTriggerResultRel(EState *estate, Oid relid);
 extern bool ExecContextForcesOids(PlanState *planstate, bool *hasoids);
 extern void ExecConstraints(ResultRelInfo *resultRelInfo,
 				TupleTableSlot *slot, TupleTableSlot *orig_slot,
-				EState *estate);
+				EState *estate, bool check_partition_constraint);
+extern void ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo,
+									TupleTableSlot *orig_slot, EState *estate);
 extern void ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
 					 TupleTableSlot *slot, EState *estate);
 extern LockTupleMode ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo);
@@ -216,6 +218,9 @@ extern int ExecFindPartition(ResultRelInfo *resultRelInfo,
 				  PartitionDispatch *pd,
 				  TupleTableSlot *slot,
 				  EState *estate);
+extern bool ExecPartitionCheck(ResultRelInfo *resultRelInfo,
+							TupleTableSlot *slot,
+							EState *estate);
 
 #define EvalPlanQualSetSlot(epqstate, slot)  ((epqstate)->origslot = (slot))
 extern void EvalPlanQualFetchRowMarks(EPQState *epqstate);
diff --git a/src/test/regress/expected/update.out b/src/test/regress/expected/update.out
index 9366f04..a56afab 100644
--- a/src/test/regress/expected/update.out
+++ b/src/test/regress/expected/update.out
@@ -198,25 +198,121 @@ INSERT INTO upsert_test VALUES (1, 'Bat') ON CONFLICT(a)
 
 DROP TABLE update_test;
 DROP TABLE upsert_test;
--- update to a partition should check partition bound constraint for the new tuple
-create table range_parted (
+-- update to a partition should check partition bound constraint for the new tuple.
+-- If the partition key is updated, the row should be moved to the appropriate
+-- partition. Updatable views using partitions should enforce the check options
+-- for the rows that have been moved.
+CREATE TABLE range_parted (
 	a text,
-	b int
+	b int,
+	c int
 ) partition by range (a, b);
+CREATE VIEW upview AS SELECT * FROM range_parted WHERE c > 120 WITH CHECK OPTION;
 create table part_a_1_a_10 partition of range_parted for values from ('a', 1) to ('a', 10);
 create table part_a_10_a_20 partition of range_parted for values from ('a', 10) to ('a', 20);
 create table part_b_1_b_10 partition of range_parted for values from ('b', 1) to ('b', 10);
-create table part_b_10_b_20 partition of range_parted for values from ('b', 10) to ('b', 20);
+create table part_b_10_b_20 partition of range_parted for values from ('b', 10) to ('b', 20) partition by range (c);
+create table part_c_1_100 partition of part_b_10_b_20 for values from (1) to (100);
+create table part_c_100_200 partition of part_b_10_b_20 for values from (100) to (200);
 insert into part_a_1_a_10 values ('a', 1);
-insert into part_b_10_b_20 values ('b', 10);
--- fail
-update part_a_1_a_10 set a = 'b' where a = 'a';
-ERROR:  new row for relation "part_a_1_a_10" violates partition constraint
-DETAIL:  Failing row contains (b, 1).
-update range_parted set b = b - 1 where b = 10;
-ERROR:  new row for relation "part_b_10_b_20" violates partition constraint
-DETAIL:  Failing row contains (b, 9).
--- ok
-update range_parted set b = b + 1 where b = 10;
+insert into part_a_10_a_20 values ('a', 10, 200);
+insert into part_c_1_100 values ('b', 12, 96);
+insert into part_c_1_100 values ('b', 13, 97);
+insert into part_c_100_200 values ('b', 15, 105);
+insert into part_c_100_200 values ('b', 17, 105);
+-- fail (row movement happens only within the partition subtree) :
+update part_c_1_100 set c = c + 20 where c = 96;
+ERROR:  new row for relation "part_c_1_100" violates partition constraint
+DETAIL:  Failing row contains (b, 12, 116).
+-- ok, no row found
+update part_c_1_100 set c = c + 20 where c = 98;
+-- ok (row movement)
+update part_b_10_b_20 set c = c + 20;
+select * from part_c_1_100 order by 1, 2, 3;
+ a | b | c 
+---+---+---
+(0 rows)
+
+select * from part_c_100_200 order by 1, 2, 3;
+ a | b  |  c  
+---+----+-----
+ b | 12 | 116
+ b | 13 | 117
+ b | 15 | 125
+ b | 17 | 125
+(4 rows)
+
+-- fail (row movement happens only within the partition subtree) :
+update part_b_10_b_20 set b = b - 6 where c > 116 returning *;
+ERROR:  new row for relation "part_c_100_200" violates partition constraint
+DETAIL:  Failing row contains (b, 7, 117).
+-- ok (row movement, with a subset of rows moved into a different partition)
+update range_parted set b = b - 6 where c > 116 returning a, b + c;
+ a | ?column? 
+---+----------
+ a |      204
+ b |      124
+ b |      134
+ b |      136
+(4 rows)
+
+select * from part_a_1_a_10 order by 1, 2, 3;
+ a | b |  c  
+---+---+-----
+ a | 1 |    
+ a | 4 | 200
+(2 rows)
+
+select * from part_a_10_a_20 order by 1, 2, 3;
+ a | b | c 
+---+---+---
+(0 rows)
+
+select * from part_b_1_b_10 order by 1, 2, 3;
+ a | b |  c  
+---+---+-----
+ b | 7 | 117
+ b | 9 | 125
+(2 rows)
+
+select * from part_c_1_100 order by 1, 2, 3;
+ a | b | c 
+---+---+---
+(0 rows)
+
+select * from part_c_100_200 order by 1, 2, 3;
+ a | b  |  c  
+---+----+-----
+ b | 11 | 125
+ b | 12 | 116
+(2 rows)
+
+-- update partition key using updatable view.
+-- succeeds
+update upview set c = 199 where b = 4;
+-- fail, check option violation
+update upview set c = 120 where b = 4;
+ERROR:  new row violates check option for view "upview"
+DETAIL:  Failing row contains (a, 4, 120).
+-- fail, row movement with check option violation
+update upview set a = 'b', b = 15, c = 120 where b = 4;
+ERROR:  new row violates check option for view "upview"
+DETAIL:  Failing row contains (b, 15, 120).
+-- succeeds, row movement, check option passes
+update upview set a = 'b', b = 15 where b = 4;
+select * from part_a_1_a_10 order by 1, 2, 3;
+ a | b | c 
+---+---+---
+ a | 1 |  
+(1 row)
+
+select * from part_c_100_200 order by 1, 2, 3;
+ a | b  |  c  
+---+----+-----
+ b | 11 | 125
+ b | 12 | 116
+ b | 15 | 199
+(3 rows)
+
 -- cleanup
+drop view upview;
 drop table range_parted;
diff --git a/src/test/regress/sql/update.sql b/src/test/regress/sql/update.sql
index 6637119..cda9906 100644
--- a/src/test/regress/sql/update.sql
+++ b/src/test/regress/sql/update.sql
@@ -107,23 +107,61 @@ INSERT INTO upsert_test VALUES (1, 'Bat') ON CONFLICT(a)
 DROP TABLE update_test;
 DROP TABLE upsert_test;
 
--- update to a partition should check partition bound constraint for the new tuple
-create table range_parted (
+-- update to a partition should check partition bound constraint for the new tuple.
+-- If the partition key is updated, the row should be moved to the appropriate
+-- partition. Updatable views using partitions should enforce the check options
+-- for the rows that have been moved.
+CREATE TABLE range_parted (
 	a text,
-	b int
+	b int,
+	c int
 ) partition by range (a, b);
+CREATE VIEW upview AS SELECT * FROM range_parted WHERE c > 120 WITH CHECK OPTION;
+
 create table part_a_1_a_10 partition of range_parted for values from ('a', 1) to ('a', 10);
 create table part_a_10_a_20 partition of range_parted for values from ('a', 10) to ('a', 20);
 create table part_b_1_b_10 partition of range_parted for values from ('b', 1) to ('b', 10);
-create table part_b_10_b_20 partition of range_parted for values from ('b', 10) to ('b', 20);
+create table part_b_10_b_20 partition of range_parted for values from ('b', 10) to ('b', 20) partition by range (c);
+create table part_c_1_100 partition of part_b_10_b_20 for values from (1) to (100);
+create table part_c_100_200 partition of part_b_10_b_20 for values from (100) to (200);
 insert into part_a_1_a_10 values ('a', 1);
-insert into part_b_10_b_20 values ('b', 10);
-
--- fail
-update part_a_1_a_10 set a = 'b' where a = 'a';
-update range_parted set b = b - 1 where b = 10;
--- ok
-update range_parted set b = b + 1 where b = 10;
-
+insert into part_a_10_a_20 values ('a', 10, 200);
+insert into part_c_1_100 values ('b', 12, 96);
+insert into part_c_1_100 values ('b', 13, 97);
+insert into part_c_100_200 values ('b', 15, 105);
+insert into part_c_100_200 values ('b', 17, 105);
+
+-- fail (row movement happens only within the partition subtree) :
+update part_c_1_100 set c = c + 20 where c = 96;
+-- ok, no row found
+update part_c_1_100 set c = c + 20 where c = 98;
+-- ok (row movement)
+update part_b_10_b_20 set c = c + 20;
+select * from part_c_1_100 order by 1, 2, 3;
+select * from part_c_100_200 order by 1, 2, 3;
+
+-- fail (row movement happens only within the partition subtree) :
+update part_b_10_b_20 set b = b - 6 where c > 116 returning *;
+-- ok (row movement, with a subset of rows moved into a different partition)
+update range_parted set b = b - 6 where c > 116 returning a, b + c;
+
+select * from part_a_1_a_10 order by 1, 2, 3;
+select * from part_a_10_a_20 order by 1, 2, 3;
+select * from part_b_1_b_10 order by 1, 2, 3;
+select * from part_c_1_100 order by 1, 2, 3;
+select * from part_c_100_200 order by 1, 2, 3;
+
+-- update partition key using updatable view.
+
+-- succeeds
+update upview set c = 199 where b = 4;
+-- fail, check option violation
+update upview set c = 120 where b = 4;
+-- fail, row movement with check option violation
+update upview set a = 'b', b = 15, c = 120 where b = 4;
+-- succeeds, row movement, check option passes
+update upview set a = 'b', b = 15 where b = 4;
+select * from part_a_1_a_10 order by 1, 2, 3;
+select * from part_c_100_200 order by 1, 2, 3;
 -- cleanup
+drop view upview;
 drop table range_parted;
