heap_lock_tuple() has the following comment at the top:

 * In the failure cases, the routine fills *hufd with the tuple's t_ctid,
 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax
 * (the last only for HeapTupleSelfUpdated, since we
 * cannot obtain cmax from a combocid generated by another transaction).
 * See comments for struct HeapUpdateFailureData for additional info.
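
For reference, that struct boils down to just this (field comments are mine,
paraphrasing the above):

	typedef struct HeapUpdateFailureData
	{
		ItemPointerData	ctid;	/* t_ctid of the target tuple */
		TransactionId	xmax;	/* xmax, with any MultiXact resolved */
		CommandId	cmax;	/* only valid for HeapTupleSelfUpdated */
	} HeapUpdateFailureData;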

With the patch as submitted, this struct is not filled in the
HeapTupleWouldBlock case.  I'm not sure this is okay, though I admit the
only caller that passes LockWaitSkip doesn't care, so maybe we could
just ignore the issue (after properly modifying the comment).  It seems
easy to add a LockBuffer() and "goto failed" rather than a return; that
would make that failure case conform to HeapTupleUpdated and
HeapTupleSelfUpdated.  OTOH it might be pointless to worry about what
would be essentially dead code currently ...
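
In any case, that's what the attached version does; the LockWaitSkip branch
for the tuple lock ends up looking like this:

	case LockWaitSkip:
		if (!ConditionalLockTupleTuplock(relation, tid, mode))
		{
			result = HeapTupleWouldBlock;
			/* recovery code at "failed:" expects buffer lock to be held */
			LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
			goto failed;
		}
		break;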


Did you consider heap_lock_updated_tuple?  A rationale for saying it
doesn't need to pay attention to the wait policy is: if you're trying to
lock-skip-locked an updated tuple, then you either skip it because its
updater is running, or you return it because it's no longer running; and
if you return it, it is not possible for the updater to be locking the
updated version.  However, what if there's a third transaction that
locked the updated version?  It might be difficult to hit this case, or to
construct an isolationtester spec file for it, since the window you'd have
to race into is quite narrow.

I also pgindented.

-- 
Álvaro Herrera                http://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Training & Services
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 31073bc..5792036 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -109,8 +109,7 @@ static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 in
 				Relation rel, ItemPointer ctid, XLTW_Oper oper,
 				int *remaining);
 static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
-						   uint16 infomask, Relation rel, ItemPointer ctid,
-						   XLTW_Oper oper, int *remaining);
+						   uint16 infomask, Relation rel, int *remaining);
 static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
 static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified,
 					   bool *copy);
@@ -2818,7 +2817,7 @@ l1:
 		if (result == HeapTupleSelfUpdated)
 			hufd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
 		else
-			hufd->cmax = 0;		/* for lack of an InvalidCommandId value */
+			hufd->cmax = InvalidCommandId;
 		UnlockReleaseBuffer(buffer);
 		if (have_tuple_lock)
 			UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
@@ -3415,7 +3414,7 @@ l2:
 		if (result == HeapTupleSelfUpdated)
 			hufd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
 		else
-			hufd->cmax = 0;		/* for lack of an InvalidCommandId value */
+			hufd->cmax = InvalidCommandId;
 		UnlockReleaseBuffer(buffer);
 		if (have_tuple_lock)
 			UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
@@ -4222,22 +4221,27 @@ l3:
 		 */
 		if (!have_tuple_lock)
 		{
-			if (wait_policy == LockWaitBlock)
+			switch (wait_policy)
 			{
-				LockTupleTuplock(relation, tid, mode);
-			}
-			else if (wait_policy == LockWaitError)
-			{
-				if (!ConditionalLockTupleTuplock(relation, tid, mode))
-					ereport(ERROR,
-							(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
-					errmsg("could not obtain lock on row in relation \"%s\"",
-						   RelationGetRelationName(relation))));
-			}
-			else /* wait_policy == LockWaitSkip */
-			{
-				if (!ConditionalLockTupleTuplock(relation, tid, mode))
-					return HeapTupleWouldBlock;
+				case LockWaitBlock:
+					LockTupleTuplock(relation, tid, mode);
+					break;
+				case LockWaitSkip:
+					if (!ConditionalLockTupleTuplock(relation, tid, mode))
+					{
+						result = HeapTupleWouldBlock;
+						/* recovery code expects to have buffer lock held */
+						LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+						goto failed;
+					}
+					break;
+				case LockWaitError:
+					if (!ConditionalLockTupleTuplock(relation, tid, mode))
+						ereport(ERROR,
+								(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
+								 errmsg("could not obtain lock on row in relation \"%s\"",
+										RelationGetRelationName(relation))));
+					break;
 			}
 			have_tuple_lock = true;
 		}
@@ -4441,34 +4445,34 @@ l3:
 				if (status >= MultiXactStatusNoKeyUpdate)
 					elog(ERROR, "invalid lock mode in heap_lock_tuple");
 
-				/* wait for multixact to end */
-				if (wait_policy == LockWaitBlock)
-				{
-					MultiXactIdWait((MultiXactId) xwait, status, infomask,
-									relation, &tuple->t_data->t_ctid,
-									XLTW_Lock, NULL);
-				}
-				else if (wait_policy == LockWaitError)
-				{
-					if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
-												  status, infomask, relation,
-													&tuple->t_data->t_ctid,
-													XLTW_Lock, NULL))
-						ereport(ERROR,
-								(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
-								 errmsg("could not obtain lock on row in relation \"%s\"",
-										RelationGetRelationName(relation))));
-				}
-				else /* wait_policy == LockWaitSkip */
+				/* wait for multixact to end, or die trying */
+				switch (wait_policy)
 				{
-					if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
-													status, infomask, relation,
-													&tuple->t_data->t_ctid,
-													XLTW_Lock, NULL))
-					{
-						UnlockTupleTuplock(relation, tid, mode);
-						return HeapTupleWouldBlock;
-					}
+					case LockWaitBlock:
+						MultiXactIdWait((MultiXactId) xwait, status, infomask,
+										relation, &tuple->t_data->t_ctid, XLTW_Lock, NULL);
+						break;
+					case LockWaitSkip:
+						if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
+														status, infomask, relation,
+														NULL))
+						{
+							result = HeapTupleWouldBlock;
+							/* recovery code expects to have buffer lock held */
+							LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+							goto failed;
+						}
+						break;
+					case LockWaitError:
+						if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
+														status, infomask, relation,
+														NULL))
+							ereport(ERROR,
+									(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
+									 errmsg("could not obtain lock on row in relation \"%s\"",
+											RelationGetRelationName(relation))));
+
+						break;
 				}
 
 				/* if there are updates, follow the update chain */
@@ -4514,27 +4518,29 @@ l3:
 			}
 			else
 			{
-				/* wait for regular transaction to end */
-				if (wait_policy == LockWaitBlock)
+				/* wait for regular transaction to end, or die trying */
+				switch (wait_policy)
 				{
-					XactLockTableWait(xwait, relation, &tuple->t_data->t_ctid,
-									  XLTW_Lock);
-				}
-				else if (wait_policy == LockWaitError)
-				{
-					if (!ConditionalXactLockTableWait(xwait))
-						ereport(ERROR,
-								(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
-								 errmsg("could not obtain lock on row in relation \"%s\"",
-										RelationGetRelationName(relation))));
-				}
-				else /* wait_policy == LockWaitSkip */
-				{
-					if (!ConditionalXactLockTableWait(xwait))
-					{
-						UnlockTupleTuplock(relation, tid, mode);
-						return HeapTupleWouldBlock;
-					}
+					case LockWaitBlock:
+						XactLockTableWait(xwait, relation, &tuple->t_data->t_ctid,
+										  XLTW_Lock);
+						break;
+					case LockWaitSkip:
+						if (!ConditionalXactLockTableWait(xwait))
+						{
+							result = HeapTupleWouldBlock;
+							/* recovery code expects to have buffer lock held */
+							LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+							goto failed;
+						}
+						break;
+					case LockWaitError:
+						if (!ConditionalXactLockTableWait(xwait))
+							ereport(ERROR,
+									(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
+									 errmsg("could not obtain lock on row in relation \"%s\"",
+											RelationGetRelationName(relation))));
+						break;
 				}
 
 				/* if there are updates, follow the update chain */
@@ -4597,14 +4603,15 @@ l3:
 failed:
 	if (result != HeapTupleMayBeUpdated)
 	{
-		Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated);
+		Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated ||
+			   result == HeapTupleWouldBlock);
 		Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
 		hufd->ctid = tuple->t_data->t_ctid;
 		hufd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
 		if (result == HeapTupleSelfUpdated)
 			hufd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
 		else
-			hufd->cmax = 0;		/* for lack of an InvalidCommandId value */
+			hufd->cmax = InvalidCommandId;
 		LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
 		if (have_tuple_lock)
 			UnlockTupleTuplock(relation, tid, mode);
@@ -6277,11 +6284,10 @@ MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
  */
 static bool
 ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
-						   uint16 infomask, Relation rel, ItemPointer ctid,
-						   XLTW_Oper oper, int *remaining)
+						   uint16 infomask, Relation rel, int *remaining)
 {
 	return Do_MultiXactIdWait(multi, status, infomask, true,
-							  rel, ctid, oper, remaining);
+							  rel, NULL, XLTW_None, remaining);
 }
 
 /*
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 71df0c2..0f6e9b0 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -2372,13 +2372,13 @@ transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc,
 			switch (rte->rtekind)
 			{
 				case RTE_RELATION:
-					applyLockingClause(qry, i,
-									   lc->strength, lc->waitPolicy, pushedDown);
+					applyLockingClause(qry, i, lc->strength, lc->waitPolicy,
+									   pushedDown);
 					rte->requiredPerms |= ACL_SELECT_FOR_UPDATE;
 					break;
 				case RTE_SUBQUERY:
-					applyLockingClause(qry, i,
-									   lc->strength, lc->waitPolicy, pushedDown);
+					applyLockingClause(qry, i, lc->strength, lc->waitPolicy,
+									   pushedDown);
 
 					/*
 					 * FOR UPDATE/SHARE of subquery is propagated to all of
@@ -2424,15 +2424,13 @@ transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc,
 					switch (rte->rtekind)
 					{
 						case RTE_RELATION:
-							applyLockingClause(qry, i,
-											   lc->strength, lc->waitPolicy,
-											   pushedDown);
+							applyLockingClause(qry, i, lc->strength,
+											   lc->waitPolicy, pushedDown);
 							rte->requiredPerms |= ACL_SELECT_FOR_UPDATE;
 							break;
 						case RTE_SUBQUERY:
-							applyLockingClause(qry, i,
-											   lc->strength, lc->waitPolicy,
-											   pushedDown);
+							applyLockingClause(qry, i, lc->strength,
+											   lc->waitPolicy, pushedDown);
 							/* see comment above */
 							transformLockingClause(pstate, rte->subquery,
 												   allrels, true);
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 9dbfb40..6d4edcd 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -62,8 +62,8 @@ static void rewriteValuesRTE(RangeTblEntry *rte, Relation target_relation,
 static void rewriteTargetListUD(Query *parsetree, RangeTblEntry *target_rte,
 					Relation target_relation);
 static void markQueryForLocking(Query *qry, Node *jtnode,
-								LockClauseStrength strength, LockWaitPolicy waitPolicy,
-								bool pushedDown);
+					LockClauseStrength strength, LockWaitPolicy waitPolicy,
+					bool pushedDown);
 static List *matchLocks(CmdType event, RuleLock *rulelocks,
 		   int varno, Query *parsetree);
 static Query *fireRIRrules(Query *parsetree, List *activeRIRs,
diff --git a/src/include/parser/analyze.h b/src/include/parser/analyze.h
index 383df42..f5da6bf 100644
--- a/src/include/parser/analyze.h
+++ b/src/include/parser/analyze.h
@@ -39,7 +39,7 @@ extern bool analyze_requires_snapshot(Node *parseTree);
 extern char *LCS_asString(LockClauseStrength strength);
 extern void CheckSelectLocking(Query *qry, LockClauseStrength strength);
 extern void applyLockingClause(Query *qry, Index rtindex,
-							   LockClauseStrength strength,
-							   LockWaitPolicy waitPolicy, bool pushedDown);
+				   LockClauseStrength strength,
+				   LockWaitPolicy waitPolicy, bool pushedDown);
 
 #endif   /* ANALYZE_H */