Alvaro Herrera wrote:
> Boszormenyi Zoltan wrote:
>
>   
>> The vague consensus on syntax options was that both the
>> 'lock_timeout' GUC and a WAIT [N] extension (wherever NOWAIT
>> is allowed) should be implemented.
>>
>> The behaviour would be that an N-second timeout is applied to
>> every lock that the statement would take.
>>     
>
> In http://archives.postgresql.org/message-id/291.1242053...@sss.pgh.pa.us
> Tom argues that lock_timeout should be sufficient.  I'm not sure what
> WAIT [N] buys.

Okay, we implemented only the lock_timeout GUC.
The patch is attached, hopefully in an acceptable form.
Documentation is included in the patch. lock_timeout
works the same way as statement_timeout: it takes a
value in milliseconds, and 0 disables the timeout.
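
To illustrate the intended behaviour, here is a minimal usage sketch
(the table name and the timeout value are made up; the error text is
the one the patch raises for LOCK TABLE):

    -- session 1: take and hold a conflicting lock
    BEGIN;
    LOCK TABLE test_tbl IN ACCESS EXCLUSIVE MODE;

    -- session 2: give up after two seconds instead of waiting forever
    SET lock_timeout = 2000;   -- milliseconds; 0 (the default) disables it
    LOCK TABLE test_tbl IN ACCESS EXCLUSIVE MODE;
    -- fails after about two seconds with:
    -- ERROR:  could not obtain lock on relation "test_tbl"

The same timeout applies to row-level locks taken by SELECT ... FOR
UPDATE / FOR SHARE, and to each lock the statement has to wait for.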

Best regards,
Zoltán Böszörményi

-- 
Bible has answers for everything. Proof:
"But let your communication be, Yea, yea; Nay, nay: for whatsoever is more
than these cometh of evil." (Matthew 5:37) - basics of digital technology.
"May your kingdom come" - superficial description of plate tectonics

----------------------------------
Zoltán Böszörményi
Cybertec Schönig & Schönig GmbH
http://www.postgresql.at/

diff -dcrpN pgsql.orig/doc/src/sgml/config.sgml pgsql/doc/src/sgml/config.sgml
*** pgsql.orig/doc/src/sgml/config.sgml	2009-07-17 07:50:48.000000000 +0200
--- pgsql/doc/src/sgml/config.sgml	2009-07-30 13:12:07.000000000 +0200
*************** COPY postgres_log FROM '/full/path/to/lo
*** 4018,4023 ****
--- 4018,4046 ----
        </listitem>
       </varlistentry>
  
+      <varlistentry id="guc-lock-timeout" xreflabel="lock_timeout">
+       <term><varname>lock_timeout</varname> (<type>integer</type>)</term>
+       <indexterm>
+        <primary><varname>lock_timeout</> configuration parameter</primary>
+       </indexterm>
+       <listitem>
+        <para>
+         Abort any statement that tries to lock rows or tables and cannot
+         acquire the lock within the specified number of milliseconds.  The
+         timeout is applied separately to each lock the statement tries to take.
+         If <varname>log_min_error_statement</> is set to <literal>ERROR</> or
+         lower, the statement that timed out will also be logged.
+         A value of zero (the default) turns off the limitation.
+        </para>
+ 
+        <para>
+         Setting <varname>lock_timeout</> in
+         <filename>postgresql.conf</> is not recommended because it
+         affects all sessions.
+        </para>
+       </listitem>
+      </varlistentry>
+ 
       <varlistentry id="guc-vacuum-freeze-table-age" xreflabel="vacuum_freeze_table_age">
        <term><varname>vacuum_freeze_table_age</varname> (<type>integer</type>)</term>
        <indexterm>
diff -dcrpN pgsql.orig/doc/src/sgml/ref/lock.sgml pgsql/doc/src/sgml/ref/lock.sgml
*** pgsql.orig/doc/src/sgml/ref/lock.sgml	2009-01-16 11:44:56.000000000 +0100
--- pgsql/doc/src/sgml/ref/lock.sgml	2009-07-30 13:29:07.000000000 +0200
*************** where <replaceable class="PARAMETER">loc
*** 39,46 ****
     <literal>NOWAIT</literal> is specified, <command>LOCK
     TABLE</command> does not wait to acquire the desired lock: if it
     cannot be acquired immediately, the command is aborted and an
!    error is emitted.  Once obtained, the lock is held for the
!    remainder of the current transaction.  (There is no <command>UNLOCK
     TABLE</command> command; locks are always released at transaction
     end.)
    </para>
--- 39,49 ----
     <literal>NOWAIT</literal> is specified, <command>LOCK
     TABLE</command> does not wait to acquire the desired lock: if it
     cannot be acquired immediately, the command is aborted and an
!    error is emitted. Similarly, if <varname>lock_timeout</varname> is set to
!    a value greater than 0 and the lock cannot be acquired within the
!    specified number of milliseconds, the command is aborted and an error
!    is emitted. Once obtained, the lock is held for the remainder of
!    the current transaction.  (There is no <command>UNLOCK
     TABLE</command> command; locks are always released at transaction
     end.)
    </para>
diff -dcrpN pgsql.orig/doc/src/sgml/ref/select.sgml pgsql/doc/src/sgml/ref/select.sgml
*** pgsql.orig/doc/src/sgml/ref/select.sgml	2009-05-04 11:00:49.000000000 +0200
--- pgsql/doc/src/sgml/ref/select.sgml	2009-07-30 13:36:57.000000000 +0200
*************** FOR SHARE [ OF <replaceable class="param
*** 1101,1106 ****
--- 1101,1114 ----
     </para>
  
     <para>
+     If the <literal>NOWAIT</> option is not specified, <varname>lock_timeout</varname>
+     is set to a value greater than 0, and the lock cannot be acquired within
+     the specified number of milliseconds, the command reports an error after
+     timing out, rather than waiting indefinitely. The note in the previous
+     paragraph applies to <varname>lock_timeout</varname> as well.
+    </para>
+ 
+    <para>
      <literal>FOR SHARE</literal> behaves similarly, except that it
      acquires a shared rather than exclusive lock on each retrieved
      row.  A shared lock blocks other transactions from performing
diff -dcrpN pgsql.orig/src/backend/access/heap/heapam.c pgsql/src/backend/access/heap/heapam.c
*** pgsql.orig/src/backend/access/heap/heapam.c	2009-06-13 18:24:46.000000000 +0200
--- pgsql/src/backend/access/heap/heapam.c	2009-07-30 12:29:17.000000000 +0200
*************** l3:
*** 3142,3157 ****
  		 */
  		if (!have_tuple_lock)
  		{
  			if (nowait)
! 			{
! 				if (!ConditionalLockTuple(relation, tid, tuple_lock_type))
! 					ereport(ERROR,
  							(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
  					errmsg("could not obtain lock on row in relation \"%s\"",
  						   RelationGetRelationName(relation))));
! 			}
! 			else
! 				LockTuple(relation, tid, tuple_lock_type);
  			have_tuple_lock = true;
  		}
  
--- 3142,3160 ----
  		 */
  		if (!have_tuple_lock)
  		{
+ 			bool	result;
+ 
  			if (nowait)
! 				result = ConditionalLockTuple(relation, tid, tuple_lock_type);
! 			else
! 				result = TimedLockTuple(relation, tid, tuple_lock_type);
! 
! 			if (!result)
! 				ereport(ERROR,
  							(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
  					errmsg("could not obtain lock on row in relation \"%s\"",
  						   RelationGetRelationName(relation))));
! 
  			have_tuple_lock = true;
  		}
  
*************** l3:
*** 3172,3188 ****
  		}
  		else if (infomask & HEAP_XMAX_IS_MULTI)
  		{
  			/* wait for multixact to end */
  			if (nowait)
! 			{
! 				if (!ConditionalMultiXactIdWait((MultiXactId) xwait))
! 					ereport(ERROR,
  							(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
  					errmsg("could not obtain lock on row in relation \"%s\"",
  						   RelationGetRelationName(relation))));
- 			}
- 			else
- 				MultiXactIdWait((MultiXactId) xwait);
  
  			LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
  
--- 3175,3191 ----
  		}
  		else if (infomask & HEAP_XMAX_IS_MULTI)
  		{
+ 			bool	result;
  			/* wait for multixact to end */
  			if (nowait)
! 				result = ConditionalMultiXactIdWait((MultiXactId) xwait);
! 			else
! 				result = TimedMultiXactIdWait((MultiXactId) xwait);
! 			if (!result)
! 				ereport(ERROR,
  							(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
  					errmsg("could not obtain lock on row in relation \"%s\"",
  						   RelationGetRelationName(relation))));
  
  			LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
  
*************** l3:
*** 3207,3223 ****
  		}
  		else
  		{
  			/* wait for regular transaction to end */
  			if (nowait)
! 			{
! 				if (!ConditionalXactLockTableWait(xwait))
! 					ereport(ERROR,
  							(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
  					errmsg("could not obtain lock on row in relation \"%s\"",
  						   RelationGetRelationName(relation))));
- 			}
- 			else
- 				XactLockTableWait(xwait);
  
  			LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
  
--- 3210,3226 ----
  		}
  		else
  		{
+ 			bool	result;
  			/* wait for regular transaction to end */
  			if (nowait)
! 				result = ConditionalXactLockTableWait(xwait);
! 			else
! 				result = TimedXactLockTableWait(xwait);
! 			if (!result)
! 				ereport(ERROR,
  							(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
  					errmsg("could not obtain lock on row in relation \"%s\"",
  						   RelationGetRelationName(relation))));
  
  			LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
  
diff -dcrpN pgsql.orig/src/backend/access/transam/multixact.c pgsql/src/backend/access/transam/multixact.c
*** pgsql.orig/src/backend/access/transam/multixact.c	2009-07-13 11:16:21.000000000 +0200
--- pgsql/src/backend/access/transam/multixact.c	2009-07-30 12:31:18.000000000 +0200
*************** ConditionalMultiXactIdWait(MultiXactId m
*** 636,641 ****
--- 636,678 ----
  }
  
  /*
+  * TimedMultiXactIdWait
+  *		As above, but give up if the lock cannot be acquired within lock_timeout.
+  */
+ bool
+ TimedMultiXactIdWait(MultiXactId multi)
+ {
+ 	bool		result = true;
+ 	TransactionId *members;
+ 	int			nmembers;
+ 
+ 	nmembers = GetMultiXactIdMembers(multi, &members);
+ 
+ 	if (nmembers >= 0)
+ 	{
+ 		int			i;
+ 
+ 		for (i = 0; i < nmembers; i++)
+ 		{
+ 			TransactionId member = members[i];
+ 
+ 			debug_elog4(DEBUG2, "TimedMultiXactIdWait: trying %d (%u)",
+ 						i, member);
+ 			if (!TransactionIdIsCurrentTransactionId(member))
+ 			{
+ 				result = TimedXactLockTableWait(member);
+ 				if (!result)
+ 					break;
+ 			}
+ 		}
+ 
+ 		pfree(members);
+ 	}
+ 
+ 	return result;
+ }
+ 
+ /*
   * CreateMultiXactId
   *		Make a new MultiXactId
   *
diff -dcrpN pgsql.orig/src/backend/commands/lockcmds.c pgsql/src/backend/commands/lockcmds.c
*** pgsql.orig/src/backend/commands/lockcmds.c	2009-06-13 18:24:48.000000000 +0200
--- pgsql/src/backend/commands/lockcmds.c	2009-07-30 11:09:49.000000000 +0200
*************** LockTableRecurse(Oid reloid, RangeVar *r
*** 65,70 ****
--- 65,71 ----
  {
  	Relation	rel;
  	AclResult	aclresult;
+ 	bool		result;
  
  	/*
  	 * Acquire the lock.  We must do this first to protect against concurrent
*************** LockTableRecurse(Oid reloid, RangeVar *r
*** 72,97 ****
  	 * won't fail.
  	 */
  	if (nowait)
  	{
! 		if (!ConditionalLockRelationOid(reloid, lockmode))
! 		{
! 			/* try to throw error by name; relation could be deleted... */
! 			char	   *relname = rv ? rv->relname : get_rel_name(reloid);
  
! 			if (relname)
! 				ereport(ERROR,
  						(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
  						 errmsg("could not obtain lock on relation \"%s\"",
  								relname)));
! 			else
! 				ereport(ERROR,
  						(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
  					  errmsg("could not obtain lock on relation with OID %u",
  							 reloid)));
- 		}
  	}
- 	else
- 		LockRelationOid(reloid, lockmode);
  
  	/*
  	 * Now that we have the lock, check to see if the relation really exists
--- 73,97 ----
  	 * won't fail.
  	 */
  	if (nowait)
+ 		result = ConditionalLockRelationOid(reloid, lockmode);
+ 	else
+ 		result = TimedLockRelationOid(reloid, lockmode);
+ 	if (!result)
  	{
! 		/* try to throw error by name; relation could be deleted... */
! 		char	   *relname = rv ? rv->relname : get_rel_name(reloid);
  
! 		if (relname)
! 			ereport(ERROR,
  						(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
  						 errmsg("could not obtain lock on relation \"%s\"",
  								relname)));
! 		else
! 			ereport(ERROR,
  						(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
  					  errmsg("could not obtain lock on relation with OID %u",
  							 reloid)));
  	}
  
  	/*
  	 * Now that we have the lock, check to see if the relation really exists
diff -dcrpN pgsql.orig/src/backend/port/posix_sema.c pgsql/src/backend/port/posix_sema.c
*** pgsql.orig/src/backend/port/posix_sema.c	2009-06-13 18:24:55.000000000 +0200
--- pgsql/src/backend/port/posix_sema.c	2009-07-30 10:37:20.000000000 +0200
***************
*** 24,29 ****
--- 24,30 ----
  #include "miscadmin.h"
  #include "storage/ipc.h"
  #include "storage/pg_sema.h"
+ #include "storage/proc.h" /* for LockTimeout */
  
  
  #ifdef USE_NAMED_POSIX_SEMAPHORES
*************** PGSemaphoreTryLock(PGSemaphore sema)
*** 313,315 ****
--- 314,359 ----
  
  	return true;
  }
+ 
+ /*
+  * PGSemaphoreTimedLock
+  *
+  * Lock a semaphore only if able to do so under the lock_timeout
+  */
+ bool
+ PGSemaphoreTimedLock(PGSemaphore sema, bool interruptOK)
+ {
+ 	int			errStatus;
+ 	struct timespec timeout;
+ 	
+ 
+ 	/*
+ 	 * See notes in sysv_sema.c's implementation of PGSemaphoreLock. Just as
+ 	 * that code does for semop(), we handle both the case where sem_wait()
+ 	 * returns errno == EINTR after a signal, and the case where it just keeps
+ 	 * waiting.
+ 	 */
+ 	do
+ 	{
+ 		ImmediateInterruptOK = interruptOK;
+ 		CHECK_FOR_INTERRUPTS();
+ 		if (LockTimeout)
+ 		{
+ 			/*
+ 			 * sem_timedwait() expects an absolute CLOCK_REALTIME deadline,
+ 			 * so convert the relative lock_timeout into one.  (clock_gettime
+ 			 * is declared in <time.h> and may need librt on some platforms.)
+ 			 */
+ 			clock_gettime(CLOCK_REALTIME, &timeout);
+ 			timeout.tv_sec += LockTimeout / 1000;
+ 			timeout.tv_nsec += (LockTimeout % 1000) * 1000000;
+ 			if (timeout.tv_nsec >= 1000000000L)
+ 			{
+ 				timeout.tv_sec++;
+ 				timeout.tv_nsec -= 1000000000L;
+ 			}
+ 			errStatus = sem_timedwait(PG_SEM_REF(sema), &timeout);
+ 		}
+ 		else
+ 			errStatus = sem_wait(PG_SEM_REF(sema));
+ 		ImmediateInterruptOK = false;
+ 	} while (errStatus < 0 && errno == EINTR);
+ 
+ 	if (errStatus < 0)
+ 	{
+ 		if (errno == ETIMEDOUT)
+ 			return false;		/* failed to lock it */
+ 		/* Otherwise we got trouble */
+ 		elog(FATAL, "sem_wait failed: %m");
+ 	}
+ 	return true;
+ }
diff -dcrpN pgsql.orig/src/backend/port/sysv_sema.c pgsql/src/backend/port/sysv_sema.c
*** pgsql.orig/src/backend/port/sysv_sema.c	2009-06-13 18:24:55.000000000 +0200
--- pgsql/src/backend/port/sysv_sema.c	2009-07-30 10:37:37.000000000 +0200
***************
*** 30,35 ****
--- 30,36 ----
  #include "miscadmin.h"
  #include "storage/ipc.h"
  #include "storage/pg_sema.h"
+ #include "storage/proc.h" /* for LockTimeout */
  
  
  #ifndef HAVE_UNION_SEMUN
*************** PGSemaphoreTryLock(PGSemaphore sema)
*** 497,499 ****
--- 498,590 ----
  
  	return true;
  }
+ 
+ /*
+  * PGSemaphoreTimedLock
+  *
+  * Lock a semaphore only if able to do so under the lock_timeout
+  */
+ bool
+ PGSemaphoreTimedLock(PGSemaphore sema, bool interruptOK)
+ {
+ 	int			errStatus;
+ 	struct sembuf sops;
+ 	struct timespec timeout;
+ 
+ 	sops.sem_op = -1;			/* decrement */
+ 	sops.sem_flg = 0;
+ 	sops.sem_num = sema->semNum;
+ 
+ 	/*
+ 	 * Note: if errStatus is -1 and errno == EINTR then it means we returned
+ 	 * from the operation prematurely because we were sent a signal.  So we
+ 	 * try and lock the semaphore again.
+ 	 *
+ 	 * Each time around the loop, we check for a cancel/die interrupt.	On
+ 	 * some platforms, if such an interrupt comes in while we are waiting, it
+ 	 * will cause the semop() call to exit with errno == EINTR, allowing us to
+ 	 * service the interrupt (if not in a critical section already) during the
+ 	 * next loop iteration.
+ 	 *
+ 	 * Once we acquire the lock, we do NOT check for an interrupt before
+ 	 * returning.  The caller needs to be able to record ownership of the lock
+ 	 * before any interrupt can be accepted.
+ 	 *
+ 	 * There is a window of a few instructions between CHECK_FOR_INTERRUPTS
+ 	 * and entering the semop() call.  If a cancel/die interrupt occurs in
+ 	 * that window, we would fail to notice it until after we acquire the lock
+ 	 * (or get another interrupt to escape the semop()).  We can avoid this
+ 	 * problem by temporarily setting ImmediateInterruptOK to true before we
+ 	 * do CHECK_FOR_INTERRUPTS; then, a die() interrupt in this interval will
+ 	 * execute directly.  However, there is a huge pitfall: there is another
+ 	 * window of a few instructions after the semop() before we are able to
+ 	 * reset ImmediateInterruptOK.	If an interrupt occurs then, we'll lose
+ 	 * control, which means that the lock has been acquired but our caller did
+ 	 * not get a chance to record the fact. Therefore, we only set
+ 	 * ImmediateInterruptOK if the caller tells us it's OK to do so, ie, the
+ 	 * caller does not need to record acquiring the lock.  (This is currently
+ 	 * true for lockmanager locks, since the process that granted us the lock
+ 	 * did all the necessary state updates. It's not true for SysV semaphores
+ 	 * used to implement LW locks or emulate spinlocks --- but the wait time
+ 	 * for such locks should not be very long, anyway.)
+ 	 *
+ 	 * On some platforms, signals marked SA_RESTART (which is most, for us)
+ 	 * will not interrupt the semop(); it will just keep waiting.  Therefore
+ 	 * it's necessary for cancel/die interrupts to be serviced directly by the
+ 	 * signal handler.	On these platforms the behavior is really the same
+ 	 * whether the signal arrives just before the semop() begins, or while it
+ 	 * is waiting.	The loop on EINTR is thus important only for other types
+ 	 * of interrupts.
+ 	 */
+ 	do
+ 	{
+ 		ImmediateInterruptOK = interruptOK;
+ 		CHECK_FOR_INTERRUPTS();
+ 		if (LockTimeout)
+ 		{
+ 			timeout.tv_sec = LockTimeout / 1000;
+ 			timeout.tv_nsec = (LockTimeout % 1000) * 1000000;
+ 			errStatus = semtimedop(sema->semId, &sops, 1, &timeout);
+ 		}
+ 		else
+ 			errStatus = semop(sema->semId, &sops, 1);
+ 		ImmediateInterruptOK = false;
+ 	} while (errStatus < 0 && errno == EINTR);
+ 
+ 	if (errStatus < 0)
+ 	{
+ 		/* Expect EAGAIN or EWOULDBLOCK (platform-dependent) */
+ #ifdef EAGAIN
+ 		if (errno == EAGAIN)
+ 			return false;		/* failed to lock it */
+ #endif
+ #if defined(EWOULDBLOCK) && (!defined(EAGAIN) || (EWOULDBLOCK != EAGAIN))
+ 		if (errno == EWOULDBLOCK)
+ 			return false;		/* failed to lock it */
+ #endif
+ 		/* Otherwise we got trouble */
+ 		elog(FATAL, "semop(id=%d) failed: %m", sema->semId);
+ 	}
+ 	return true;
+ }
+ 
diff -dcrpN pgsql.orig/src/backend/port/win32_sema.c pgsql/src/backend/port/win32_sema.c
*** pgsql.orig/src/backend/port/win32_sema.c	2009-06-13 18:24:55.000000000 +0200
--- pgsql/src/backend/port/win32_sema.c	2009-07-30 10:37:57.000000000 +0200
***************
*** 16,21 ****
--- 16,22 ----
  #include "miscadmin.h"
  #include "storage/ipc.h"
  #include "storage/pg_sema.h"
+ #include "storage/proc.h" /* for LockTimeout */
  
  static HANDLE *mySemSet;		/* IDs of sema sets acquired so far */
  static int	numSems;			/* number of sema sets acquired so far */
*************** PGSemaphoreTryLock(PGSemaphore sema)
*** 205,207 ****
--- 206,267 ----
  	/* keep compiler quiet */
  	return false;
  }
+ 
+ /*
+  * PGSemaphoreTimedLock
+  *
+  * Lock a semaphore only if able to do so under the lock_timeout
+  * Serve the interrupt if interruptOK is true.
+  */
+ bool
+ PGSemaphoreTimedLock(PGSemaphore sema, bool interruptOK)
+ {
+ 	DWORD		ret;
+ 	HANDLE		wh[2];
+ 
+ 	wh[0] = *sema;
+ 	wh[1] = pgwin32_signal_event;
+ 
+ 	/*
+ 	 * As in other implementations of PGSemaphoreLock, we need to check for
+ 	 * cancel/die interrupts each time through the loop.  But here, there is
+ 	 * no hidden magic about whether the syscall will internally service a
+ 	 * signal --- we do that ourselves.
+ 	 */
+ 	do
+ 	{
+ 		ImmediateInterruptOK = interruptOK;
+ 		CHECK_FOR_INTERRUPTS();
+ 
+ 		errno = 0;
+ 		ret = WaitForMultipleObjectsEx(2, wh, FALSE, LockTimeout ? LockTimeout : INFINITE, TRUE);
+ 
+ 		if (ret == WAIT_OBJECT_0)
+ 		{
+ 			/* We got it! */
+ 			return true;
+ 		}
+ 		else if (ret == WAIT_TIMEOUT)
+ 		{
+ 			/* Can't get it */
+ 			errno = EAGAIN;
+ 			return false;
+ 		}
+ 		else if (ret == WAIT_OBJECT_0 + 1)
+ 		{
+ 			/* Signal event is set - we have a signal to deliver */
+ 			pgwin32_dispatch_queued_signals();
+ 			errno = EINTR;
+ 		}
+ 		else
+ 			/* Otherwise we are in trouble */
+ 			errno = EIDRM;
+ 
+ 		ImmediateInterruptOK = false;
+ 	} while (errno == EINTR);
+ 
+ 	if (errno != 0)
+ 		ereport(FATAL,
+ 				(errmsg("could not lock semaphore: error code %d", (int) GetLastError())));
+ 
+ 	/* keep compiler quiet */
+ 	return false;
+ }
+ 
diff -dcrpN pgsql.orig/src/backend/storage/lmgr/lmgr.c pgsql/src/backend/storage/lmgr/lmgr.c
*** pgsql.orig/src/backend/storage/lmgr/lmgr.c	2009-01-02 17:15:28.000000000 +0100
--- pgsql/src/backend/storage/lmgr/lmgr.c	2009-07-30 12:28:44.000000000 +0200
***************
*** 21,26 ****
--- 21,27 ----
  #include "catalog/catalog.h"
  #include "miscadmin.h"
  #include "storage/lmgr.h"
+ #include "storage/proc.h"
  #include "storage/procarray.h"
  #include "utils/inval.h"
  
*************** LockRelationOid(Oid relid, LOCKMODE lock
*** 76,82 ****
  
  	SetLocktagRelationOid(&tag, relid);
  
! 	res = LockAcquire(&tag, lockmode, false, false);
  
  	/*
  	 * Now that we have the lock, check for invalidation messages, so that we
--- 77,83 ----
  
  	SetLocktagRelationOid(&tag, relid);
  
! 	res = LockAcquire(&tag, lockmode, false, false, INFINITE_TIMEOUT);
  
  	/*
  	 * Now that we have the lock, check for invalidation messages, so that we
*************** ConditionalLockRelationOid(Oid relid, LO
*** 108,114 ****
  
  	SetLocktagRelationOid(&tag, relid);
  
! 	res = LockAcquire(&tag, lockmode, false, true);
  
  	if (res == LOCKACQUIRE_NOT_AVAIL)
  		return false;
--- 109,144 ----
  
  	SetLocktagRelationOid(&tag, relid);
  
! 	res = LockAcquire(&tag, lockmode, false, true, INFINITE_TIMEOUT);
! 
! 	if (res == LOCKACQUIRE_NOT_AVAIL)
! 		return false;
! 
! 	/*
! 	 * Now that we have the lock, check for invalidation messages; see notes
! 	 * in LockRelationOid.
! 	 */
! 	if (res != LOCKACQUIRE_ALREADY_HELD)
! 		AcceptInvalidationMessages();
! 
! 	return true;
! }
! 
! /*
!  *		TimedLockRelationOid
!  *
!  * As LockRelationOid, but give up if the lock cannot be acquired within lock_timeout.
!  * Returns TRUE iff the lock was acquired.
!  */
! bool
! TimedLockRelationOid(Oid relid, LOCKMODE lockmode)
! {
! 	LOCKTAG		tag;
! 	LockAcquireResult res;
! 
! 	SetLocktagRelationOid(&tag, relid);
! 
! 	res = LockAcquire(&tag, lockmode, false, false, LockTimeout);
  
  	if (res == LOCKACQUIRE_NOT_AVAIL)
  		return false;
*************** LockRelation(Relation relation, LOCKMODE
*** 171,177 ****
  						 relation->rd_lockInfo.lockRelId.dbId,
  						 relation->rd_lockInfo.lockRelId.relId);
  
! 	res = LockAcquire(&tag, lockmode, false, false);
  
  	/*
  	 * Now that we have the lock, check for invalidation messages; see notes
--- 201,207 ----
  						 relation->rd_lockInfo.lockRelId.dbId,
  						 relation->rd_lockInfo.lockRelId.relId);
  
! 	res = LockAcquire(&tag, lockmode, false, false, INFINITE_TIMEOUT);
  
  	/*
  	 * Now that we have the lock, check for invalidation messages; see notes
*************** ConditionalLockRelation(Relation relatio
*** 198,204 ****
  						 relation->rd_lockInfo.lockRelId.dbId,
  						 relation->rd_lockInfo.lockRelId.relId);
  
! 	res = LockAcquire(&tag, lockmode, false, true);
  
  	if (res == LOCKACQUIRE_NOT_AVAIL)
  		return false;
--- 228,234 ----
  						 relation->rd_lockInfo.lockRelId.dbId,
  						 relation->rd_lockInfo.lockRelId.relId);
  
! 	res = LockAcquire(&tag, lockmode, false, true, INFINITE_TIMEOUT);
  
  	if (res == LOCKACQUIRE_NOT_AVAIL)
  		return false;
*************** LockRelationIdForSession(LockRelId *reli
*** 250,256 ****
  
  	SET_LOCKTAG_RELATION(tag, relid->dbId, relid->relId);
  
! 	(void) LockAcquire(&tag, lockmode, true, false);
  }
  
  /*
--- 280,286 ----
  
  	SET_LOCKTAG_RELATION(tag, relid->dbId, relid->relId);
  
! 	(void) LockAcquire(&tag, lockmode, true, false, INFINITE_TIMEOUT);
  }
  
  /*
*************** LockRelationForExtension(Relation relati
*** 285,291 ****
  								relation->rd_lockInfo.lockRelId.dbId,
  								relation->rd_lockInfo.lockRelId.relId);
  
! 	(void) LockAcquire(&tag, lockmode, false, false);
  }
  
  /*
--- 315,321 ----
  								relation->rd_lockInfo.lockRelId.dbId,
  								relation->rd_lockInfo.lockRelId.relId);
  
! 	(void) LockAcquire(&tag, lockmode, false, false, INFINITE_TIMEOUT);
  }
  
  /*
*************** LockPage(Relation relation, BlockNumber 
*** 319,325 ****
  					 relation->rd_lockInfo.lockRelId.relId,
  					 blkno);
  
! 	(void) LockAcquire(&tag, lockmode, false, false);
  }
  
  /*
--- 349,355 ----
  					 relation->rd_lockInfo.lockRelId.relId,
  					 blkno);
  
! 	(void) LockAcquire(&tag, lockmode, false, false, INFINITE_TIMEOUT);
  }
  
  /*
*************** ConditionalLockPage(Relation relation, B
*** 338,344 ****
  					 relation->rd_lockInfo.lockRelId.relId,
  					 blkno);
  
! 	return (LockAcquire(&tag, lockmode, false, true) != LOCKACQUIRE_NOT_AVAIL);
  }
  
  /*
--- 368,374 ----
  					 relation->rd_lockInfo.lockRelId.relId,
  					 blkno);
  
! 	return (LockAcquire(&tag, lockmode, false, true, INFINITE_TIMEOUT) != LOCKACQUIRE_NOT_AVAIL);
  }
  
  /*
*************** LockTuple(Relation relation, ItemPointer
*** 375,381 ****
  					  ItemPointerGetBlockNumber(tid),
  					  ItemPointerGetOffsetNumber(tid));
  
! 	(void) LockAcquire(&tag, lockmode, false, false);
  }
  
  /*
--- 405,411 ----
  					  ItemPointerGetBlockNumber(tid),
  					  ItemPointerGetOffsetNumber(tid));
  
! 	(void) LockAcquire(&tag, lockmode, false, false, INFINITE_TIMEOUT);
  }
  
  /*
*************** ConditionalLockTuple(Relation relation, 
*** 395,401 ****
  					  ItemPointerGetBlockNumber(tid),
  					  ItemPointerGetOffsetNumber(tid));
  
! 	return (LockAcquire(&tag, lockmode, false, true) != LOCKACQUIRE_NOT_AVAIL);
  }
  
  /*
--- 425,451 ----
  					  ItemPointerGetBlockNumber(tid),
  					  ItemPointerGetOffsetNumber(tid));
  
! 	return (LockAcquire(&tag, lockmode, false, true, INFINITE_TIMEOUT) != LOCKACQUIRE_NOT_AVAIL);
! }
! 
! /*
!  *		TimedLockTuple
!  *
!  * As above, but give up if the lock cannot be acquired within lock_timeout.
!  * Returns TRUE iff the lock was acquired.
!  */
! bool
! TimedLockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode)
! {
! 	LOCKTAG		tag;
! 
! 	SET_LOCKTAG_TUPLE(tag,
! 					  relation->rd_lockInfo.lockRelId.dbId,
! 					  relation->rd_lockInfo.lockRelId.relId,
! 					  ItemPointerGetBlockNumber(tid),
! 					  ItemPointerGetOffsetNumber(tid));
! 
! 	return (LockAcquire(&tag, lockmode, false, false, LockTimeout) != LOCKACQUIRE_NOT_AVAIL);
  }
  
  /*
*************** XactLockTableInsert(TransactionId xid)
*** 429,435 ****
  
  	SET_LOCKTAG_TRANSACTION(tag, xid);
  
! 	(void) LockAcquire(&tag, ExclusiveLock, false, false);
  }
  
  /*
--- 479,485 ----
  
  	SET_LOCKTAG_TRANSACTION(tag, xid);
  
! 	(void) LockAcquire(&tag, ExclusiveLock, false, false, INFINITE_TIMEOUT);
  }
  
  /*
*************** XactLockTableWait(TransactionId xid)
*** 473,479 ****
  
  		SET_LOCKTAG_TRANSACTION(tag, xid);
  
! 		(void) LockAcquire(&tag, ShareLock, false, false);
  
  		LockRelease(&tag, ShareLock, false);
  
--- 523,529 ----
  
  		SET_LOCKTAG_TRANSACTION(tag, xid);
  
! 		(void) LockAcquire(&tag, ShareLock, false, false, INFINITE_TIMEOUT);
  
  		LockRelease(&tag, ShareLock, false);
  
*************** ConditionalXactLockTableWait(Transaction
*** 501,507 ****
  
  		SET_LOCKTAG_TRANSACTION(tag, xid);
  
! 		if (LockAcquire(&tag, ShareLock, false, true) == LOCKACQUIRE_NOT_AVAIL)
  			return false;
  
  		LockRelease(&tag, ShareLock, false);
--- 551,589 ----
  
  		SET_LOCKTAG_TRANSACTION(tag, xid);
  
! 		if (LockAcquire(&tag, ShareLock, false, true, INFINITE_TIMEOUT) == LOCKACQUIRE_NOT_AVAIL)
! 			return false;
! 
! 		LockRelease(&tag, ShareLock, false);
! 
! 		if (!TransactionIdIsInProgress(xid))
! 			break;
! 		xid = SubTransGetParent(xid);
! 	}
! 
! 	return true;
! }
! 
! 
! /*
!  *		TimedXactLockTableWait
!  *
!  * As above, but give up if the lock cannot be acquired within lock_timeout.
!  * Returns TRUE if the lock was acquired.
!  */
! bool
! TimedXactLockTableWait(TransactionId xid)
! {
! 	LOCKTAG		tag;
! 
! 	for (;;)
! 	{
! 		Assert(TransactionIdIsValid(xid));
! 		Assert(!TransactionIdEquals(xid, GetTopTransactionIdIfAny()));
! 
! 		SET_LOCKTAG_TRANSACTION(tag, xid);
! 
! 		if (LockAcquire(&tag, ShareLock, false, false, LockTimeout) == LOCKACQUIRE_NOT_AVAIL)
  			return false;
  
  		LockRelease(&tag, ShareLock, false);
*************** VirtualXactLockTableInsert(VirtualTransa
*** 531,537 ****
  
  	SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
  
! 	(void) LockAcquire(&tag, ExclusiveLock, false, false);
  }
  
  /*
--- 613,619 ----
  
  	SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
  
! 	(void) LockAcquire(&tag, ExclusiveLock, false, false, INFINITE_TIMEOUT);
  }
  
  /*
*************** VirtualXactLockTableWait(VirtualTransact
*** 549,555 ****
  
  	SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
  
! 	(void) LockAcquire(&tag, ShareLock, false, false);
  
  	LockRelease(&tag, ShareLock, false);
  }
--- 631,637 ----
  
  	SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
  
! 	(void) LockAcquire(&tag, ShareLock, false, false, INFINITE_TIMEOUT);
  
  	LockRelease(&tag, ShareLock, false);
  }
*************** ConditionalVirtualXactLockTableWait(Virt
*** 569,575 ****
  
  	SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
  
! 	if (LockAcquire(&tag, ShareLock, false, true) == LOCKACQUIRE_NOT_AVAIL)
  		return false;
  
  	LockRelease(&tag, ShareLock, false);
--- 651,657 ----
  
  	SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
  
! 	if (LockAcquire(&tag, ShareLock, false, true, INFINITE_TIMEOUT) == LOCKACQUIRE_NOT_AVAIL)
  		return false;
  
  	LockRelease(&tag, ShareLock, false);
*************** LockDatabaseObject(Oid classid, Oid obji
*** 598,604 ****
  					   objid,
  					   objsubid);
  
! 	(void) LockAcquire(&tag, lockmode, false, false);
  }
  
  /*
--- 680,686 ----
  					   objid,
  					   objsubid);
  
! 	(void) LockAcquire(&tag, lockmode, false, false, INFINITE_TIMEOUT);
  }
  
  /*
*************** LockSharedObject(Oid classid, Oid objid,
*** 636,642 ****
  					   objid,
  					   objsubid);
  
! 	(void) LockAcquire(&tag, lockmode, false, false);
  
  	/* Make sure syscaches are up-to-date with any changes we waited for */
  	AcceptInvalidationMessages();
--- 718,724 ----
  					   objid,
  					   objsubid);
  
! 	(void) LockAcquire(&tag, lockmode, false, false, INFINITE_TIMEOUT);
  
  	/* Make sure syscaches are up-to-date with any changes we waited for */
  	AcceptInvalidationMessages();
*************** LockSharedObjectForSession(Oid classid, 
*** 678,684 ****
  					   objid,
  					   objsubid);
  
! 	(void) LockAcquire(&tag, lockmode, true, false);
  }
  
  /*
--- 760,766 ----
  					   objid,
  					   objsubid);
  
! 	(void) LockAcquire(&tag, lockmode, true, false, INFINITE_TIMEOUT);
  }
  
  /*
diff -dcrpN pgsql.orig/src/backend/storage/lmgr/lock.c pgsql/src/backend/storage/lmgr/lock.c
*** pgsql.orig/src/backend/storage/lmgr/lock.c	2009-06-13 18:24:57.000000000 +0200
--- pgsql/src/backend/storage/lmgr/lock.c	2009-07-30 14:32:24.000000000 +0200
*************** PROCLOCK_PRINT(const char *where, const 
*** 254,260 ****
  static uint32 proclock_hash(const void *key, Size keysize);
  static void RemoveLocalLock(LOCALLOCK *locallock);
  static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
! static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
  static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
  			PROCLOCK *proclock, LockMethod lockMethodTable);
  static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
--- 254,261 ----
  static uint32 proclock_hash(const void *key, Size keysize);
  static void RemoveLocalLock(LOCALLOCK *locallock);
  static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
! static int  WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner,
! 			int lock_timeout);
  static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
  			PROCLOCK *proclock, LockMethod lockMethodTable);
  static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
*************** LockAcquireResult
*** 467,473 ****
  LockAcquire(const LOCKTAG *locktag,
  			LOCKMODE lockmode,
  			bool sessionLock,
! 			bool dontWait)
  {
  	LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
  	LockMethod	lockMethodTable;
--- 468,475 ----
  LockAcquire(const LOCKTAG *locktag,
  			LOCKMODE lockmode,
  			bool sessionLock,
! 			bool dontWait,
! 			int lock_timeout)
  {
  	LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
  	LockMethod	lockMethodTable;
*************** LockAcquire(const LOCKTAG *locktag,
*** 745,750 ****
--- 747,754 ----
  	}
  	else
  	{
+ 		int	wait_result;
+ 
  		Assert(status == STATUS_FOUND);
  
  		/*
*************** LockAcquire(const LOCKTAG *locktag,
*** 794,800 ****
  										 locktag->locktag_type,
  										 lockmode);
  
! 		WaitOnLock(locallock, owner);
  
  		TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
  										locktag->locktag_field2,
--- 798,804 ----
  										 locktag->locktag_type,
  										 lockmode);
  
! 		wait_result = WaitOnLock(locallock, owner, lock_timeout);
  
  		TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
  										locktag->locktag_field2,
*************** LockAcquire(const LOCKTAG *locktag,
*** 813,818 ****
--- 817,847 ----
  		 * Check the proclock entry status, in case something in the ipc
  		 * communication doesn't work correctly.
  		 */
+ 		if (wait_result == STATUS_WAITING)
+ 		{
+ 			if (proclock->holdMask == 0)
+ 			{
+ 				SHMQueueDelete(&proclock->lockLink);
+ 				SHMQueueDelete(&proclock->procLink);
+ 				if (!hash_search_with_hash_value(LockMethodProcLockHash,
+ 												 (void *) &(proclock->tag),
+ 												 proclock_hashcode,
+ 												 HASH_REMOVE,
+ 												 NULL))
+ 					elog(PANIC, "proclock table corrupted");
+ 			}
+ 			else
+ 				PROCLOCK_PRINT("LockAcquire: TIMED OUT", proclock);
+ 			lock->nRequested--;
+ 			lock->requested[lockmode]--;
+ 			LOCK_PRINT("LockAcquire: TIMED OUT", lock, lockmode);
+ 			Assert((lock->nRequested > 0) && (lock->requested[lockmode] >= 0));
+ 			Assert(lock->nGranted <= lock->nRequested);
+ 			LWLockRelease(partitionLock);
+ 			if (locallock->nLocks == 0)
+ 				RemoveLocalLock(locallock);
+ 			return LOCKACQUIRE_NOT_AVAIL;
+ 		}
  		if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
  		{
  			PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
*************** GrantAwaitedLock(void)
*** 1105,1118 ****
   * Caller must have set MyProc->heldLocks to reflect locks already held
   * on the lockable object by this process.
   *
   * The appropriate partition lock must be held at entry.
   */
! static void
! WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
  {
  	LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
  	LockMethod	lockMethodTable = LockMethods[lockmethodid];
  	char	   *volatile new_status = NULL;
  
  	LOCK_PRINT("WaitOnLock: sleeping on lock",
  			   locallock->lock, locallock->tag.mode);
--- 1134,1153 ----
   * Caller must have set MyProc->heldLocks to reflect locks already held
   * on the lockable object by this process.
   *
+  * Result (the return value of ProcSleep()):
+  *      STATUS_OK if we acquired the lock
+  *      STATUS_ERROR if not (deadlock)
+  *      STATUS_WAITING if not (timeout)
+  *
   * The appropriate partition lock must be held at entry.
   */
! static int
! WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner, int lock_timeout)
  {
  	LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
  	LockMethod	lockMethodTable = LockMethods[lockmethodid];
  	char	   *volatile new_status = NULL;
+ 	int		wait_status;
  
  	LOCK_PRINT("WaitOnLock: sleeping on lock",
  			   locallock->lock, locallock->tag.mode);
*************** WaitOnLock(LOCALLOCK *locallock, Resourc
*** 1154,1161 ****
  	 */
  	PG_TRY();
  	{
! 		if (ProcSleep(locallock, lockMethodTable) != STATUS_OK)
  		{
  			/*
  			 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
  			 * now.
--- 1189,1208 ----
  	 */
  	PG_TRY();
  	{
! 		wait_status = ProcSleep(locallock, lockMethodTable, lock_timeout);
! 		switch (wait_status)
  		{
+ 		case STATUS_OK:
+ 			/* Got lock */
+ 			break;
+ 		case STATUS_WAITING:
+ 			/*
+ 			 * We failed as a result of a timeout. Quit now.
+ 			 */
+ 			LOCK_PRINT("WaitOnLock: timeout on lock",
+ 					   locallock->lock, locallock->tag.mode);
+ 			break;
+ 		default:
  			/*
  			 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
  			 * now.
*************** WaitOnLock(LOCALLOCK *locallock, Resourc
*** 1202,1207 ****
--- 1249,1256 ----
  
  	LOCK_PRINT("WaitOnLock: wakeup on lock",
  			   locallock->lock, locallock->tag.mode);
+ 
+ 	return wait_status;
  }
  
  /*
diff -dcrpN pgsql.orig/src/backend/storage/lmgr/lwlock.c pgsql/src/backend/storage/lmgr/lwlock.c
*** pgsql.orig/src/backend/storage/lmgr/lwlock.c	2009-01-02 17:15:28.000000000 +0100
--- pgsql/src/backend/storage/lmgr/lwlock.c	2009-07-30 10:45:39.000000000 +0200
*************** LWLockConditionalAcquire(LWLockId lockid
*** 554,559 ****
--- 554,756 ----
  }
  
  /*
+  * LWLockTimedAcquire - acquire a lightweight lock in the specified mode
+  *
+  * If the lock is not available, sleep until it becomes available or until
+  * lock_timeout expires, whichever comes first.
+  *
+  * Side effect: cancel/die interrupts are held off until lock release.
+  */
+ bool
+ LWLockTimedAcquire(LWLockId lockid, LWLockMode mode)
+ {
+ 	volatile LWLock *lock = &(LWLockArray[lockid].lock);
+ 	PGPROC	   *proc = MyProc;
+ 	bool		retry = false;
+ 	int			extraWaits = 0;
+ 	bool		timeout = false;
+ 
+ 	PRINT_LWDEBUG("LWLockTimedAcquire", lockid, lock);
+ 
+ #ifdef LWLOCK_STATS
+ 	/* Set up local count state first time through in a given process */
+ 	if (counts_for_pid != MyProcPid)
+ 	{
+ 		int		   *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
+ 		int			numLocks = LWLockCounter[1];
+ 
+ 		sh_acquire_counts = calloc(numLocks, sizeof(int));
+ 		ex_acquire_counts = calloc(numLocks, sizeof(int));
+ 		block_counts = calloc(numLocks, sizeof(int));
+ 		counts_for_pid = MyProcPid;
+ 		on_shmem_exit(print_lwlock_stats, 0);
+ 	}
+ 	/* Count lock acquisition attempts */
+ 	if (mode == LW_EXCLUSIVE)
+ 		ex_acquire_counts[lockid]++;
+ 	else
+ 		sh_acquire_counts[lockid]++;
+ #endif   /* LWLOCK_STATS */
+ 
+ 	/*
+ 	 * We can't wait if we haven't got a PGPROC.  This should only occur
+ 	 * during bootstrap or shared memory initialization.  Put an Assert here
+ 	 * to catch unsafe coding practices.
+ 	 */
+ 	Assert(!(proc == NULL && IsUnderPostmaster));
+ 
+ 	/* Ensure we will have room to remember the lock */
+ 	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
+ 		elog(ERROR, "too many LWLocks taken");
+ 
+ 	/*
+ 	 * Lock out cancel/die interrupts until we exit the code section protected
+ 	 * by the LWLock.  This ensures that interrupts will not interfere with
+ 	 * manipulations of data structures in shared memory.
+ 	 */
+ 	HOLD_INTERRUPTS();
+ 
+ 	/*
+ 	 * Loop here to try to acquire lock after each time we are signaled by
+ 	 * LWLockRelease.
+ 	 *
+ 	 * NOTE: it might seem better to have LWLockRelease actually grant us the
+ 	 * lock, rather than retrying and possibly having to go back to sleep. But
+ 	 * in practice that is no good because it means a process swap for every
+ 	 * lock acquisition when two or more processes are contending for the same
+ 	 * lock.  Since LWLocks are normally used to protect not-very-long
+ 	 * sections of computation, a process needs to be able to acquire and
+ 	 * release the same lock many times during a single CPU time slice, even
+ 	 * in the presence of contention.  The efficiency of being able to do that
+ 	 * outweighs the inefficiency of sometimes wasting a process dispatch
+ 	 * cycle because the lock is not free when a released waiter finally gets
+ 	 * to run.	See pgsql-hackers archives for 29-Dec-01.
+ 	 */
+ 	for (;;)
+ 	{
+ 		bool		mustwait;
+ 
+ 		/* Acquire mutex.  Time spent holding mutex should be short! */
+ 		SpinLockAcquire(&lock->mutex);
+ 
+ 		/* If retrying, allow LWLockRelease to release waiters again */
+ 		if (retry)
+ 			lock->releaseOK = true;
+ 
+ 		/* If I can get the lock, do so quickly. */
+ 		if (mode == LW_EXCLUSIVE)
+ 		{
+ 			if (lock->exclusive == 0 && lock->shared == 0)
+ 			{
+ 				lock->exclusive++;
+ 				mustwait = false;
+ 			}
+ 			else
+ 				mustwait = true;
+ 		}
+ 		else
+ 		{
+ 			if (lock->exclusive == 0)
+ 			{
+ 				lock->shared++;
+ 				mustwait = false;
+ 			}
+ 			else
+ 				mustwait = true;
+ 		}
+ 
+ 		if (!mustwait)
+ 			break;				/* got the lock */
+ 
+ 		/*
+ 		 * Add myself to wait queue.
+ 		 *
+ 		 * If we don't have a PGPROC structure, there's no way to wait. This
+ 		 * should never occur, since MyProc should only be null during shared
+ 		 * memory initialization.
+ 		 */
+ 		if (proc == NULL)
+ 			elog(PANIC, "cannot wait without a PGPROC structure");
+ 
+ 		proc->lwWaiting = true;
+ 		proc->lwExclusive = (mode == LW_EXCLUSIVE);
+ 		proc->lwWaitLink = NULL;
+ 		if (lock->head == NULL)
+ 			lock->head = proc;
+ 		else
+ 			lock->tail->lwWaitLink = proc;
+ 		lock->tail = proc;
+ 
+ 		/* Can release the mutex now */
+ 		SpinLockRelease(&lock->mutex);
+ 
+ 		/*
+ 		 * Wait until awakened.
+ 		 *
+ 		 * Since we share the process wait semaphore with the regular lock
+ 		 * manager and ProcWaitForSignal, and we may need to acquire an LWLock
+ 		 * while one of those is pending, it is possible that we get awakened
+ 		 * for a reason other than being signaled by LWLockRelease. If so,
+ 		 * loop back and wait again.  Once we've gotten the LWLock,
+ 		 * re-increment the sema by the number of additional signals received,
+ 		 * so that the lock manager or signal manager will see the received
+ 		 * signal when it next waits.
+ 		 */
+ 		LOG_LWDEBUG("LWLockTimedAcquire", lockid, "waiting");
+ 
+ #ifdef LWLOCK_STATS
+ 		block_counts[lockid]++;
+ #endif
+ 
+ 		TRACE_POSTGRESQL_LWLOCK_WAIT_START(lockid, mode);
+ 
+ 		for (;;)
+ 		{
+ 			/* "false" means cannot accept cancel/die interrupt here. */
+ 			timeout = !PGSemaphoreTimedLock(&proc->sem, false);
+ 			if (timeout)
+ 				break;
+ 			if (!proc->lwWaiting)
+ 				break;
+ 			extraWaits++;
+ 		}
+ 
+ 		TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(lockid, mode);
+ 
+ 		if (timeout)
+ 		{
+ 			LOG_LWDEBUG("LWLockTimedAcquire", lockid, "timed out");
+ 			break;
+ 		}
+ 
+ 		LOG_LWDEBUG("LWLockTimedAcquire", lockid, "awakened");
+ 
+ 		/* Now loop back and try to acquire lock again. */
+ 		retry = true;
+ 	}
+ 
+ 	/*
+ 	 * We are done updating shared state of the lock itself.  On the timeout
+ 	 * path we never re-acquired the mutex after going to sleep, so only
+ 	 * release it if we actually got the lock.
+ 	 */
+ 	if (!timeout)
+ 		SpinLockRelease(&lock->mutex);
+ 
+ 	if (timeout)
+ 		goto out;
+ 
+ 	TRACE_POSTGRESQL_LWLOCK_ACQUIRE(lockid, mode);
+ 
+ 	/* Add lock to list of locks held by this backend */
+ 	held_lwlocks[num_held_lwlocks++] = lockid;
+ 
+ out:
+ 	/*
+ 	 * Fix the process wait semaphore's count for any absorbed wakeups.
+ 	 */
+ 	while (extraWaits-- > 0)
+ 		PGSemaphoreUnlock(&proc->sem);
+ 
+ 	return !timeout;
+ }
+ 
+ /*
   * LWLockRelease - release a previously acquired lock
   */
  void
diff -dcrpN pgsql.orig/src/backend/storage/lmgr/proc.c pgsql/src/backend/storage/lmgr/proc.c
*** pgsql.orig/src/backend/storage/lmgr/proc.c	2009-06-13 18:24:57.000000000 +0200
--- pgsql/src/backend/storage/lmgr/proc.c	2009-07-30 11:35:54.000000000 +0200
***************
*** 50,55 ****
--- 50,56 ----
  /* GUC variables */
  int			DeadlockTimeout = 1000;
  int			StatementTimeout = 0;
+ int			LockTimeout = 0;
  bool		log_lock_waits = false;
  
  /* Pointer to this process's PGPROC struct, if any */
*************** ProcQueueInit(PROC_QUEUE *queue)
*** 717,723 ****
   * The lock table's partition lock must be held at entry, and will be held
   * at exit.
   *
!  * Result: STATUS_OK if we acquired the lock, STATUS_ERROR if not (deadlock).
   *
   * ASSUME: that no one will fiddle with the queue until after
   *		we release the partition lock.
--- 718,727 ----
   * The lock table's partition lock must be held at entry, and will be held
   * at exit.
   *
!  * Result:
!  *	STATUS_OK if we acquired the lock
!  *	STATUS_ERROR if not (deadlock)
!  *	STATUS_WAITING if not (timeout)
   *
   * ASSUME: that no one will fiddle with the queue until after
   *		we release the partition lock.
*************** ProcQueueInit(PROC_QUEUE *queue)
*** 728,734 ****
   * semaphore is normally zero, so when we try to acquire it, we sleep.
   */
  int
! ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
  {
  	LOCKMODE	lockmode = locallock->tag.mode;
  	LOCK	   *lock = locallock->lock;
--- 732,738 ----
   * semaphore is normally zero, so when we try to acquire it, we sleep.
   */
  int
! ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable, int lock_timeout)
  {
  	LOCKMODE	lockmode = locallock->tag.mode;
  	LOCK	   *lock = locallock->lock;
*************** ProcSleep(LOCALLOCK *locallock, LockMeth
*** 889,895 ****
  	 */
  	do
  	{
! 		PGSemaphoreLock(&MyProc->sem, true);
  
  		/*
  		 * waitStatus could change from STATUS_WAITING to something else
--- 893,902 ----
  	 */
  	do
  	{
! 		if (lock_timeout == INFINITE_TIMEOUT)
! 			PGSemaphoreLock(&MyProc->sem, true);
! 		else if (!PGSemaphoreTimedLock(&MyProc->sem, true))
! 			break;
  
  		/*
  		 * waitStatus could change from STATUS_WAITING to something else
*************** ProcSleep(LOCALLOCK *locallock, LockMeth
*** 906,912 ****
  		{
  			PGPROC	   *autovac = GetBlockingAutoVacuumPgproc();
  
! 			LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
  
  			/*
  			 * Only do it if the worker is not working to protect against Xid
--- 913,922 ----
  		{
  			PGPROC	   *autovac = GetBlockingAutoVacuumPgproc();
  
! 			if (lock_timeout == INFINITE_TIMEOUT)
! 				LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
! 			else if (!LWLockTimedAcquire(ProcArrayLock, LW_EXCLUSIVE))
! 				break;
  
  			/*
  			 * Only do it if the worker is not working to protect against Xid
diff -dcrpN pgsql.orig/src/backend/utils/adt/lockfuncs.c pgsql/src/backend/utils/adt/lockfuncs.c
*** pgsql.orig/src/backend/utils/adt/lockfuncs.c	2009-01-02 17:15:30.000000000 +0100
--- pgsql/src/backend/utils/adt/lockfuncs.c	2009-07-30 11:40:19.000000000 +0200
*************** pg_advisory_lock_int8(PG_FUNCTION_ARGS)
*** 337,343 ****
  
  	SET_LOCKTAG_INT64(tag, key);
  
! 	(void) LockAcquire(&tag, ExclusiveLock, true, false);
  
  	PG_RETURN_VOID();
  }
--- 337,343 ----
  
  	SET_LOCKTAG_INT64(tag, key);
  
! 	(void) LockAcquire(&tag, ExclusiveLock, true, false, INFINITE_TIMEOUT);
  
  	PG_RETURN_VOID();
  }
*************** pg_advisory_lock_shared_int8(PG_FUNCTION
*** 353,359 ****
  
  	SET_LOCKTAG_INT64(tag, key);
  
! 	(void) LockAcquire(&tag, ShareLock, true, false);
  
  	PG_RETURN_VOID();
  }
--- 353,359 ----
  
  	SET_LOCKTAG_INT64(tag, key);
  
! 	(void) LockAcquire(&tag, ShareLock, true, false, INFINITE_TIMEOUT);
  
  	PG_RETURN_VOID();
  }
*************** pg_try_advisory_lock_int8(PG_FUNCTION_AR
*** 372,378 ****
  
  	SET_LOCKTAG_INT64(tag, key);
  
! 	res = LockAcquire(&tag, ExclusiveLock, true, true);
  
  	PG_RETURN_BOOL(res != LOCKACQUIRE_NOT_AVAIL);
  }
--- 372,378 ----
  
  	SET_LOCKTAG_INT64(tag, key);
  
! 	res = LockAcquire(&tag, ExclusiveLock, true, true, INFINITE_TIMEOUT);
  
  	PG_RETURN_BOOL(res != LOCKACQUIRE_NOT_AVAIL);
  }
*************** pg_try_advisory_lock_shared_int8(PG_FUNC
*** 391,397 ****
  
  	SET_LOCKTAG_INT64(tag, key);
  
! 	res = LockAcquire(&tag, ShareLock, true, true);
  
  	PG_RETURN_BOOL(res != LOCKACQUIRE_NOT_AVAIL);
  }
--- 391,397 ----
  
  	SET_LOCKTAG_INT64(tag, key);
  
! 	res = LockAcquire(&tag, ShareLock, true, true, INFINITE_TIMEOUT);
  
  	PG_RETURN_BOOL(res != LOCKACQUIRE_NOT_AVAIL);
  }
*************** pg_advisory_lock_int4(PG_FUNCTION_ARGS)
*** 446,452 ****
  
  	SET_LOCKTAG_INT32(tag, key1, key2);
  
! 	(void) LockAcquire(&tag, ExclusiveLock, true, false);
  
  	PG_RETURN_VOID();
  }
--- 446,452 ----
  
  	SET_LOCKTAG_INT32(tag, key1, key2);
  
! 	(void) LockAcquire(&tag, ExclusiveLock, true, false, INFINITE_TIMEOUT);
  
  	PG_RETURN_VOID();
  }
*************** pg_advisory_lock_shared_int4(PG_FUNCTION
*** 463,469 ****
  
  	SET_LOCKTAG_INT32(tag, key1, key2);
  
! 	(void) LockAcquire(&tag, ShareLock, true, false);
  
  	PG_RETURN_VOID();
  }
--- 463,469 ----
  
  	SET_LOCKTAG_INT32(tag, key1, key2);
  
! 	(void) LockAcquire(&tag, ShareLock, true, false, INFINITE_TIMEOUT);
  
  	PG_RETURN_VOID();
  }
*************** pg_try_advisory_lock_int4(PG_FUNCTION_AR
*** 483,489 ****
  
  	SET_LOCKTAG_INT32(tag, key1, key2);
  
! 	res = LockAcquire(&tag, ExclusiveLock, true, true);
  
  	PG_RETURN_BOOL(res != LOCKACQUIRE_NOT_AVAIL);
  }
--- 483,489 ----
  
  	SET_LOCKTAG_INT32(tag, key1, key2);
  
! 	res = LockAcquire(&tag, ExclusiveLock, true, true, INFINITE_TIMEOUT);
  
  	PG_RETURN_BOOL(res != LOCKACQUIRE_NOT_AVAIL);
  }
*************** pg_try_advisory_lock_shared_int4(PG_FUNC
*** 503,509 ****
  
  	SET_LOCKTAG_INT32(tag, key1, key2);
  
! 	res = LockAcquire(&tag, ShareLock, true, true);
  
  	PG_RETURN_BOOL(res != LOCKACQUIRE_NOT_AVAIL);
  }
--- 503,509 ----
  
  	SET_LOCKTAG_INT32(tag, key1, key2);
  
! 	res = LockAcquire(&tag, ShareLock, true, true, INFINITE_TIMEOUT);
  
  	PG_RETURN_BOOL(res != LOCKACQUIRE_NOT_AVAIL);
  }
diff -dcrpN pgsql.orig/src/backend/utils/misc/guc.c pgsql/src/backend/utils/misc/guc.c
*** pgsql.orig/src/backend/utils/misc/guc.c	2009-07-23 14:40:12.000000000 +0200
--- pgsql/src/backend/utils/misc/guc.c	2009-07-30 10:08:25.000000000 +0200
*************** static struct config_int ConfigureNamesI
*** 1539,1544 ****
--- 1539,1554 ----
  	},
  
  	{
+ 		{"lock_timeout", PGC_USERSET, CLIENT_CONN_STATEMENT,
+ 			gettext_noop("Sets the maximum time to wait for any lock taken by a statement."),
+ 			gettext_noop("A value of 0 turns off the timeout."),
+ 			GUC_UNIT_MS
+ 		},
+ 		&LockTimeout,
+ 		0, 0, INT_MAX, NULL, NULL
+ 	},
+ 
+ 	{
  		{"vacuum_freeze_min_age", PGC_USERSET, CLIENT_CONN_STATEMENT,
  			gettext_noop("Minimum age at which VACUUM should freeze a table row."),
  			NULL
diff -dcrpN pgsql.orig/src/include/access/multixact.h pgsql/src/include/access/multixact.h
*** pgsql.orig/src/include/access/multixact.h	2009-01-02 17:15:37.000000000 +0100
--- pgsql/src/include/access/multixact.h	2009-07-30 12:25:55.000000000 +0200
*************** extern bool MultiXactIdIsRunning(MultiXa
*** 48,53 ****
--- 48,54 ----
  extern bool MultiXactIdIsCurrent(MultiXactId multi);
  extern void MultiXactIdWait(MultiXactId multi);
  extern bool ConditionalMultiXactIdWait(MultiXactId multi);
+ extern bool TimedMultiXactIdWait(MultiXactId multi);
  extern void MultiXactIdSetOldestMember(void);
  extern int	GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids);
  
diff -dcrpN pgsql.orig/src/include/storage/lmgr.h pgsql/src/include/storage/lmgr.h
*** pgsql.orig/src/include/storage/lmgr.h	2009-06-13 18:25:05.000000000 +0200
--- pgsql/src/include/storage/lmgr.h	2009-07-30 12:25:19.000000000 +0200
*************** extern void RelationInitLockInfo(Relatio
*** 25,30 ****
--- 25,31 ----
  /* Lock a relation */
  extern void LockRelationOid(Oid relid, LOCKMODE lockmode);
  extern bool ConditionalLockRelationOid(Oid relid, LOCKMODE lockmode);
+ extern bool TimedLockRelationOid(Oid relid, LOCKMODE lockmode);
  extern void UnlockRelationId(LockRelId *relid, LOCKMODE lockmode);
  extern void UnlockRelationOid(Oid relid, LOCKMODE lockmode);
  
*************** extern void UnlockPage(Relation relation
*** 48,53 ****
--- 49,56 ----
  extern void LockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode);
  extern bool ConditionalLockTuple(Relation relation, ItemPointer tid,
  					 LOCKMODE lockmode);
+ extern bool TimedLockTuple(Relation relation, ItemPointer tid,
+ 					 LOCKMODE lockmode);
  extern void UnlockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode);
  
  /* Lock an XID (used to wait for a transaction to finish) */
*************** extern void XactLockTableInsert(Transact
*** 55,60 ****
--- 58,64 ----
  extern void XactLockTableDelete(TransactionId xid);
  extern void XactLockTableWait(TransactionId xid);
  extern bool ConditionalXactLockTableWait(TransactionId xid);
+ extern bool TimedXactLockTableWait(TransactionId xid);
  
  /* Lock a VXID (used to wait for a transaction to finish) */
  extern void VirtualXactLockTableInsert(VirtualTransactionId vxid);
diff -dcrpN pgsql.orig/src/include/storage/lock.h pgsql/src/include/storage/lock.h
*** pgsql.orig/src/include/storage/lock.h	2009-04-14 10:28:46.000000000 +0200
--- pgsql/src/include/storage/lock.h	2009-07-30 11:28:44.000000000 +0200
*************** extern uint32 LockTagHashCode(const LOCK
*** 476,482 ****
  extern LockAcquireResult LockAcquire(const LOCKTAG *locktag,
  			LOCKMODE lockmode,
  			bool sessionLock,
! 			bool dontWait);
  extern bool LockRelease(const LOCKTAG *locktag,
  			LOCKMODE lockmode, bool sessionLock);
  extern void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks);
--- 476,483 ----
  extern LockAcquireResult LockAcquire(const LOCKTAG *locktag,
  			LOCKMODE lockmode,
  			bool sessionLock,
! 			bool dontWait,
! 			int lock_timeout);
  extern bool LockRelease(const LOCKTAG *locktag,
  			LOCKMODE lockmode, bool sessionLock);
  extern void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks);
diff -dcrpN pgsql.orig/src/include/storage/lwlock.h pgsql/src/include/storage/lwlock.h
*** pgsql.orig/src/include/storage/lwlock.h	2009-03-04 10:27:30.000000000 +0100
--- pgsql/src/include/storage/lwlock.h	2009-07-30 11:36:44.000000000 +0200
*************** extern bool Trace_lwlocks;
*** 92,97 ****
--- 92,98 ----
  extern LWLockId LWLockAssign(void);
  extern void LWLockAcquire(LWLockId lockid, LWLockMode mode);
  extern bool LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode);
+ extern bool LWLockTimedAcquire(LWLockId lockid, LWLockMode mode);
  extern void LWLockRelease(LWLockId lockid);
  extern void LWLockReleaseAll(void);
  extern bool LWLockHeldByMe(LWLockId lockid);
diff -dcrpN pgsql.orig/src/include/storage/pg_sema.h pgsql/src/include/storage/pg_sema.h
*** pgsql.orig/src/include/storage/pg_sema.h	2009-01-02 17:15:39.000000000 +0100
--- pgsql/src/include/storage/pg_sema.h	2009-07-30 10:36:19.000000000 +0200
*************** extern void PGSemaphoreUnlock(PGSemaphor
*** 80,83 ****
--- 80,86 ----
  /* Lock a semaphore only if able to do so without blocking */
  extern bool PGSemaphoreTryLock(PGSemaphore sema);
  
+ /* Lock a semaphore only if able to do so under the lock_timeout */
+ extern bool PGSemaphoreTimedLock(PGSemaphore sema, bool interruptOK);
+ 
  #endif   /* PG_SEMA_H */
diff -dcrpN pgsql.orig/src/include/storage/proc.h pgsql/src/include/storage/proc.h
*** pgsql.orig/src/include/storage/proc.h	2009-02-26 12:23:28.000000000 +0100
--- pgsql/src/include/storage/proc.h	2009-07-30 11:15:01.000000000 +0200
*************** typedef struct PROC_HDR
*** 146,155 ****
--- 146,158 ----
   */
  #define NUM_AUXILIARY_PROCS		3
  
+ /* LockTimeout value meaning "wait forever" */
+ #define INFINITE_TIMEOUT		0
  
  /* configurable options */
  extern int	DeadlockTimeout;
  extern int	StatementTimeout;
+ extern int	LockTimeout;
  extern bool log_lock_waits;
  
  extern volatile bool cancel_from_timeout;
*************** extern bool HaveNFreeProcs(int n);
*** 168,174 ****
  extern void ProcReleaseLocks(bool isCommit);
  
  extern void ProcQueueInit(PROC_QUEUE *queue);
! extern int	ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable);
  extern PGPROC *ProcWakeup(PGPROC *proc, int waitStatus);
  extern void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock);
  extern void LockWaitCancel(void);
--- 171,177 ----
  extern void ProcReleaseLocks(bool isCommit);
  
  extern void ProcQueueInit(PROC_QUEUE *queue);
! extern int	ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable, int lock_timeout);
  extern PGPROC *ProcWakeup(PGPROC *proc, int waitStatus);
  extern void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock);
  extern void LockWaitCancel(void);