* Andres Freund (and...@2ndquadrant.com) wrote:
> Seems more consistent with the rest of the code too. But anyway, I am
> fine with fixing it either way.

Patch attached which mirrors what GetRunningTransactionData() (the other
function called from LogStandbySnapshot()) does, more or less: it uses a
static variable to keep track of the memory allocated for the data being
returned to the caller, sized modestly up front and doubled only when we
actually run out of room.  (A stand-alone sketch of this pattern is
included after the patch, for anyone skimming.)
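
For reference, since the sizing arithmetic in the patch depends on it, the
structs involved look roughly like this (quoting storage/standby.h from
memory, so double-check the header before relying on the details;
TransactionId and Oid are both 32-bit types there):

typedef struct xl_standby_lock
{
	TransactionId xid;			/* xid of the AccessExclusiveLock holder */
	Oid			dbOid;			/* database of the locked relation */
	Oid			relOid;			/* OID of the locked relation */
} xl_standby_lock;

typedef struct xl_standby_locks
{
	int			nlocks;			/* number of entries in locks array */
	xl_standby_lock locks[1];	/* VARIABLE LENGTH ARRAY */
} xl_standby_locks;

The trailing one-element array is why the malloc/realloc calls in the patch
subtract one xl_standby_lock when sizing the workspace.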

Looks like this will need to be back-patched to 9.0.  Barring objections
or better ideas, I'll do that in a few hours.

        Thanks,

                Stephen
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 8cd871f..1fb7884
*** a/src/backend/storage/lmgr/lock.c
--- b/src/backend/storage/lmgr/lock.c
*************** GetLockStatusData(void)
*** 3401,3415 ****
   * Returns a list of currently held AccessExclusiveLocks, for use
   * by GetRunningTransactionData().
   */
  xl_standby_lock *
  GetRunningTransactionLocks(int *nlocks)
  {
  	PROCLOCK   *proclock;
  	HASH_SEQ_STATUS seqstat;
  	int			i;
  	int			index;
! 	int			els;
! 	xl_standby_lock *accessExclusiveLocks;
  
  	/*
  	 * Acquire lock on the entire shared lock data structure.
--- 3401,3445 ----
   * Returns a list of currently held AccessExclusiveLocks, for use
   * by GetRunningTransactionData().
   */
+ #define INIT_STANDBY_LOCK_ARR_SIZE 32	/* Initial return workspace size */
  xl_standby_lock *
  GetRunningTransactionLocks(int *nlocks)
  {
+ 	/* result workspace */
+ 	static xl_standby_locks *accessExclusiveLocks;
+ 
  	PROCLOCK   *proclock;
  	HASH_SEQ_STATUS seqstat;
  	int			i;
  	int			index;
! 
! 	/*
! 	 * Allocating space for all locks is overkill.  We only need space for
! 	 * access exclusive locks, but we can't count the number of locks without
! 	 * locking all of the lock partitions.  We don't really want to do
! 	 * allocations while holding all those locks, so instead do a bit of
! 	 * allocation up front, to hopefully avoid having to do them later.
! 	 */
! 	if (accessExclusiveLocks == NULL)
! 	{
! 		/*
! 		 * First call
! 		 *
! 		 * Note: we track the allocated space for locks in ->nlocks, not the
! 		 * actual number of locks found; that is handled by "index" below.
! 		 * Also, allocating the structure itself gives us space for one
! 		 * entry, so discount that from the malloc request.
! 		 */
! 		accessExclusiveLocks = (xl_standby_locks *)
! 			malloc(sizeof(xl_standby_locks) +
! 			   ((INIT_STANDBY_LOCK_ARR_SIZE - 1) * sizeof(xl_standby_lock)));
! 		if (accessExclusiveLocks == NULL)
! 			ereport(ERROR,
! 					(errcode(ERRCODE_OUT_OF_MEMORY),
! 					 errmsg("out of memory")));
! 
! 		accessExclusiveLocks->nlocks = INIT_STANDBY_LOCK_ARR_SIZE;
! 	}
  
  	/*
  	 * Acquire lock on the entire shared lock data structure.
*************** GetRunningTransactionLocks(int *nlocks)
*** 3419,3433 ****
  	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
  		LWLockAcquire(FirstLockMgrLock + i, LW_SHARED);
  
- 	/* Now we can safely count the number of proclocks */
- 	els = hash_get_num_entries(LockMethodProcLockHash);
- 
- 	/*
- 	 * Allocating enough space for all locks in the lock table is overkill,
- 	 * but it's more convenient and faster than having to enlarge the array.
- 	 */
- 	accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
- 
  	/* Now scan the tables to copy the data */
  	hash_seq_init(&seqstat, LockMethodProcLockHash);
  
--- 3449,3454 ----
*************** GetRunningTransactionLocks(int *nlocks)
*** 3459,3467 ****
  			if (!TransactionIdIsValid(xid))
  				continue;
  
! 			accessExclusiveLocks[index].xid = xid;
! 			accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
! 			accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
  
  			index++;
  		}
--- 3480,3511 ----
  			if (!TransactionIdIsValid(xid))
  				continue;
  
! 			/*
! 			 * Check if we need to allocate more space in our workspace due to
! 			 * the number of access exclusive locks actually found.  If we do
! 			 * run out, double the size of our return area to hopefully avoid
! 			 * having to do this again any time soon.
! 			 */
! 			if (index >= accessExclusiveLocks->nlocks)
! 			{
! 				/* use a temp so the old allocation survives a failed realloc */
! 				xl_standby_locks *newlocks;
! 
! 				newlocks = (xl_standby_locks *)
! 					realloc(accessExclusiveLocks,
! 							sizeof(xl_standby_locks) +
! 							(((accessExclusiveLocks->nlocks * 2) - 1) *
! 							 sizeof(xl_standby_lock)));
! 				if (newlocks == NULL)
! 					ereport(ERROR,
! 							(errcode(ERRCODE_OUT_OF_MEMORY),
! 							 errmsg("out of memory")));
! 				newlocks->nlocks *= 2;
! 				accessExclusiveLocks = newlocks;
! 			}
! 			accessExclusiveLocks->locks[index].xid = xid;
! 			accessExclusiveLocks->locks[index].dbOid = lock->tag.locktag_field1;
! 			accessExclusiveLocks->locks[index].relOid = lock->tag.locktag_field2;
  
  			index++;
  		}
*************** GetRunningTransactionLocks(int *nlocks)
*** 3477,3484 ****
  	for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
  		LWLockRelease(FirstLockMgrLock + i);
  
  	*nlocks = index;
! 	return accessExclusiveLocks;
  }
  
  /* Provide the textual name of any lock mode */
--- 3521,3529 ----
  	for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
  		LWLockRelease(FirstLockMgrLock + i);
  
+ 	/* Pass back out the number of locks actually found. */
  	*nlocks = index;
! 	return accessExclusiveLocks->locks;
  }
  
  /* Provide the textual name of any lock mode */
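
In case it helps review, here is a small stand-alone sketch of the pattern
the patch uses.  This is illustrative only: the struct, names, and sizes
below are made up, and plain fprintf/exit stands in for ereport.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for xl_standby_lock */
typedef struct demo_lock
{
	unsigned int xid;
	unsigned int dbOid;
	unsigned int relOid;
} demo_lock;

#define INIT_WORKSPACE_SIZE 4	/* deliberately tiny, to force regrowth */

/* Static workspace, kept across calls just like in the patch */
static demo_lock *workspace = NULL;
static int	workspace_size = 0;

/*
 * Fill the static workspace with 'count' dummy entries, growing it by
 * doubling whenever it runs out of room, and return it to the caller.
 */
static demo_lock *
collect_locks(int count, int *nlocks)
{
	int			i;

	if (workspace == NULL)
	{
		/* First call: allocate a modest initial workspace. */
		workspace_size = INIT_WORKSPACE_SIZE;
		workspace = malloc(workspace_size * sizeof(demo_lock));
		if (workspace == NULL)
		{
			fprintf(stderr, "out of memory\n");
			exit(1);
		}
	}

	for (i = 0; i < count; i++)
	{
		if (i >= workspace_size)
		{
			/* Double the workspace; keep the old pointer until realloc succeeds. */
			demo_lock  *newspace;

			newspace = realloc(workspace,
							   workspace_size * 2 * sizeof(demo_lock));
			if (newspace == NULL)
			{
				fprintf(stderr, "out of memory\n");
				exit(1);
			}
			workspace = newspace;
			workspace_size *= 2;
		}

		/* Dummy data standing in for the xid/dbOid/relOid copied in the patch */
		workspace[i].xid = (unsigned int) (100 + i);
		workspace[i].dbOid = 1;
		workspace[i].relOid = (unsigned int) (16384 + i);
	}

	*nlocks = count;
	return workspace;
}

int
main(void)
{
	int			nlocks;
	demo_lock  *locks = collect_locks(10, &nlocks);

	printf("returned %d entries, workspace now holds %d\n",
		   nlocks, workspace_size);
	printf("last entry: xid=%u relOid=%u\n",
		   locks[nlocks - 1].xid, locks[nlocks - 1].relOid);
	return 0;
}

Note the temporary pointer for realloc's result, so the existing allocation
isn't leaked if the call fails.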
