diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index c895876..91e3924 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -254,6 +254,20 @@ static HTAB *LockMethodLockHash;
 static HTAB *LockMethodProcLockHash;
 static HTAB *LockMethodLocalHash;
 
+/* Initial size of local lock hash */
+#define LOCKMETHODLOCALHASH_INIT_SIZE 16
+
+/*
+ * Attempt to shrink the LockMethodLocalHash after this many calls to
+ * LockReleaseAll()
+ */
+#define LOCKMETHODLOCALHASH_SIZE_CHECK_FREQ 10
+
+/*
+ * Counters to track bloat in the LockMethodLocalHash table
+ */
+static unsigned int lock_release_count = 0;
+static uint64 locks_released = 0;
 
 /* private state for error cleanup */
 static LOCALLOCK *StrongLockInProgress;
@@ -339,6 +353,7 @@ PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
 #endif							/* not LOCK_DEBUG */
 
 
+static void CreateLocalLockHash(long size, bool copyOldLocks);
 static uint32 proclock_hash(const void *key, Size keysize);
 static void RemoveLocalLock(LOCALLOCK *locallock);
 static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
@@ -441,17 +456,66 @@ InitLocks(void)
 	 * ought to be empty in the postmaster, but for safety let's zap it.)
 	 */
 	if (LockMethodLocalHash)
+	{
 		hash_destroy(LockMethodLocalHash);
+		LockMethodLocalHash = NULL;
+	}
+
+	CreateLocalLockHash(LOCKMETHODLOCALHASH_INIT_SIZE, false);
+}
+
+/*
+ * CreateLocalLockHash
+ *		Build or rebuild LockMethodLocalHash.  If copyOldLocks is true, move
+ *		the locks into the new table and destroy the old one.  NB: this
+ *		relocates the LOCALLOCKs; no outside pointers to them may survive.
+ */
+static void
+CreateLocalLockHash(long size, bool copyOldLocks)
+{
+	HTAB	   *htab;
+	HASHCTL		info;
 
 	info.keysize = sizeof(LOCALLOCKTAG);
 	info.entrysize = sizeof(LOCALLOCK);
 
-	LockMethodLocalHash = hash_create("LOCALLOCK hash",
-									  16,
-									  &info,
-									  HASH_ELEM | HASH_BLOBS);
-}
+	htab = hash_create("LOCALLOCK hash", size, &info, HASH_ELEM | HASH_BLOBS);
+
+	if (copyOldLocks)
+	{
+		HASH_SEQ_STATUS status;
+		LOCALLOCK  *locallock;
+
+		hash_seq_init(&status, LockMethodLocalHash);
+
+		/* scan over the old table and add all the locks into the new table */
+		while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+		{
+			LOCALLOCK  *new_lock;
+			bool		found;
+
+			new_lock = (LOCALLOCK *) hash_search(htab,
+												 (void *) &locallock->tag,
+												 HASH_ENTER, &found);
 
+			Assert(!found);
+			memcpy(new_lock, locallock, sizeof(LOCALLOCK));
+		}
+
+		hash_destroy(LockMethodLocalHash);
+	}
+	else
+	{
+		/*
+		 * Ensure that if not copying old locks that the table contains no
+		 * locks.
+		 */
+		Assert(LockMethodLocalHash == NULL ||
+			   hash_get_num_entries(LockMethodLocalHash) == 0);
+	}
+
+	LockMethodLocalHash = htab;
+}
 
 /*
  * Fetch the lock method table associated with a given lock
@@ -2097,6 +2161,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 	PROCLOCK   *proclock;
 	int			partition;
 	bool		have_fast_path_lwlock = false;
+	long		total_locks;
 
 	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
 		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
@@ -2118,6 +2183,8 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 
 	numLockModes = lockMethodTable->numLockModes;
 
+	total_locks = hash_get_num_entries(LockMethodLocalHash);
+
 	/*
 	 * First we run through the locallock table and get rid of unwanted
 	 * entries, then we scan the process's proclocks and get rid of those. We
@@ -2349,6 +2416,48 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 		LWLockRelease(partitionLock);
 	}							/* loop over partitions */
 
+	/* track the average number of locks released per LockReleaseAll() */
+	locks_released += total_locks;
+	lock_release_count++;
+
+	/* determine if it's worth shrinking the LockMethodLocalHash table */
+	if (lock_release_count >= LOCKMETHODLOCALHASH_SIZE_CHECK_FREQ)
+	{
+		long		avglocks = (long) (locks_released / lock_release_count);
+
+		/*
+		 * The hash_seq_search can become inefficient when the hash table has
+		 * grown significantly larger than the default size due to the backend
+		 * having run queries which obtained a large number of locks at once.
+		 * Here we'll check for that and shrink the table if we deem it a
+		 * worthwhile thing to do.
+		 *
+		 * We need only bother checking this if the hash_seq_search is
+		 * possibly becoming inefficient.  We check this by looking if the
+		 * curBucket is larger than the initial size of the table.  We then
+		 * only bother shrinking the table if the average locks for the
+		 * previous few transactions is lower than half this value.
+		 */
+		if (status.curBucket > LOCKMETHODLOCALHASH_INIT_SIZE &&
+			avglocks < status.curBucket / 2)
+		{
+			long		newsize = LOCKMETHODLOCALHASH_INIT_SIZE;
+
+			while (newsize < avglocks)
+				newsize *= 2;
+
+			/*
+			 * If we're releasing all locks then the table will be empty, so
+			 * no need to copy out the old locks into the new table.
+			 */
+			CreateLocalLockHash(newsize, !allLocks);
+		}
+
+		/* Reset the counters */
+		locks_released = 0;
+		lock_release_count = 0;
+	}
+
 #ifdef LOCK_DEBUG
 	if (*(lockMethodTable->trace_flag))
 		elog(LOG, "LockReleaseAll done");
