Here is another preliminary result I would like to share. As always,
you will find the corresponding patch attached. It is still of
work-in-progress quality.

The idea was suggested by a colleague of mine, Aleksander Lebedev.

The freeList is partitioned as in the "no lock" patch. When there are
not enough free items in a freeList, we borrow AN items from a global
list. When a freeList becomes too large, we return AN items back to
the global list. The global list itself is partitioned into PN
partitions, each protected by its own spinlock.

This way we get less lock contention than in the "lwlock" or "spinlock
array" versions, since we borrow multiple free elements at a time
instead of one. Also, in the worst case only AN*(NUM_LOCK_PARTITIONS-1)
free items go unused, instead of
(Total/NUM_LOCK_PARTITIONS)*(NUM_LOCK_PARTITIONS-1).
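
To make the scheme more concrete, here is a simplified, standalone
sketch of the borrow/return paths. It is only an illustration: pthread
spinlocks stand in for slock_t, lock initialization and the
nentries/totalAllocated bookkeeping are omitted, and all names (AN, PN,
elem_t, ...) are made up. The attached patch contains the actual
dynahash code.

#include <pthread.h>
#include <stddef.h>

#define AN 16                   /* items moved per borrow/return */
#define PN 16                   /* number of global list partitions */

typedef struct elem { struct elem *link; } elem_t;

static pthread_spinlock_t global_lock[PN];      /* init omitted */
static elem_t *global_free_list[PN];

/* Borrow up to AN items from one global partition into a local list.
 * Returns the number of items borrowed (0 if the partition is empty). */
static int
borrow(elem_t **local, int part)
{
	elem_t *first, *last;
	int     n = 1;

	pthread_spin_lock(&global_lock[part]);
	first = global_free_list[part];
	if (first == NULL)
	{
		pthread_spin_unlock(&global_lock[part]);
		return 0;
	}
	/* walk to the AN-th element, or to the tail if it comes first */
	for (last = first; n < AN && last->link != NULL; last = last->link)
		n++;
	global_free_list[part] = last->link;
	pthread_spin_unlock(&global_lock[part]);

	/* splice the borrowed chain onto the caller's local freeList */
	last->link = *local;
	*local = first;
	return n;
}

/* Return the first AN items of a local list to a global partition.
 * The caller guarantees the local list holds at least AN items. */
static void
give_back(elem_t **local, int part)
{
	elem_t *first = *local, *last = first;
	int     n;

	for (n = 1; n < AN; n++)
		last = last->link;
	*local = last->link;

	pthread_spin_lock(&global_lock[part]);
	last->link = global_free_list[part];
	global_free_list[part] = first;
	pthread_spin_unlock(&global_lock[part]);
}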

On a 60-core server, TPS depends on AN and PN as follows (three
benchmark runs per combination):

       |        |        |        |        |        |        
       | AN = 1 | AN = 2 | AN = 4 | AN = 8 | AN =16 | AN =32 
-------|--------|--------|--------|--------|--------|--------
       |  733.0 | 1120.6 | 1605.5 | 1842.5 | 1545.5 | 1237.0 
PN = 1 |  740.3 | 1127.0 | 1634.2 | 1800.8 | 1573.5 | 1245.1 
       |  742.9 | 1102.1 | 1647.2 | 1853.6 | 1533.4 | 1251.9 
-------|--------|--------|--------|--------|--------|--------
       | 1052.0 | 1438.1 | 1755.6 | 1981.0 | 2022.0 | 1816.8 
PN = 2 | 1044.8 | 1453.1 | 1784.0 | 1958.3 | 2033.2 | 1819.2 
       | 1028.7 | 1419.8 | 1809.2 | 1981.2 | 2028.2 | 1790.2 
-------|--------|--------|--------|--------|--------|--------
       | 1182.0 | 1521.5 | 1813.2 | 1932.6 | 2035.2 | 1948.4 
PN = 4 | 1212.4 | 1535.4 | 1816.8 | 1927.0 | 2018.7 | 2014.6 
       | 1189.4 | 1528.9 | 1816.9 | 1942.6 | 2011.9 | 2018.3 
-------|--------|--------|--------|--------|--------|--------
       | 1148.1 | 1522.2 | 1795.4 | 1926.6 | 2031.7 | 2015.6 
PN = 8 | 1175.6 | 1529.4 | 1807.6 | 1913.5 | 2007.3 | 2062.0 
       | 1169.9 | 1528.0 | 1796.3 | 1926.0 | 2011.1 | 2042.8 
-------|--------|--------|--------|--------|--------|--------
       | 1117.7 | 1491.0 | 1803.9 | 1925.3 | 2029.4 | 2056.2 
PN =16 | 1132.8 | 1481.0 | 1809.6 | 1968.1 | 2033.8 | 2068.5 
       | 1131.4 | 1481.8 | 1819.4 | 1946.2 | 2071.1 | 2073.8 

AN = GLOBAL_FREE_LIST_ALLOC_NUMBER
PN = GLOBAL_FREE_LIST_PARTITIONS_NUM

There is no performance degradation on a Core i7. Increasing PN or AN
any further does not gain any more TPS.

As you can see, this version is about 30% faster than "lwlock" or
"spinlock array" and 3.1 times faster than master. Still, it is about
2.5 times slower than the "no locks" version, which I find frustrating.

Next I will try to speed this version up by modifying the
global_free_list_* procedures, since the current implementations are
not the most efficient ones. I am also planning to explore approaches
that involve lock-free algorithms.
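
Just to illustrate the direction I mean by "lock free", below is a
minimal Treiber-stack sketch in C11 atomics. It is NOT part of the
attached patch, and a naive pop like this one suffers from the ABA
problem, so a real implementation would need version tags, hazard
pointers or something similar.

#include <stdatomic.h>
#include <stddef.h>

typedef struct node { struct node *link; } node_t;

static _Atomic(node_t *) free_top;

static void
lf_push(node_t *n)
{
	node_t *old = atomic_load(&free_top);

	do {
		n->link = old;          /* 'old' is refreshed on CAS failure */
	} while (!atomic_compare_exchange_weak(&free_top, &old, n));
}

static node_t *
lf_pop(void)
{
	node_t *old = atomic_load(&free_top);

	/* reading old->link below is exactly where ABA bites */
	while (old != NULL &&
		   !atomic_compare_exchange_weak(&free_top, &old, old->link))
		;
	return old;
}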

I would like to hear your opinion on this approach. For instance, can
we afford to leave AN*(NUM_LOCK_PARTITIONS-1) items unused in the worst
case, or is that unacceptable for the same reason we cannot accept
(Total / NUM_LOCK_PARTITIONS) * (NUM_LOCK_PARTITIONS-1)?
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 78f15f0..91fcc05 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -265,7 +265,7 @@ InitShmemIndex(void)
  */
 HTAB *
 ShmemInitHash(const char *name, /* table string name for shmem index */
-			  long init_size,	/* initial table size */
+			  long init_size,	/* initial table size */ // AALEKSEEV: is ignored, refactor!
 			  long max_size,	/* max size of the table */
 			  HASHCTL *infoP,	/* info about key and bucket size */
 			  int hash_flags)	/* info about infoP */
@@ -299,7 +299,7 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
 	/* Pass location of hashtable header to hash_create */
 	infoP->hctl = (HASHHDR *) location;
 
-	return hash_create(name, init_size, infoP, hash_flags);
+	return hash_create(name, max_size, infoP, hash_flags);
 }
 
 /*
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index eacffc4..8375c3b 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -87,6 +87,8 @@
 #include "access/xact.h"
 #include "storage/shmem.h"
 #include "storage/spin.h"
+#include "storage/lock.h"
+#include "storage/lwlock.h"
 #include "utils/dynahash.h"
 #include "utils/memutils.h"
 
@@ -118,6 +120,13 @@ typedef HASHELEMENT *HASHBUCKET;
 /* A hash segment is an array of bucket headers */
 typedef HASHBUCKET *HASHSEGMENT;
 
+// AALEKSEEV: TODO: comment, should be power of two
+#define GLOBAL_FREE_LIST_PARTITIONS_NUM 16
+#define GLOBAL_FREE_LIST_PARTITIONS_MASK (GLOBAL_FREE_LIST_PARTITIONS_NUM-1)
+
+// AALEKSEEV: TODO: comment
+#define GLOBAL_FREE_LIST_ALLOC_NUMBER 16
+
 /*
  * Header structure for a hash table --- contains all changeable info
  *
@@ -129,11 +138,20 @@ typedef HASHBUCKET *HASHSEGMENT;
 struct HASHHDR
 {
 	/* In a partitioned table, take this lock to touch nentries or freeList */
-	slock_t		mutex;			/* unused if not partitioned table */
+	slock_t		mutex[GLOBAL_FREE_LIST_PARTITIONS_NUM];			/* unused if not partitioned table */
+	HASHELEMENT * globalFreeList[GLOBAL_FREE_LIST_PARTITIONS_NUM];
+
+	// AALEKSEEV: fix comments
 
 	/* These fields change during entry addition/deletion */
-	long		nentries;		/* number of entries in hash table */
-	HASHELEMENT *freeList;		/* linked list of free elements */
+	/* number of entries in hash table */
+	long		nentries[NUM_LOCK_PARTITIONS];
+
+	// AALEKSEEV: TODO: comment
+	long		totalAllocated[NUM_LOCK_PARTITIONS];
+
+	/* linked list of free elements */
+	HASHELEMENT *freeList[NUM_LOCK_PARTITIONS];
 
 	/* These fields can change, but not in a partitioned table */
 	/* Also, dsize can't change in a shared table, even if unpartitioned */
@@ -166,6 +184,9 @@ struct HASHHDR
 
 #define IS_PARTITIONED(hctl)  ((hctl)->num_partitions != 0)
 
+// AALEKSEEV: add comment
+#define PARTITION_IDX(hctl, hashcode) (IS_PARTITIONED(hctl) ? LockHashPartition(hashcode) : 0)
+
 /*
  * Top control structure for a hashtable --- in a shared table, each backend
  * has its own copy (OK since no fields change at runtime)
@@ -219,10 +240,10 @@ static long hash_accesses,
  */
 static void *DynaHashAlloc(Size size);
 static HASHSEGMENT seg_alloc(HTAB *hashp);
-static bool element_alloc(HTAB *hashp, int nelem);
+static bool element_alloc(HTAB *hashp, int nelem, int glob_part_idx);
 static bool dir_realloc(HTAB *hashp);
 static bool expand_table(HTAB *hashp);
-static HASHBUCKET get_hash_entry(HTAB *hashp);
+static HASHBUCKET get_hash_entry(HTAB *hashp, int partition_idx);
 static void hdefault(HTAB *hashp);
 static int	choose_nelem_alloc(Size entrysize);
 static bool init_htab(HTAB *hashp, long nelem);
@@ -260,6 +281,109 @@ string_compare(const char *key1, const char *key2, Size keysize)
 	return strncmp(key1, key2, keysize - 1);
 }
 
+// AALEKSEEV: acquire elements - WE KNOW WE NEED THEM.
+// return true on success, false on failure
+static bool
+global_free_list_acquire_from_partition(HTAB* hashp, int partition_idx, int glob_partition_idx)
+{
+	int nallocated;
+	HASHELEMENT *firstElement,
+				*lastElement;
+	HASHHDR    *hctl = hashp->hctl;
+
+	Assert(IS_PARTITIONED(hctl));
+	Assert(hctl->freeList[partition_idx] == NULL);
+	Assert(hctl->nentries[partition_idx] == hctl->totalAllocated[partition_idx]);
+
+	SpinLockAcquire(&hctl->mutex[glob_partition_idx]);
+	firstElement = hctl->globalFreeList[glob_partition_idx];
+	if(!firstElement)
+	{
+		SpinLockRelease(&hctl->mutex[glob_partition_idx]);
+		return false;
+	}
+
+	nallocated = 1;
+	lastElement = firstElement;
+	while(nallocated < GLOBAL_FREE_LIST_ALLOC_NUMBER)
+	{
+		if(!lastElement->link)
+			break;
+
+		lastElement = lastElement->link;
+		nallocated++;
+	}
+
+	hctl->globalFreeList[glob_partition_idx] = lastElement->link;
+	SpinLockRelease(&hctl->mutex[glob_partition_idx]);
+
+	hctl->totalAllocated[partition_idx] += nallocated;
+	lastElement->link = hctl->freeList[partition_idx];
+	hctl->freeList[partition_idx] = firstElement;
+
+	return true;
+}
+
+static bool
+global_free_list_acquire(HTAB* hashp, int partition_idx)
+{
+	int idx,
+		start = partition_idx & GLOBAL_FREE_LIST_PARTITIONS_MASK;
+
+	idx = start;
+	do {
+		if(global_free_list_acquire_from_partition(hashp, partition_idx, idx))
+			return true;
+		idx = (idx + 1) & GLOBAL_FREE_LIST_PARTITIONS_MASK;
+	} while(idx != start);
+
+	return false;
+}
+
+// AALEKSEEV: release elements IF POSSIBLE.
+static void
+global_free_list_release(HTAB* hashp, int partition_idx)
+{
+	int nreleased;
+	int global_free_list_part = partition_idx & GLOBAL_FREE_LIST_PARTITIONS_MASK;
+	HASHELEMENT *releasedFirst,
+				*releasedLast;
+	HASHHDR    *hctl = hashp->hctl;
+
+	if(!IS_PARTITIONED(hctl))
+		return;
+
+	/* keep at least GLOBAL_FREE_LIST_ALLOC_NUMBER items in each freeList */
+	if(hctl->totalAllocated[partition_idx] <= GLOBAL_FREE_LIST_ALLOC_NUMBER)
+		return;
+
+	if(hctl->totalAllocated[partition_idx] - hctl->nentries[partition_idx] <= GLOBAL_FREE_LIST_ALLOC_NUMBER)
+		return;
+
+	Assert(hctl->freeList[partition_idx] != NULL);
+
+	releasedFirst = hctl->freeList[partition_idx];
+	releasedLast = releasedFirst;
+	nreleased = 1;
+
+	while(nreleased < GLOBAL_FREE_LIST_ALLOC_NUMBER)
+	{
+		Assert(releasedLast->link != NULL);
+
+		releasedLast = releasedLast->link;
+		nreleased++;
+	}
+
+	Assert(releasedLast->link != NULL);
+	hctl->freeList[partition_idx] = releasedLast->link;
+	hctl->totalAllocated[partition_idx] -= nreleased;
+
+	SpinLockAcquire(&hctl->mutex[global_free_list_part]);
+	releasedLast->link = hctl->globalFreeList[global_free_list_part];
+	hctl->globalFreeList[global_free_list_part] = releasedFirst;
+	SpinLockRelease(&hctl->mutex[global_free_list_part]);
+}
+
 
 /************************** CREATE ROUTINES **********************/
 
@@ -282,6 +406,7 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
 {
 	HTAB	   *hashp;
 	HASHHDR    *hctl;
+	int 		i, nelem_alloc;
 
 	/*
 	 * For shared hash tables, we have a local hash header (HTAB struct) that
@@ -408,7 +533,7 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
 		if (!hashp->hctl)
 			ereport(ERROR,
 					(errcode(ERRCODE_OUT_OF_MEMORY),
-					 errmsg("out of memory")));
+					 errmsg("out of memory (3)"))); // AALEKSEEV: fix string
 	}
 
 	hashp->frozen = false;
@@ -482,10 +607,31 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
 	if ((flags & HASH_SHARED_MEM) ||
 		nelem < hctl->nelem_alloc)
 	{
-		if (!element_alloc(hashp, (int) nelem))
-			ereport(ERROR,
-					(errcode(ERRCODE_OUT_OF_MEMORY),
-					 errmsg("out of memory")));
+		if(IS_PARTITIONED(hashp->hctl))
+		{
+			nelem_alloc = nelem / GLOBAL_FREE_LIST_PARTITIONS_NUM;
+			if(nelem_alloc == 0)
+				nelem_alloc = 1;
+
+			Assert(nelem_alloc > 0);
+			for(i = 0; i < GLOBAL_FREE_LIST_PARTITIONS_NUM; i++)
+				if (!element_alloc(hashp, nelem_alloc, i))
+					ereport(ERROR,
+							(errcode(ERRCODE_OUT_OF_MEMORY),
+							 errmsg("out of memory (1.1)"))); // AALEKSEEV: fix string
+
+
+			for(i = 0; i < NUM_LOCK_PARTITIONS; i++)
+				global_free_list_acquire(hashp, i);
+		}
+		else
+		{
+			if (!element_alloc(hashp, nelem, 0))
+				ereport(ERROR,
+						(errcode(ERRCODE_OUT_OF_MEMORY),
+						 errmsg("out of memory (1.2)"))); // AALEKSEEV: fix string
+
+		}
 	}
 
 	if (flags & HASH_FIXED_SIZE)
@@ -503,8 +649,9 @@ hdefault(HTAB *hashp)
 
 	MemSet(hctl, 0, sizeof(HASHHDR));
 
-	hctl->nentries = 0;
-	hctl->freeList = NULL;
+	// AALEKSEEV: redundant!
+	// hctl->nentries = 0;
+	// hctl->freeList = NULL;
 
 	hctl->dsize = DEF_DIRSIZE;
 	hctl->nsegs = 0;
@@ -572,12 +719,14 @@ init_htab(HTAB *hashp, long nelem)
 	HASHSEGMENT *segp;
 	int			nbuckets;
 	int			nsegs;
+	int i;
 
 	/*
 	 * initialize mutex if it's a partitioned table
 	 */
 	if (IS_PARTITIONED(hctl))
-		SpinLockInit(&hctl->mutex);
+		for(i = 0; i < GLOBAL_FREE_LIST_PARTITIONS_NUM; i++)
+			SpinLockInit(&(hctl->mutex[i]));
 
 	/*
 	 * Divide number of elements by the fill factor to determine a desired
@@ -648,7 +797,8 @@ init_htab(HTAB *hashp, long nelem)
 			"HIGH MASK       ", hctl->high_mask,
 			"LOW  MASK       ", hctl->low_mask,
 			"NSEGS           ", hctl->nsegs,
-			"NENTRIES        ", hctl->nentries);
+			// AALEKSEEV: fix this
+			"NENTRIES        ", hctl->nentries[0]);
 #endif
 	return true;
 }
@@ -769,7 +919,8 @@ hash_stats(const char *where, HTAB *hashp)
 			where, hashp->hctl->accesses, hashp->hctl->collisions);
 
 	fprintf(stderr, "hash_stats: entries %ld keysize %ld maxp %u segmentcount %ld\n",
-			hashp->hctl->nentries, (long) hashp->hctl->keysize,
+		// AALEKSEEV: fix this
+			hashp->hctl->nentries[0], (long) hashp->hctl->keysize,
 			hashp->hctl->max_bucket, hashp->hctl->nsegs);
 	fprintf(stderr, "%s: total accesses %ld total collisions %ld\n",
 			where, hash_accesses, hash_collisions);
@@ -863,6 +1014,7 @@ hash_search_with_hash_value(HTAB *hashp,
 	HASHBUCKET	currBucket;
 	HASHBUCKET *prevBucketPtr;
 	HashCompareFunc match;
+	int partition_idx = PARTITION_IDX(hctl, hashvalue);
 
 #if HASH_STATISTICS
 	hash_accesses++;
@@ -885,7 +1037,7 @@ hash_search_with_hash_value(HTAB *hashp,
 		 * order of these tests is to try to check cheaper conditions first.
 		 */
 		if (!IS_PARTITIONED(hctl) && !hashp->frozen &&
-			hctl->nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor &&
+			hctl->nentries[0] / (long) (hctl->max_bucket + 1) >= hctl->ffactor &&
 			!has_seq_scans(hashp))
 			(void) expand_table(hashp);
 	}
@@ -942,21 +1094,25 @@ hash_search_with_hash_value(HTAB *hashp,
 			if (currBucket != NULL)
 			{
 				/* if partitioned, must lock to touch nentries and freeList */
-				if (IS_PARTITIONED(hctl))
-					SpinLockAcquire(&hctl->mutex);
+				// AALEKSEEV: remove this
+				// if (IS_PARTITIONED(hctl))
+					// SpinLockAcquire(&hctl->mutex);
 
-				Assert(hctl->nentries > 0);
-				hctl->nentries--;
+				Assert(hctl->nentries[partition_idx] > 0);
+				hctl->nentries[partition_idx]--;
 
 				/* remove record from hash bucket's chain. */
 				*prevBucketPtr = currBucket->link;
 
 				/* add the record to the freelist for this table.  */
-				currBucket->link = hctl->freeList;
-				hctl->freeList = currBucket;
+				currBucket->link = hctl->freeList[partition_idx];
+				hctl->freeList[partition_idx] = currBucket;
 
-				if (IS_PARTITIONED(hctl))
-					SpinLockRelease(&hctl->mutex);
+				global_free_list_release(hashp, partition_idx);
+
+				// AALEKSEEV: remove this
+				// if (IS_PARTITIONED(hctl))
+					// SpinLockRelease(&hctl->mutex);
 
 				/*
 				 * better hope the caller is synchronizing access to this
@@ -982,7 +1138,7 @@ hash_search_with_hash_value(HTAB *hashp,
 				elog(ERROR, "cannot insert into frozen hashtable \"%s\"",
 					 hashp->tabname);
 
-			currBucket = get_hash_entry(hashp);
+			currBucket = get_hash_entry(hashp, partition_idx);
 			if (currBucket == NULL)
 			{
 				/* out of memory */
@@ -996,7 +1152,7 @@ hash_search_with_hash_value(HTAB *hashp,
 				else
 					ereport(ERROR,
 							(errcode(ERRCODE_OUT_OF_MEMORY),
-							 errmsg("out of memory")));
+							 errmsg("out of memory (2)"))); // AALEKSEEV: fix string
 			}
 
 			/* link into hashbucket chain */
@@ -1175,39 +1331,39 @@ hash_update_hash_key(HTAB *hashp,
  * create a new entry if possible
  */
 static HASHBUCKET
-get_hash_entry(HTAB *hashp)
+get_hash_entry(HTAB *hashp, int partition_idx)
 {
 	HASHHDR *hctl = hashp->hctl;
 	HASHBUCKET	newElement;
+	bool alloc_result;
 
 	for (;;)
 	{
 		/* if partitioned, must lock to touch nentries and freeList */
-		if (IS_PARTITIONED(hctl))
-			SpinLockAcquire(&hctl->mutex);
+		// if (IS_PARTITIONED(hctl))
+			// SpinLockAcquire(&hctl->mutex);
 
 		/* try to get an entry from the freelist */
-		newElement = hctl->freeList;
+		newElement = hctl->freeList[partition_idx];
 		if (newElement != NULL)
 			break;
 
 		/* no free elements.  allocate another chunk of buckets */
-		if (IS_PARTITIONED(hctl))
-			SpinLockRelease(&hctl->mutex);
+		if(IS_PARTITIONED(hctl))
+			alloc_result = global_free_list_acquire(hashp, partition_idx);
+		else
+			alloc_result = element_alloc(hashp, hctl->nelem_alloc, 0);
 
-		if (!element_alloc(hashp, hctl->nelem_alloc))
-		{
-			/* out of memory */
-			return NULL;
-		}
+		if(!alloc_result)
+			return NULL; /* out of memory */
 	}
 
 	/* remove entry from freelist, bump nentries */
-	hctl->freeList = newElement->link;
-	hctl->nentries++;
+	hctl->freeList[partition_idx] = newElement->link;
+	hctl->nentries[partition_idx]++;
 
-	if (IS_PARTITIONED(hctl))
-		SpinLockRelease(&hctl->mutex);
+	// if (IS_PARTITIONED(hctl))
+		// SpinLockRelease(&hctl->mutex);
 
 	return newElement;
 }
@@ -1218,11 +1374,21 @@ get_hash_entry(HTAB *hashp)
 long
 hash_get_num_entries(HTAB *hashp)
 {
+	int i;
+	long sum = hashp->hctl->nentries[0];
+	
 	/*
 	 * We currently don't bother with the mutex; it's only sensible to call
 	 * this function if you've got lock on all partitions of the table.
 	 */
-	return hashp->hctl->nentries;
+
+	if(!IS_PARTITIONED(hashp->hctl))
+		return sum;
+
+	for(i = 1; i < NUM_LOCK_PARTITIONS; i++)
+		sum += hashp->hctl->nentries[i];
+
+	return sum;
 }
 
 /*
@@ -1530,7 +1696,7 @@ seg_alloc(HTAB *hashp)
  * allocate some new elements and link them into the free list
  */
 static bool
-element_alloc(HTAB *hashp, int nelem)
+element_alloc(HTAB *hashp, int nelem, int glob_part_idx)
 {
 	HASHHDR *hctl = hashp->hctl;
 	Size		elementSize;
@@ -1562,15 +1728,23 @@ element_alloc(HTAB *hashp, int nelem)
 	}
 
 	/* if partitioned, must lock to touch freeList */
-	if (IS_PARTITIONED(hctl))
-		SpinLockAcquire(&hctl->mutex);
+	// if (IS_PARTITIONED(hctl))
+		// SpinLockAcquire(&hctl->mutex);
 
 	/* freelist could be nonempty if two backends did this concurrently */
-	firstElement->link = hctl->freeList;
-	hctl->freeList = prevElement;
+	if(IS_PARTITIONED(hctl))
+	{
+		firstElement->link = hctl->globalFreeList[glob_part_idx];
+		hctl->globalFreeList[glob_part_idx] = prevElement;
+	}
+	else
+	{
+		firstElement->link = hctl->freeList[0];
+		hctl->freeList[0] = prevElement;
+	}
 
-	if (IS_PARTITIONED(hctl))
-		SpinLockRelease(&hctl->mutex);
+	// if (IS_PARTITIONED(hctl))
+		// SpinLockRelease(&hctl->mutex);
 
 	return true;
 }
diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h
index ff34529..051f032 100644
--- a/src/include/storage/lwlock.h
+++ b/src/include/storage/lwlock.h
@@ -128,13 +128,14 @@ extern char *MainLWLockNames[];
  * having this file include lock.h or bufmgr.h would be backwards.
  */
 
-/* Number of partitions of the shared buffer mapping hashtable */
-#define NUM_BUFFER_PARTITIONS  128
-
 /* Number of partitions the shared lock tables are divided into */
-#define LOG2_NUM_LOCK_PARTITIONS  4
+#define LOG2_NUM_LOCK_PARTITIONS  4
 #define NUM_LOCK_PARTITIONS  (1 << LOG2_NUM_LOCK_PARTITIONS)
 
+ /* Number of partitions of the shared buffer mapping hashtable */
+ // AALEKSEEV: refactor
+#define NUM_BUFFER_PARTITIONS NUM_LOCK_PARTITIONS
+
 /* Number of partitions the shared predicate lock tables are divided into */
 #define LOG2_NUM_PREDICATELOCK_PARTITIONS  4
 #define NUM_PREDICATELOCK_PARTITIONS  (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS)
-- 