diff --git a/src/backend/access/hash/README b/src/backend/access/hash/README
index 4082581..2652cae 100644
--- a/src/backend/access/hash/README
+++ b/src/backend/access/hash/README
@@ -187,19 +187,21 @@ track of available overflow pages.
 
 The reader algorithm is:
 
-	read/sharelock meta page
-    loop:
+	pin meta page and take buffer content lock in shared mode
+	loop:
 		compute bucket number for target hash key
-		release meta page
+		release meta page buffer content lock
 		if (correct bucket page is already locked)
 			break
 		release any existing bucket page lock (if a concurrent split happened)
-		share-lock bucket page
+		take heavyweight bucket lock
+		retake meta page buffer content lock in shared mode
 -- then, per read request:
-	read/sharelock current page of bucket
+	release pin on metapage
+	read current page of bucket and take shared buffer content lock
 		step to next page if necessary (no chaining of locks)
 	get tuple
-	release current page
+	release buffer content lock and pin on current page
 -- at scan shutdown:
 	release bucket share-lock
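
As a concrete illustration of the retry loop above, here is a simplified C sketch of how a scan can obtain the heavyweight bucket lock while only briefly holding the metapage content lock. The _hash_* helpers and HASH_*/LH_* constants are the existing hash AM internals; the wrapper function and its name (sketch_lock_bucket_for_scan) are purely illustrative, not part of this patch.

	#include "postgres.h"
	#include "access/hash.h"
	#include "storage/bufmgr.h"

	/*
	 * Sketch only: find and share-lock the bucket for a given hash key,
	 * following the reader algorithm above.  Returns the bucket's block
	 * number with the heavyweight bucket lock held in share mode.
	 */
	static BlockNumber
	sketch_lock_bucket_for_scan(Relation rel, uint32 hashkey)
	{
		Buffer		metabuf;
		HashMetaPage metap;
		BlockNumber blkno;
		BlockNumber oldblkno = InvalidBlockNumber;
		bool		retry = false;

		/* pin meta page and take buffer content lock in shared mode */
		metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
		metap = HashPageGetMeta(BufferGetPage(metabuf));

		for (;;)
		{
			Bucket		bucket;

			/* compute bucket number for target hash key */
			bucket = _hash_hashkey2bucket(hashkey,
										  metap->hashm_maxbucket,
										  metap->hashm_highmask,
										  metap->hashm_lowmask);
			blkno = BUCKET_TO_BLKNO(metap, bucket);

			/* release meta page buffer content lock, but keep the pin */
			_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);

			/* if the bucket locked on a previous pass is still right, done */
			if (retry)
			{
				if (oldblkno == blkno)
					break;
				/* a concurrent split moved our key; drop the stale lock */
				_hash_droplock(rel, oldblkno, HASH_SHARE);
			}

			/* take heavyweight bucket lock, then recheck under the meta lock */
			_hash_getlock(rel, blkno, HASH_SHARE);
			_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_READ);
			oldblkno = blkno;
			retry = true;
		}

		/* release pin on metapage; the bucket lock now protects the scan */
		_hash_dropbuf(rel, metabuf);
		return blkno;
	}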
 
@@ -225,24 +227,26 @@ as it was before.
 
 The insertion algorithm is rather similar:
 
-	read/sharelock meta page
-    loop:
+	pin meta page and take buffer content lock in shared mode
+	loop:
 		compute bucket number for target hash key
-		release meta page
+		release meta page buffer content lock
 		if (correct bucket page is already locked)
 			break
 		release any existing bucket page lock (if a concurrent split happened)
-		share-lock bucket page
+		take heavyweight bucket lock in shared mode
+		retake meta page buffer content lock in shared mode
 -- (so far same as reader)
-	read/exclusive-lock current page of bucket
+	release pin on metapage
+	pin current page of bucket and take exclusive buffer content lock
 	if full, release, read/exclusive-lock next page; repeat as needed
 	>> see below if no space in any page of bucket
 	insert tuple at appropriate place in page
-	write/release current page
-	release bucket share-lock
-	read/exclusive-lock meta page
+	mark current page dirty and release buffer content lock and pin
+	release heavyweight share-lock
+	pin meta page and take buffer content lock in shared mode
 	increment tuple count, decide if split needed
-	write/release meta page
+	mark meta page dirty and release buffer content lock and pin
 	done if no split needed, else enter Split algorithm below
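
The "increment tuple count, decide if split needed" step is just a fill-factor test. A minimal sketch, assuming the metapage buffer is already pinned and content-locked as described above (the function name is hypothetical; the hashm_* fields are the real metapage fields):

	#include "postgres.h"
	#include "access/hash.h"
	#include "storage/bufmgr.h"

	/*
	 * Sketch only: bump the tuple count and decide whether a split is
	 * needed.  metabuf is the metapage buffer, pinned and content-locked
	 * as described in the listing above.
	 */
	static bool
	sketch_bump_count_and_check_split(Buffer metabuf)
	{
		HashMetaPage metap = HashPageGetMeta(BufferGetPage(metabuf));
		bool		do_expand;

		/* increment tuple count */
		metap->hashm_ntuples += 1;

		/* split when tuples-per-bucket exceeds the target fill factor */
		do_expand = metap->hashm_ntuples >
			(double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1);

		/* mark meta page dirty and release buffer content lock and pin */
		MarkBufferDirty(metabuf);
		UnlockReleaseBuffer(metabuf);

		return do_expand;		/* true: enter the Split algorithm below */
	}

The Split algorithm below repeats this test ("check split still needed") precisely because the metapage lock is dropped between the two steps.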
 
 To speed searches, the index entries within any individual index page are
@@ -267,17 +271,15 @@ index is overfull (has a higher-than-wanted ratio of tuples to buckets).
 The algorithm attempts, but does not necessarily succeed, to split one
 existing bucket in two, thereby lowering the fill ratio:
 
-	exclusive-lock page 0 (assert the right to begin a split)
-	read/exclusive-lock meta page
+	pin meta page and take buffer content lock in exclusive mode
 	check split still needed
-	if split not needed anymore, drop locks and exit
+	if split not needed anymore, drop buffer content lock and pin and exit
 	decide which bucket to split
 	Attempt to X-lock old bucket number (definitely could fail)
 	Attempt to X-lock new bucket number (shouldn't fail, but...)
-	if above fail, drop locks and exit
+	if above fail, drop locks and pin and exit
 	update meta page to reflect new number of buckets
-	write/release meta page
-	release X-lock on page 0
+	mark meta page dirty and release buffer content lock and pin
 	-- now, accesses to all other buckets can proceed.
 	Perform actual split of bucket, moving tuples as needed
 	>> see below about acquiring needed extra space
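
The conditional locking at the start of a split can be sketched as follows. _hash_try_getlock() and _hash_droplock() are the existing heavyweight-lock helpers (the same _hash_try_getlock() calls appear in the hashpage.c hunk further down); the wrapper function and its name are hypothetical, and error handling is omitted.

	#include "postgres.h"
	#include "access/hash.h"

	/*
	 * Sketch only: conditionally take the heavyweight locks needed to split
	 * the next bucket, per the listing above.  Returns false if either lock
	 * cannot be obtained immediately; the split is then simply skipped.
	 */
	static bool
	sketch_lock_buckets_for_split(Relation rel, HashMetaPage metap)
	{
		Bucket		new_bucket = metap->hashm_maxbucket + 1;
		Bucket		old_bucket = new_bucket & metap->hashm_lowmask;
		BlockNumber start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket);
		BlockNumber start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);

		/* Attempt to X-lock old bucket number (definitely could fail) */
		if (!_hash_try_getlock(rel, start_oblkno, HASH_EXCLUSIVE))
			return false;

		/* Attempt to X-lock new bucket number (shouldn't fail, but...) */
		if (!_hash_try_getlock(rel, start_nblkno, HASH_EXCLUSIVE))
		{
			_hash_droplock(rel, start_oblkno, HASH_EXCLUSIVE);
			return false;
		}

		return true;
	}
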
@@ -313,20 +315,20 @@ go-round.
 The fourth operation is garbage collection (bulk deletion):
 
 	next bucket := 0
-	read/sharelock meta page
+	pin metapage and take buffer content lock in exclusive mode
 	fetch current max bucket number
-	release meta page
+	release meta page buffer content lock and pin
 	while next bucket <= max bucket do
 		Acquire X lock on target bucket
 		Scan and remove tuples, compact free space as needed
 		Release X lock
 		next bucket ++
 	end loop
-	exclusive-lock meta page
+	pin metapage and take buffer content lock in exclusive mode
 	check if number of buckets changed
-	if so, release lock and return to for-each-bucket loop
+	if so, release content lock and pin and return to for-each-bucket loop
 	else update metapage tuple count
-	write/release meta page
+	mark meta page dirty and release buffer content lock and pin
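
The bulk-deletion loop and its final recheck might look roughly like this in C. The per-bucket work is elided, num_tuples stands in for the count the real code would accumulate while scanning, and the function name is hypothetical.

	#include "postgres.h"
	#include "access/hash.h"
	#include "storage/bufmgr.h"

	/*
	 * Sketch only: the bulk-deletion outer loop, following the listing
	 * above.  The per-bucket work (heavyweight X lock, scan, compaction)
	 * is elided.
	 */
	static void
	sketch_hash_bulkdelete(Relation rel)
	{
		Bucket		cur_bucket = 0;
		uint32		cur_maxbucket;
		double		num_tuples = 0;
		Buffer		metabuf;
		HashMetaPage metap;

		/* pin metapage, take content lock, fetch current max bucket number */
		metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE, LH_META_PAGE);
		metap = HashPageGetMeta(BufferGetPage(metabuf));
		cur_maxbucket = metap->hashm_maxbucket;
		_hash_relbuf(rel, metabuf);		/* release content lock and pin */

	loop_top:
		while (cur_bucket <= cur_maxbucket)
		{
			/* Acquire X lock on target bucket, scan and remove tuples,
			 * compact free space as needed, release X lock (all elided) */
			cur_bucket++;
		}

		/* retake the metapage and check whether a split added buckets */
		metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE, LH_META_PAGE);
		metap = HashPageGetMeta(BufferGetPage(metabuf));
		if (cur_maxbucket != metap->hashm_maxbucket)
		{
			/* yes: note the new maximum and go around again */
			cur_maxbucket = metap->hashm_maxbucket;
			_hash_relbuf(rel, metabuf);
			goto loop_top;
		}

		/* no: update metapage tuple count, mark dirty, release lock and pin */
		metap->hashm_ntuples = num_tuples;
		MarkBufferDirty(metabuf);
		_hash_relbuf(rel, metabuf);
	}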
 
 Note that this is designed to allow concurrent splits.  If a split occurs,
 tuples relocated into the new bucket will be visited twice by the scan,
@@ -357,25 +359,25 @@ overflow page to the free pool.
 
 Obtaining an overflow page:
 
-	read/exclusive-lock meta page
+	take metapage content lock in exclusive mode
 	determine next bitmap page number; if none, exit loop
-	release meta page lock
-	read/exclusive-lock bitmap page
+	release meta page content lock
+	pin bitmap page and take content lock in exclusive mode
 	search for a free page (zero bit in bitmap)
 	if found:
 		set bit in bitmap
-		write/release bitmap page
-		read/exclusive-lock meta page
+		mark bitmap page dirty and release content lock
+		take metapage buffer content lock in exclusive mode
 		if first-free-bit value did not change,
-			update it and write meta page
-		release meta page
+			update it and mark meta page dirty
+		release meta page buffer content lock
 		return page number
 	else (not found):
-	release bitmap page
+	release bitmap page buffer content lock
 	loop back to try next bitmap page, if any
 -- here when we have checked all bitmap pages; we hold meta excl. lock
 	extend index to add another overflow page; update meta information
-	write/release meta page
+	mark meta page dirty and release buffer content lock
 	return page number
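
A sketch of the allocation path's lock ordering, under some simplifying assumptions: the first-free-bit hint maintenance and the conversion from bit number to overflow block number are omitted, the per-bit manipulation is spelled out inline, and the function name is hypothetical.

	#include "postgres.h"
	#include "access/hash.h"
	#include "storage/bufmgr.h"

	/*
	 * Sketch only: search the overflow-page bitmaps for a free page, per
	 * the listing above.  On success the bit has been set and its absolute
	 * number is returned; on failure -1 is returned with the metapage
	 * content lock still held, so the caller can extend the index.  The
	 * caller passes the pinned (but unlocked) metapage buffer.
	 */
	static int32
	sketch_find_free_overflow_bit(Relation rel, Buffer metabuf)
	{
		HashMetaPage metap = HashPageGetMeta(BufferGetPage(metabuf));
		uint32		i;

		/* take metapage content lock in exclusive mode */
		LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);

		for (i = 0; i < metap->hashm_nmaps; i++)
		{
			BlockNumber mapblkno = metap->hashm_mapp[i];
			uint32		nbits = metap->hashm_bmsize << 3;
			Buffer		mapbuf;
			uint32	   *freep;
			uint32		bit;

			/* release meta page content lock while inspecting the bitmap */
			LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);

			/* pin bitmap page and take content lock in exclusive mode */
			mapbuf = _hash_getbuf(rel, mapblkno, HASH_WRITE, LH_BITMAP_PAGE);
			freep = HashPageGetBitmap(BufferGetPage(mapbuf));

			/* search for a free page (zero bit in bitmap) */
			for (bit = 0; bit < nbits; bit++)
			{
				if ((freep[bit >> 5] & ((uint32) 1 << (bit & 31))) == 0)
				{
					freep[bit >> 5] |= (uint32) 1 << (bit & 31);	/* set bit */
					MarkBufferDirty(mapbuf);	/* mark bitmap page dirty */
					_hash_relbuf(rel, mapbuf);	/* release lock and pin */
					return (int32) (i * nbits + bit);
				}
			}

			/* not found here: release bitmap page, retake meta lock, loop */
			_hash_relbuf(rel, mapbuf);
			LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
		}

		/* all bitmap pages full; metapage content lock is still held */
		return -1;
	}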
 
 It is slightly annoying to release and reacquire the metapage lock
@@ -425,17 +427,17 @@ algorithm is:
 
 	delink overflow page from bucket chain
 	(this requires read/update/write/release of fore and aft siblings)
-	read/share-lock meta page
+	pin meta page and take buffer content lock in shared mode
 	determine which bitmap page contains the free space bit for page
-	release meta page
-	read/exclusive-lock bitmap page
+	release meta page buffer content lock
+	pin bitmap page and take buffer content lock in exclusive mode
 	update bitmap bit
-	write/release bitmap page
+	mark bitmap page dirty and release buffer content lock and pin
 	if page number is less than what we saw as first-free-bit in meta:
-	read/exclusive-lock meta page
+	retake meta page buffer content lock in exclusive mode
 	if page number is still less than first-free-bit,
-		update first-free-bit field and write meta page
-	release meta page
+		update first-free-bit field and mark meta page dirty
+	release meta page buffer content lock and pin
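
The release path can be sketched the same way. For brevity this assumes a single bitmap page (so in-page and absolute bit numbers coincide); firstfree_seen is the hashm_firstfree value remembered from the earlier shared-lock look at the metapage, and the function name is hypothetical.

	#include "postgres.h"
	#include "access/hash.h"
	#include "storage/bufmgr.h"

	/*
	 * Sketch only: clear a freed overflow page's bitmap bit, then pull the
	 * metapage's first-free-bit hint back if appropriate -- in that order.
	 * The caller holds just a pin on metabuf and has already worked out
	 * which bitmap page (mapblkno) holds the bit.
	 */
	static void
	sketch_release_overflow_bit(Relation rel, Buffer metabuf,
								BlockNumber mapblkno, uint32 bit,
								uint32 firstfree_seen)
	{
		HashMetaPage metap = HashPageGetMeta(BufferGetPage(metabuf));
		Buffer		mapbuf;
		uint32	   *freep;

		/* pin bitmap page and take buffer content lock in exclusive mode */
		mapbuf = _hash_getbuf(rel, mapblkno, HASH_WRITE, LH_BITMAP_PAGE);
		freep = HashPageGetBitmap(BufferGetPage(mapbuf));
		freep[bit >> 5] &= ~((uint32) 1 << (bit & 31));	/* update bitmap bit */

		/* mark bitmap page dirty and release buffer content lock and pin */
		MarkBufferDirty(mapbuf);
		_hash_relbuf(rel, mapbuf);

		/* only after the bit is clear may first-free-bit point at it */
		if (bit < firstfree_seen)
		{
			/* retake meta page buffer content lock in exclusive mode */
			LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
			if (bit < metap->hashm_firstfree)	/* still less?  then update */
			{
				metap->hashm_firstfree = bit;
				MarkBufferDirty(metabuf);
			}
			/* release content lock; the caller drops its pin afterwards */
			LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
		}
	}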
 
 We have to do it this way because we must clear the bitmap bit before
 changing the first-free-bit field (hashm_firstfree).  It is possible that
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index c0b6eb0..3d067e7 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -559,12 +559,6 @@ _hash_expandtable(Relation rel, Buffer metabuf)
 	if (_hash_has_active_scan(rel, old_bucket))
 		goto fail;
 
-	/*
-	 * It's normally a bad idea to grab a heavyweight lock while holding
-	 * a buffer content lock, both because of deadlock risk and because
-	 * content locks should be held only briefly.  But since we are only
-	 * trylocking here it should be OK.
-	 */
 	if (!_hash_try_getlock(rel, start_oblkno, HASH_EXCLUSIVE))
 		goto fail;
 
@@ -581,12 +575,6 @@ _hash_expandtable(Relation rel, Buffer metabuf)
 	if (_hash_has_active_scan(rel, new_bucket))
 		elog(ERROR, "scan in progress on supposedly new bucket");
 
-	/*
-	 * It's normally a bad idea to grab a heavyweight lock while holding
-	 * a buffer content lock, both because of deadlock risk and because
-	 * content locks should be held only briefly.  But since we are only
-	 * trylocking here it should be OK.
-	 */
 	if (!_hash_try_getlock(rel, start_nblkno, HASH_EXCLUSIVE))
 		elog(ERROR, "could not get lock on supposedly new bucket");
 
