diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 7612e5b..d7d21b5 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -287,10 +287,10 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir)
 		/*
 		 * An insertion into the current index page could have happened while
 		 * we didn't have read lock on it.  Re-find our position by looking
-		 * for the TID we previously returned.  (Because we hold pin on the
-		 * bucket, no deletions or splits could have occurred; therefore we
-		 * can expect that the TID still exists in the current index page, at
-		 * an offset >= where we were.)
+		 * for the TID we previously returned.  (Because we hold a pin on the
+		 * primary bucket page, no deletions or splits could have occurred;
+		 * therefore we can expect that the TID still exists in the current
+		 * index page, at an offset >= where we were.)
 		 */
 		OffsetNumber maxoffnum;
 
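The re-find described in the new comment is a simple forward walk over the current index page, stopping at the TID we handed back last time. A minimal sketch of that loop, assuming the locals of the surrounding hashgettuple (page, current, and the scan-opaque so) as they appear elsewhere in this file:

	maxoffnum = PageGetMaxOffsetNumber(page);
	for (offnum = ItemPointerGetOffsetNumber(current);
		 offnum <= maxoffnum;
		 offnum = OffsetNumberNext(offnum))
	{
		IndexTuple	itup;

		/* does this entry carry the heap TID we last returned? */
		itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
		if (ItemPointerEquals(&(so->hashso_heappos), &(itup->t_tid)))
			break;
	}
	if (offnum > maxoffnum)
		elog(ERROR, "failed to re-find scan position within index \"%s\"",
			 RelationGetRelationName(rel));

Because the pin on the primary bucket page rules out deletions and splits, the loop can start at the remembered offset rather than at FirstOffsetNumber.
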
@@ -569,7 +569,7 @@ loop_top:
 						  local_metapage.hashm_maxbucket,
 						  local_metapage.hashm_highmask,
 						  local_metapage.hashm_lowmask, &tuples_removed,
-						  &num_index_tuples, bucket_has_garbage, true,
+						  &num_index_tuples, bucket_has_garbage,
 						  callback, callback_state);
 
 		_hash_relbuf(rel, bucket_buf);
@@ -656,15 +656,21 @@ hashvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 /*
  * Helper function to perform deletion of index entries from a bucket.
  *
- * This expects that the caller has acquired a cleanup lock on the target
- * bucket (primary page of a bucket) and it is reponsibility of caller to
- * release that lock.
+ * This function expects that the caller has acquired a cleanup lock on the
+ * primary bucket page, and will return with a write lock again held on the
+ * primary bucket page.  The lock won't necessarily be held continuously,
+ * though, because we'll release it when visiting overflow pages.
  *
- * During scan of overflow pages, first we need to lock the next bucket and
- * then release the lock on current bucket.  This ensures that any concurrent
- * scan started after we start cleaning the bucket will always be behind the
- * cleanup.  Allowing scans to cross vacuum will allow it to remove tuples
- * required for sanctity of scan.
+ * It would be very bad if this function cleaned a page while some other
+ * backend was in the midst of scanning it, because hashgettuple assumes
+ * that the next valid TID will be greater than or equal to the current
+ * valid TID.  There can't be any concurrent scans in progress when we first
+ * enter this function because of the cleanup lock we hold on the primary
+ * bucket page, but as soon as we release that lock, there might be.  We
+ * handle that by conspiring to prevent those scans from passing our cleanup
+ * scan.  To do that, we lock the next page in the bucket chain before
+ * releasing the lock on the previous page.  (This type of lock chaining is
+ * not ideal, so we might want to look for a better solution at some point.)
  *
  * We need to retain a pin on the primary bucket to ensure that no concurrent
  * split can start.
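In code, the chaining the comment describes happens at the bottom of the per-page loop in this function: the next overflow page is write-locked before the lock on the current page is surrendered, so no scan can slip past the cleanup in between. A sketch, assuming the loop's locals (buf, blkno, retain_pin) as used elsewhere in this function:

	/* bail out if there are no more pages to scan */
	if (!BlockNumberIsValid(blkno))
		break;

	next_buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,
										  LH_OVERFLOW_PAGE, bstrategy);

	/* release the previous page only after the next one is locked */
	if (retain_pin)
		_hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);	/* keep pin */
	else
		_hash_relbuf(rel, buf);									/* drop pin too */

	buf = next_buf;

The retain_pin case applies to the primary bucket page, whose pin must survive until the end of the cleanup so that no concurrent split can start.
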
@@ -674,7 +680,7 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 				  BlockNumber bucket_blkno, BufferAccessStrategy bstrategy,
 				  uint32 maxbucket, uint32 highmask, uint32 lowmask,
 				  double *tuples_removed, double *num_index_tuples,
-				  bool bucket_has_garbage, bool delay,
+				  bool bucket_has_garbage,
 				  IndexBulkDeleteCallback callback, void *callback_state)
 {
 	BlockNumber blkno;
@@ -702,8 +708,7 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 		bool		retain_pin = false;
 		bool		curr_page_dirty = false;
 
-		if (delay)
-			vacuum_delay_point();
+		vacuum_delay_point();
 
 		page = BufferGetPage(buf);
 		opaque = (HashPageOpaque) PageGetSpecialPointer(page);
@@ -714,17 +719,20 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 			 offno <= maxoffno;
 			 offno = OffsetNumberNext(offno))
 		{
-			IndexTuple	itup;
 			ItemPointer htup;
+			ItemId		itemid;
+			IndexTuple	itup;
 			Bucket		bucket;
+			bool		kill_tuple = false;
 
-			itup = (IndexTuple) PageGetItem(page,
-											PageGetItemId(page, offno));
+			itemid = PageGetItemId(page, offno);
+			itup = (IndexTuple) PageGetItem(page, itemid);
 			htup = &(itup->t_tid);
-			if (callback && callback(htup, callback_state))
+			if (ItemIdIsDead(itemid))
+				kill_tuple = true;
+			else if (callback && callback(htup, callback_state))
 			{
-				/* mark the item for deletion */
-				deletable[ndeletable++] = offno;
+				kill_tuple = true;
 				if (tuples_removed)
 					*tuples_removed += 1;
 			}
@@ -745,13 +753,21 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 					 * comments in _hash_expandtable.
 					 */
 					Assert(bucket == new_bucket);
-					deletable[ndeletable++] = offno;
+					kill_tuple = true;
 				}
-				else if (num_index_tuples)
+			}
+
+			if (kill_tuple)
+			{
+				/* mark the item for deletion */
+				deletable[ndeletable++] = offno;
+			}
+			else
+			{
+				/* we're keeping it, so count it */
+				if (num_index_tuples)
 					*num_index_tuples += 1;
 			}
-			else if (num_index_tuples)
-				*num_index_tuples += 1;
 		}
 
 		/* retain the pin on primary bucket page till end of bucket scan */
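The net effect of the refactoring above is that all three reasons for discarding an entry now funnel through a single kill_tuple flag, so the keep-it bookkeeping appears exactly once. Condensed, and paraphrasing the split-cleanup branch that is only partially visible in this hunk, the per-tuple decision reads:

	bool		kill_tuple = false;

	if (ItemIdIsDead(itemid))
		kill_tuple = true;		/* a scan already marked it dead */
	else if (callback && callback(htup, callback_state))
	{
		kill_tuple = true;		/* heap tuple is dead per VACUUM */
		if (tuples_removed)
			*tuples_removed += 1;
	}
	else if (bucket_has_garbage)
	{
		/* recompute which bucket this entry now maps to */
		bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
									  maxbucket, highmask, lowmask);
		if (bucket != cur_bucket)
		{
			Assert(bucket == new_bucket);
			kill_tuple = true;	/* moved to the new bucket by a split */
		}
	}

	if (kill_tuple)
		deletable[ndeletable++] = offno;	/* mark the item for deletion */
	else if (num_index_tuples)
		*num_index_tuples += 1;				/* we're keeping it, so count it */

One behavioral consequence worth noting: entries already marked dead by earlier scans (the ItemIdIsDead test) are now reclaimed even when no VACUUM callback is supplied.
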
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 7bc6b26..83eba9f 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -667,7 +667,7 @@ restart_expand:
 		hashbucketcleanup(rel, old_bucket, buf_oblkno, start_oblkno, NULL,
 						  metap->hashm_maxbucket, metap->hashm_highmask,
 						  metap->hashm_lowmask, NULL,
-						  NULL, true, false, NULL, NULL);
+						  NULL, true, NULL, NULL);
 
 		_hash_relbuf(rel, buf_oblkno);
 
diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c
index 21954c2..a1e3dbc 100644
--- a/src/backend/access/hash/hashsearch.c
+++ b/src/backend/access/hash/hashsearch.c
@@ -68,7 +68,7 @@ _hash_next(IndexScanDesc scan, ScanDirection dir)
 static void
 _hash_readnext(Relation rel,
 			   Buffer *bufp, Page *pagep, HashPageOpaque *opaquep,
-			   bool primary_buc_page)
+			   bool primary_page)
 {
 	BlockNumber blkno;
 
@@ -81,7 +81,7 @@ _hash_readnext(Relation rel,
 	 * buckets where split is in progress, before a new bucket's split in
 	 * progress flag (LH_BUCKET_NEW_PAGE_SPLIT) is cleared.
 	 */
-	if (primary_buc_page)
+	if (primary_page)
 		_hash_chgbufaccess(rel, *bufp, HASH_READ, HASH_NOLOCK);
 	else
 		_hash_relbuf(rel, *bufp);
@@ -103,7 +103,7 @@ _hash_readnext(Relation rel,
 static void
 _hash_readprev(Relation rel,
 			   Buffer *bufp, Page *pagep, HashPageOpaque *opaquep,
-			   bool primary_buc_page)
+			   bool primary_page)
 {
 	BlockNumber blkno;
 
@@ -113,7 +113,7 @@ _hash_readprev(Relation rel,
 	 * Retain the pin on primary bucket page till the end of scan. See
 	 * comments in _hash_readnext to know the reason of retaining pin.
 	 */
-	if (primary_buc_page)
+	if (primary_page)
 		_hash_chgbufaccess(rel, *bufp, HASH_READ, HASH_NOLOCK);
 	else
 		_hash_relbuf(rel, *bufp);
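Both renamed helpers make the same two-way choice when stepping between pages: if the buffer being left is the primary bucket page, only the lock is dropped and the pin stays (the pin is what keeps concurrent splits at bay for the rest of the scan); an overflow page is released outright. In outline, _hash_readnext amounts to this sketch:

	blkno = (*opaquep)->hasho_nextblkno;

	if (primary_page)
		_hash_chgbufaccess(rel, *bufp, HASH_READ, HASH_NOLOCK);	/* keep pin */
	else
		_hash_relbuf(rel, *bufp);								/* release */

	*bufp = InvalidBuffer;
	if (BlockNumberIsValid(blkno))
	{
		*bufp = _hash_getbuf(rel, blkno, HASH_READ, LH_OVERFLOW_PAGE);
		*pagep = BufferGetPage(*bufp);
		*opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep);
	}

_hash_readprev mirrors this using hasho_prevblkno instead.
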
diff --git a/src/include/access/hash.h b/src/include/access/hash.h
index 9a5e983..26d539b 100644
--- a/src/include/access/hash.h
+++ b/src/include/access/hash.h
@@ -377,10 +377,11 @@ extern Bucket _hash_get_oldbucket_newbucket(Relation rel, Bucket old_bucket,
 
 /* hash.c */
 extern void hashbucketcleanup(Relation rel, Bucket cur_bucket,
- Buffer bucket_buf, BlockNumber bucket_blkno, BufferAccessStrategy bstrategy,
+				  Buffer bucket_buf, BlockNumber bucket_blkno,
+				  BufferAccessStrategy bstrategy,
 				  uint32 maxbucket, uint32 highmask, uint32 lowmask,
 				  double *tuples_removed, double *num_index_tuples,
-				  bool bucket_has_garbage, bool delay,
+				  bool bucket_has_garbage,
 				  IndexBulkDeleteCallback callback, void *callback_state);
 
 #endif   /* HASH_H */
