diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 8158508d8c..cabb5c52ad 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -18,6 +18,7 @@
  */
 #include "postgres.h"
 
+#include "access/generic_xlog.h"
 #include "access/nbtree.h"
 #include "access/relscan.h"
 #include "access/xlog.h"
@@ -805,6 +806,56 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 	return stats;
 }
 
+static bool
+btneednocleanup(Relation rel)
+{
+	Buffer		metabuf;
+	Page		metapg;
+	BTPageOpaque metaopaque;
+	bool need_no_cleanup;
+
+	metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
+	metapg = BufferGetPage(metabuf);
+	metaopaque = (BTPageOpaque) PageGetSpecialPointer(metapg);
+
+	need_no_cleanup = P_NEED_NO_CLEANUP(metaopaque);
+	_bt_relbuf(rel, metabuf);
+	return need_no_cleanup;
+}
+
+static void
+btsetneednocleanup(Relation rel, bool need_no_cleanup)
+{
+	Buffer		metabuf;
+	Page		metapg;
+	BTPageOpaque metaopaque;
+
+	metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
+	metapg = BufferGetPage(metabuf);
+	metaopaque = (BTPageOpaque) PageGetSpecialPointer(metapg);
+	if (need_no_cleanup && !P_NEED_NO_CLEANUP(metaopaque))
+	{
+		metaopaque->btpo_flags |= BTP_NEED_NO_CLEANUP;
+		MarkBufferDirtyHint(metabuf, true);
+	}
+	else if (!need_no_cleanup && P_NEED_NO_CLEANUP(metaopaque))
+	{
+		GenericXLogState *state;
+		Page		pg;
+		BTPageOpaque opaque;
+		XLogRecPtr	recptr;
+		state = GenericXLogStart(rel);
+		pg = GenericXLogRegisterBuffer(state, metabuf, 0);
+		opaque = (BTPageOpaque) PageGetSpecialPointer(pg);
+		opaque->btpo_flags &= ~BTP_NEED_NO_CLEANUP;
+		recptr = GenericXLogFinish(state);
+		PageSetLSN(metapg, recptr);
+		MarkBufferDirty(metabuf);
+	}
+
+	_bt_relbuf(rel, metabuf);
+}
+
 /*
  * Post-VACUUM cleanup.
  *
@@ -828,6 +879,8 @@ btvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 	 */
 	if (stats == NULL)
 	{
+		if (btneednocleanup(info->index))
+			return NULL;
 		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
 		btvacuumscan(info, stats, NULL, NULL, 0);
 	}
@@ -896,6 +949,9 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 												  "_bt_pagedel",
 												  ALLOCSET_DEFAULT_SIZES);
 
+	/* Reset "need no cleanup" flag */
+	btsetneednocleanup(rel, false);
+
 	/*
 	 * The outer loop iterates over all index pages except the metapage, in
 	 * physical order (we hope the kernel will cooperate in providing
@@ -979,6 +1035,9 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 	/* update statistics */
 	stats->num_pages = num_pages;
 	stats->pages_free = vstate.totFreePages;
+
+	if (stats->pages_deleted == stats->pages_free)
+		btsetneednocleanup(rel, true);
 }
 
 /*
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index 2b0b1da763..7a53c733ba 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -76,6 +76,7 @@ typedef BTPageOpaqueData *BTPageOpaque;
 #define BTP_SPLIT_END	(1 << 5)	/* rightmost page of split group */
 #define BTP_HAS_GARBAGE (1 << 6)	/* page has LP_DEAD tuples */
 #define BTP_INCOMPLETE_SPLIT (1 << 7)	/* right sibling's downlink is missing */
+#define BTP_NEED_NO_CLEANUP (1 << 14)	/* tree has no dead pages, so vacuum cleanup can be skipped */
 
 /*
  * The max allowed value of a cycle ID is a bit less than 64K.  This is
@@ -182,6 +183,7 @@ typedef struct BTMetaPageData
 #define P_IGNORE(opaque)		(((opaque)->btpo_flags & (BTP_DELETED|BTP_HALF_DEAD)) != 0)
 #define P_HAS_GARBAGE(opaque)	(((opaque)->btpo_flags & BTP_HAS_GARBAGE) != 0)
 #define P_INCOMPLETE_SPLIT(opaque)	(((opaque)->btpo_flags & BTP_INCOMPLETE_SPLIT) != 0)
+#define P_NEED_NO_CLEANUP(opaque)	(((opaque)->btpo_flags & BTP_NEED_NO_CLEANUP) != 0)
 
 /*
  *	Lehman and Yao's algorithm requires a ``high key'' on every non-rightmost
