diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index ea9150b..ff95e9c 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -6455,6 +6455,25 @@ btcostestimate(PG_FUNCTION_ARGS)
 	costs.indexTotalCost += costs.num_sa_scans * descentCost;
 
 	/*
+	 * If there are no indexBoundQuals, then adjust the startup cost to
+	 * account for the cost of scanning non-matching rows.  This is
+	 * important when we later apply the effect of LIMIT clauses to the
+	 * eventual path costs.
+	 */
+	if (indexBoundQuals == NULL)
+	{
+		/*
+		 * Our problem here is that we do not know how many non-matching
+		 * rows will need to be scanned.  In the best case there will be
+		 * zero non-matching rows scanned, though in the worst case it
+		 * will include all non-matching rows.  So let's take a
+		 * middle-ground viewpoint and assume that half of the estimated
+		 * index cost will be startup cost.
+		 */
+		costs.indexStartupCost = costs.indexTotalCost / 2;
+	}
+
+	/*
 	 * If we can get an estimate of the first column's ordering correlation C
 	 * from pg_statistic, estimate the index correlation as C for a
 	 * single-column index, or C * 0.75 for multiple columns. (The idea here
