diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 3a58f1e..d5c4043 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -417,30 +417,34 @@ TransactionIdGetStatus(TransactionId xid, XLogRecPtr *lsn)
 /*
  * Number of shared CLOG buffers.
  *
- * Testing during the PostgreSQL 9.2 development cycle revealed that on a
+ * Testing during the PostgreSQL 9.6 development cycle revealed that on a
  * large multi-processor system, it was possible to have more CLOG page
- * requests in flight at one time than the number of CLOG buffers which existed
- * at that time, which was hardcoded to 8.  Further testing revealed that
- * performance dropped off with more than 32 CLOG buffers, possibly because
- * the linear buffer search algorithm doesn't scale well.
+ * requests in flight at one time than the number of CLOG buffers which
+ * existed at that time, which was 32 (assuming shared_buffers is large
+ * enough).  Further testing revealed that performance either stayed the same
+ * or dropped off with more than 64 CLOG buffers, possibly because the linear
+ * buffer search algorithm doesn't scale well, or because other locking
+ * bottlenecks in the system mask the improvement.
  *
- * Unconditionally increasing the number of CLOG buffers to 32 did not seem
+ * Unconditionally increasing the number of CLOG buffers to 64 did not seem
  * like a good idea, because it would increase the minimum amount of shared
  * memory required to start, which could be a problem for people running very
  * small configurations.  The following formula seems to represent a reasonable
  * compromise: people with very low values for shared_buffers will get fewer
- * CLOG buffers as well, and everyone else will get 32.
+ * CLOG buffers as well, and everyone else will get 64.
  *
  * It is likely that some further work will be needed here in future releases;
  * for example, on a 64-core server, the maximum number of CLOG requests that
  * can be simultaneously in flight will be even larger.  But that will
  * apparently require more than just changing the formula, so for now we take
- * the easy way out.
+ * the easy way out.  It is also possible that, once other locking
+ * bottlenecks are removed, a further increase in CLOG buffers would help,
+ * but that is not the case today.
  */
 Size
 CLOGShmemBuffers(void)
 {
-	return Min(32, Max(4, NBuffers / 512));
+	return Min(64, Max(4, NBuffers / 512));
 }
 
 /*

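For reference, a minimal standalone sketch (not part of the patch) of what the
revised formula yields at a few shared_buffers settings.  The local Min/Max
macros mirror the ones PostgreSQL defines in c.h, and the example NBuffers
values are assumptions chosen for illustration; NBuffers counts 8 kB shared
buffers, so NBuffers / 512 works out to one CLOG buffer per 4 MB of
shared_buffers, clamped to the range [4, 64].

    /*
     * Standalone illustration of the new CLOGShmemBuffers() formula.
     * Not PostgreSQL source; compile with any C compiler.
     */
    #include <stdio.h>

    #define Min(x, y) ((x) < (y) ? (x) : (y))
    #define Max(x, y) ((x) > (y) ? (x) : (y))

    static int
    clog_shmem_buffers(int nbuffers)
    {
        return Min(64, Max(4, nbuffers / 512));
    }

    int
    main(void)
    {
        /* Assumed shared_buffers settings, in 8 kB pages:    */
        /* 128 kB, 16 MB, 128 MB, 256 MB, and 1 GB.           */
        int settings[] = {16, 2048, 16384, 32768, 131072};

        for (int i = 0; i < 5; i++)
            printf("NBuffers = %6d -> %2d CLOG buffers\n",
                   settings[i], clog_shmem_buffers(settings[i]));
        return 0;
    }

Under these assumptions the sketch prints 4, 4, 32, 64, and 64 CLOG buffers
respectively: very small configurations still get the old floor of 4, and
anyone with shared_buffers of 256 MB or more gets the new cap of 64.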