diff --git a/doc/src/sgml/high-availability.sgml b/doc/src/sgml/high-availability.sgml
index 51359d6..44fc1ee 100644
--- a/doc/src/sgml/high-availability.sgml
+++ b/doc/src/sgml/high-availability.sgml
@@ -1202,6 +1202,21 @@ synchronous_standby_names = 'FIRST 2 (s1, s2, s3)'
    </para>
 
    <para>
+    In terms of performance, there is a difference between the two
+    synchronous replication methods. Generally, quorum-based synchronous
+    replication tends to perform better than priority-based synchronous
+    replication, because in quorum-based synchronous replication the
+    transaction can resume as soon as the specified number of
+    acknowledgements has been received from any of the synchronous
+    standby servers, without distinction among them. On the other hand,
+    in priority-based synchronous replication, the standby servers that
+    the primary server must wait for are fixed until a synchronous
+    standby fails. Therefore, if a server on a low-performance machine
+    has a high priority and is chosen as a synchronous standby server,
+    it can reduce performance for database applications.
+   </para>
+
+   <para>
     <productname>PostgreSQL</> allows the application developer
     to specify the durability level required via replication. This can be
     specified for the system overall, though it can also be specified for
@@ -1246,12 +1261,22 @@ synchronous_standby_names = 'FIRST 2 (s1, s2, s3)'
     The best solution for high availability is to ensure you keep as many
     synchronous standbys as requested. This can be achieved by naming multiple
     potential synchronous standbys using <varname>synchronous_standby_names</>.
-    The standbys whose names appear earlier in the list will be used as
-    synchronous standbys. Standbys listed after these will take over
-    the role of synchronous standby if one of current ones should fail.
+    For example, in priority-based synchronous replication, the standbys whose
+    names appear earlier in the list will be used as synchronous standbys,
+    as described in <xref linkend="synchronous-replication-multiple-standbys">.
+    Standbys listed after these will take over the role of synchronous standby
+    if one of the current ones should fail.
    </para>
 
    <para>
+    Whichever synchronous replication method you choose, there is no
+    difference between the two synchronous replication methods,
+    priority-based and quorum-based, in terms of high availability,
+    because in both methods transactions can proceed as long as at
+    least the specified number of synchronous standbys are running.
+   </para>
+
+   <para>
     When a standby first attaches to the primary, it will not yet be properly
     synchronized. This is described as <literal>catchup</> mode. Once
     the lag between standby and primary reaches zero for the first time
diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c
index 20a1441..8fba28f 100644
--- a/src/backend/replication/syncrep.c
+++ b/src/backend/replication/syncrep.c
@@ -53,6 +53,9 @@
  * in the list. All the standbys appearing in the list are considered as
  * candidates for quorum synchronous standbys.
  *
+ * The method is optional. When neither FIRST nor ANY is specified in
+ * synchronous_standby_names, it is equivalent to specifying FIRST.
+ *
  * Before the standbys chosen from synchronous_standby_names can
  * become the synchronous standbys they must have caught up with
  * the primary; that may take some time. Once caught up,
@@ -385,6 +388,11 @@ SyncRepInitConfig(void)
 	priority = SyncRepGetStandbyPriority();
 	if (MyWalSnd->sync_standby_priority != priority)
 	{
+		/*
+		 * Update the priority of this WalSender, but note that in
+		 * quorum-based sync replication the value of
+		 * sync_standby_priority has no effect.
+		 */
 		LWLockAcquire(SyncRepLock, LW_EXCLUSIVE);
 		MyWalSnd->sync_standby_priority = priority;
 		LWLockRelease(SyncRepLock);
@@ -599,6 +607,10 @@ SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
 /*
  * Calculate the Nth latest Write, Flush and Apply positions among sync
  * standbys.
+ *
+ * XXX This costs O(n log n), but since we expect n to be small,
+ * probably less than 10 in most cases, it could be optimized with
+ * a different sorting algorithm if necessary.
  */
 static void
 SyncRepGetNthLatestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
@@ -629,6 +641,7 @@ SyncRepGetNthLatestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
 		i++;
 	}
 
+	/* Sort each array in descending order */
 	qsort(write_array, len, sizeof(XLogRecPtr), cmp_lsn);
 	qsort(flush_array, len, sizeof(XLogRecPtr), cmp_lsn);
 	qsort(apply_array, len, sizeof(XLogRecPtr), cmp_lsn);
@@ -688,6 +701,10 @@ SyncRepGetSyncStandbys(bool	*am_sync)
  * Return the list of all the candidates for quorum sync standbys,
  * or NIL if no such standby is connected.
  *
+ * In quorum-based sync replication, the quorum sync standbys are
+ * selected regardless of their priority. All running active
+ * standbys are considered candidates for quorum sync standbys.
+ *
  * The caller must hold SyncRepLock. This function must be called only in
  * a quorum-based sync replication.
  *
