Author: attilio
Date: Tue Oct 11 13:45:04 2011
New Revision: 226255
URL: http://svn.freebsd.org/changeset/base/226255

Log:
  Adaptive spinning for locking primitives in read mode has some tuning
  SYSCTLs that are inappropriate for day-to-day use of the machine (they are
  mostly useful to a developer who wants to run benchmarks on it).
  Remove them before the release, since we do not want to ship with them.
  
  Now that the SYSCTLs are gone, use plain numeric constants for these
  values instead of static storage, to avoid any compiler pessimization and
  the risk of sharing storage (and therefore a cache line) among CPUs that
  are spinning adaptively at the same time; a simplified sketch of the
  resulting spin loop follows the log message below.
  
  Please note that this patch is not an MFC, but an 'edge case' committed
  directly to stable/9.
  
  Approved by:  re (kib)
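
The sketch below is a simplified rendering of the read-side adaptive spin in
kern_rwlock.c after this change, not text taken from the diff itself: the
helper name rw_rlock_spin() is hypothetical (the real logic is inline in
_rw_rlock()), while struct rwlock, RW_LOCK_READ, RW_CAN_READ() and
cpu_spinwait() are the kernel definitions the real code already uses.  With
ROWNER_RETRIES and ROWNER_LOOPS as compile-time constants, the spin bound
becomes an immediate in the generated code rather than a load from writable
static storage that every spinning CPU would otherwise share.

#ifdef ADAPTIVE_RWLOCKS
#define	ROWNER_RETRIES	10
#define	ROWNER_LOOPS	10000
#endif

/*
 * Hypothetical helper showing the shape of the read-side adaptive spin.
 * Returns nonzero if the caller should retry the lock fast path.
 */
static int
rw_rlock_spin(struct rwlock *rw, u_int *spintries)
{
	uintptr_t v;
	int i;

	if (*spintries >= ROWNER_RETRIES)
		return (0);
	(*spintries)++;
	for (i = 0; i < ROWNER_LOOPS; i++) {
		v = rw->rw_lock;
		/* Stop once readers can get in, or the lock left read mode. */
		if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
			break;
		cpu_spinwait();
	}
	return (i != ROWNER_LOOPS);
}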

Modified:
  stable/9/sys/kern/kern_lock.c
  stable/9/sys/kern/kern_rwlock.c
  stable/9/sys/kern/kern_sx.c

Modified: stable/9/sys/kern/kern_lock.c
==============================================================================
--- stable/9/sys/kern/kern_lock.c       Tue Oct 11 13:42:42 2011        (r226254)
+++ stable/9/sys/kern/kern_lock.c       Tue Oct 11 13:45:04 2011        (r226255)
@@ -61,6 +61,11 @@ CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
 #define        SQ_EXCLUSIVE_QUEUE      0
 #define        SQ_SHARED_QUEUE         1
 
+#ifdef ADAPTIVE_LOCKMGRS
+#define        ALK_RETRIES             10
+#define        ALK_LOOPS               10000
+#endif
+
 #ifndef INVARIANTS
 #define        _lockmgr_assert(lk, what, file, line)
 #define        TD_LOCKS_INC(td)
@@ -155,14 +160,6 @@ struct lock_class lock_class_lockmgr = {
 #endif
 };
 
-#ifdef ADAPTIVE_LOCKMGRS
-static u_int alk_retries = 10;
-static u_int alk_loops = 10000;
-SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
-SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
-SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
-#endif
-
 static __inline struct thread *
 lockmgr_xholder(struct lock *lk)
 {
@@ -573,14 +570,14 @@ __lockmgr_args(struct lock *lk, u_int fl
                                continue;
                        } else if (LK_CAN_ADAPT(lk, flags) &&
                            (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
-                           spintries < alk_retries) {
+                           spintries < ALK_RETRIES) {
                                if (flags & LK_INTERLOCK) {
                                        class->lc_unlock(ilk);
                                        flags &= ~LK_INTERLOCK;
                                }
                                GIANT_SAVE();
                                spintries++;
-                               for (i = 0; i < alk_loops; i++) {
+                               for (i = 0; i < ALK_LOOPS; i++) {
                                        if (LOCK_LOG_TEST(&lk->lock_object, 0))
                                                CTR4(KTR_LOCK,
                                    "%s: shared spinning on %p with %u and %u",
@@ -592,7 +589,7 @@ __lockmgr_args(struct lock *lk, u_int fl
                                        cpu_spinwait();
                                }
                                GIANT_RESTORE();
-                               if (i != alk_loops)
+                               if (i != ALK_LOOPS)
                                        continue;
                        }
 #endif
@@ -789,7 +786,7 @@ __lockmgr_args(struct lock *lk, u_int fl
                                continue;
                        } else if (LK_CAN_ADAPT(lk, flags) &&
                            (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
-                           spintries < alk_retries) {
+                           spintries < ALK_RETRIES) {
                                if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
                                    !atomic_cmpset_ptr(&lk->lk_lock, x,
                                    x | LK_EXCLUSIVE_SPINNERS))
@@ -800,7 +797,7 @@ __lockmgr_args(struct lock *lk, u_int fl
                                }
                                GIANT_SAVE();
                                spintries++;
-                               for (i = 0; i < alk_loops; i++) {
+                               for (i = 0; i < ALK_LOOPS; i++) {
                                        if (LOCK_LOG_TEST(&lk->lock_object, 0))
                                                CTR4(KTR_LOCK,
                                    "%s: shared spinning on %p with %u and %u",
@@ -811,7 +808,7 @@ __lockmgr_args(struct lock *lk, u_int fl
                                        cpu_spinwait();
                                }
                                GIANT_RESTORE();
-                               if (i != alk_loops)
+                               if (i != ALK_LOOPS)
                                        continue;
                        }
 #endif

Modified: stable/9/sys/kern/kern_rwlock.c
==============================================================================
--- stable/9/sys/kern/kern_rwlock.c     Tue Oct 11 13:42:42 2011        (r226254)
+++ stable/9/sys/kern/kern_rwlock.c     Tue Oct 11 13:45:04 2011        (r226255)
@@ -56,11 +56,8 @@ __FBSDID("$FreeBSD$");
 #endif
 
 #ifdef ADAPTIVE_RWLOCKS
-static int rowner_retries = 10;
-static int rowner_loops = 10000;
-SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL, "rwlock debugging");
-SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
-SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
+#define        ROWNER_RETRIES  10
+#define        ROWNER_LOOPS    10000
 #endif
 
 #ifdef DDB
@@ -380,15 +377,15 @@ _rw_rlock(struct rwlock *rw, const char 
                                }
                                continue;
                        }
-               } else if (spintries < rowner_retries) {
+               } else if (spintries < ROWNER_RETRIES) {
                        spintries++;
-                       for (i = 0; i < rowner_loops; i++) {
+                       for (i = 0; i < ROWNER_LOOPS; i++) {
                                v = rw->rw_lock;
                                if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
                                        break;
                                cpu_spinwait();
                        }
-                       if (i != rowner_loops)
+                       if (i != ROWNER_LOOPS)
                                continue;
                }
 #endif
@@ -691,7 +688,7 @@ _rw_wlock_hard(struct rwlock *rw, uintpt
                        continue;
                }
                if ((v & RW_LOCK_READ) && RW_READERS(v) &&
-                   spintries < rowner_retries) {
+                   spintries < ROWNER_RETRIES) {
                        if (!(v & RW_LOCK_WRITE_SPINNER)) {
                                if (!atomic_cmpset_ptr(&rw->rw_lock, v,
                                    v | RW_LOCK_WRITE_SPINNER)) {
@@ -699,15 +696,15 @@ _rw_wlock_hard(struct rwlock *rw, uintpt
                                }
                        }
                        spintries++;
-                       for (i = 0; i < rowner_loops; i++) {
+                       for (i = 0; i < ROWNER_LOOPS; i++) {
                                if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
                                        break;
                                cpu_spinwait();
                        }
 #ifdef KDTRACE_HOOKS
-                       spin_cnt += rowner_loops - i;
+                       spin_cnt += ROWNER_LOOPS - i;
 #endif
-                       if (i != rowner_loops)
+                       if (i != ROWNER_LOOPS)
                                continue;
                }
 #endif

Modified: stable/9/sys/kern/kern_sx.c
==============================================================================
--- stable/9/sys/kern/kern_sx.c Tue Oct 11 13:42:42 2011        (r226254)
+++ stable/9/sys/kern/kern_sx.c Tue Oct 11 13:45:04 2011        (r226255)
@@ -71,6 +71,11 @@ CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS)
 #define        SQ_EXCLUSIVE_QUEUE      0
 #define        SQ_SHARED_QUEUE         1
 
+#ifdef ADAPTIVE_SX
+#define        ASX_RETRIES             10
+#define        ASX_LOOPS               10000
+#endif
+
 /*
  * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
  * drop Giant anytime we have to sleep or if we adaptively spin.
@@ -133,14 +138,6 @@ struct lock_class lock_class_sx = {
 #define        _sx_assert(sx, what, file, line)
 #endif
 
-#ifdef ADAPTIVE_SX
-static u_int asx_retries = 10;
-static u_int asx_loops = 10000;
-SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
-SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
-SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
-#endif
-
 void
 assert_sx(struct lock_object *lock, int what)
 {
@@ -529,10 +526,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
                                        }
                                        continue;
                                }
-                       } else if (SX_SHARERS(x) && spintries < asx_retries) {
+                       } else if (SX_SHARERS(x) && spintries < ASX_RETRIES) {
                                GIANT_SAVE();
                                spintries++;
-                               for (i = 0; i < asx_loops; i++) {
+                               for (i = 0; i < ASX_LOOPS; i++) {
                                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                                CTR4(KTR_LOCK,
                                    "%s: shared spinning on %p with %u and %u",
@@ -546,7 +543,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
                                        spin_cnt++;
 #endif
                                }
-                               if (i != asx_loops)
+                               if (i != ASX_LOOPS)
                                        continue;
                        }
                }