Author: mjg
Date: Thu Mar 16 07:10:08 2017
New Revision: 315386
URL: https://svnweb.freebsd.org/changeset/base/315386

Log:
  MFC r313853,r313859:
  
  locks: remove SCHEDULER_STOPPED checks from primitives for modules
  
  They all fall back to the slow path if necessary, and the check is already there.
  
  This means a panicked kernel executing code from modules will be able to
  perform actual lock/unlock operations, but this was already the case for core
  code, which has said primitives inlined.
  
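  As an illustration of why the removal is safe, here is a rough, self-contained
  userland model of the resulting shape (toy names and C11 atomics, not the
  actual stable/11 primitives): the module-facing wrapper no longer
  short-circuits, the fast path is a single atomic attempt, and the one
  remaining stopped-scheduler check sits in the slow path.

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* Toy stand-in for td_stopsched: set once the kernel has panicked. */
  static _Atomic bool scheduler_stopped;

  /* Toy lock word: 0 means unowned, otherwise the owner's tid. */
  static _Atomic unsigned long toy_lock;

  /* Slow path: the SCHEDULER_STOPPED-style check lives here, and only here. */
  static void
  toy_lock_slow(unsigned long tid)
  {
          unsigned long expected;

          if (atomic_load(&scheduler_stopped))
                  return;         /* pretend success; nothing will ever block */
          for (;;) {
                  expected = 0;
                  if (atomic_compare_exchange_weak(&toy_lock, &expected, tid))
                          return;
          }
  }

  /*
   * Module-facing wrapper: note the absence of an early stopped-scheduler
   * check, mirroring what this commit removes.
   */
  static void
  toy_lock_acquire(unsigned long tid)
  {
          unsigned long expected = 0;

          if (!atomic_compare_exchange_strong(&toy_lock, &expected, tid))
                  toy_lock_slow(tid);
  }

  int
  main(void)
  {
          atomic_store(&scheduler_stopped, true);
          toy_lock_acquire(1);    /* still completes after the "panic" */
          printf("lock word: %lu\n", (unsigned long)atomic_load(&toy_lock));
          return (0);
  }
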
  ==
  
  Introduce SCHEDULER_STOPPED_TD for use when the thread pointer was
  already read
  
  Sprinkle it in a few places.

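  A minimal userland sketch of the new macro and the call pattern it enables
  (toy struct thread, curthread and MPASS stand-ins; the GNU statement
  expression matches the shape used in the real header, so this needs GCC or
  Clang):

  #include <assert.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* Toy stand-ins; the real definitions live in the kernel headers. */
  struct thread {
          bool td_stopsched;
  };
  static struct thread thread0;
  #define curthread               (&thread0)
  #define MPASS(ex)               assert(ex)
  #define __predict_false(exp)    __builtin_expect((exp), 0)

  /*
   * Same shape as the new stable/11 macro: assert the cached pointer really
   * is curthread, then test the flag without re-reading curthread.
   */
  #define SCHEDULER_STOPPED_TD(td)  ({            \
          MPASS((td) == curthread);               \
          __predict_false((td)->td_stopsched);    \
  })
  #define SCHEDULER_STOPPED()     SCHEDULER_STOPPED_TD(curthread)

  static int
  toy_sleep(void)
  {
          struct thread *td = curthread;  /* pointer already read once... */

          if (SCHEDULER_STOPPED_TD(td))   /* ...so reuse it here */
                  return (0);
          /* the normal sleep path would follow */
          return (1);
  }

  int
  main(void)
  {
          curthread->td_stopsched = true;
          printf("toy_sleep() -> %d\n", toy_sleep());
          return (0);
  }

  The callers touched below (_sleep(), the cv waiters, mi_switch()) already
  have the thread pointer in hand, which is exactly the case the new form is
  meant for.
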
Modified:
  stable/11/sys/kern/kern_condvar.c
  stable/11/sys/kern/kern_mutex.c
  stable/11/sys/kern/kern_rwlock.c
  stable/11/sys/kern/kern_sx.c
  stable/11/sys/kern/kern_synch.c
  stable/11/sys/sys/systm.h
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/kern/kern_condvar.c
==============================================================================
--- stable/11/sys/kern/kern_condvar.c   Thu Mar 16 07:09:47 2017        (r315385)
+++ stable/11/sys/kern/kern_condvar.c   Thu Mar 16 07:10:08 2017        (r315386)
@@ -122,7 +122,7 @@ _cv_wait(struct cv *cvp, struct lock_obj
            "Waiting on \"%s\"", cvp->cv_description);
        class = LOCK_CLASS(lock);
 
-       if (SCHEDULER_STOPPED())
+       if (SCHEDULER_STOPPED_TD(td))
                return;
 
        sleepq_lock(cvp);
@@ -176,7 +176,7 @@ _cv_wait_unlock(struct cv *cvp, struct l
            ("cv_wait_unlock cannot be used with Giant"));
        class = LOCK_CLASS(lock);
 
-       if (SCHEDULER_STOPPED()) {
+       if (SCHEDULER_STOPPED_TD(td)) {
                class->lc_unlock(lock);
                return;
        }
@@ -227,7 +227,7 @@ _cv_wait_sig(struct cv *cvp, struct lock
            "Waiting on \"%s\"", cvp->cv_description);
        class = LOCK_CLASS(lock);
 
-       if (SCHEDULER_STOPPED())
+       if (SCHEDULER_STOPPED_TD(td))
                return (0);
 
        sleepq_lock(cvp);
@@ -287,7 +287,7 @@ _cv_timedwait_sbt(struct cv *cvp, struct
            "Waiting on \"%s\"", cvp->cv_description);
        class = LOCK_CLASS(lock);
 
-       if (SCHEDULER_STOPPED())
+       if (SCHEDULER_STOPPED_TD(td))
                return (0);
 
        sleepq_lock(cvp);
@@ -349,7 +349,7 @@ _cv_timedwait_sig_sbt(struct cv *cvp, st
            "Waiting on \"%s\"", cvp->cv_description);
        class = LOCK_CLASS(lock);
 
-       if (SCHEDULER_STOPPED())
+       if (SCHEDULER_STOPPED_TD(td))
                return (0);
 
        sleepq_lock(cvp);

Modified: stable/11/sys/kern/kern_mutex.c
==============================================================================
--- stable/11/sys/kern/kern_mutex.c     Thu Mar 16 07:09:47 2017        (r315385)
+++ stable/11/sys/kern/kern_mutex.c     Thu Mar 16 07:10:08 2017        (r315386)
@@ -228,9 +228,6 @@ __mtx_lock_flags(volatile uintptr_t *c, 
        struct mtx *m;
        uintptr_t tid, v;
 
-       if (SCHEDULER_STOPPED())
-               return;
-
        m = mtxlock2mtx(c);
 
        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
@@ -263,9 +260,6 @@ __mtx_unlock_flags(volatile uintptr_t *c
 {
        struct mtx *m;
 
-       if (SCHEDULER_STOPPED())
-               return;
-
        m = mtxlock2mtx(c);
 
        KASSERT(m->mtx_lock != MTX_DESTROYED,

Modified: stable/11/sys/kern/kern_rwlock.c
==============================================================================
--- stable/11/sys/kern/kern_rwlock.c    Thu Mar 16 07:09:47 2017        (r315385)
+++ stable/11/sys/kern/kern_rwlock.c    Thu Mar 16 07:10:08 2017        (r315386)
@@ -267,9 +267,6 @@ _rw_wlock_cookie(volatile uintptr_t *c, 
        struct rwlock *rw;
        uintptr_t tid, v;
 
-       if (SCHEDULER_STOPPED())
-               return;
-
        rw = rwlock2rw(c);
 
        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
@@ -335,9 +332,6 @@ _rw_wunlock_cookie(volatile uintptr_t *c
 {
        struct rwlock *rw;
 
-       if (SCHEDULER_STOPPED())
-               return;
-
        rw = rwlock2rw(c);
 
        KASSERT(rw->rw_lock != RW_DESTROYED,

Modified: stable/11/sys/kern/kern_sx.c
==============================================================================
--- stable/11/sys/kern/kern_sx.c        Thu Mar 16 07:09:47 2017        (r315385)
+++ stable/11/sys/kern/kern_sx.c        Thu Mar 16 07:10:08 2017        (r315386)
@@ -293,8 +293,6 @@ _sx_xlock(struct sx *sx, int opts, const
        uintptr_t tid, x;
        int error = 0;
 
-       if (SCHEDULER_STOPPED())
-               return (0);
        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));
@@ -358,8 +356,6 @@ void
 _sx_xunlock(struct sx *sx, const char *file, int line)
 {
 
-       if (SCHEDULER_STOPPED())
-               return;
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_XLOCKED, file, line);

Modified: stable/11/sys/kern/kern_synch.c
==============================================================================
--- stable/11/sys/kern/kern_synch.c     Thu Mar 16 07:09:47 2017        (r315385)
+++ stable/11/sys/kern/kern_synch.c     Thu Mar 16 07:10:08 2017        (r315386)
@@ -162,7 +162,7 @@ _sleep(void *ident, struct lock_object *
        else
                class = NULL;
 
-       if (SCHEDULER_STOPPED()) {
+       if (SCHEDULER_STOPPED_TD(td)) {
                if (lock != NULL && priority & PDROP)
                        class->lc_unlock(lock);
                return (0);
@@ -250,7 +250,7 @@ msleep_spin_sbt(void *ident, struct mtx 
        KASSERT(p != NULL, ("msleep1"));
        KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
 
-       if (SCHEDULER_STOPPED())
+       if (SCHEDULER_STOPPED_TD(td))
                return (0);
 
        sleepq_lock(ident);
@@ -411,7 +411,7 @@ mi_switch(int flags, struct thread *newt
         */
        if (kdb_active)
                kdb_switch();
-       if (SCHEDULER_STOPPED())
+       if (SCHEDULER_STOPPED_TD(td))
                return;
        if (flags & SW_VOL) {
                td->td_ru.ru_nvcsw++;

Modified: stable/11/sys/sys/systm.h
==============================================================================
--- stable/11/sys/sys/systm.h   Thu Mar 16 07:09:47 2017        (r315385)
+++ stable/11/sys/sys/systm.h   Thu Mar 16 07:10:08 2017        (r315386)
@@ -128,7 +128,11 @@ void       kassert_panic(const char *fmt, ...)
  * Otherwise, the kernel will deadlock since the scheduler isn't
  * going to run the thread that holds any lock we need.
  */
-#define        SCHEDULER_STOPPED() __predict_false(curthread->td_stopsched)
+#define        SCHEDULER_STOPPED_TD(td)  ({                            \
+       MPASS((td) == curthread);                                       \
+       __predict_false((td)->td_stopsched);                            \
+})
+#define        SCHEDULER_STOPPED() SCHEDULER_STOPPED_TD(curthread)
 
 /*
  * Align variables.