Author: attilio
Date: Wed Sep 12 22:10:53 2012
New Revision: 240424
URL: http://svn.freebsd.org/changeset/base/240424
Log:
  Improve check coverage for idle threads.

  Idle threads are not allowed to acquire any lock but spinlocks.
  Deny any attempt to do so by panicking at the locking operation
  when INVARIANTS is on.  Then, remove the check on blocking on a
  turnstile.  The check in sleepqueues is left in place because idle
  threads are not allowed to use tsleep() either, which could still
  happen.

  Reviewed by:	bde, jhb, kib
  MFC after:	1 week

Modified:
  head/sys/kern/kern_lock.c
  head/sys/kern/kern_mutex.c
  head/sys/kern/kern_rmlock.c
  head/sys/kern/kern_rwlock.c
  head/sys/kern/kern_sx.c
  head/sys/kern/subr_turnstile.c

Modified: head/sys/kern/kern_lock.c
==============================================================================
--- head/sys/kern/kern_lock.c	Wed Sep 12 22:05:54 2012	(r240423)
+++ head/sys/kern/kern_lock.c	Wed Sep 12 22:10:53 2012	(r240424)
@@ -477,6 +477,9 @@ __lockmgr_args(struct lock *lk, u_int fl
 	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
 	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
 	    __func__, file, line));
+	KASSERT(!TD_IS_IDLETHREAD(curthread),
+	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
+	    lk->lock_object.lo_name, file, line));
 
 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
 	if (panicstr != NULL) {

Modified: head/sys/kern/kern_mutex.c
==============================================================================
--- head/sys/kern/kern_mutex.c	Wed Sep 12 22:05:54 2012	(r240423)
+++ head/sys/kern/kern_mutex.c	Wed Sep 12 22:10:53 2012	(r240424)
@@ -201,6 +201,9 @@ _mtx_lock_flags(struct mtx *m, int opts,
 	if (SCHEDULER_STOPPED())
 		return;
 	MPASS(curthread != NULL);
+	KASSERT(!TD_IS_IDLETHREAD(curthread),
+	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
+	    curthread, m->lock_object.lo_name, file, line));
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
 	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
@@ -301,6 +304,9 @@ mtx_trylock_flags_(struct mtx *m, int op
 		return (1);
 
 	MPASS(curthread != NULL);
+	KASSERT(!TD_IS_IDLETHREAD(curthread),
+	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
+	    curthread, m->lock_object.lo_name, file, line));
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
 	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,

Modified: head/sys/kern/kern_rmlock.c
==============================================================================
--- head/sys/kern/kern_rmlock.c	Wed Sep 12 22:05:54 2012	(r240423)
+++ head/sys/kern/kern_rmlock.c	Wed Sep 12 22:10:53 2012	(r240424)
@@ -498,6 +498,9 @@ void _rm_wlock_debug(struct rmlock *rm, 
 	if (SCHEDULER_STOPPED())
 		return;
 
+	KASSERT(!TD_IS_IDLETHREAD(curthread),
+	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
+	    curthread, rm->lock_object.lo_name, file, line));
 	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
 	    file, line, NULL);
@@ -540,6 +543,9 @@ _rm_rlock_debug(struct rmlock *rm, struc
 	if (SCHEDULER_STOPPED())
 		return (1);
 
+	KASSERT(!TD_IS_IDLETHREAD(curthread),
+	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
+	    curthread, rm->lock_object.lo_name, file, line));
 	if (!trylock && (rm->lock_object.lo_flags & RM_SLEEPABLE))
 		WITNESS_CHECKORDER(&rm->rm_lock_sx.lock_object, LOP_NEWORDER,
 		    file, line, NULL);

Modified: head/sys/kern/kern_rwlock.c
==============================================================================
--- head/sys/kern/kern_rwlock.c	Wed Sep 12 22:05:54 2012	(r240423)
+++ head/sys/kern/kern_rwlock.c	Wed Sep 12 22:10:53 2012	(r240424)
@@ -242,6 +242,9 @@ _rw_wlock(struct rwlock *rw, const char 
 	if (SCHEDULER_STOPPED())
 		return;
 	MPASS(curthread != NULL);
+	KASSERT(!TD_IS_IDLETHREAD(curthread),
+	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
+	    curthread, rw->lock_object.lo_name, file, line));
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
 	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
@@ -260,6 +263,9 @@ _rw_try_wlock(struct rwlock *rw, const c
 	if (SCHEDULER_STOPPED())
 		return (1);
 
+	KASSERT(!TD_IS_IDLETHREAD(curthread),
+	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
+	    curthread, rw->lock_object.lo_name, file, line));
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
@@ -333,6 +339,9 @@ _rw_rlock(struct rwlock *rw, const char 
 	if (SCHEDULER_STOPPED())
 		return;
 
+	KASSERT(!TD_IS_IDLETHREAD(curthread),
+	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
+	    curthread, rw->lock_object.lo_name, file, line));
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
 	KASSERT(rw_wowner(rw) != curthread,
@@ -521,6 +530,10 @@ _rw_try_rlock(struct rwlock *rw, const c
 	if (SCHEDULER_STOPPED())
 		return (1);
 
+	KASSERT(!TD_IS_IDLETHREAD(curthread),
+	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
+	    curthread, rw->lock_object.lo_name, file, line));
+
 	for (;;) {
 		x = rw->rw_lock;
 		KASSERT(rw->rw_lock != RW_DESTROYED,

Modified: head/sys/kern/kern_sx.c
==============================================================================
--- head/sys/kern/kern_sx.c	Wed Sep 12 22:05:54 2012	(r240423)
+++ head/sys/kern/kern_sx.c	Wed Sep 12 22:10:53 2012	(r240424)
@@ -250,6 +250,9 @@ _sx_slock(struct sx *sx, int opts, const
 	if (SCHEDULER_STOPPED())
 		return (0);
 	MPASS(curthread != NULL);
+	KASSERT(!TD_IS_IDLETHREAD(curthread),
+	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
+	    curthread, sx->lock_object.lo_name, file, line));
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
 	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
@@ -271,6 +274,10 @@ sx_try_slock_(struct sx *sx, const char 
 	if (SCHEDULER_STOPPED())
 		return (1);
 
+	KASSERT(!TD_IS_IDLETHREAD(curthread),
+	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
+	    curthread, sx->lock_object.lo_name, file, line));
+
 	for (;;) {
 		x = sx->sx_lock;
 		KASSERT(x != SX_LOCK_DESTROYED,
@@ -297,6 +304,9 @@ _sx_xlock(struct sx *sx, int opts, const
 	if (SCHEDULER_STOPPED())
 		return (0);
 	MPASS(curthread != NULL);
+	KASSERT(!TD_IS_IDLETHREAD(curthread),
+	    ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
+	    curthread, sx->lock_object.lo_name, file, line));
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
 	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
@@ -321,6 +331,9 @@ sx_try_xlock_(struct sx *sx, const char 
 		return (1);
 
 	MPASS(curthread != NULL);
+	KASSERT(!TD_IS_IDLETHREAD(curthread),
+	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
+	    curthread, sx->lock_object.lo_name, file, line));
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

Modified: head/sys/kern/subr_turnstile.c
==============================================================================
--- head/sys/kern/subr_turnstile.c	Wed Sep 12 22:05:54 2012	(r240423)
+++ head/sys/kern/subr_turnstile.c	Wed Sep 12 22:10:53 2012	(r240424)
@@ -684,7 +684,6 @@ turnstile_wait(struct turnstile *ts, str
 	if (owner)
 		MPASS(owner->td_proc->p_magic == P_MAGIC);
 	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
-	KASSERT(!TD_IS_IDLETHREAD(td), ("idle threads cannot block on locks"));
 
 	/*
 	 * If the lock does not already have a turnstile, use this thread's
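For context on the mechanism used throughout this change: KASSERT() only
has effect in kernels built with INVARIANTS; otherwise it compiles away
entirely, so the new assertions add no cost to production kernels.  The
sketch below approximates (it does not quote verbatim) the relevant
definitions from sys/systm.h and sys/proc.h; exact macro bodies may
differ across FreeBSD versions.

	/*
	 * Approximate sketch of the assertion machinery, under the
	 * assumption it matches the usual FreeBSD definitions.
	 */
	#ifdef INVARIANTS
	#define	KASSERT(exp, msg) do {					\
		if (__predict_false(!(exp)))				\
			panic msg;  /* msg is a parenthesized list */	\
	} while (0)
	#else
	#define	KASSERT(exp, msg) do { } while (0)  /* compiled away */
	#endif

	/* Idle-thread test: roughly, a per-thread flag check. */
	#define	TD_IS_IDLETHREAD(td)	((td)->td_flags & TDF_IDLETD)

Under INVARIANTS, a check such as the one added to _mtx_lock_flags()
therefore expands to an immediate panic when an idle thread attempts to
take a sleep mutex; with INVARIANTS off, neither the expression nor the
message arguments are evaluated, which is why the check can sit on every
lock acquisition path without a fast-path cost.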