On 18.02.2017 23:06, Mateusz Guzik wrote:
> Author: mjg
> Date: Sat Feb 18 22:06:03 2017
> New Revision: 313928
> URL: https://svnweb.freebsd.org/changeset/base/313928
> 
> Log:
>   locks: clean up trylock primitives
>   
>   In particular this reduces accesses of the lock itself.
> 
> Modified:
>   head/sys/kern/kern_mutex.c
>   head/sys/kern/kern_rwlock.c
>   head/sys/kern/kern_sx.c
> 

This commit broke (at least) sx_try_xlock_(). The panic below comes from
vmspace_fork() failing a trylock on a vm_map sx that should be free.
Presumably the problem is that the new code does a single (non-looped)
atomic_fcmpset_acq_ptr(): on ARM's LL/SC atomics fcmpset may fail
spuriously even when the observed value matches, so the trylock can
report failure on an unowned lock — please confirm. Note also that the
rw/sx variants in the diff never set recursed = true on the recursion
path, unlike the mutex variant.

Michal


FreeBSD/arm (tegra124) (ttyu0)

login: panic: vmspace_fork: lock failed
cpuid = 2
KDB: stack backtrace:
db_trace_self() at db_trace_self
         pc = 0xc0728e60  lr = 0xc025ad04 (db_fetch_ksymtab+0x16c)
         sp = 0xf3578b48  fp = 0xf3578c60
db_fetch_ksymtab() at db_fetch_ksymtab+0x16c
         pc = 0xc025ad04  lr = 0xc0460a14 (vpanic+0x13c)
         sp = 0xf3578c68  fp = 0xf3578c88
         r4 = 0x00000100  r5 = 0xc66f8a80
         r6 = 0xc0819663  r7 = 0x00000001
vpanic() at vpanic+0x13c
         pc = 0xc0460a14  lr = 0xc04608d8 (vpanic)
         sp = 0xf3578c90  fp = 0xf3578ca4
         r4 = 0xc0819663  r5 = 0xf3578cac
         r6 = 0x00000000  r7 = 0x00000000
         r8 = 0x00000014  r9 = 0xf3578d5c
        r10 = 0xc69f8ba0
vpanic() at vpanic
         pc = 0xc04608d8  lr = 0xc07055e4 (vmspace_fork+0x104)
         sp = 0xf3578cac  fp = 0xf3578ce8
         r4 = 0xf3578ca4  r5 = 0xc04608d8
         r6 = 0xf3578cac  r7 = 0xc6c51c98
         r8 = 0x00000000  r9 = 0x00000000
        r10 = 0xc69f8ba0
vmspace_fork() at vmspace_fork+0x104
         pc = 0xc07055e4  lr = 0xc0428928 (fork1+0x590)
         sp = 0xf3578cf0  fp = 0xf3578d50
         r4 = 0xc0907f90  r5 = 0xc66f8a84
         r6 = 0x00000000  r7 = 0x00000000
         r8 = 0x00000014  r9 = 0xf3578d5c
        r10 = 0xc4eda710
fork1() at fork1+0x590
         pc = 0xc0428928  lr = 0xc0428378 (sys_fork+0x3c)
         sp = 0xf3578d58  fp = 0xf3578d80
         r4 = 0xc66f8a80  r5 = 0xf3578d5c
         r6 = 0x00000000  r7 = 0xc66ea388
         r8 = 0x00000000  r9 = 0xf3578da0
        r10 = 0x00033f5c
sys_fork() at sys_fork+0x3c
         pc = 0xc0428378  lr = 0xc0748a68 (swi_handler+0x330)
         sp = 0xf3578d88  fp = 0xf3578e40
         r4 = 0xc0ade110  r5 = 0xc66f8a80
swi_handler() at swi_handler+0x330
         pc = 0xc0748a68  lr = 0xc072b83c (swi_exit)
         sp = 0xf3578e48  fp = 0xbfbfdb70
         r4 = 0x20618000  r5 = 0x20618000
         r6 = 0x00000000  r7 = 0x00000002
         r8 = 0x2062cb10  r9 = 0x00033f54
        r10 = 0x00033f5c
swi_exit() at swi_exit
         pc = 0xc072b83c  lr = 0xc072b83c (swi_exit)
         sp = 0xf3578e48  fp = 0xbfbfdb70
KDB: enter: panic
[ thread pid 702 tid 100099 ]
Stopped at      kdb_enter+0x58: ldrb    r15, [r15, r15, ror r15]!
db> reboot

> Modified: head/sys/kern/kern_mutex.c
> ==============================================================================
> --- head/sys/kern/kern_mutex.c        Sat Feb 18 21:59:19 2017        
> (r313927)
> +++ head/sys/kern/kern_mutex.c        Sat Feb 18 22:06:03 2017        
> (r313928)
> @@ -374,13 +374,18 @@ int
>  _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int 
> line)
>  {
>       struct mtx *m;
> +     struct thread *td;
> +     uintptr_t tid, v;
>  #ifdef LOCK_PROFILING
>       uint64_t waittime = 0;
>       int contested = 0;
>  #endif
>       int rval;
> +     bool recursed;
>  
> -     if (SCHEDULER_STOPPED())
> +     td = curthread;
> +     tid = (uintptr_t)td;
> +     if (SCHEDULER_STOPPED_TD(td))
>               return (1);
>  
>       m = mtxlock2mtx(c);
> @@ -394,13 +399,21 @@ _mtx_trylock_flags_(volatile uintptr_t *
>           ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
>           file, line));
>  
> -     if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
> -         (opts & MTX_RECURSE) != 0)) {
> -             m->mtx_recurse++;
> -             atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
> -             rval = 1;
> -     } else
> -             rval = _mtx_obtain_lock(m, (uintptr_t)curthread);
> +     rval = 1;
> +     recursed = false;
> +     v = MTX_UNOWNED;
> +     if (!_mtx_obtain_lock_fetch(m, &v, tid)) {
> +             if (v == tid &&
> +                 ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
> +                 (opts & MTX_RECURSE) != 0)) {
> +                      m->mtx_recurse++;
> +                      atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
> +                      recursed = true;
> +             } else {
> +                     rval = 0;
> +             }
> +     }
> +
>       opts &= ~MTX_RECURSE;
>  
>       LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
> @@ -408,10 +421,9 @@ _mtx_trylock_flags_(volatile uintptr_t *
>               WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | 
> LOP_TRYLOCK,
>                   file, line);
>               TD_LOCKS_INC(curthread);
> -             if (m->mtx_recurse == 0)
> +             if (!recursed)
>                       LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
>                           m, contested, waittime, file, line);
> -
>       }
>  
>       return (rval);
> 
> Modified: head/sys/kern/kern_rwlock.c
> ==============================================================================
> --- head/sys/kern/kern_rwlock.c       Sat Feb 18 21:59:19 2017        
> (r313927)
> +++ head/sys/kern/kern_rwlock.c       Sat Feb 18 22:06:03 2017        
> (r313928)
> @@ -293,9 +293,14 @@ int
>  __rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
>  {
>       struct rwlock *rw;
> +     struct thread *td;
> +     uintptr_t tid, v;
>       int rval;
> +     bool recursed;
>  
> -     if (SCHEDULER_STOPPED())
> +     td = curthread;
> +     tid = (uintptr_t)td;
> +     if (SCHEDULER_STOPPED_TD(td))
>               return (1);
>  
>       rw = rwlock2rw(c);
> @@ -306,20 +311,23 @@ __rw_try_wlock(volatile uintptr_t *c, co
>       KASSERT(rw->rw_lock != RW_DESTROYED,
>           ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
>  
> -     if (rw_wlocked(rw) &&
> -         (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
> -             rw->rw_recurse++;
> -             atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
> -             rval = 1;
> -     } else
> -             rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
> -                 (uintptr_t)curthread);
> +     rval = 1;
> +     recursed = false;
> +     v = RW_UNLOCKED;
> +     if (!atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid)) {
> +             if (v == tid && (rw->lock_object.lo_flags & LO_RECURSABLE)) {
> +                     rw->rw_recurse++;
> +                     atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
> +             } else {
> +                     rval = 0;
> +             }
> +     }
>  
>       LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
>       if (rval) {
>               WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
>                   file, line);
> -             if (!rw_recursed(rw))
> +             if (!recursed)
>                       LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
>                           rw, 0, 0, file, line, LOCKSTAT_WRITER);
>               TD_LOCKS_INC(curthread);
> @@ -637,13 +645,13 @@ __rw_try_rlock(volatile uintptr_t *c, co
>           ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
>           curthread, rw->lock_object.lo_name, file, line));
>  
> +     x = rw->rw_lock;
>       for (;;) {
> -             x = rw->rw_lock;
>               KASSERT(rw->rw_lock != RW_DESTROYED,
>                   ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
>               if (!(x & RW_LOCK_READ))
>                       break;
> -             if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
> +             if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &x, x + 
> RW_ONE_READER)) {
>                       LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
>                           line);
>                       WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
> 
> Modified: head/sys/kern/kern_sx.c
> ==============================================================================
> --- head/sys/kern/kern_sx.c   Sat Feb 18 21:59:19 2017        (r313927)
> +++ head/sys/kern/kern_sx.c   Sat Feb 18 22:06:03 2017        (r313928)
> @@ -269,13 +269,13 @@ sx_try_slock_(struct sx *sx, const char 
>           ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
>           curthread, sx->lock_object.lo_name, file, line));
>  
> +     x = sx->sx_lock;
>       for (;;) {
> -             x = sx->sx_lock;
>               KASSERT(x != SX_LOCK_DESTROYED,
>                   ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
>               if (!(x & SX_LOCK_SHARED))
>                       break;
> -             if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
> +             if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + 
> SX_ONE_SHARER)) {
>                       LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, 
> line);
>                       WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
>                       LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
> @@ -322,9 +322,14 @@ _sx_xlock(struct sx *sx, int opts, const
>  int
>  sx_try_xlock_(struct sx *sx, const char *file, int line)
>  {
> +     struct thread *td;
> +     uintptr_t tid, x;
>       int rval;
> +     bool recursed;
>  
> -     if (SCHEDULER_STOPPED())
> +     td = curthread;
> +     tid = (uintptr_t)td;
> +     if (SCHEDULER_STOPPED_TD(td))
>               return (1);
>  
>       KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
> @@ -333,19 +338,23 @@ sx_try_xlock_(struct sx *sx, const char 
>       KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
>           ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
>  
> -     if (sx_xlocked(sx) &&
> -         (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
> -             sx->sx_recurse++;
> -             atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
> -             rval = 1;
> -     } else
> -             rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
> -                 (uintptr_t)curthread);
> +     rval = 1;
> +     recursed = false;
> +     x = SX_LOCK_UNLOCKED;
> +     if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) {
> +             if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
> +                     sx->sx_recurse++;
> +                     atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
> +             } else {
> +                     rval = 0;
> +             }
> +     }
> +
>       LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
>       if (rval) {
>               WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
>                   file, line);
> -             if (!sx_recursed(sx))
> +             if (!recursed)
>                       LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
>                           sx, 0, 0, file, line, LOCKSTAT_WRITER);
>               TD_LOCKS_INC(curthread);
> 
_______________________________________________
svn-src-head@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-head
To unsubscribe, send any mail to "svn-src-head-unsubscribe@freebsd.org"

Reply via email to