On Fri, May 24, 2019 at 11:15:09PM +0300, Imre Deak wrote:
> diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
> index 967352d32af1..9e2a4ab6c731 100644
> --- a/kernel/locking/lockdep.c
> +++ b/kernel/locking/lockdep.c
> @@ -3637,6 +3637,11 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
>  
>  static int __lock_is_held(const struct lockdep_map *lock, int read);
>  
> +static int hlock_reference(int reference)
> +{
> +	return reference ? : 1;
> +}
> +
>  /*
>   * This gets called for every mutex_lock*()/spin_lock*() operation.
>   * We maintain the dependency maps and validate the locking attempt:
> @@ -3702,17 +3707,15 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
>  	if (depth) {
>  		hlock = curr->held_locks + depth - 1;
>  		if (hlock->class_idx == class_idx && nest_lock) {
> -			if (hlock->references) {
> -				/*
> -				 * Check: unsigned int references overflow.
> -				 */
> -				if (DEBUG_LOCKS_WARN_ON(hlock->references == UINT_MAX))

What tree is this against? Afaict this is still 12 bits ?!
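For context, afaict ->references is still a 12-bit bitfield in struct
held_lock (trimmed excerpt of what I believe include/linux/lockdep.h
looks like; surrounding fields elided), so it can never get anywhere
near UINT_MAX:

struct held_lock {
	/* ... */
	unsigned int references:12;	/* max value 4095, nowhere near UINT_MAX */
	/* ... */
};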
> -					return 0;
> +			/*
> +			 * Check: unsigned int references overflow.
> +			 */
> +			if (DEBUG_LOCKS_WARN_ON(hlock_reference(hlock->references) >
> +						UINT_MAX - hlock_reference(references)))

Idem. Also very weird overflow check.. (see the sketch below the diff)

> +				return 0;
>  
> -				hlock->references++;
> -			} else {
> -				hlock->references = 2;
> -			}
> +			hlock->references = hlock_reference(hlock->references) +
> +					    hlock_reference(references);
>  
>  			return 2;
>  		}
> -- 
> 2.17.1
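FWIW, the kind of check I'd expect here tests against the field's
actual width rather than UINT_MAX. A minimal sketch, with made-up
constant and helper names (the 12 mirrors the bitfield above; the
helper assumes both arguments already fit in 12 bits):

#include <linux/types.h>

#define HLOCK_REFERENCES_BITS	12	/* width of held_lock::references */
#define HLOCK_REFERENCES_MAX	((1U << HLOCK_REFERENCES_BITS) - 1)

/*
 * True iff cur + add would exceed the bitfield. Since add <= MAX,
 * MAX - add cannot wrap, so the comparison itself is overflow-safe.
 */
static inline bool hlock_references_overflow(unsigned int cur,
					     unsigned int add)
{
	return cur > HLOCK_REFERENCES_MAX - add;
}

which would make the warning read something like:

	if (DEBUG_LOCKS_WARN_ON(hlock_references_overflow(hlock_reference(hlock->references),
							  hlock_reference(references))))
		return 0;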