On Mon, May 27, 2019 at 05:14:38PM +0200, Peter Zijlstra wrote:
> On Fri, May 24, 2019 at 11:15:09PM +0300, Imre Deak wrote:
> > diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
> > index 967352d32af1..9e2a4ab6c731 100644
> > --- a/kernel/locking/lockdep.c
> > +++ b/kernel/locking/lockdep.c
> > @@ -3637,6 +3637,11 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
> >  
> >  static int __lock_is_held(const struct lockdep_map *lock, int read);
> >  
> > +static int hlock_reference(int reference)
> > +{
> > +   return reference ? : 1;
> > +}
> > +
> >  /*
> >   * This gets called for every mutex_lock*()/spin_lock*() operation.
> >   * We maintain the dependency maps and validate the locking attempt:
> > @@ -3702,17 +3707,15 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
> >     if (depth) {
> >             hlock = curr->held_locks + depth - 1;
> >             if (hlock->class_idx == class_idx && nest_lock) {
> > -                   if (hlock->references) {
> > -                           /*
> > -                            * Check: unsigned int references overflow.
> > -                            */
> > -                           if (DEBUG_LOCKS_WARN_ON(hlock->references == UINT_MAX))
> 
> What tree is this against? Afaict this is still 12 bits ?!
> 
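For reference: the counter in question is a 12-bit bitfield in struct
held_lock (include/linux/lockdep.h), something like:

	struct held_lock {
		...
		unsigned int references:12;
		...
	};

so it tops out at 4095 and never gets anywhere near UINT_MAX.
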
> > -                                   return 0;
> > +                   /*
> > +                    * Check: unsigned int references overflow.
> > +                    */
> > +                   if (DEBUG_LOCKS_WARN_ON(hlock_reference(hlock->references) >
> > +                                           UINT_MAX - hlock_reference(references)))
> 
> Idem. Also very weird overflow check..
> 
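To spell that out: hlock_reference(hlock->references) is at most 4095
given the 12-bit field, so unless the incoming references is itself
within 4095 of UINT_MAX this condition can never be true, while the real
overflow (the bitfield wrapping at 4096) sails through undetected.
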
> > +                           return 0;
> >  
> > -                           hlock->references++;
> > -                   } else {
> > -                           hlock->references = 2;
> > -                   }
> > +                   hlock->references = hlock_reference(hlock->references) +
> > +                                       hlock_reference(references);
> >  
> >                     return 2;
> >             }

I think you wanted something like this:

--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3731,6 +3731,11 @@ print_lock_nested_lock_not_held(struct t
 
 static int __lock_is_held(const struct lockdep_map *lock, int read);
 
+static inline int hlock_references(struct held_lock *hlock)
+{
+       return hlock->references ? : 1;
+}
+
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
  * We maintain the dependency maps and validate the locking attempt:
@@ -3796,17 +3801,14 @@ static int __lock_acquire(struct lockdep
        if (depth) {
                hlock = curr->held_locks + depth - 1;
                if (hlock->class_idx == class_idx && nest_lock) {
-                       if (hlock->references) {
-                               /*
-                                * Check: unsigned int references:12, overflow.
-                                */
-                               if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
-                                       return 0;
-
-                               hlock->references++;
-                       } else {
-                               hlock->references = 2;
-                       }
+                       if (!references)
+                               references++;
+
+                       hlock->references = hlock_references(hlock) + references;
+
+                       /* Overflow */
+                       if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
+                               return 0;
 
                        return 2;
                }

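For completeness: hlock->references == 0 means "held once, not
reference-counted", which is why hlock_references() maps 0 to 1 (the
"x ? : 1" form is the GNU missing-middle-operand extension, equivalent
to x ? x : 1). And the overflow test above works because
hlock_references(hlock) is always below 4096, so any mod-4096 wrap of
the bitfield leaves the stored sum strictly below the incoming
references. A quick userspace sketch of that arithmetic, with a made-up
fake_hlock standing in for held_lock (illustration only, not kernel
code):

	#include <assert.h>
	#include <stdio.h>

	/* Stand-in for the real struct held_lock; only the 12-bit
	 * reference counter matters here. */
	struct fake_hlock {
		unsigned int references:12;
	};

	/* Mirror of the merge logic in the __lock_acquire() hunk above:
	 * returns 2 on success, 0 on detected overflow. */
	static int merge_references(struct fake_hlock *hlock, int references)
	{
		if (!references)
			references++;

		/* "x ? : 1" is the GNU extension: x ? x : 1. */
		hlock->references = (hlock->references ? : 1) + references;

		/* Overflow: the 12-bit field wrapped mod 4096, which
		 * always leaves the stored value below 'references'. */
		if (hlock->references < references)
			return 0;

		return 2;
	}

	int main(void)
	{
		struct fake_hlock h = { .references = 0 };

		assert(merge_references(&h, 0) == 2);	/* 1 + 1 */
		assert(h.references == 2);

		h.references = 4094;
		assert(merge_references(&h, 1) == 2);	/* 4095 still fits */

		h.references = 4095;
		assert(merge_references(&h, 1) == 0);	/* 4096 wraps, caught */

		printf("merge/overflow semantics as expected\n");
		return 0;
	}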