On Thu, Dec 03, 2020 at 08:59:13PM -0500, Waiman Long wrote:
> On 12/3/20 3:11 PM, Eric W. Biederman wrote:

> > +static inline int __down_read_interruptible(struct rw_semaphore *sem)
> > +{
> > +   if (!rwsem_read_trylock(sem)) {
> > +           if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_INTERRUPTIBLE)))
> > +                   return -EINTR;
> > +           DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
> > +   } else {
> > +           rwsem_set_reader_owned(sem);
> > +   }
> > +   return 0;
> > +}
> > +
> >   static inline int __down_read_killable(struct rw_semaphore *sem)
> >   {
> >     if (!rwsem_read_trylock(sem)) {
> > @@ -1495,6 +1507,20 @@ void __sched down_read(struct rw_semaphore *sem)
> >   }
> >   EXPORT_SYMBOL(down_read);
> > +int __sched down_read_interruptible(struct rw_semaphore *sem)
> > +{
> > +   might_sleep();
> > +   rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
> > +
> > +   if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
> > +           rwsem_release(&sem->dep_map, _RET_IP_);
> > +           return -EINTR;
> > +   }
> > +
> > +   return 0;
> > +}
> > +EXPORT_SYMBOL(down_read_interruptible);
> > +
> >   int __sched down_read_killable(struct rw_semaphore *sem)
> >   {
> >     might_sleep();
> 
> Acked-by: Waiman Long <long...@redhat.com>

Yeah, that seems correct. There's an unfortunate amount of copy-paste
there, though.

Do we want to follow that up with something like this?

---
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -275,7 +275,25 @@ static inline bool rwsem_read_trylock(st
        long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
        if (WARN_ON_ONCE(cnt < 0))
                rwsem_set_nonspinnable(sem);
-       return !(cnt & RWSEM_READ_FAILED_MASK);
+
+       if (!(cnt & RWSEM_READ_FAILED_MASK)) {
+               rwsem_set_reader_owned(sem);
+               return true;
+       }
+
+       return false;
+}
+
+static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
+{
+       long tmp = RWSEM_UNLOCKED_VALUE;
+
+       if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
+               rwsem_set_owner(sem);
+               return true;
+       }
+
+       return false;
 }
 
 /*
@@ -1335,38 +1353,29 @@ static struct rw_semaphore *rwsem_downgr
 /*
  * lock for reading
  */
-static inline void __down_read(struct rw_semaphore *sem)
+static inline int __down_read_common(struct rw_semaphore *sem, int state)
 {
        if (!rwsem_read_trylock(sem)) {
-               rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
+               if (IS_ERR(rwsem_down_read_slowpath(sem, state)))
+                       return -EINTR;
                DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
-       } else {
-               rwsem_set_reader_owned(sem);
        }
+       return 0;
 }
 
-static inline int __down_read_interruptible(struct rw_semaphore *sem)
+static __always_inline void __down_read(struct rw_semaphore *sem)
 {
-       if (!rwsem_read_trylock(sem)) {
-               if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_INTERRUPTIBLE)))
-                       return -EINTR;
-               DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
-       } else {
-               rwsem_set_reader_owned(sem);
-       }
-       return 0;
+       __down_read_common(sem, TASK_UNINTERRUPTIBLE);
 }
 
-static inline int __down_read_killable(struct rw_semaphore *sem)
+static __always_inline int __down_read_interruptible(struct rw_semaphore *sem)
 {
-       if (!rwsem_read_trylock(sem)) {
-               if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
-                       return -EINTR;
-               DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
-       } else {
-               rwsem_set_reader_owned(sem);
-       }
-       return 0;
+       return __down_read_common(sem, TASK_INTERRUPTIBLE);
+}
+
+static __always_inline int __down_read_killable(struct rw_semaphore *sem)
+{
+       return __down_read_common(sem, TASK_KILLABLE);
 }
 
 static inline int __down_read_trylock(struct rw_semaphore *sem)
@@ -1392,44 +1401,29 @@ static inline int __down_read_trylock(st
 /*
  * lock for writing
  */
-static inline void __down_write(struct rw_semaphore *sem)
+static inline int __down_write_common(struct rw_semaphore *sem, int state)
 {
-       long tmp = RWSEM_UNLOCKED_VALUE;
-
-       if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
-                                                     RWSEM_WRITER_LOCKED)))
-               rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
-       else
-               rwsem_set_owner(sem);
+       if (unlikely(!rwsem_write_trylock(sem))) {
+               if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
+                       return -EINTR;
+       }
+       return 0;
 }
 
-static inline int __down_write_killable(struct rw_semaphore *sem)
+static __always_inline void __down_write(struct rw_semaphore *sem)
 {
-       long tmp = RWSEM_UNLOCKED_VALUE;
+       __down_write_common(sem, TASK_UNINTERRUPTIBLE);
+}
 
-       if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
-                                                     RWSEM_WRITER_LOCKED))) {
-               if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
-                       return -EINTR;
-       } else {
-               rwsem_set_owner(sem);
-       }
-       return 0;
+static __always_inline int __down_write_killable(struct rw_semaphore *sem)
+{
+       return __down_write_common(sem, TASK_KILLABLE);
 }
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-       long tmp;
-
        DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
-
-       tmp  = RWSEM_UNLOCKED_VALUE;
-       if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
-                                           RWSEM_WRITER_LOCKED)) {
-               rwsem_set_owner(sem);
-               return true;
-       }
-       return false;
+       return rwsem_write_trylock(sem);
 }
 
 /*

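For reference, a caller of the new down_read_interruptible() would look
roughly like the sketch below (illustrative only: my_dev and its embedded
rwsem are made-up names, not part of either patch). The point is that,
unlike down_read(), the return value must be checked and -EINTR propagated
back when a signal interrupts the sleep:

#include <linux/rwsem.h>

struct my_dev {
	struct rw_semaphore lock;	/* set up with init_rwsem() */
	/* ... */
};

static int my_dev_read_locked_op(struct my_dev *dev)
{
	int err;

	err = down_read_interruptible(&dev->lock);
	if (err)	/* -EINTR: a signal arrived while we slept */
		return err;

	/* read-side critical section */

	up_read(&dev->lock);
	return 0;
}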