sk_lock can be taken while reclaiming memory (in nfsd for loop-back
NFS mounts, and presumably in nfs), and memory can be allocated while
holding sk_lock, at least via:

 inet_listen -> inet_csk_listen_start -> reqsk_queue_alloc

So to avoid deadlocks, always set PF_FSTRANS while holding sk_lock.

This deadlock was found by lockdep.

Signed-off-by: NeilBrown <ne...@suse.de>
---
 include/net/sock.h |    1 +
 net/core/sock.c    |    2 ++
 2 files changed, 3 insertions(+)

diff --git a/include/net/sock.h b/include/net/sock.h
index b9586a137cad..27c355637e44 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -324,6 +324,7 @@ struct sock {
 #define sk_v6_rcv_saddr        __sk_common.skc_v6_rcv_saddr
 
        socket_lock_t           sk_lock;
+       unsigned int            sk_pflags; /* process flags before taking lock */
        struct sk_buff_head     sk_receive_queue;
        /*
         * The backlog queue is special, it is always used with
diff --git a/net/core/sock.c b/net/core/sock.c
index cf9bd24e4099..8bc677ef072e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2341,6 +2341,7 @@ void lock_sock_nested(struct sock *sk, int subclass)
        /*
         * The sk_lock has mutex_lock() semantics here:
         */
+       current_set_flags_nested(&sk->sk_pflags, PF_FSTRANS);
        mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
        local_bh_enable();
 }
@@ -2352,6 +2353,7 @@ void release_sock(struct sock *sk)
         * The sk_lock has mutex_unlock() semantics:
         */
        mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+       current_restore_flags_nested(&sk->sk_pflags, PF_FSTRANS);
 
        spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_backlog.tail)


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to