This is a note to let you know that I've just added the patch titled

    SUNRPC: Ensure that we wait for connections to complete before retrying

to the 4.1-stable tree which can be found at:
    
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     
sunrpc-ensure-that-we-wait-for-connections-to-complete-before-retrying.patch
and it can be found in the queue-4.1 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <[email protected]> know about it.


>From 0fdea1e8a2853f79d39b8555cc9de16a7e0ab26f Mon Sep 17 00:00:00 2001
From: Trond Myklebust <[email protected]>
Date: Wed, 16 Sep 2015 23:43:17 -0400
Subject: SUNRPC: Ensure that we wait for connections to complete before retrying

From: Trond Myklebust <[email protected]>

commit 0fdea1e8a2853f79d39b8555cc9de16a7e0ab26f upstream.

Commit 718ba5b87343 moved the responsibility for unlocking the socket to
xs_tcp_setup_socket, meaning that the socket will be unlocked before we
know that it has finished trying to connect. The following patch is based on
an initial patch by Russell King to ensure that we delay clearing the
XPRT_CONNECTING flag until we either know that we failed to initiate
a connection attempt, or the connection attempt itself failed.

Fixes: 718ba5b87343 ("SUNRPC: Add helpers to prevent socket create from racing")
Reported-by: Russell King <[email protected]>
Reported-by: Russell King <[email protected]>
Tested-by: Russell King <[email protected]>
Tested-by: Benjamin Coddington <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>

---
 include/linux/sunrpc/xprtsock.h |    3 +++
 net/sunrpc/xprtsock.c           |   11 ++++++++---
 2 files changed, 11 insertions(+), 3 deletions(-)

--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -42,6 +42,7 @@ struct sock_xprt {
        /*
         * Connection of transports
         */
+       unsigned long           sock_state;
        struct delayed_work     connect_worker;
        struct sockaddr_storage srcaddr;
        unsigned short          srcport;
@@ -76,6 +77,8 @@ struct sock_xprt {
  */
 #define TCP_RPC_REPLY          (1UL << 6)
 
+#define XPRT_SOCK_CONNECTING   1U
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_SUNRPC_XPRTSOCK_H */
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1434,6 +1434,7 @@ out:
 static void xs_tcp_state_change(struct sock *sk)
 {
        struct rpc_xprt *xprt;
+       struct sock_xprt *transport;
 
        read_lock_bh(&sk->sk_callback_lock);
        if (!(xprt = xprt_from_sock(sk)))
@@ -1445,13 +1446,12 @@ static void xs_tcp_state_change(struct s
                        sock_flag(sk, SOCK_ZAPPED),
                        sk->sk_shutdown);
 
+       transport = container_of(xprt, struct sock_xprt, xprt);
        trace_rpc_socket_state_change(xprt, sk->sk_socket);
        switch (sk->sk_state) {
        case TCP_ESTABLISHED:
                spin_lock(&xprt->transport_lock);
                if (!xprt_test_and_set_connected(xprt)) {
-                       struct sock_xprt *transport = container_of(xprt,
-                                       struct sock_xprt, xprt);
 
                        /* Reset TCP record info */
                        transport->tcp_offset = 0;
@@ -1460,6 +1460,8 @@ static void xs_tcp_state_change(struct s
                        transport->tcp_flags =
                                TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
                        xprt->connect_cookie++;
+                       clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
+                       xprt_clear_connecting(xprt);
 
                        xprt_wake_pending_tasks(xprt, -EAGAIN);
                }
@@ -1495,6 +1497,9 @@ static void xs_tcp_state_change(struct s
                smp_mb__after_atomic();
                break;
        case TCP_CLOSE:
+               if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
+                                       &transport->sock_state))
+                       xprt_clear_connecting(xprt);
                xs_sock_mark_closed(xprt);
        }
  out:
@@ -2111,6 +2116,7 @@ static int xs_tcp_finish_connecting(stru
        /* Tell the socket layer to start connecting... */
        xprt->stat.connect_count++;
        xprt->stat.connect_start = jiffies;
+       set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
        ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
        switch (ret) {
        case 0:
@@ -2175,7 +2181,6 @@ static void xs_tcp_setup_socket(struct w
        case -EINPROGRESS:
        case -EALREADY:
                xprt_unlock_connect(xprt, transport);
-               xprt_clear_connecting(xprt);
                return;
        case -EINVAL:
                /* Happens, for instance, if the user specified a link


Patches currently in stable-queue which might be from 
[email protected] are

queue-4.1/nfs41-flexfiles-zero-out-ds-write-wcc.patch
queue-4.1/nfs-nfs_set_pgio_error-sometimes-misses-errors.patch
queue-4.1/nfs-fix-a-null-pointer-dereference-of-migration-recovery-ops-for-v4.2-client.patch
queue-4.1/sunrpc-xs_reset_transport-must-mark-the-connection-as-disconnected.patch
queue-4.1/sunrpc-lock-the-transport-layer-on-shutdown.patch
queue-4.1/nfs-don-t-let-the-ctime-override-attribute-barriers.patch
queue-4.1/sunrpc-fix-a-thinko-in-xs_connect.patch
queue-4.1/nfsv4-don-t-set-setattr-for-o_rdonly-o_excl.patch
queue-4.1/nfs41-flexfiles-update-inode-after-write-finishes.patch
queue-4.1/nfsv4.1-pnfs-fix-borken-function-_same_data_server_addrs_locked.patch
queue-4.1/nfsv4-force-a-post-op-attribute-update-when-holding-a-delegation.patch
queue-4.1/nfsv4-pnfs-ensure-we-don-t-miss-a-file-extension.patch
queue-4.1/nfsv4.1-flexfiles-fix-a-protocol-error-in-layoutreturn.patch
queue-4.1/revert-nfsv4-remove-incorrect-check-in-can_open_delegated.patch
queue-4.1/sunrpc-ensure-that-we-wait-for-connections-to-complete-before-retrying.patch
queue-4.1/nfsv4.1-fix-a-protocol-issue-with-close-stateids.patch
--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to