All accessing of the subrequest lists is now done in process context,
possibly in a workqueue, but no longer in BH context, so we don't need to
guard against BH interference when taking the netfs_io_request::lock
spinlock.  Switch the spin_lock_bh()/spin_unlock_bh() calls to plain
spin_lock()/spin_unlock().

Signed-off-by: David Howells <dhowe...@redhat.com>
cc: Jeff Layton <jlay...@kernel.org>
cc: linux-cach...@redhat.com
cc: linux-fsde...@vger.kernel.org
cc: linux...@kvack.org
---
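
Note (not part of the change, just a sketch of the rationale): the _bh
variants disable bottom halves so that a softirq taking the same lock
cannot deadlock against a process-context holder on the same CPU.  Since
every path that takes rreq->lock now runs in process context, the plain
variants suffice.  "rreq" and "subreq" below stand for any
netfs_io_request and netfs_io_subrequest.

	/* Old: rreq->lock could also be taken from BH (softirq) context,
	 * so process context had to disable bottom halves around the
	 * critical section to avoid a same-CPU deadlock.
	 */
	spin_lock_bh(&rreq->lock);
	list_add_tail(&subreq->rreq_link, &rreq->subrequests);
	spin_unlock_bh(&rreq->lock);

	/* New: all users run in process context (e.g. a workqueue worker),
	 * so bottom halves can stay enabled while the lock is held.
	 */
	spin_lock(&rreq->lock);
	list_add_tail(&subreq->rreq_link, &rreq->subrequests);
	spin_unlock(&rreq->lock);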
 fs/netfs/buffered_read.c |  4 ++--
 fs/netfs/direct_read.c   |  4 ++--
 fs/netfs/read_collect.c  | 20 ++++++++++----------
 fs/netfs/read_retry.c    |  8 ++++----
 fs/netfs/write_collect.c |  4 ++--
 fs/netfs/write_issue.c   |  4 ++--
 6 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index fa1013020ac9..4ff4b587dc4b 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -200,12 +200,12 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
                subreq->len     = size;
 
                atomic_inc(&rreq->nr_outstanding);
-               spin_lock_bh(&rreq->lock);
+               spin_lock(&rreq->lock);
                list_add_tail(&subreq->rreq_link, &rreq->subrequests);
                subreq->prev_donated = rreq->prev_donated;
                rreq->prev_donated = 0;
                trace_netfs_sreq(subreq, netfs_sreq_trace_added);
-               spin_unlock_bh(&rreq->lock);
+               spin_unlock(&rreq->lock);
 
                source = netfs_cache_prepare_read(rreq, subreq, rreq->i_size);
                subreq->source = source;
diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
index 54027fd14904..1a20cc3979c7 100644
--- a/fs/netfs/direct_read.c
+++ b/fs/netfs/direct_read.c
@@ -68,12 +68,12 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
                subreq->len     = size;
 
                atomic_inc(&rreq->nr_outstanding);
-               spin_lock_bh(&rreq->lock);
+               spin_lock(&rreq->lock);
                list_add_tail(&subreq->rreq_link, &rreq->subrequests);
                subreq->prev_donated = rreq->prev_donated;
                rreq->prev_donated = 0;
                trace_netfs_sreq(subreq, netfs_sreq_trace_added);
-               spin_unlock_bh(&rreq->lock);
+               spin_unlock(&rreq->lock);
 
                netfs_stat(&netfs_n_rh_download);
                if (rreq->netfs_ops->prepare_read) {
diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
index 454a5bbdd6f8..26e430baeb5a 100644
--- a/fs/netfs/read_collect.c
+++ b/fs/netfs/read_collect.c
@@ -144,7 +144,7 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq)
        prev_donated = READ_ONCE(subreq->prev_donated);
        next_donated =  READ_ONCE(subreq->next_donated);
        if (prev_donated || next_donated) {
-               spin_lock_bh(&rreq->lock);
+               spin_lock(&rreq->lock);
                prev_donated = subreq->prev_donated;
                next_donated =  subreq->next_donated;
                subreq->start -= prev_donated;
@@ -157,7 +157,7 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq)
                        next_donated = subreq->next_donated = 0;
                }
                trace_netfs_sreq(subreq, netfs_sreq_trace_add_donations);
-               spin_unlock_bh(&rreq->lock);
+               spin_unlock(&rreq->lock);
        }
 
        avail = subreq->transferred;
@@ -186,18 +186,18 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq)
                } else if (fpos < start) {
                        excess = fend - subreq->start;
 
-                       spin_lock_bh(&rreq->lock);
+                       spin_lock(&rreq->lock);
                        /* If we complete first on a folio split with the
                         * preceding subreq, donate to that subreq - otherwise
                         * we get the responsibility.
                         */
                        if (subreq->prev_donated != prev_donated) {
-                               spin_unlock_bh(&rreq->lock);
+                               spin_unlock(&rreq->lock);
                                goto donation_changed;
                        }
 
                        if (list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
-                               spin_unlock_bh(&rreq->lock);
+                               spin_unlock(&rreq->lock);
                                pr_err("Can't donate prior to front\n");
                                goto bad;
                        }
@@ -213,7 +213,7 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq)
 
                        if (subreq->consumed >= subreq->len)
                                goto remove_subreq_locked;
-                       spin_unlock_bh(&rreq->lock);
+                       spin_unlock(&rreq->lock);
                } else {
                        pr_err("fpos > start\n");
                        goto bad;
@@ -241,11 +241,11 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq)
        /* Donate the remaining downloaded data to one of the neighbouring
         * subrequests.  Note that we may race with them doing the same thing.
         */
-       spin_lock_bh(&rreq->lock);
+       spin_lock(&rreq->lock);
 
        if (subreq->prev_donated != prev_donated ||
            subreq->next_donated != next_donated) {
-               spin_unlock_bh(&rreq->lock);
+               spin_unlock(&rreq->lock);
                cond_resched();
                goto donation_changed;
        }
@@ -296,11 +296,11 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq)
        goto remove_subreq_locked;
 
 remove_subreq:
-       spin_lock_bh(&rreq->lock);
+       spin_lock(&rreq->lock);
 remove_subreq_locked:
        subreq->consumed = subreq->len;
        list_del(&subreq->rreq_link);
-       spin_unlock_bh(&rreq->lock);
+       spin_unlock(&rreq->lock);
        netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_consumed);
        return true;
 
diff --git a/fs/netfs/read_retry.c b/fs/netfs/read_retry.c
index a2021efa44c0..a33bd06e80f8 100644
--- a/fs/netfs/read_retry.c
+++ b/fs/netfs/read_retry.c
@@ -142,12 +142,12 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
                        __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
                        subreq->retry_count++;
 
-                       spin_lock_bh(&rreq->lock);
+                       spin_lock(&rreq->lock);
                        list_add_tail(&subreq->rreq_link, &rreq->subrequests);
                        subreq->prev_donated += rreq->prev_donated;
                        rreq->prev_donated = 0;
                        trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
-                       spin_unlock_bh(&rreq->lock);
+                       spin_unlock(&rreq->lock);
 
                        BUG_ON(!len);
 
@@ -217,9 +217,9 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
                __clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
                __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
        }
-       spin_lock_bh(&rreq->lock);
+       spin_lock(&rreq->lock);
        list_splice_tail_init(&queue, &rreq->subrequests);
-       spin_unlock_bh(&rreq->lock);
+       spin_unlock(&rreq->lock);
 }
 
 /*
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index 237018caba27..f026cbc0e2fe 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -238,14 +238,14 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 
                cancel:
                        /* Remove if completely consumed. */
-                       spin_lock_bh(&wreq->lock);
+                       spin_lock(&wreq->lock);
 
                        remove = front;
                        list_del_init(&front->rreq_link);
                        front = list_first_entry_or_null(&stream->subrequests,
                                                         struct netfs_io_subrequest, rreq_link);
                        stream->front = front;
-                       spin_unlock_bh(&wreq->lock);
+                       spin_unlock(&wreq->lock);
                        netfs_put_subrequest(remove, false,
                                             notes & SAW_FAILURE ?
                                             netfs_sreq_trace_put_cancel :
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 7a14a48e62ee..286bc2aa3ca0 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -203,7 +203,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
         * the list.  The collector only goes nextwards and uses the lock to
         * remove entries off of the front.
         */
-       spin_lock_bh(&wreq->lock);
+       spin_lock(&wreq->lock);
        list_add_tail(&subreq->rreq_link, &stream->subrequests);
        if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
                stream->front = subreq;
@@ -214,7 +214,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
                }
        }
 
-       spin_unlock_bh(&wreq->lock);
+       spin_unlock(&wreq->lock);
 
        stream->construct = subreq;
 }
