From: David Howells <dhowells@redhat.com>
To: Christian Brauner <christian@brauner.io>,
Steve French <smfrench@gmail.com>,
Matthew Wilcox <willy@infradead.org>
Cc: David Howells <dhowells@redhat.com>,
Jeff Layton <jlayton@kernel.org>,
Gao Xiang <hsiangkao@linux.alibaba.com>,
Dominique Martinet <asmadeus@codewreck.org>,
Marc Dionne <marc.dionne@auristor.com>,
Paulo Alcantara <pc@manguebit.com>,
Shyam Prasad N <sprasad@microsoft.com>,
Tom Talpey <tom@talpey.com>,
Eric Van Hensbergen <ericvh@kernel.org>,
Ilya Dryomov <idryomov@gmail.com>,
netfs@lists.linux.dev, linux-afs@lists.infradead.org,
linux-cifs@vger.kernel.org, linux-nfs@vger.kernel.org,
ceph-devel@vger.kernel.org, v9fs@lists.linux.dev,
linux-erofs@lists.ozlabs.org, linux-fsdevel@vger.kernel.org,
linux-mm@kvack.org, netdev@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-cachefs@redhat.com
Subject: [PATCH v3 12/33] netfs: Don't use bh spinlock
Date: Wed, 6 Nov 2024 12:35:36 +0000
Message-ID: <20241106123559.724888-13-dhowells@redhat.com>
In-Reply-To: <20241106123559.724888-1-dhowells@redhat.com>
All accesses to the subrequest lists are now made in process context,
possibly in a workqueue, and never in BH (softirq) context, so there is no
longer any need to guard against BH interference when taking the
netfs_io_request::lock spinlock.  Switch the spin_lock_bh()/spin_unlock_bh()
pairs to plain spin_lock()/spin_unlock().
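For illustration, a minimal sketch of the before/after pattern (using a
hypothetical add_subreq() helper, not code from this patch; the field names
follow struct netfs_io_request and struct netfs_io_subrequest):

	/* Before: the lists could also be accessed from BH (softirq)
	 * context, so bottom halves had to be disabled locally to avoid
	 * deadlocking if a softirq fired on this CPU whilst the lock
	 * was held.
	 */
	static void add_subreq(struct netfs_io_request *rreq,
			       struct netfs_io_subrequest *subreq)
	{
		spin_lock_bh(&rreq->lock);
		list_add_tail(&subreq->rreq_link, &rreq->subrequests);
		spin_unlock_bh(&rreq->lock);
	}

	/* After: every locker runs in process context (possibly in a
	 * workqueue), so the plain, cheaper lock suffices.
	 */
	static void add_subreq(struct netfs_io_request *rreq,
			       struct netfs_io_subrequest *subreq)
	{
		spin_lock(&rreq->lock);
		list_add_tail(&subreq->rreq_link, &rreq->subrequests);
		spin_unlock(&rreq->lock);
	}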
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
---
fs/netfs/buffered_read.c | 4 ++--
fs/netfs/direct_read.c | 4 ++--
fs/netfs/read_collect.c | 20 ++++++++++----------
fs/netfs/read_retry.c | 8 ++++----
fs/netfs/write_collect.c | 4 ++--
fs/netfs/write_issue.c | 4 ++--
6 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 6fd4f3bef3b4..4a48b79b8807 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -200,12 +200,12 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
subreq->len = size;
atomic_inc(&rreq->nr_outstanding);
- spin_lock_bh(&rreq->lock);
+ spin_lock(&rreq->lock);
list_add_tail(&subreq->rreq_link, &rreq->subrequests);
subreq->prev_donated = rreq->prev_donated;
rreq->prev_donated = 0;
trace_netfs_sreq(subreq, netfs_sreq_trace_added);
- spin_unlock_bh(&rreq->lock);
+ spin_unlock(&rreq->lock);
source = netfs_cache_prepare_read(rreq, subreq, rreq->i_size);
subreq->source = source;
diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
index 54027fd14904..1a20cc3979c7 100644
--- a/fs/netfs/direct_read.c
+++ b/fs/netfs/direct_read.c
@@ -68,12 +68,12 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
subreq->len = size;
atomic_inc(&rreq->nr_outstanding);
- spin_lock_bh(&rreq->lock);
+ spin_lock(&rreq->lock);
list_add_tail(&subreq->rreq_link, &rreq->subrequests);
subreq->prev_donated = rreq->prev_donated;
rreq->prev_donated = 0;
trace_netfs_sreq(subreq, netfs_sreq_trace_added);
- spin_unlock_bh(&rreq->lock);
+ spin_unlock(&rreq->lock);
netfs_stat(&netfs_n_rh_download);
if (rreq->netfs_ops->prepare_read) {
diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
index 146abb2e399a..53ef7e0f3e9c 100644
--- a/fs/netfs/read_collect.c
+++ b/fs/netfs/read_collect.c
@@ -142,7 +142,7 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq)
prev_donated = READ_ONCE(subreq->prev_donated);
next_donated = READ_ONCE(subreq->next_donated);
if (prev_donated || next_donated) {
- spin_lock_bh(&rreq->lock);
+ spin_lock(&rreq->lock);
prev_donated = subreq->prev_donated;
next_donated = subreq->next_donated;
subreq->start -= prev_donated;
@@ -155,7 +155,7 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq)
next_donated = subreq->next_donated = 0;
}
trace_netfs_sreq(subreq, netfs_sreq_trace_add_donations);
- spin_unlock_bh(&rreq->lock);
+ spin_unlock(&rreq->lock);
}
avail = subreq->transferred;
@@ -184,18 +184,18 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq)
} else if (fpos < start) {
excess = fend - subreq->start;
- spin_lock_bh(&rreq->lock);
+ spin_lock(&rreq->lock);
/* If we complete first on a folio split with the
* preceding subreq, donate to that subreq - otherwise
* we get the responsibility.
*/
if (subreq->prev_donated != prev_donated) {
- spin_unlock_bh(&rreq->lock);
+ spin_unlock(&rreq->lock);
goto donation_changed;
}
if (list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
- spin_unlock_bh(&rreq->lock);
+ spin_unlock(&rreq->lock);
pr_err("Can't donate prior to front\n");
goto bad;
}
@@ -211,7 +211,7 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq)
if (subreq->consumed >= subreq->len)
goto remove_subreq_locked;
- spin_unlock_bh(&rreq->lock);
+ spin_unlock(&rreq->lock);
} else {
pr_err("fpos > start\n");
goto bad;
@@ -239,11 +239,11 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq)
/* Donate the remaining downloaded data to one of the neighbouring
* subrequests. Note that we may race with them doing the same thing.
*/
- spin_lock_bh(&rreq->lock);
+ spin_lock(&rreq->lock);
if (subreq->prev_donated != prev_donated ||
subreq->next_donated != next_donated) {
- spin_unlock_bh(&rreq->lock);
+ spin_unlock(&rreq->lock);
cond_resched();
goto donation_changed;
}
@@ -293,11 +293,11 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq)
goto remove_subreq_locked;
remove_subreq:
- spin_lock_bh(&rreq->lock);
+ spin_lock(&rreq->lock);
remove_subreq_locked:
subreq->consumed = subreq->len;
list_del(&subreq->rreq_link);
- spin_unlock_bh(&rreq->lock);
+ spin_unlock(&rreq->lock);
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_consumed);
return true;
diff --git a/fs/netfs/read_retry.c b/fs/netfs/read_retry.c
index d1986cec3db7..264f3cb6a7dc 100644
--- a/fs/netfs/read_retry.c
+++ b/fs/netfs/read_retry.c
@@ -139,12 +139,12 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
- spin_lock_bh(&rreq->lock);
+ spin_lock(&rreq->lock);
list_add_tail(&subreq->rreq_link, &rreq->subrequests);
subreq->prev_donated += rreq->prev_donated;
rreq->prev_donated = 0;
trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
- spin_unlock_bh(&rreq->lock);
+ spin_unlock(&rreq->lock);
BUG_ON(!len);
@@ -215,9 +215,9 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
__clear_bit(NETFS_SREQ_RETRYING, &subreq->flags);
}
- spin_lock_bh(&rreq->lock);
+ spin_lock(&rreq->lock);
list_splice_tail_init(&queue, &rreq->subrequests);
- spin_unlock_bh(&rreq->lock);
+ spin_unlock(&rreq->lock);
}
/*
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index 85e8e94da90a..d291b31dd074 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -238,14 +238,14 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
cancel:
/* Remove if completely consumed. */
- spin_lock_bh(&wreq->lock);
+ spin_lock(&wreq->lock);
remove = front;
list_del_init(&front->rreq_link);
front = list_first_entry_or_null(&stream->subrequests,
struct netfs_io_subrequest, rreq_link);
stream->front = front;
- spin_unlock_bh(&wreq->lock);
+ spin_unlock(&wreq->lock);
netfs_put_subrequest(remove, false,
notes & SAW_FAILURE ?
netfs_sreq_trace_put_cancel :
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index c186221b45c0..10b5300b9448 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -203,7 +203,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
* the list. The collector only goes nextwards and uses the lock to
* remove entries off of the front.
*/
- spin_lock_bh(&wreq->lock);
+ spin_lock(&wreq->lock);
list_add_tail(&subreq->rreq_link, &stream->subrequests);
if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
stream->front = subreq;
@@ -214,7 +214,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
}
}
- spin_unlock_bh(&wreq->lock);
+ spin_unlock(&wreq->lock);
stream->construct = subreq;
}