* [PATCH 2/4] netfs: Fix missing barriers when accessing stream->subrequests locklessly
2026-04-23 22:22 [PATCH 0/4] netfs: Yet further miscellaneous fixes David Howells
2026-04-23 22:22 ` [PATCH 1/4] netfs: Fix wrong return from netfs_read_sizes() on 32-bit SMP arches David Howells
@ 2026-04-23 22:22 ` David Howells
2026-04-23 22:22 ` [PATCH 3/4] afs: Fix afs_get_link() to take validate_lock around afs_read_single() David Howells
2026-04-23 22:22 ` [PATCH 4/4] afs: Fix RCU handling of symlinks in RCU pathwalk David Howells
3 siblings, 0 replies; 5+ messages in thread
From: David Howells @ 2026-04-23 22:22 UTC (permalink / raw)
To: Christian Brauner
Cc: David Howells, Paulo Alcantara, netfs, linux-afs, linux-cifs,
ceph-devel, linux-fsdevel, linux-kernel
The list of subrequests attached to stream->subrequests is accessed without
locks by netfs_collect_read_results() and netfs_collect_write_results(),
but they access subreq->flags without taking a barrier after getting the
subreq pointer from the list. Relatedly, the functions that build the list
don't use any sort of write barrier when constructing the list to make sure
that the NETFS_SREQ_IN_PROGRESS flag is perceived to be set first if no
lock is taken.
Fix this by:
(1) Add a new list_add_tail_release() function that uses a release barrier
to set the pointer to the new member of the list.
(2) Add a new list_first_entry_acquire() function that uses an acquire
barrier to read the pointer to the first member in a list (or return
NULL).
(3) Use list_add_tail_release() when adding a subreq to ->subrequests.
(4) Make direct-read and read-single use netfs_queue_read() so that they
share the relevant bit of code with buffered-read.
(5) Use list_first_entry_acquire() when initially accessing the front of
the list (when an item is removed, the pointer to the new front item
is obtained under the same lock).
Fixes: e2d46f2ec332 ("netfs: Change the read result collector to only use one work item")
Fixes: 288ace2f57c9 ("netfs: New writeback implementation")
Link: https://sashiko.dev/#/patchset/20260326104544.509518-1-dhowells%40redhat.com
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Paulo Alcantara <pc@manguebit.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
---
fs/netfs/buffered_read.c | 9 +++++----
fs/netfs/direct_read.c | 15 +--------------
fs/netfs/internal.h | 3 +++
fs/netfs/read_collect.c | 4 +++-
fs/netfs/read_single.c | 12 +-----------
fs/netfs/write_collect.c | 4 +++-
fs/netfs/write_issue.c | 3 ++-
include/linux/list.h | 37 +++++++++++++++++++++++++++++++++++++
8 files changed, 55 insertions(+), 32 deletions(-)
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index dcfe51eec266..5c78ec14e46b 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -156,9 +156,9 @@ static void netfs_read_cache_to_pagecache(struct netfs_io_request *rreq,
netfs_cache_read_terminated, subreq);
}
-static void netfs_queue_read(struct netfs_io_request *rreq,
- struct netfs_io_subrequest *subreq,
- bool last_subreq)
+void netfs_queue_read(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq,
+ bool last_subreq)
{
struct netfs_io_stream *stream = &rreq->io_streams[0];
@@ -169,7 +169,8 @@ static void netfs_queue_read(struct netfs_io_request *rreq,
* remove entries off of the front.
*/
spin_lock(&rreq->lock);
- list_add_tail(&subreq->rreq_link, &stream->subrequests);
+ /* Write IN_PROGRESS before pointer to new subreq */
+ list_add_tail_release(&subreq->rreq_link, &stream->subrequests);
if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
if (!stream->active) {
stream->collected_to = subreq->start;
diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
index f72e6da88cca..69a1a1e26143 100644
--- a/fs/netfs/direct_read.c
+++ b/fs/netfs/direct_read.c
@@ -47,7 +47,6 @@ static void netfs_prepare_dio_read_iterator(struct netfs_io_subrequest *subreq)
*/
static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
{
- struct netfs_io_stream *stream = &rreq->io_streams[0];
unsigned long long start = rreq->start;
ssize_t size = rreq->len;
int ret = 0;
@@ -66,19 +65,7 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
subreq->start = start;
subreq->len = size;
- __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
-
- spin_lock(&rreq->lock);
- list_add_tail(&subreq->rreq_link, &stream->subrequests);
- if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
- if (!stream->active) {
- stream->collected_to = subreq->start;
- /* Store list pointers before active flag */
- smp_store_release(&stream->active, true);
- }
- }
- trace_netfs_sreq(subreq, netfs_sreq_trace_added);
- spin_unlock(&rreq->lock);
+ netfs_queue_read(rreq, subreq, false);
netfs_stat(&netfs_n_rh_download);
if (rreq->netfs_ops->prepare_read) {
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index d436e20d3418..964479335ff7 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -23,6 +23,9 @@
/*
* buffered_read.c
*/
+void netfs_queue_read(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq,
+ bool last_subreq);
void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);
int netfs_prefetch_for_write(struct file *file, struct folio *folio,
size_t offset, size_t len);
diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
index eae067e3eaa5..5847796b54ec 100644
--- a/fs/netfs/read_collect.c
+++ b/fs/netfs/read_collect.c
@@ -205,8 +205,10 @@ static void netfs_collect_read_results(struct netfs_io_request *rreq)
* in progress. The issuer thread may be adding stuff to the tail
* whilst we're doing this.
*/
- front = list_first_entry_or_null(&stream->subrequests,
+ front = list_first_entry_acquire(&stream->subrequests,
struct netfs_io_subrequest, rreq_link);
+ /* Read first subreq pointer before IN_PROGRESS flag. */
+
while (front) {
size_t transferred;
diff --git a/fs/netfs/read_single.c b/fs/netfs/read_single.c
index d0e23bc42445..30e184caadb2 100644
--- a/fs/netfs/read_single.c
+++ b/fs/netfs/read_single.c
@@ -89,7 +89,6 @@ static void netfs_single_read_cache(struct netfs_io_request *rreq,
*/
static int netfs_single_dispatch_read(struct netfs_io_request *rreq)
{
- struct netfs_io_stream *stream = &rreq->io_streams[0];
struct netfs_io_subrequest *subreq;
int ret = 0;
@@ -102,14 +101,7 @@ static int netfs_single_dispatch_read(struct netfs_io_request *rreq)
subreq->len = rreq->len;
subreq->io_iter = rreq->buffer.iter;
- __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
-
- spin_lock(&rreq->lock);
- list_add_tail(&subreq->rreq_link, &stream->subrequests);
- trace_netfs_sreq(subreq, netfs_sreq_trace_added);
- /* Store list pointers before active flag */
- smp_store_release(&stream->active, true);
- spin_unlock(&rreq->lock);
+ netfs_queue_read(rreq, subreq, true);
netfs_single_cache_prepare_read(rreq, subreq);
switch (subreq->source) {
@@ -137,8 +129,6 @@ static int netfs_single_dispatch_read(struct netfs_io_request *rreq)
break;
}
- smp_wmb(); /* Write lists before ALL_QUEUED. */
- set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
return ret;
cancel:
netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index 4718e5174d65..f0cafa1d5835 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -227,8 +227,10 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
if (!smp_load_acquire(&stream->active))
continue;
- front = list_first_entry_or_null(&stream->subrequests,
+ front = list_first_entry_acquire(&stream->subrequests,
struct netfs_io_subrequest, rreq_link);
+ /* Read first subreq pointer before IN_PROGRESS flag. */
+
while (front) {
trace_netfs_collect_sreq(wreq, front);
//_debug("sreq [%x] %llx %zx/%zx",
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 2db688f94125..b0e9690bb90c 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -204,7 +204,8 @@ void netfs_prepare_write(struct netfs_io_request *wreq,
* remove entries off of the front.
*/
spin_lock(&wreq->lock);
- list_add_tail(&subreq->rreq_link, &stream->subrequests);
+ /* Write IN_PROGRESS before pointer to new subreq */
+ list_add_tail_release(&subreq->rreq_link, &stream->subrequests);
if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
if (!stream->active) {
stream->collected_to = subreq->start;
diff --git a/include/linux/list.h b/include/linux/list.h
index 00ea8e5fb88b..5af356efd725 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -191,6 +191,29 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head)
__list_add(new, head->prev, head);
}
+/**
+ * list_add_tail_release - add a new entry with release barrier
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head, using a release barrier to set
+ * the ->next pointer that points to it. This is useful for implementing
+ * queues, in particular one that the elements will be walked through forwards
+ * locklessly.
+ */
+static inline void list_add_tail_release(struct list_head *new,
+ struct list_head *head)
+{
+ struct list_head *prev = head->prev;
+
+ if (__list_add_valid(new, prev, head)) {
+ new->next = head;
+ new->prev = prev;
+ head->prev = new;
+ smp_store_release(&prev->next, new);
+ }
+}
+
/*
* Delete a list entry by making the prev/next entries
* point to each other.
@@ -644,6 +667,20 @@ static inline void list_splice_tail_init(struct list_head *list,
pos__ != head__ ? list_entry(pos__, type, member) : NULL; \
})
+/**
+ * list_first_entry_acquire - get the first element from a list with barrier
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * Note that if the list is empty, it returns NULL.
+ */
+#define list_first_entry_acquire(ptr, type, member) ({ \
+ struct list_head *head__ = (ptr); \
+ struct list_head *pos__ = smp_load_acquire(&head__->next); \
+ pos__ != head__ ? list_entry(pos__, type, member) : NULL; \
+})
+
/**
* list_last_entry_or_null - get the last element from a list
* @ptr: the list head to take the element from.
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH 4/4] afs: Fix RCU handling of symlinks in RCU pathwalk
2026-04-23 22:22 [PATCH 0/4] netfs: Yet further miscellaneous fixes David Howells
` (2 preceding siblings ...)
2026-04-23 22:22 ` [PATCH 3/4] afs: Fix afs_get_link() to take validate_lock around afs_read_single() David Howells
@ 2026-04-23 22:22 ` David Howells
3 siblings, 0 replies; 5+ messages in thread
From: David Howells @ 2026-04-23 22:22 UTC (permalink / raw)
To: Christian Brauner
Cc: David Howells, Paulo Alcantara, netfs, linux-afs, linux-cifs,
ceph-devel, linux-fsdevel, linux-kernel, Marc Dionne
The afs filesystem in the kernel doesn't handle RCU pathwalk of symlinks
correctly. The problem is twofold: firstly, it doesn't treat the buffer
pointers as RCU pointers with the appropriate barriering; and secondly, it
can race with another thread updating the contents of the symlink because a
third party updated it on the server.
Fix this by the following means:
(1) Keep a separate copy of the symlink contents with an rcu_head. This
is always going to be a lot smaller than a page, so it can be
kmalloc'd and save quite a bit of memory. It also needs a refcount
for non-RCU pathwalk.
(2) Split the symlink read and write-to-cache routines in afs from those
for directories.
(3) Discard the I/O buffer as soon as the write-to-cache completes as this
is a full page (plus a folio_queue).
(4) If there's no cache, discard the I/O buffer immediately after reading
and copying the content.
Fixes: 6698c02d64b2 ("afs: Locally initialise the contents of a new symlink on creation")
Closes: https://sashiko.dev/#/patchset/20260326104544.509518-1-dhowells%40redhat.com
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
cc: linux-fsdevel@vger.kernel.org
---
fs/afs/Makefile | 1 +
fs/afs/dir.c | 33 +++++--
fs/afs/fsclient.c | 4 +-
fs/afs/inode.c | 105 +-------------------
fs/afs/internal.h | 35 +++++--
fs/afs/symlink.c | 242 +++++++++++++++++++++++++++++++++++++++++++++
fs/afs/yfsclient.c | 4 +-
7 files changed, 303 insertions(+), 121 deletions(-)
create mode 100644 fs/afs/symlink.c
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
index b49b8fe682f3..0d8f1982d596 100644
--- a/fs/afs/Makefile
+++ b/fs/afs/Makefile
@@ -30,6 +30,7 @@ kafs-y := \
server.o \
server_list.o \
super.o \
+ symlink.o \
validation.o \
vlclient.o \
vl_alias.o \
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index aaaa55878ffd..40f6791114ec 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -68,7 +68,7 @@ const struct inode_operations afs_dir_inode_operations = {
};
const struct address_space_operations afs_dir_aops = {
- .writepages = afs_single_writepages,
+ .writepages = afs_dir_writepages,
};
const struct dentry_operations afs_fs_dentry_operations = {
@@ -294,7 +294,7 @@ static ssize_t afs_do_read_single(struct afs_vnode *dvnode, struct file *file)
return ret;
}
-ssize_t afs_read_single(struct afs_vnode *dvnode, struct file *file)
+static ssize_t afs_read_single(struct afs_vnode *dvnode, struct file *file)
{
ssize_t ret;
@@ -1763,13 +1763,20 @@ static int afs_link(struct dentry *from, struct inode *dir,
return ret;
}
+static void afs_symlink_put(struct afs_operation *op)
+{
+ kfree(op->create.symlink);
+ op->create.symlink = NULL;
+ afs_create_put(op);
+}
+
static const struct afs_operation_ops afs_symlink_operation = {
.issue_afs_rpc = afs_fs_symlink,
.issue_yfs_rpc = yfs_fs_symlink,
.success = afs_create_success,
.aborted = afs_check_for_remote_deletion,
.edit_dir = afs_create_edit_dir,
- .put = afs_create_put,
+ .put = afs_symlink_put,
};
/*
@@ -1779,7 +1786,9 @@ static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *content)
{
struct afs_operation *op;
+ struct afs_symlink *symlink;
struct afs_vnode *dvnode = AFS_FS_I(dir);
+ size_t clen = strlen(content);
int ret;
_enter("{%llx:%llu},{%pd},%s",
@@ -1791,12 +1800,20 @@ static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir,
goto error;
ret = -EINVAL;
- if (strlen(content) >= AFSPATHMAX)
+ if (clen >= AFSPATHMAX)
+ goto error;
+
+ ret = -ENOMEM;
+ symlink = kmalloc_flex(struct afs_symlink, content, clen + 1, GFP_KERNEL);
+ if (!symlink)
goto error;
+ refcount_set(&symlink->ref, 1);
+ memcpy(symlink->content, content, clen + 1);
op = afs_alloc_operation(NULL, dvnode->volume);
if (IS_ERR(op)) {
ret = PTR_ERR(op);
+ kfree(symlink);
goto error;
}
@@ -1808,7 +1825,7 @@ static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir,
op->dentry = dentry;
op->ops = &afs_symlink_operation;
op->create.reason = afs_edit_dir_for_symlink;
- op->create.symlink = content;
+ op->create.symlink = symlink;
op->mtime = current_time(dir);
ret = afs_do_sync_operation(op);
afs_dir_unuse_cookie(dvnode, ret);
@@ -2192,10 +2209,10 @@ static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
}
/*
- * Write the file contents to the cache as a single blob.
+ * Write the directory contents to the cache as a single blob.
*/
-int afs_single_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
+int afs_dir_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
struct afs_vnode *dvnode = AFS_FS_I(mapping->host);
struct iov_iter iter;
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 95494d5f2b8a..a2ffd60889f8 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -886,7 +886,7 @@ void afs_fs_symlink(struct afs_operation *op)
namesz = name->len;
padsz = (4 - (namesz & 3)) & 3;
- c_namesz = strlen(op->create.symlink);
+ c_namesz = strlen(op->create.symlink->content);
c_padsz = (4 - (c_namesz & 3)) & 3;
reqsz = (6 * 4) + namesz + padsz + c_namesz + c_padsz + (6 * 4);
@@ -910,7 +910,7 @@ void afs_fs_symlink(struct afs_operation *op)
bp = (void *) bp + padsz;
}
*bp++ = htonl(c_namesz);
- memcpy(bp, op->create.symlink, c_namesz);
+ memcpy(bp, op->create.symlink->content, c_namesz);
bp = (void *) bp + c_namesz;
if (c_padsz > 0) {
memset(bp, 0, c_padsz);
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 5207c4a003f6..ff2b8fc7f3df 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -25,105 +25,6 @@
#include "internal.h"
#include "afs_fs.h"
-void afs_init_new_symlink(struct afs_vnode *vnode, struct afs_operation *op)
-{
- size_t size = strlen(op->create.symlink) + 1;
- size_t dsize = 0;
- char *p;
-
- if (netfs_alloc_folioq_buffer(NULL, &vnode->directory, &dsize, size,
- mapping_gfp_mask(vnode->netfs.inode.i_mapping)) < 0)
- return;
-
- vnode->directory_size = dsize;
- p = kmap_local_folio(folioq_folio(vnode->directory, 0), 0);
- memcpy(p, op->create.symlink, size);
- kunmap_local(p);
- set_bit(AFS_VNODE_DIR_READ, &vnode->flags);
- netfs_single_mark_inode_dirty(&vnode->netfs.inode);
-}
-
-static void afs_put_link(void *arg)
-{
- struct folio *folio = virt_to_folio(arg);
-
- kunmap_local(arg);
- folio_put(folio);
-}
-
-const char *afs_get_link(struct dentry *dentry, struct inode *inode,
- struct delayed_call *callback)
-{
- struct afs_vnode *vnode = AFS_FS_I(inode);
- struct folio *folio;
- char *content;
- ssize_t ret;
-
- if (!dentry) {
- /* RCU pathwalk. */
- if (!test_bit(AFS_VNODE_DIR_READ, &vnode->flags) || !afs_check_validity(vnode))
- return ERR_PTR(-ECHILD);
- goto good;
- }
-
- if (test_bit(AFS_VNODE_DIR_READ, &vnode->flags))
- goto fetch;
-
- ret = afs_validate(vnode, NULL);
- if (ret < 0)
- return ERR_PTR(ret);
-
- if (!test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) &&
- test_bit(AFS_VNODE_DIR_READ, &vnode->flags))
- goto good;
-
-fetch:
- if (down_write_killable(&vnode->validate_lock) < 0)
- return ERR_PTR(-ERESTARTSYS);
- if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) ||
- !test_bit(AFS_VNODE_DIR_READ, &vnode->flags)) {
- ret = afs_read_single(vnode, NULL);
- if (ret < 0) {
- up_write(&vnode->validate_lock);
- return ERR_PTR(ret);
- }
- set_bit(AFS_VNODE_DIR_READ, &vnode->flags);
- }
-
- up_write(&vnode->validate_lock);
-
-good:
- folio = folioq_folio(vnode->directory, 0);
- folio_get(folio);
- content = kmap_local_folio(folio, 0);
- set_delayed_call(callback, afs_put_link, content);
- return content;
-}
-
-int afs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
-{
- DEFINE_DELAYED_CALL(done);
- const char *content;
- int len;
-
- content = afs_get_link(dentry, d_inode(dentry), &done);
- if (IS_ERR(content)) {
- do_delayed_call(&done);
- return PTR_ERR(content);
- }
-
- len = umin(strlen(content), buflen);
- if (copy_to_user(buffer, content, len))
- len = -EFAULT;
- do_delayed_call(&done);
- return len;
-}
-
-static const struct inode_operations afs_symlink_inode_operations = {
- .get_link = afs_get_link,
- .readlink = afs_readlink,
-};
-
static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *parent_vnode)
{
static unsigned long once_only;
@@ -223,7 +124,7 @@ static int afs_inode_init_from_status(struct afs_operation *op,
inode->i_mode = S_IFLNK | status->mode;
inode->i_op = &afs_symlink_inode_operations;
}
- inode->i_mapping->a_ops = &afs_dir_aops;
+ inode->i_mapping->a_ops = &afs_symlink_aops;
inode_nohighmem(inode);
mapping_set_release_always(inode->i_mapping);
break;
@@ -765,12 +666,14 @@ void afs_evict_inode(struct inode *inode)
.range_end = LLONG_MAX,
};
- afs_single_writepages(inode->i_mapping, &wbc);
+ inode->i_mapping->a_ops->writepages(inode->i_mapping, &wbc);
}
netfs_wait_for_outstanding_io(inode);
truncate_inode_pages_final(&inode->i_data);
netfs_free_folioq_buffer(vnode->directory);
+ if (vnode->symlink)
+ afs_replace_symlink(vnode, NULL);
afs_set_cache_aux(vnode, &aux);
netfs_clear_inode_writeback(inode, &aux);
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 009064b8d661..802ae22133ae 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -711,6 +711,7 @@ struct afs_vnode {
#define AFS_VNODE_DIR_READ 11 /* Set if we've read a dir's contents */
struct folio_queue *directory; /* Directory contents */
+ struct afs_symlink __rcu *symlink; /* Symlink content */
struct list_head wb_keys; /* List of keys available for writeback */
struct list_head pending_locks; /* locks waiting to be granted */
struct list_head granted_locks; /* locks granted on this file */
@@ -777,6 +778,15 @@ struct afs_permits {
struct afs_permit permits[] __counted_by(nr_permits); /* List of permits sorted by key pointer */
};
+/*
+ * Copy of symlink content for normal use.
+ */
+struct afs_symlink {
+ struct rcu_head rcu;
+ refcount_t ref;
+ char content[];
+};
+
/*
* Error prioritisation and accumulation.
*/
@@ -888,7 +898,7 @@ struct afs_operation {
struct {
int reason; /* enum afs_edit_dir_reason */
mode_t mode;
- const char *symlink;
+ struct afs_symlink *symlink;
} create;
struct {
bool need_rehash;
@@ -1099,13 +1109,12 @@ extern const struct inode_operations afs_dir_inode_operations;
extern const struct address_space_operations afs_dir_aops;
extern const struct dentry_operations afs_fs_dentry_operations;
-ssize_t afs_read_single(struct afs_vnode *dvnode, struct file *file);
ssize_t afs_read_dir(struct afs_vnode *dvnode, struct file *file)
__acquires(&dvnode->validate_lock);
extern void afs_d_release(struct dentry *);
extern void afs_check_for_remote_deletion(struct afs_operation *);
-int afs_single_writepages(struct address_space *mapping,
- struct writeback_control *wbc);
+int afs_dir_writepages(struct address_space *mapping,
+ struct writeback_control *wbc);
/*
* dir_edit.c
@@ -1247,10 +1256,6 @@ extern void afs_fs_probe_cleanup(struct afs_net *);
*/
extern const struct afs_operation_ops afs_fetch_status_operation;
-void afs_init_new_symlink(struct afs_vnode *vnode, struct afs_operation *op);
-const char *afs_get_link(struct dentry *dentry, struct inode *inode,
- struct delayed_call *callback);
-int afs_readlink(struct dentry *dentry, char __user *buffer, int buflen);
extern void afs_vnode_commit_status(struct afs_operation *, struct afs_vnode_param *);
extern int afs_fetch_status(struct afs_vnode *, struct key *, bool, afs_access_t *);
extern int afs_ilookup5_test_by_fid(struct inode *, void *);
@@ -1600,6 +1605,20 @@ void afs_detach_volume_from_servers(struct afs_volume *volume, struct afs_server
extern int __init afs_fs_init(void);
extern void afs_fs_exit(void);
+/*
+ * symlink.c
+ */
+extern const struct inode_operations afs_symlink_inode_operations;
+extern const struct address_space_operations afs_symlink_aops;
+
+void afs_replace_symlink(struct afs_vnode *vnode, struct afs_symlink *symlink);
+void afs_init_new_symlink(struct afs_vnode *vnode, struct afs_operation *op);
+const char *afs_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback);
+int afs_readlink(struct dentry *dentry, char __user *buffer, int buflen);
+int afs_symlink_writepages(struct address_space *mapping,
+ struct writeback_control *wbc);
+
/*
* validation.c
*/
diff --git a/fs/afs/symlink.c b/fs/afs/symlink.c
new file mode 100644
index 000000000000..8d2521c5f19d
--- /dev/null
+++ b/fs/afs/symlink.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* AFS filesystem symbolic link handling
+ *
+ * Copyright (C) 2026 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/pagemap.h>
+#include <linux/iov_iter.h>
+#include "internal.h"
+
+static void afs_put_symlink(struct afs_symlink *symlink)
+{
+ if (refcount_dec_and_test(&symlink->ref))
+ kfree_rcu(symlink, rcu);
+}
+
+void afs_replace_symlink(struct afs_vnode *vnode, struct afs_symlink *symlink)
+{
+ struct afs_symlink *old;
+
+ old = rcu_replace_pointer(vnode->symlink, symlink,
+ lockdep_is_held(&vnode->validate_lock));
+ if (old)
+ afs_put_symlink(old);
+}
+
+/*
+ * Set up a locally created symlink inode for immediate write to the cache.
+ */
+void afs_init_new_symlink(struct afs_vnode *vnode, struct afs_operation *op)
+{
+ size_t dsize = 0;
+ size_t size = strlen(op->create.symlink->content) + 1;
+ char *p;
+
+ rcu_assign_pointer(vnode->symlink, op->create.symlink);
+ op->create.symlink = NULL;
+
+ if (!fscache_cookie_enabled(netfs_i_cookie(&vnode->netfs)))
+ return;
+
+ if (netfs_alloc_folioq_buffer(NULL, &vnode->directory, &dsize, size,
+ mapping_gfp_mask(vnode->netfs.inode.i_mapping)) < 0)
+ return;
+
+ vnode->directory_size = dsize;
+ p = kmap_local_folio(folioq_folio(vnode->directory, 0), 0);
+ memcpy(p, vnode->symlink, size);
+ kunmap_local(p);
+ netfs_single_mark_inode_dirty(&vnode->netfs.inode);
+}
+
+/*
+ * Read a symlink in a single download.
+ */
+static ssize_t afs_do_read_symlink(struct afs_vnode *vnode)
+{
+ struct afs_symlink *symlink;
+ struct iov_iter iter;
+ ssize_t ret;
+ loff_t i_size;
+
+ i_size = i_size_read(&vnode->netfs.inode);
+ if (i_size > PAGE_SIZE - 1) {
+ trace_afs_file_error(vnode, -EFBIG, afs_file_error_dir_big);
+ return -EFBIG;
+ }
+
+ if (!vnode->directory) {
+ size_t cur_size = 0;
+
+ ret = netfs_alloc_folioq_buffer(NULL,
+ &vnode->directory, &cur_size, PAGE_SIZE,
+ mapping_gfp_mask(vnode->netfs.inode.i_mapping));
+ vnode->directory_size = PAGE_SIZE - 1;
+ if (ret < 0)
+ return ret;
+ }
+
+ iov_iter_folio_queue(&iter, ITER_DEST, vnode->directory, 0, 0, PAGE_SIZE);
+
+ /* AFS requires us to perform the read of a symlink as a single unit to
+ * avoid issues with the content being changed between reads.
+ */
+ ret = netfs_read_single(&vnode->netfs.inode, NULL, &iter);
+ if (ret >= 0) {
+ i_size = i_size_read(&vnode->netfs.inode);
+ if (i_size > PAGE_SIZE - 1) {
+ trace_afs_file_error(vnode, -EFBIG, afs_file_error_dir_big);
+ return -EFBIG;
+ }
+ vnode->directory_size = i_size;
+
+ /* Copy the symlink. */
+ symlink = kmalloc_flex(struct afs_symlink, content, i_size + 1,
+ GFP_KERNEL);
+ if (!symlink)
+ return -ENOMEM;
+
+ refcount_set(&symlink->ref, 1);
+ symlink->content[i_size] = 0;
+
+ const char *s = kmap_local_folio(folioq_folio(vnode->directory, 0), 0);
+
+ memcpy(symlink->content, s, i_size);
+ kunmap_local(s);
+
+ afs_replace_symlink(vnode, symlink);
+ }
+
+ if (!fscache_cookie_enabled(netfs_i_cookie(&vnode->netfs))) {
+ netfs_free_folioq_buffer(vnode->directory);
+ vnode->directory = NULL;
+ vnode->directory_size = 0;
+ }
+
+ return ret;
+}
+
+static ssize_t afs_read_symlink(struct afs_vnode *vnode)
+{
+ ssize_t ret;
+
+ fscache_use_cookie(afs_vnode_cache(vnode), false);
+ ret = afs_do_read_symlink(vnode);
+ fscache_unuse_cookie(afs_vnode_cache(vnode), NULL, NULL);
+ return ret;
+}
+
+static void afs_put_link(void *arg)
+{
+ afs_put_symlink(arg);
+}
+
+const char *afs_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
+{
+ struct afs_symlink *symlink;
+ struct afs_vnode *vnode = AFS_FS_I(inode);
+ ssize_t ret;
+
+ if (!dentry) {
+ /* RCU pathwalk. */
+ if (!vnode->symlink || !afs_check_validity(vnode))
+ return ERR_PTR(-ECHILD);
+ set_delayed_call(callback, NULL, NULL);
+ return rcu_dereference(vnode->symlink)->content;
+ }
+
+ if (vnode->symlink) {
+ ret = afs_validate(vnode, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ down_read(&vnode->validate_lock);
+ if (!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
+ goto good;
+ up_read(&vnode->validate_lock);
+ }
+
+ if (down_write_killable(&vnode->validate_lock) < 0)
+ return ERR_PTR(-ERESTARTSYS);
+ if (!vnode->symlink ||
+ test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
+ ret = afs_read_symlink(vnode);
+ if (ret < 0) {
+ up_write(&vnode->validate_lock);
+ return ERR_PTR(ret);
+ }
+ }
+
+ downgrade_write(&vnode->validate_lock);
+
+good:
+ symlink = rcu_dereference_protected(vnode->symlink,
+ lockdep_is_held(&vnode->validate_lock));
+ refcount_inc(&symlink->ref);
+ up_read(&vnode->validate_lock);
+
+ set_delayed_call(callback, afs_put_link, symlink);
+ return symlink->content;
+}
+
+int afs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
+{
+ DEFINE_DELAYED_CALL(done);
+ const char *content;
+ int len;
+
+ content = afs_get_link(dentry, d_inode(dentry), &done);
+ if (IS_ERR(content)) {
+ do_delayed_call(&done);
+ return PTR_ERR(content);
+ }
+
+ len = umin(strlen(content), buflen);
+ if (copy_to_user(buffer, content, len))
+ len = -EFAULT;
+ do_delayed_call(&done);
+ return len;
+}
+
+/*
+ * Write the symlink contents to the cache as a single blob. We then throw
+ * away the page we used to receive it.
+ */
+int afs_symlink_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct afs_vnode *vnode = AFS_FS_I(mapping->host);
+ struct iov_iter iter;
+ int ret = 0;
+
+ down_write(&vnode->validate_lock);
+
+ if (vnode->directory &&
+ atomic64_read(&vnode->cb_expires_at) != AFS_NO_CB_PROMISE) {
+ iov_iter_folio_queue(&iter, ITER_SOURCE, vnode->directory, 0, 0,
+ i_size_read(&vnode->netfs.inode));
+ ret = netfs_writeback_single(mapping, wbc, &iter);
+ }
+
+ netfs_free_folioq_buffer(vnode->directory);
+ vnode->directory = NULL;
+ vnode->directory_size = 0;
+
+ up_write(&vnode->validate_lock);
+ return ret;
+}
+
+const struct inode_operations afs_symlink_inode_operations = {
+ .get_link = afs_get_link,
+ .readlink = afs_readlink,
+};
+
+const struct address_space_operations afs_symlink_aops = {
+ .writepages = afs_symlink_writepages,
+};
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 24fb562ebd33..d941179730a9 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -960,7 +960,7 @@ void yfs_fs_symlink(struct afs_operation *op)
_enter("");
- contents_sz = strlen(op->create.symlink);
+ contents_sz = strlen(op->create.symlink->content);
call = afs_alloc_flat_call(op->net, &yfs_RXYFSSymlink,
sizeof(__be32) +
sizeof(struct yfs_xdr_RPCFlags) +
@@ -981,7 +981,7 @@ void yfs_fs_symlink(struct afs_operation *op)
bp = xdr_encode_u32(bp, 0); /* RPC flags */
bp = xdr_encode_YFSFid(bp, &dvp->fid);
bp = xdr_encode_name(bp, name);
- bp = xdr_encode_string(bp, op->create.symlink, contents_sz);
+ bp = xdr_encode_string(bp, op->create.symlink->content, contents_sz);
bp = xdr_encode_YFSStoreStatus(bp, &mode, &op->mtime);
yfs_check_req(call, bp);
^ permalink raw reply related [flat|nested] 5+ messages in thread