public inbox for netfs@lists.linux.dev
 help / color / mirror / Atom feed
From: David Howells <dhowells@redhat.com>
To: Christian Brauner <christian@brauner.io>,
	Matthew Wilcox <willy@infradead.org>,
	Christoph Hellwig <hch@infradead.org>
Cc: David Howells <dhowells@redhat.com>,
	Paulo Alcantara <pc@manguebit.com>, Jens Axboe <axboe@kernel.dk>,
	Leon Romanovsky <leon@kernel.org>,
	Steve French <sfrench@samba.org>,
	ChenXiaoSong <chenxiaosong@chenxiaosong.com>,
	Marc Dionne <marc.dionne@auristor.com>,
	Eric Van Hensbergen <ericvh@kernel.org>,
	Dominique Martinet <asmadeus@codewreck.org>,
	Ilya Dryomov <idryomov@gmail.com>,
	Trond Myklebust <trondmy@kernel.org>,
	netfs@lists.linux.dev, linux-afs@lists.infradead.org,
	linux-cifs@vger.kernel.org, linux-nfs@vger.kernel.org,
	ceph-devel@vger.kernel.org, v9fs@lists.linux.dev,
	linux-erofs@lists.ozlabs.org, linux-fsdevel@vger.kernel.org,
	linux-kernel@vger.kernel.org, Paulo Alcantara <pc@manguebit.org>
Subject: [PATCH 20/26] netfs: Remove netfs_alloc/free_folioq_buffer()
Date: Thu, 26 Mar 2026 10:45:35 +0000	[thread overview]
Message-ID: <20260326104544.509518-21-dhowells@redhat.com> (raw)
In-Reply-To: <20260326104544.509518-1-dhowells@redhat.com>

Remove netfs_alloc/free_folioq_buffer() as these have been replaced with
netfs_alloc/free_bvecq_buffer().

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Paulo Alcantara <pc@manguebit.org>
cc: Matthew Wilcox <willy@infradead.org>
cc: Christoph Hellwig <hch@infradead.org>
cc: Steve French <sfrench@samba.org>
cc: linux-cifs@vger.kernel.org
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
---
 fs/afs/dir_edit.c         |  1 -
 fs/netfs/misc.c           | 98 ---------------------------------------
 fs/smb/client/smb2ops.c   |  1 -
 fs/smb/client/smbdirect.c |  1 -
 include/linux/netfs.h     |  6 ---
 5 files changed, 107 deletions(-)

diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
index 59d3decf7692..d6a9bb4e2039 100644
--- a/fs/afs/dir_edit.c
+++ b/fs/afs/dir_edit.c
@@ -10,7 +10,6 @@
 #include <linux/namei.h>
 #include <linux/pagemap.h>
 #include <linux/iversion.h>
-#include <linux/folio_queue.h>
 #include "internal.h"
 #include "xdr_fs.h"
 
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index ab142cbaad35..a19724389147 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -8,104 +8,6 @@
 #include <linux/swap.h>
 #include "internal.h"
 
-#if 0
-/**
- * netfs_alloc_folioq_buffer - Allocate buffer space into a folio queue
- * @mapping: Address space to set on the folio (or NULL).
- * @_buffer: Pointer to the folio queue to add to (may point to a NULL; updated).
- * @_cur_size: Current size of the buffer (updated).
- * @size: Target size of the buffer.
- * @gfp: The allocation constraints.
- */
-int netfs_alloc_folioq_buffer(struct address_space *mapping,
-			      struct folio_queue **_buffer,
-			      size_t *_cur_size, ssize_t size, gfp_t gfp)
-{
-	struct folio_queue *tail = *_buffer, *p;
-
-	size = round_up(size, PAGE_SIZE);
-	if (*_cur_size >= size)
-		return 0;
-
-	if (tail)
-		while (tail->next)
-			tail = tail->next;
-
-	do {
-		struct folio *folio;
-		int order = 0, slot;
-
-		if (!tail || folioq_full(tail)) {
-			p = netfs_folioq_alloc(0, GFP_NOFS, netfs_trace_folioq_alloc_buffer);
-			if (!p)
-				return -ENOMEM;
-			if (tail) {
-				tail->next = p;
-				p->prev = tail;
-			} else {
-				*_buffer = p;
-			}
-			tail = p;
-		}
-
-		if (size - *_cur_size > PAGE_SIZE)
-			order = umin(ilog2(size - *_cur_size) - PAGE_SHIFT,
-				     MAX_PAGECACHE_ORDER);
-
-		folio = folio_alloc(gfp, order);
-		if (!folio && order > 0)
-			folio = folio_alloc(gfp, 0);
-		if (!folio)
-			return -ENOMEM;
-
-		folio->mapping = mapping;
-		folio->index = *_cur_size / PAGE_SIZE;
-		trace_netfs_folio(folio, netfs_folio_trace_alloc_buffer);
-		slot = folioq_append_mark(tail, folio);
-		*_cur_size += folioq_folio_size(tail, slot);
-	} while (*_cur_size < size);
-
-	return 0;
-}
-EXPORT_SYMBOL(netfs_alloc_folioq_buffer);
-
-/**
- * netfs_free_folioq_buffer - Free a folio queue.
- * @fq: The start of the folio queue to free
- *
- * Free up a chain of folio_queues and, if marked, the marked folios they point
- * to.
- */
-void netfs_free_folioq_buffer(struct folio_queue *fq)
-{
-	struct folio_queue *next;
-	struct folio_batch fbatch;
-
-	folio_batch_init(&fbatch);
-
-	for (; fq; fq = next) {
-		for (int slot = 0; slot < folioq_count(fq); slot++) {
-			struct folio *folio = folioq_folio(fq, slot);
-
-			if (!folio ||
-			    !folioq_is_marked(fq, slot))
-				continue;
-
-			trace_netfs_folio(folio, netfs_folio_trace_put);
-			if (folio_batch_add(&fbatch, folio))
-				folio_batch_release(&fbatch);
-		}
-
-		netfs_stat_d(&netfs_n_folioq);
-		next = fq->next;
-		kfree(fq);
-	}
-
-	folio_batch_release(&fbatch);
-}
-EXPORT_SYMBOL(netfs_free_folioq_buffer);
-#endif
-
 /**
  * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
  * @mapping: The mapping the folio belongs to.
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 173acca17af7..0d19c8fc4c3d 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -13,7 +13,6 @@
 #include <linux/sort.h>
 #include <crypto/aead.h>
 #include <linux/fiemap.h>
-#include <linux/folio_queue.h>
 #include <uapi/linux/magic.h>
 #include "cifsfs.h"
 #include "cifsglob.h"
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index d9e026d5e9f9..252e7757d21c 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -6,7 +6,6 @@
  */
 #include <linux/module.h>
 #include <linux/highmem.h>
-#include <linux/folio_queue.h>
 #define __SMBDIRECT_SOCKET_DISCONNECT(__sc) smbd_disconnect_rdma_connection(__sc)
 #include "../common/smbdirect/smbdirect_pdu.h"
 #include "smbdirect.h"
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 3345c88bbd8e..9d8576a62868 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -463,12 +463,6 @@ void netfs_end_io_write(struct inode *inode);
 int netfs_start_io_direct(struct inode *inode);
 void netfs_end_io_direct(struct inode *inode);
 
-/* Buffer wrangling helpers API. */
-int netfs_alloc_folioq_buffer(struct address_space *mapping,
-			      struct folio_queue **_buffer,
-			      size_t *_cur_size, ssize_t size, gfp_t gfp);
-void netfs_free_folioq_buffer(struct folio_queue *fq);
-
 /**
  * netfs_inode - Get the netfs inode context from the inode
  * @inode: The inode to query


  parent reply	other threads:[~2026-03-26 10:49 UTC|newest]

Thread overview: 44+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-26 10:45 [PATCH 00/26] netfs: Keep track of folios in a segmented bio_vec[] chain David Howells
2026-03-26 10:45 ` [PATCH 01/26] netfs: Fix NULL pointer dereference in netfs_unbuffered_write() on retry David Howells
2026-03-28 18:20   ` Paulo Alcantara
2026-03-26 10:45 ` [PATCH 02/26] netfs: Fix kernel BUG in netfs_limit_iter() for ITER_KVEC iterators David Howells
2026-03-28 18:21   ` Paulo Alcantara
2026-03-26 10:45 ` [PATCH 03/26] netfs: fix VM_BUG_ON_FOLIO() issue in netfs_write_begin() call David Howells
2026-03-28 18:24   ` Paulo Alcantara
2026-03-26 10:45 ` [PATCH 04/26] netfs: fix error handling in netfs_extract_user_iter() David Howells
2026-03-26 10:45 ` [PATCH 05/26] netfs: Fix read abandonment during retry David Howells
2026-03-28 18:25   ` Paulo Alcantara
2026-03-26 10:45 ` [PATCH 06/26] netfs: Fix the handling of stream->front by removing it David Howells
2026-03-28 18:25   ` Paulo Alcantara
2026-03-26 10:45 ` [PATCH 07/26] cachefiles: Fix excess dput() after end_removing() David Howells
     [not found]   ` <CA+yaA_=gpTnueByzFNYrqNL_qSC2rE4iGDjLHtJap-=_rhE3HQ@mail.gmail.com>
2026-03-26 11:10     ` David Howells
2026-03-28 18:29   ` Paulo Alcantara
2026-03-26 10:45 ` [PATCH 08/26] cachefiles: Don't rely on backing fs storage map for most use cases David Howells
2026-03-26 10:45 ` [PATCH 09/26] mm: Make readahead store folio count in readahead_control David Howells
2026-03-28 18:34   ` Paulo Alcantara
2026-03-26 10:45 ` [PATCH 10/26] netfs: Bulk load the readahead-provided folios up front David Howells
2026-03-28 18:36   ` Paulo Alcantara
2026-03-26 10:45 ` [PATCH 11/26] Add a function to kmap one page of a multipage bio_vec David Howells
2026-03-28 18:39   ` Paulo Alcantara
2026-03-26 10:45 ` [PATCH 12/26] iov_iter: Add a segmented queue of bio_vec[] David Howells
2026-03-28 18:39   ` Paulo Alcantara
2026-03-26 10:45 ` [PATCH 13/26] netfs: Add some tools for managing bvecq chains David Howells
2026-03-26 10:45 ` [PATCH 14/26] netfs: Add a function to extract from an iter into a bvecq David Howells
2026-03-26 10:45 ` [PATCH 15/26] afs: Use a bvecq to hold dir content rather than folioq David Howells
2026-03-26 10:45 ` [PATCH 16/26] cifs: Use a bvecq for buffering instead of a folioq David Howells
2026-03-26 10:45 ` [PATCH 17/26] cifs: Support ITER_BVECQ in smb_extract_iter_to_rdma() David Howells
2026-03-29 10:36   ` Stefan Metzmacher
2026-03-26 10:45 ` [PATCH 18/26] netfs: Switch to using bvecq rather than folio_queue and rolling_buffer David Howells
2026-03-26 10:45 ` [PATCH 19/26] cifs: Remove support for ITER_KVEC/BVEC/FOLIOQ from smb_extract_iter_to_rdma() David Howells
2026-03-29 10:39   ` Stefan Metzmacher
2026-03-26 10:45 ` David Howells [this message]
2026-03-26 10:45 ` [PATCH 21/26] netfs: Remove netfs_extract_user_iter() David Howells
2026-03-26 10:45 ` [PATCH 22/26] iov_iter: Remove ITER_FOLIOQ David Howells
2026-03-26 10:45 ` [PATCH 23/26] netfs: Remove folio_queue and rolling_buffer David Howells
2026-03-26 10:45 ` [PATCH 24/26] netfs: Check for too much data being read David Howells
2026-03-26 10:45 ` [PATCH 25/26] netfs: Limit the minimum trigger for progress reporting David Howells
2026-03-26 14:19   ` ChenXiaoSong
2026-03-26 10:45 ` [PATCH 26/26] netfs: Combine prepare and issue ops and grab the buffers on request David Howells
2026-03-29 11:07 ` [PATCH 00/26] netfs: Keep track of folios in a segmented bio_vec[] chain Stefan Metzmacher
2026-03-30 10:49   ` David Howells
2026-03-30 13:57     ` Stefan Metzmacher

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260326104544.509518-21-dhowells@redhat.com \
    --to=dhowells@redhat.com \
    --cc=asmadeus@codewreck.org \
    --cc=axboe@kernel.dk \
    --cc=ceph-devel@vger.kernel.org \
    --cc=chenxiaosong@chenxiaosong.com \
    --cc=christian@brauner.io \
    --cc=ericvh@kernel.org \
    --cc=hch@infradead.org \
    --cc=idryomov@gmail.com \
    --cc=leon@kernel.org \
    --cc=linux-afs@lists.infradead.org \
    --cc=linux-cifs@vger.kernel.org \
    --cc=linux-erofs@lists.ozlabs.org \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-nfs@vger.kernel.org \
    --cc=marc.dionne@auristor.com \
    --cc=netfs@lists.linux.dev \
    --cc=pc@manguebit.com \
    --cc=pc@manguebit.org \
    --cc=sfrench@samba.org \
    --cc=trondmy@kernel.org \
    --cc=v9fs@lists.linux.dev \
    --cc=willy@infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox