From: David Howells <dhowells@redhat.com>
To: Matthew Wilcox <willy@infradead.org>,
Christoph Hellwig <hch@infradead.org>,
Jens Axboe <axboe@kernel.dk>, Leon Romanovsky <leon@kernel.org>
Cc: David Howells <dhowells@redhat.com>,
Christian Brauner <christian@brauner.io>,
Paulo Alcantara <pc@manguebit.com>,
netfs@lists.linux.dev, linux-afs@lists.infradead.org,
linux-cifs@vger.kernel.org, linux-nfs@vger.kernel.org,
ceph-devel@vger.kernel.org, v9fs@lists.linux.dev,
linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
Paulo Alcantara <pc@manguebit.org>,
Steve French <sfrench@samba.org>
Subject: [RFC PATCH 14/17] iov_iter: Remove ITER_FOLIOQ
Date: Wed, 4 Mar 2026 14:03:21 +0000 [thread overview]
Message-ID: <20260304140328.112636-15-dhowells@redhat.com> (raw)
In-Reply-To: <20260304140328.112636-1-dhowells@redhat.com>
Remove ITER_FOLIOQ as it's no longer used.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Paulo Alcantara <pc@manguebit.org>
cc: Matthew Wilcox <willy@infradead.org>
cc: Christoph Hellwig <hch@infradead.org>
cc: Steve French <sfrench@samba.org>
cc: linux-cifs@vger.kernel.org
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
---
include/linux/iov_iter.h | 65 +---------
include/linux/uio.h | 12 --
lib/iov_iter.c | 235 +---------------------------------
lib/scatterlist.c | 67 +---------
lib/tests/kunit_iov_iter.c | 256 -------------------------------------
5 files changed, 5 insertions(+), 630 deletions(-)
diff --git a/include/linux/iov_iter.h b/include/linux/iov_iter.h
index e0c129a3ca63..4b47454c5ca8 100644
--- a/include/linux/iov_iter.h
+++ b/include/linux/iov_iter.h
@@ -10,7 +10,6 @@
#include <linux/uio.h>
#include <linux/bvec.h>
-#include <linux/folio_queue.h>
typedef size_t (*iov_step_f)(void *iter_base, size_t progress, size_t len,
void *priv, void *priv2);
@@ -194,62 +193,6 @@ size_t iterate_bvecq(struct iov_iter *iter, size_t len, void *priv, void *priv2,
return progress;
}
-/*
- * Handle ITER_FOLIOQ.
- */
-static __always_inline
-size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2,
- iov_step_f step)
-{
- const struct folio_queue *folioq = iter->folioq;
- unsigned int slot = iter->folioq_slot;
- size_t progress = 0, skip = iter->iov_offset;
-
- if (slot == folioq_nr_slots(folioq)) {
- /* The iterator may have been extended. */
- folioq = folioq->next;
- slot = 0;
- }
-
- do {
- struct folio *folio = folioq_folio(folioq, slot);
- size_t part, remain = 0, consumed;
- size_t fsize;
- void *base;
-
- if (!folio)
- break;
-
- fsize = folioq_folio_size(folioq, slot);
- if (skip < fsize) {
- base = kmap_local_folio(folio, skip);
- part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
- remain = step(base, progress, part, priv, priv2);
- kunmap_local(base);
- consumed = part - remain;
- len -= consumed;
- progress += consumed;
- skip += consumed;
- }
- if (skip >= fsize) {
- skip = 0;
- slot++;
- if (slot == folioq_nr_slots(folioq) && folioq->next) {
- folioq = folioq->next;
- slot = 0;
- }
- }
- if (remain)
- break;
- } while (len);
-
- iter->folioq_slot = slot;
- iter->folioq = folioq;
- iter->iov_offset = skip;
- iter->count -= progress;
- return progress;
-}
-
/*
* Handle ITER_XARRAY.
*/
@@ -361,8 +304,6 @@ size_t iterate_and_advance2(struct iov_iter *iter, size_t len, void *priv,
return iterate_kvec(iter, len, priv, priv2, step);
if (iov_iter_is_bvecq(iter))
return iterate_bvecq(iter, len, priv, priv2, step);
- if (iov_iter_is_folioq(iter))
- return iterate_folioq(iter, len, priv, priv2, step);
if (iov_iter_is_xarray(iter))
return iterate_xarray(iter, len, priv, priv2, step);
return iterate_discard(iter, len, priv, priv2, step);
@@ -397,8 +338,8 @@ size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv,
* buffer is presented in segments, which for kernel iteration are broken up by
* physical pages and mapped, with the mapped address being presented.
*
- * [!] Note This will only handle BVEC, KVEC, BVECQ, FOLIOQ, XARRAY and
- * DISCARD-type iterators; it will not handle UBUF or IOVEC-type iterators.
+ * [!] Note This will only handle BVEC, KVEC, BVECQ, XARRAY and DISCARD-type
+ * iterators; it will not handle UBUF or IOVEC-type iterators.
*
* A step functions, @step, must be provided, one for handling mapped kernel
* addresses and the other is given user addresses which have the potential to
@@ -427,8 +368,6 @@ size_t iterate_and_advance_kernel(struct iov_iter *iter, size_t len, void *priv,
return iterate_kvec(iter, len, priv, priv2, step);
if (iov_iter_is_bvecq(iter))
return iterate_bvecq(iter, len, priv, priv2, step);
- if (iov_iter_is_folioq(iter))
- return iterate_folioq(iter, len, priv, priv2, step);
if (iov_iter_is_xarray(iter))
return iterate_xarray(iter, len, priv, priv2, step);
return iterate_discard(iter, len, priv, priv2, step);
diff --git a/include/linux/uio.h b/include/linux/uio.h
index aa50d348dfcc..e84a0c4f28c6 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -11,7 +11,6 @@
#include <uapi/linux/uio.h>
struct page;
-struct folio_queue;
typedef unsigned int __bitwise iov_iter_extraction_t;
@@ -26,7 +25,6 @@ enum iter_type {
ITER_IOVEC,
ITER_BVEC,
ITER_KVEC,
- ITER_FOLIOQ,
ITER_BVECQ,
ITER_XARRAY,
ITER_DISCARD,
@@ -69,7 +67,6 @@ struct iov_iter {
const struct iovec *__iov;
const struct kvec *kvec;
const struct bio_vec *bvec;
- const struct folio_queue *folioq;
const struct bvecq *bvecq;
struct xarray *xarray;
void __user *ubuf;
@@ -79,7 +76,6 @@ struct iov_iter {
};
union {
unsigned long nr_segs;
- u8 folioq_slot;
u16 bvecq_slot;
loff_t xarray_start;
};
@@ -148,11 +144,6 @@ static inline bool iov_iter_is_discard(const struct iov_iter *i)
return iov_iter_type(i) == ITER_DISCARD;
}
-static inline bool iov_iter_is_folioq(const struct iov_iter *i)
-{
- return iov_iter_type(i) == ITER_FOLIOQ;
-}
-
static inline bool iov_iter_is_bvecq(const struct iov_iter *i)
{
return iov_iter_type(i) == ITER_BVECQ;
@@ -303,9 +294,6 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
unsigned long nr_segs, size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
-void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
- const struct folio_queue *folioq,
- unsigned int first_slot, unsigned int offset, size_t count);
void iov_iter_bvec_queue(struct iov_iter *i, unsigned int direction,
const struct bvecq *bvecq,
unsigned int first_slot, unsigned int offset, size_t count);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index df8d037894b1..d5a4f5e5a107 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -538,39 +538,6 @@ static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
i->__iov = iov;
}
-static void iov_iter_folioq_advance(struct iov_iter *i, size_t size)
-{
- const struct folio_queue *folioq = i->folioq;
- unsigned int slot = i->folioq_slot;
-
- if (!i->count)
- return;
- i->count -= size;
-
- if (slot >= folioq_nr_slots(folioq)) {
- folioq = folioq->next;
- slot = 0;
- }
-
- size += i->iov_offset; /* From beginning of current segment. */
- do {
- size_t fsize = folioq_folio_size(folioq, slot);
-
- if (likely(size < fsize))
- break;
- size -= fsize;
- slot++;
- if (slot >= folioq_nr_slots(folioq) && folioq->next) {
- folioq = folioq->next;
- slot = 0;
- }
- } while (size);
-
- i->iov_offset = size;
- i->folioq_slot = slot;
- i->folioq = folioq;
-}
-
static void iov_iter_bvecq_advance(struct iov_iter *i, size_t by)
{
const struct bvecq *bq = i->bvecq;
@@ -616,8 +583,6 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
iov_iter_iovec_advance(i, size);
} else if (iov_iter_is_bvec(i)) {
iov_iter_bvec_advance(i, size);
- } else if (iov_iter_is_folioq(i)) {
- iov_iter_folioq_advance(i, size);
} else if (iov_iter_is_bvecq(i)) {
iov_iter_bvecq_advance(i, size);
} else if (iov_iter_is_discard(i)) {
@@ -626,32 +591,6 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
}
EXPORT_SYMBOL(iov_iter_advance);
-static void iov_iter_folioq_revert(struct iov_iter *i, size_t unroll)
-{
- const struct folio_queue *folioq = i->folioq;
- unsigned int slot = i->folioq_slot;
-
- for (;;) {
- size_t fsize;
-
- if (slot == 0) {
- folioq = folioq->prev;
- slot = folioq_nr_slots(folioq);
- }
- slot--;
-
- fsize = folioq_folio_size(folioq, slot);
- if (unroll <= fsize) {
- i->iov_offset = fsize - unroll;
- break;
- }
- unroll -= fsize;
- }
-
- i->folioq_slot = slot;
- i->folioq = folioq;
-}
-
static void iov_iter_bvecq_revert(struct iov_iter *i, size_t unroll)
{
const struct bvecq *bq = i->bvecq;
@@ -709,9 +648,6 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
}
unroll -= n;
}
- } else if (iov_iter_is_folioq(i)) {
- i->iov_offset = 0;
- iov_iter_folioq_revert(i, unroll);
} else if (iov_iter_is_bvecq(i)) {
i->iov_offset = 0;
iov_iter_bvecq_revert(i, unroll);
@@ -744,8 +680,6 @@ size_t iov_iter_single_seg_count(const struct iov_iter *i)
}
if (!i->count)
return 0;
- if (unlikely(iov_iter_is_folioq(i)))
- return umin(folioq_folio_size(i->folioq, i->folioq_slot), i->count);
if (unlikely(iov_iter_is_bvecq(i)))
return min(i->count, i->bvecq->bv[i->bvecq_slot].bv_len - i->iov_offset);
return i->count;
@@ -784,36 +718,6 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
}
EXPORT_SYMBOL(iov_iter_bvec);
-/**
- * iov_iter_folio_queue - Initialise an I/O iterator to use the folios in a folio queue
- * @i: The iterator to initialise.
- * @direction: The direction of the transfer.
- * @folioq: The starting point in the folio queue.
- * @first_slot: The first slot in the folio queue to use
- * @offset: The offset into the folio in the first slot to start at
- * @count: The size of the I/O buffer in bytes.
- *
- * Set up an I/O iterator to either draw data out of the pages attached to an
- * inode or to inject data into those pages. The pages *must* be prevented
- * from evaporation, either by taking a ref on them or locking them by the
- * caller.
- */
-void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
- const struct folio_queue *folioq, unsigned int first_slot,
- unsigned int offset, size_t count)
-{
- BUG_ON(direction & ~1);
- *i = (struct iov_iter) {
- .iter_type = ITER_FOLIOQ,
- .data_source = direction,
- .folioq = folioq,
- .folioq_slot = first_slot,
- .count = count,
- .iov_offset = offset,
- };
-}
-EXPORT_SYMBOL(iov_iter_folio_queue);
-
/**
* iov_iter_bvec_queue - Initialise an I/O iterator to use a segmented bvec queue
* @i: The iterator to initialise.
@@ -982,9 +886,6 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
if (iov_iter_is_bvec(i))
return iov_iter_alignment_bvec(i);
- /* With both xarray and folioq types, we're dealing with whole folios. */
- if (iov_iter_is_folioq(i))
- return i->iov_offset | i->count;
if (iov_iter_is_bvecq(i))
return iov_iter_alignment_bvecq(i);
if (iov_iter_is_xarray(i))
@@ -1039,65 +940,6 @@ static int want_pages_array(struct page ***res, size_t size,
return count;
}
-static ssize_t iter_folioq_get_pages(struct iov_iter *iter,
- struct page ***ppages, size_t maxsize,
- unsigned maxpages, size_t *_start_offset)
-{
- const struct folio_queue *folioq = iter->folioq;
- struct page **pages;
- unsigned int slot = iter->folioq_slot;
- size_t extracted = 0, count = iter->count, iov_offset = iter->iov_offset;
-
- if (slot >= folioq_nr_slots(folioq)) {
- folioq = folioq->next;
- slot = 0;
- if (WARN_ON(iov_offset != 0))
- return -EIO;
- }
-
- maxpages = want_pages_array(ppages, maxsize, iov_offset & ~PAGE_MASK, maxpages);
- if (!maxpages)
- return -ENOMEM;
- *_start_offset = iov_offset & ~PAGE_MASK;
- pages = *ppages;
-
- for (;;) {
- struct folio *folio = folioq_folio(folioq, slot);
- size_t offset = iov_offset, fsize = folioq_folio_size(folioq, slot);
- size_t part = PAGE_SIZE - offset % PAGE_SIZE;
-
- if (offset < fsize) {
- part = umin(part, umin(maxsize - extracted, fsize - offset));
- count -= part;
- iov_offset += part;
- extracted += part;
-
- *pages = folio_page(folio, offset / PAGE_SIZE);
- get_page(*pages);
- pages++;
- maxpages--;
- }
-
- if (maxpages == 0 || extracted >= maxsize)
- break;
-
- if (iov_offset >= fsize) {
- iov_offset = 0;
- slot++;
- if (slot == folioq_nr_slots(folioq) && folioq->next) {
- folioq = folioq->next;
- slot = 0;
- }
- }
- }
-
- iter->count = count;
- iter->iov_offset = iov_offset;
- iter->folioq = folioq;
- iter->folioq_slot = slot;
- return extracted;
-}
-
static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
pgoff_t index, unsigned int nr_pages)
{
@@ -1249,8 +1091,6 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
}
return maxsize;
}
- if (iov_iter_is_folioq(i))
- return iter_folioq_get_pages(i, pages, maxsize, maxpages, start);
if (iov_iter_is_xarray(i))
return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
WARN_ON_ONCE(iov_iter_is_bvecq(i));
@@ -1366,11 +1206,6 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
return iov_npages(i, maxpages);
if (iov_iter_is_bvec(i))
return bvec_npages(i, maxpages);
- if (iov_iter_is_folioq(i)) {
- unsigned offset = i->iov_offset % PAGE_SIZE;
- int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
- return min(npages, maxpages);
- }
if (iov_iter_is_bvecq(i))
return iov_npages_bvecq(i, maxpages);
if (iov_iter_is_xarray(i)) {
@@ -1654,68 +1489,6 @@ void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
i->nr_segs = state->nr_segs;
}
-/*
- * Extract a list of contiguous pages from an ITER_FOLIOQ iterator. This does
- * not get references on the pages, nor does it get a pin on them.
- */
-static ssize_t iov_iter_extract_folioq_pages(struct iov_iter *i,
- struct page ***pages, size_t maxsize,
- unsigned int maxpages,
- iov_iter_extraction_t extraction_flags,
- size_t *offset0)
-{
- const struct folio_queue *folioq = i->folioq;
- struct page **p;
- unsigned int nr = 0;
- size_t extracted = 0, offset, slot = i->folioq_slot;
-
- if (slot >= folioq_nr_slots(folioq)) {
- folioq = folioq->next;
- slot = 0;
- if (WARN_ON(i->iov_offset != 0))
- return -EIO;
- }
-
- offset = i->iov_offset & ~PAGE_MASK;
- *offset0 = offset;
-
- maxpages = want_pages_array(pages, maxsize, offset, maxpages);
- if (!maxpages)
- return -ENOMEM;
- p = *pages;
-
- for (;;) {
- struct folio *folio = folioq_folio(folioq, slot);
- size_t offset = i->iov_offset, fsize = folioq_folio_size(folioq, slot);
- size_t part = PAGE_SIZE - offset % PAGE_SIZE;
-
- if (offset < fsize) {
- part = umin(part, umin(maxsize - extracted, fsize - offset));
- i->count -= part;
- i->iov_offset += part;
- extracted += part;
-
- p[nr++] = folio_page(folio, offset / PAGE_SIZE);
- }
-
- if (nr >= maxpages || extracted >= maxsize)
- break;
-
- if (i->iov_offset >= fsize) {
- i->iov_offset = 0;
- slot++;
- if (slot == folioq_nr_slots(folioq) && folioq->next) {
- folioq = folioq->next;
- slot = 0;
- }
- }
- }
-
- i->folioq = folioq;
- i->folioq_slot = slot;
- return extracted;
-}
-
/*
* Extract a list of virtually contiguous pages from an ITER_BVECQ iterator.
* This does not get references on the pages, nor does it get a pin on them.
@@ -2078,8 +1851,8 @@ static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
* added to the pages, but refs will not be taken.
* iov_iter_extract_will_pin() will return true.
*
- * (*) If the iterator is ITER_KVEC, ITER_BVEC, ITER_FOLIOQ or ITER_XARRAY, the
- * pages are merely listed; no extra refs or pins are obtained.
+ * (*) If the iterator is ITER_KVEC, ITER_BVEC, ITER_BVECQ or ITER_XARRAY,
+ * the pages are merely listed; no extra refs or pins are obtained.
* iov_iter_extract_will_pin() will return 0.
*
* Note also:
@@ -2114,10 +1887,6 @@ ssize_t iov_iter_extract_pages(struct iov_iter *i,
return iov_iter_extract_bvec_pages(i, pages, maxsize,
maxpages, extraction_flags,
offset0);
- if (iov_iter_is_folioq(i))
- return iov_iter_extract_folioq_pages(i, pages, maxsize,
- maxpages, extraction_flags,
- offset0);
if (iov_iter_is_bvecq(i))
return iov_iter_extract_bvecq_pages(i, pages, maxsize,
maxpages, extraction_flags,
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 61ca42ac53f3..84a6e2983f2a 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -11,7 +11,6 @@
#include <linux/kmemleak.h>
#include <linux/bvec.h>
#include <linux/uio.h>
-#include <linux/folio_queue.h>
/**
* sg_nents - return total count of entries in scatterlist
@@ -1267,67 +1266,6 @@ static ssize_t extract_kvec_to_sg(struct iov_iter *iter,
return ret;
}
-/*
- * Extract up to sg_max folios from an FOLIOQ-type iterator and add them to
- * the scatterlist. The pages are not pinned.
- */
-static ssize_t extract_folioq_to_sg(struct iov_iter *iter,
- ssize_t maxsize,
- struct sg_table *sgtable,
- unsigned int sg_max,
- iov_iter_extraction_t extraction_flags)
-{
- const struct folio_queue *folioq = iter->folioq;
- struct scatterlist *sg = sgtable->sgl + sgtable->nents;
- unsigned int slot = iter->folioq_slot;
- ssize_t ret = 0;
- size_t offset = iter->iov_offset;
-
- BUG_ON(!folioq);
-
- if (slot >= folioq_nr_slots(folioq)) {
- folioq = folioq->next;
- if (WARN_ON_ONCE(!folioq))
- return 0;
- slot = 0;
- }
-
- do {
- struct folio *folio = folioq_folio(folioq, slot);
- size_t fsize = folioq_folio_size(folioq, slot);
-
- if (offset < fsize) {
- size_t part = umin(maxsize - ret, fsize - offset);
-
- sg_set_page(sg, folio_page(folio, 0), part, offset);
- sgtable->nents++;
- sg++;
- sg_max--;
- offset += part;
- ret += part;
- }
-
- if (offset >= fsize) {
- offset = 0;
- slot++;
- if (slot >= folioq_nr_slots(folioq)) {
- if (!folioq->next) {
- WARN_ON_ONCE(ret < iter->count);
- break;
- }
- folioq = folioq->next;
- slot = 0;
- }
- }
- } while (sg_max > 0 && ret < maxsize);
-
- iter->folioq = folioq;
- iter->folioq_slot = slot;
- iter->iov_offset = offset;
- iter->count -= ret;
- return ret;
-}
-
/*
* Extract up to sg_max folios from an BVECQ-type iterator and add them to
* the scatterlist. The pages are not pinned.
@@ -1452,7 +1390,7 @@ static ssize_t extract_xarray_to_sg(struct iov_iter *iter,
* addition of @sg_max elements.
*
* The pages referred to by UBUF- and IOVEC-type iterators are extracted and
- * pinned; BVEC-, KVEC-, FOLIOQ- and XARRAY-type are extracted but aren't
+ * pinned; BVEC-, KVEC-, BVECQ- and XARRAY-type are extracted but aren't
* pinned; DISCARD-type is not supported.
*
* No end mark is placed on the scatterlist; that's left to the caller.
@@ -1485,9 +1423,6 @@ ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t maxsize,
case ITER_KVEC:
return extract_kvec_to_sg(iter, maxsize, sgtable, sg_max,
extraction_flags);
- case ITER_FOLIOQ:
- return extract_folioq_to_sg(iter, maxsize, sgtable, sg_max,
- extraction_flags);
case ITER_BVECQ:
return extract_bvecq_to_sg(iter, maxsize, sgtable, sg_max,
extraction_flags);
diff --git a/lib/tests/kunit_iov_iter.c b/lib/tests/kunit_iov_iter.c
index 644a1b9eb2d3..7ab915f77732 100644
--- a/lib/tests/kunit_iov_iter.c
+++ b/lib/tests/kunit_iov_iter.c
@@ -12,7 +12,6 @@
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/bvec.h>
-#include <linux/folio_queue.h>
#include <kunit/test.h>
MODULE_DESCRIPTION("iov_iter testing");
@@ -363,179 +362,6 @@ static void __init iov_kunit_copy_from_bvec(struct kunit *test)
KUNIT_SUCCEED(test);
}
-static void iov_kunit_destroy_folioq(void *data)
-{
- struct folio_queue *folioq, *next;
-
- for (folioq = data; folioq; folioq = next) {
- next = folioq->next;
- for (int i = 0; i < folioq_nr_slots(folioq); i++)
- if (folioq_folio(folioq, i))
- folio_put(folioq_folio(folioq, i));
- kfree(folioq);
- }
-}
-
-static void __init iov_kunit_load_folioq(struct kunit *test,
- struct iov_iter *iter, int dir,
- struct folio_queue *folioq,
- struct page **pages, size_t npages)
-{
- struct folio_queue *p = folioq;
- size_t size = 0;
- int i;
-
- for (i = 0; i < npages; i++) {
- if (folioq_full(p)) {
- p->next = kzalloc_obj(struct folio_queue);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p->next);
- folioq_init(p->next, 0);
- p->next->prev = p;
- p = p->next;
- }
- folioq_append(p, page_folio(pages[i]));
- size += PAGE_SIZE;
- }
- iov_iter_folio_queue(iter, dir, folioq, 0, 0, size);
-}
-
-static struct folio_queue *iov_kunit_create_folioq(struct kunit *test)
-{
- struct folio_queue *folioq;
-
- folioq = kzalloc_obj(struct folio_queue);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, folioq);
- kunit_add_action_or_reset(test, iov_kunit_destroy_folioq, folioq);
- folioq_init(folioq, 0);
- return folioq;
-}
-
-/*
- * Test copying to a ITER_FOLIOQ-type iterator.
- */
-static void __init iov_kunit_copy_to_folioq(struct kunit *test)
-{
- const struct kvec_test_range *pr;
- struct iov_iter iter;
- struct folio_queue *folioq;
- struct page **spages, **bpages;
- u8 *scratch, *buffer;
- size_t bufsize, npages, size, copied;
- int i, patt;
-
- bufsize = 0x100000;
- npages = bufsize / PAGE_SIZE;
-
- folioq = iov_kunit_create_folioq(test);
-
- scratch = iov_kunit_create_buffer(test, &spages, npages);
- for (i = 0; i < bufsize; i++)
- scratch[i] = pattern(i);
-
- buffer = iov_kunit_create_buffer(test, &bpages, npages);
- memset(buffer, 0, bufsize);
-
- iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);
-
- i = 0;
- for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
- size = pr->to - pr->from;
- KUNIT_ASSERT_LE(test, pr->to, bufsize);
-
- iov_iter_folio_queue(&iter, READ, folioq, 0, 0, pr->to);
- iov_iter_advance(&iter, pr->from);
- copied = copy_to_iter(scratch + i, size, &iter);
-
- KUNIT_EXPECT_EQ(test, copied, size);
- KUNIT_EXPECT_EQ(test, iter.count, 0);
- KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
- i += size;
- if (test->status == KUNIT_FAILURE)
- goto stop;
- }
-
- /* Build the expected image in the scratch buffer. */
- patt = 0;
- memset(scratch, 0, bufsize);
- for (pr = kvec_test_ranges; pr->from >= 0; pr++)
- for (i = pr->from; i < pr->to; i++)
- scratch[i] = pattern(patt++);
-
- /* Compare the images */
- for (i = 0; i < bufsize; i++) {
- KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
- if (buffer[i] != scratch[i])
- return;
- }
-
-stop:
- KUNIT_SUCCEED(test);
-}
-
-/*
- * Test copying from a ITER_FOLIOQ-type iterator.
- */
-static void __init iov_kunit_copy_from_folioq(struct kunit *test)
-{
- const struct kvec_test_range *pr;
- struct iov_iter iter;
- struct folio_queue *folioq;
- struct page **spages, **bpages;
- u8 *scratch, *buffer;
- size_t bufsize, npages, size, copied;
- int i, j;
-
- bufsize = 0x100000;
- npages = bufsize / PAGE_SIZE;
-
- folioq = iov_kunit_create_folioq(test);
-
- buffer = iov_kunit_create_buffer(test, &bpages, npages);
- for (i = 0; i < bufsize; i++)
- buffer[i] = pattern(i);
-
- scratch = iov_kunit_create_buffer(test, &spages, npages);
- memset(scratch, 0, bufsize);
-
- iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);
-
- i = 0;
- for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
- size = pr->to - pr->from;
- KUNIT_ASSERT_LE(test, pr->to, bufsize);
-
- iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
- iov_iter_advance(&iter, pr->from);
- copied = copy_from_iter(scratch + i, size, &iter);
-
- KUNIT_EXPECT_EQ(test, copied, size);
- KUNIT_EXPECT_EQ(test, iter.count, 0);
- KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
- i += size;
- }
-
- /* Build the expected image in the main buffer. */
- i = 0;
- memset(buffer, 0, bufsize);
- for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
- for (j = pr->from; j < pr->to; j++) {
- buffer[i++] = pattern(j);
- if (i >= bufsize)
- goto stop;
- }
- }
-stop:
-
- /* Compare the images */
- for (i = 0; i < bufsize; i++) {
- KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
- if (scratch[i] != buffer[i])
- return;
- }
-
- KUNIT_SUCCEED(test);
-}
-
static void iov_kunit_destroy_bvecq(void *data)
{
struct bvecq *bq, *next;
@@ -1028,85 +854,6 @@ static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
KUNIT_SUCCEED(test);
}
-/*
- * Test the extraction of ITER_FOLIOQ-type iterators.
- */
-static void __init iov_kunit_extract_pages_folioq(struct kunit *test)
-{
- const struct kvec_test_range *pr;
- struct folio_queue *folioq;
- struct iov_iter iter;
- struct page **bpages, *pagelist[8], **pages = pagelist;
- ssize_t len;
- size_t bufsize, size = 0, npages;
- int i, from;
-
- bufsize = 0x100000;
- npages = bufsize / PAGE_SIZE;
-
- folioq = iov_kunit_create_folioq(test);
-
- iov_kunit_create_buffer(test, &bpages, npages);
- iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);
-
- for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
- from = pr->from;
- size = pr->to - from;
- KUNIT_ASSERT_LE(test, pr->to, bufsize);
-
- iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
- iov_iter_advance(&iter, from);
-
- do {
- size_t offset0 = LONG_MAX;
-
- for (i = 0; i < ARRAY_SIZE(pagelist); i++)
- pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
-
- len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
- ARRAY_SIZE(pagelist), 0, &offset0);
- KUNIT_EXPECT_GE(test, len, 0);
- if (len < 0)
- break;
- KUNIT_EXPECT_LE(test, len, size);
- KUNIT_EXPECT_EQ(test, iter.count, size - len);
- if (len == 0)
- break;
- size -= len;
- KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
- KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
-
- for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
- struct page *p;
- ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
- int ix;
-
- KUNIT_ASSERT_GE(test, part, 0);
- ix = from / PAGE_SIZE;
- KUNIT_ASSERT_LT(test, ix, npages);
- p = bpages[ix];
- KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
- KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
- from += part;
- len -= part;
- KUNIT_ASSERT_GE(test, len, 0);
- if (len == 0)
- break;
- offset0 = 0;
- }
-
- if (test->status == KUNIT_FAILURE)
- goto stop;
- } while (iov_iter_count(&iter) > 0);
-
- KUNIT_EXPECT_EQ(test, size, 0);
- KUNIT_EXPECT_EQ(test, iter.count, 0);
- }
-
-stop:
- KUNIT_SUCCEED(test);
-}
-
/*
* Test the extraction of ITER_XARRAY-type iterators.
*/
@@ -1191,15 +938,12 @@ static struct kunit_case __refdata iov_kunit_cases[] = {
KUNIT_CASE(iov_kunit_copy_from_kvec),
KUNIT_CASE(iov_kunit_copy_to_bvec),
KUNIT_CASE(iov_kunit_copy_from_bvec),
- KUNIT_CASE(iov_kunit_copy_to_folioq),
- KUNIT_CASE(iov_kunit_copy_from_folioq),
KUNIT_CASE(iov_kunit_copy_to_bvecq),
KUNIT_CASE(iov_kunit_copy_from_bvecq),
KUNIT_CASE(iov_kunit_copy_to_xarray),
KUNIT_CASE(iov_kunit_copy_from_xarray),
KUNIT_CASE(iov_kunit_extract_pages_kvec),
KUNIT_CASE(iov_kunit_extract_pages_bvec),
- KUNIT_CASE(iov_kunit_extract_pages_folioq),
KUNIT_CASE(iov_kunit_extract_pages_xarray),
{}
};
next prev parent reply other threads:[~2026-03-04 14:05 UTC|newest]
Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-04 14:03 [RFC PATCH 00/17] netfs: [WIP] Keep track of folios in a segmented bio_vec[] chain David Howells
2026-03-04 14:03 ` [RFC PATCH 01/17] netfs: Fix unbuffered/DIO writes to dispatch subrequests in strict sequence David Howells
2026-03-04 14:03 ` [RFC PATCH 02/17] vfs: Implement a FIEMAP callback David Howells
2026-03-04 14:06 ` Christoph Hellwig
2026-03-04 14:21 ` David Howells
2026-03-04 14:25 ` Christoph Hellwig
2026-03-04 14:34 ` David Howells
2026-03-04 14:03 ` [RFC PATCH 03/17] iov_iter: Add a segmented queue of bio_vec[] David Howells
2026-03-04 14:03 ` [RFC PATCH 04/17] Add a function to kmap one page of a multipage bio_vec David Howells
2026-03-04 14:03 ` [RFC PATCH 05/17] netfs: Add some tools for managing bvecq chains David Howells
2026-03-04 14:03 ` [RFC PATCH 06/17] afs: Use a bvecq to hold dir content rather than folioq David Howells
2026-03-04 14:03 ` [RFC PATCH 07/17] netfs: Add a function to extract from an iter into a bvecq David Howells
2026-03-04 14:03 ` [RFC PATCH 08/17] cifs: Use a bvecq for buffering instead of a folioq David Howells
2026-03-04 14:03 ` [RFC PATCH 09/17] cifs: Support ITER_BVECQ in smb_extract_iter_to_rdma() David Howells
2026-03-04 14:03 ` [RFC PATCH 10/17] netfs: Switch to using bvecq rather than folio_queue and rolling_buffer David Howells
2026-03-04 14:03 ` [RFC PATCH 11/17] cifs: Remove support for ITER_KVEC/BVEC/FOLIOQ from smb_extract_iter_to_rdma() David Howells
2026-03-04 14:03 ` [RFC PATCH 12/17] netfs: Remove netfs_alloc/free_folioq_buffer() David Howells
2026-03-04 14:03 ` [RFC PATCH 13/17] netfs: Remove netfs_extract_user_iter() David Howells
2026-03-04 14:03 ` David Howells [this message]
2026-03-04 14:03 ` [RFC PATCH 15/17] netfs: Remove folio_queue and rolling_buffer David Howells
2026-03-04 14:03 ` [RFC PATCH 16/17] netfs: Check for too much data being read David Howells
2026-03-04 14:03 ` [RFC PATCH 17/17] netfs: Combine prepare and issue ops and grab the buffers on request David Howells
2026-03-04 14:39 ` Christoph Hellwig
2026-03-04 14:51 ` David Howells
2026-03-04 15:01 ` Christoph Hellwig
2026-03-23 18:37 ` ChenXiaoSong
2026-03-23 20:14 ` David Howells
2026-03-23 22:44 ` Paulo Alcantara
2026-03-24 1:03 ` ChenXiaoSong
2026-03-24 7:16 ` David Howells
2026-03-24 7:38 ` ChenXiaoSong
2026-03-24 7:53 ` David Howells
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260304140328.112636-15-dhowells@redhat.com \
--to=dhowells@redhat.com \
--cc=axboe@kernel.dk \
--cc=ceph-devel@vger.kernel.org \
--cc=christian@brauner.io \
--cc=hch@infradead.org \
--cc=leon@kernel.org \
--cc=linux-afs@lists.infradead.org \
--cc=linux-cifs@vger.kernel.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-nfs@vger.kernel.org \
--cc=netfs@lists.linux.dev \
--cc=pc@manguebit.com \
--cc=pc@manguebit.org \
--cc=sfrench@samba.org \
--cc=v9fs@lists.linux.dev \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.