From: David Howells <dhowells@redhat.com>
To: Al Viro <viro@zeniv.linux.org.uk>, Christoph Hellwig <hch@infradead.org>
Cc: David Howells <dhowells@redhat.com>,
Matthew Wilcox <willy@infradead.org>,
Jens Axboe <axboe@kernel.dk>, Jan Kara <jack@suse.cz>,
Jeff Layton <jlayton@kernel.org>,
Logan Gunthorpe <logang@deltatee.com>,
linux-fsdevel@vger.kernel.org, linux-block@vger.kernel.org,
linux-kernel@vger.kernel.org
Subject: [PATCH v8 01/10] iov_iter: Define flags to qualify page extraction.
Date: Mon, 23 Jan 2023 17:29:58 +0000
Message-ID: <20230123173007.325544-2-dhowells@redhat.com>
In-Reply-To: <20230123173007.325544-1-dhowells@redhat.com>
Define flags that qualify page extraction, to be passed into
iov_iter_*_pages*() in place of FOLL_* flags.
For now, only a flag to allow peer-to-peer DMA is supported.
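As a caller-side sketch (a hypothetical helper, not part of this patch),
the new flag is passed where a FOLL_* flag was previously given, using the
iov_iter_get_pages() signature as modified here:

	/*
	 * Hypothetical example: extract pages from an iterator, permitting
	 * P2PDMA pages only when the underlying queue supports them.
	 */
	static ssize_t extract_example(struct iov_iter *iter, struct page **pages,
				       unsigned int max_pages, bool p2pdma_ok)
	{
		unsigned int extract_flags = 0;
		size_t offset;

		if (p2pdma_ok)
			extract_flags |= ITER_ALLOW_P2PDMA;

		/* Returns the number of bytes covered by the filled page list. */
		return iov_iter_get_pages(iter, pages, LONG_MAX, max_pages,
					  &offset, extract_flags);
	}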
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Al Viro <viro@zeniv.linux.org.uk>
cc: Christoph Hellwig <hch@infradead.org>
cc: Jens Axboe <axboe@kernel.dk>
cc: Logan Gunthorpe <logang@deltatee.com>
cc: linux-fsdevel@vger.kernel.org
cc: linux-block@vger.kernel.org
---
Notes:
ver #7)
- Don't use FOLL_* as a parameter, but rather define constants
specifically to use with iov_iter_*_pages*().
- Drop the I/O direction constants for now.
block/bio.c | 6 +++---
block/blk-map.c | 8 ++++----
include/linux/uio.h | 7 +++++--
lib/iov_iter.c | 14 ++++++++------
4 files changed, 20 insertions(+), 15 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index ab59a491a883..a289bbff036f 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1249,7 +1249,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
struct page **pages = (struct page **)bv;
- unsigned int gup_flags = 0;
+ unsigned int extract_flags = 0;
ssize_t size, left;
unsigned len, i = 0;
size_t offset, trim;
@@ -1264,7 +1264,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
- gup_flags |= FOLL_PCI_P2PDMA;
+ extract_flags |= ITER_ALLOW_P2PDMA;
/*
* Each segment in the iov is required to be a block size multiple.
@@ -1275,7 +1275,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
*/
size = iov_iter_get_pages(iter, pages,
UINT_MAX - bio->bi_iter.bi_size,
- nr_pages, &offset, gup_flags);
+ nr_pages, &offset, extract_flags);
if (unlikely(size <= 0))
return size ? size : -EFAULT;
diff --git a/block/blk-map.c b/block/blk-map.c
index 19940c978c73..bc111261fc82 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -267,7 +267,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
{
unsigned int max_sectors = queue_max_hw_sectors(rq->q);
unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
- unsigned int gup_flags = 0;
+ unsigned int extract_flags = 0;
struct bio *bio;
int ret;
int j;
@@ -280,7 +280,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
return -ENOMEM;
if (blk_queue_pci_p2pdma(rq->q))
- gup_flags |= FOLL_PCI_P2PDMA;
+ extract_flags |= ITER_ALLOW_P2PDMA;
while (iov_iter_count(iter)) {
struct page **pages, *stack_pages[UIO_FASTIOV];
@@ -291,10 +291,10 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
if (nr_vecs <= ARRAY_SIZE(stack_pages)) {
pages = stack_pages;
bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
- nr_vecs, &offs, gup_flags);
+ nr_vecs, &offs, extract_flags);
} else {
bytes = iov_iter_get_pages_alloc(iter, &pages,
- LONG_MAX, &offs, gup_flags);
+ LONG_MAX, &offs, extract_flags);
}
if (unlikely(bytes <= 0)) {
ret = bytes ? bytes : -EFAULT;
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 9f158238edba..46d5080314c6 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -252,12 +252,12 @@ void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *
loff_t start, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start,
- unsigned gup_flags);
+ unsigned extract_flags);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize, size_t *start,
- unsigned gup_flags);
+ unsigned extract_flags);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
@@ -360,4 +360,7 @@ static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
};
}
+/* Flags for iov_iter_get/extract_pages*() */
+#define ITER_ALLOW_P2PDMA 0x01 /* Allow P2PDMA on the extracted pages */
+
#endif
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f9a3ff37ecd1..fb04abe7d746 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1432,9 +1432,9 @@ static struct page *first_bvec_segment(const struct iov_iter *i,
static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize,
unsigned int maxpages, size_t *start,
- unsigned int gup_flags)
+ unsigned int extract_flags)
{
- unsigned int n;
+ unsigned int n, gup_flags = 0;
if (maxsize > i->count)
maxsize = i->count;
@@ -1442,6 +1442,8 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
return 0;
if (maxsize > MAX_RW_COUNT)
maxsize = MAX_RW_COUNT;
+ if (extract_flags & ITER_ALLOW_P2PDMA)
+ gup_flags |= FOLL_PCI_P2PDMA;
if (likely(user_backed_iter(i))) {
unsigned long addr;
@@ -1495,14 +1497,14 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
ssize_t iov_iter_get_pages(struct iov_iter *i,
struct page **pages, size_t maxsize, unsigned maxpages,
- size_t *start, unsigned gup_flags)
+ size_t *start, unsigned extract_flags)
{
if (!maxpages)
return 0;
BUG_ON(!pages);
return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages,
- start, gup_flags);
+ start, extract_flags);
}
EXPORT_SYMBOL_GPL(iov_iter_get_pages);
@@ -1515,14 +1517,14 @@ EXPORT_SYMBOL(iov_iter_get_pages2);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize,
- size_t *start, unsigned gup_flags)
+ size_t *start, unsigned extract_flags)
{
ssize_t len;
*pages = NULL;
len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start,
- gup_flags);
+ extract_flags);
if (len <= 0) {
kvfree(*pages);
*pages = NULL;