From: Dave Kleikamp <dave.kleikamp@oracle.com>
To: linux-fsdevel@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, Zach Brown <zab@zabbo.net>,
Dave Kleikamp <dave.kleikamp@oracle.com>
Subject: [RFC PATCH 04/22] iov_iter: hide iovec details behind ops function pointers
Date: Mon, 27 Feb 2012 15:19:18 -0600
Message-ID: <1330377576-3659-5-git-send-email-dave.kleikamp@oracle.com>
In-Reply-To: <1330377576-3659-1-git-send-email-dave.kleikamp@oracle.com>
From: Zach Brown <zab@zabbo.net>
This moves the current iov_iter functions behind an ops struct of
function pointers. The current iov_iter functions all work with memory
described by iovec arrays of user-space pointers.
This patch is part of a series that lets us specify memory with
bio_vec arrays of page pointers instead. Moving to an iov_iter
operations struct lets later patches in the series add that support
simply by adding another set of function pointers.
I only came to this after initially trying to teach the current
iov_iter functions about bio_vecs by adding conditional branches for
bio_vecs to every function. It wasn't pretty. This approach seems to
be the lesser evil.
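For illustration, here is a rough sketch of how a second memory type
could slot in behind the same ops struct. A later patch in this series
(05/22, "iov_iter: add bvec support") adds the real thing; the
ii_bvec_* names and bodies below are hypothetical and handle only the
single-segment case:

	/* hypothetical bio_vec-backed method (sketch only) */
	static size_t ii_bvec_copy_to_user(struct page *page,
			struct iov_iter *i, unsigned long offset, size_t bytes)
	{
		struct bio_vec *bvec = (struct bio_vec *)i->data;
		void *src, *dst;

		bytes = min_t(size_t, bytes, bvec->bv_len - i->iov_offset);
		src = kmap_atomic(page, KM_USER0);
		dst = kmap_atomic(bvec->bv_page, KM_USER1);
		memcpy(dst + bvec->bv_offset + i->iov_offset,
		       src + offset, bytes);
		kunmap_atomic(dst, KM_USER1);
		kunmap_atomic(src, KM_USER0);
		return bytes;
	}

	/* ... remaining ii_bvec_* methods elided ... */

	struct iov_iter_ops ii_bvec_ops = {
		.ii_copy_to_user	= ii_bvec_copy_to_user,
		/* ... */
	};

Callers then pick the memory type when initializing the iterator, and
generic code dispatches through i->ops without knowing what kind of
memory backs it.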
Signed-off-by: Dave Kleikamp <dave.kleikamp@oracle.com>
Cc: Zach Brown <zab@zabbo.net>
---
 include/linux/fs.h |   65 ++++++++++++++++++++++++++++++++++++++++-----------
 mm/iov-iter.c      |   66 ++++++++++++++++++++++++++++++----------------------
 2 files changed, 90 insertions(+), 41 deletions(-)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c66aa4b..1a64eda 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -529,29 +529,68 @@ struct address_space;
struct writeback_control;
struct iov_iter {
- const struct iovec *iov;
+ struct iov_iter_ops *ops;
+ unsigned long data;
unsigned long nr_segs;
size_t iov_offset;
size_t count;
};
-size_t iov_iter_copy_to_user_atomic(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes);
-size_t iov_iter_copy_to_user(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes);
-size_t iov_iter_copy_from_user_atomic(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes);
-size_t iov_iter_copy_from_user(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes);
-void iov_iter_advance(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-size_t iov_iter_single_seg_count(struct iov_iter *i);
+struct iov_iter_ops {
+ size_t (*ii_copy_to_user_atomic)(struct page *, struct iov_iter *,
+ unsigned long, size_t);
+ size_t (*ii_copy_to_user)(struct page *, struct iov_iter *,
+ unsigned long, size_t);
+ size_t (*ii_copy_from_user_atomic)(struct page *, struct iov_iter *,
+ unsigned long, size_t);
+ size_t (*ii_copy_from_user)(struct page *, struct iov_iter *,
+ unsigned long, size_t);
+ void (*ii_advance)(struct iov_iter *, size_t);
+ int (*ii_fault_in_readable)(struct iov_iter *, size_t);
+ size_t (*ii_single_seg_count)(struct iov_iter *);
+};
+
+static inline size_t iov_iter_copy_to_user_atomic(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ return i->ops->ii_copy_to_user_atomic(page, i, offset, bytes);
+}
+static inline size_t iov_iter_copy_to_user(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ return i->ops->ii_copy_to_user(page, i, offset, bytes);
+}
+static inline size_t iov_iter_copy_from_user_atomic(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ return i->ops->ii_copy_from_user_atomic(page, i, offset, bytes);
+}
+static inline size_t iov_iter_copy_from_user(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ return i->ops->ii_copy_from_user(page, i, offset, bytes);
+}
+static inline void iov_iter_advance(struct iov_iter *i, size_t bytes)
+{
+ return i->ops->ii_advance(i, bytes);
+}
+static inline int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+ return i->ops->ii_fault_in_readable(i, bytes);
+}
+static inline size_t iov_iter_single_seg_count(struct iov_iter *i)
+{
+ return i->ops->ii_single_seg_count(i);
+}
+
+extern struct iov_iter_ops ii_iovec_ops;
static inline void iov_iter_init(struct iov_iter *i,
const struct iovec *iov, unsigned long nr_segs,
size_t count, size_t written)
{
- i->iov = iov;
+ i->ops = &ii_iovec_ops;
+ i->data = (unsigned long)iov;
i->nr_segs = nr_segs;
i->iov_offset = 0;
i->count = count + written;
diff --git a/mm/iov-iter.c b/mm/iov-iter.c
index eea21ea..83f0db7 100644
--- a/mm/iov-iter.c
+++ b/mm/iov-iter.c
@@ -33,9 +33,10 @@ static size_t __iovec_copy_to_user_inatomic(char *vaddr,
* were sucessfully copied. If a fault is encountered then return the number of
* bytes which were copied.
*/
-size_t iov_iter_copy_to_user_atomic(struct page *page,
+size_t ii_iovec_copy_to_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes)
{
+ struct iovec *iov = (struct iovec *)i->data;
char *kaddr;
size_t copied;
@@ -43,45 +44,44 @@ size_t iov_iter_copy_to_user_atomic(struct page *page,
kaddr = kmap_atomic(page, KM_USER0);
if (likely(i->nr_segs == 1)) {
int left;
- char __user *buf = i->iov->iov_base + i->iov_offset;
+ char __user *buf = iov->iov_base + i->iov_offset;
left = __copy_to_user_inatomic(buf, kaddr + offset, bytes);
copied = bytes - left;
} else {
copied = __iovec_copy_to_user_inatomic(kaddr + offset,
- i->iov, i->iov_offset, bytes);
+ iov, i->iov_offset, bytes);
}
kunmap_atomic(kaddr, KM_USER0);
return copied;
}
-EXPORT_SYMBOL(iov_iter_copy_to_user_atomic);
/*
* This has the same sideeffects and return value as
- * iov_iter_copy_to_user_atomic().
+ * ii_iovec_copy_to_user_atomic().
* The difference is that it attempts to resolve faults.
* Page must not be locked.
*/
-size_t iov_iter_copy_to_user(struct page *page,
+size_t ii_iovec_copy_to_user(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes)
{
+ struct iovec *iov = (struct iovec *)i->data;
char *kaddr;
size_t copied;
kaddr = kmap(page);
if (likely(i->nr_segs == 1)) {
int left;
- char __user *buf = i->iov->iov_base + i->iov_offset;
+ char __user *buf = iov->iov_base + i->iov_offset;
left = copy_to_user(buf, kaddr + offset, bytes);
copied = bytes - left;
} else {
copied = __iovec_copy_to_user_inatomic(kaddr + offset,
- i->iov, i->iov_offset, bytes);
+ iov, i->iov_offset, bytes);
}
kunmap(page);
return copied;
}
-EXPORT_SYMBOL(iov_iter_copy_to_user);
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
@@ -111,9 +111,10 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
* were successfully copied. If a fault is encountered then return the number
* of bytes which were copied.
*/
-size_t iov_iter_copy_from_user_atomic(struct page *page,
+size_t ii_iovec_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes)
{
+ struct iovec *iov = (struct iovec *)i->data;
char *kaddr;
size_t copied;
@@ -121,12 +122,12 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
kaddr = kmap_atomic(page, KM_USER0);
if (likely(i->nr_segs == 1)) {
int left;
- char __user *buf = i->iov->iov_base + i->iov_offset;
+ char __user *buf = iov->iov_base + i->iov_offset;
left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
copied = bytes - left;
} else {
copied = __iovec_copy_from_user_inatomic(kaddr + offset,
- i->iov, i->iov_offset, bytes);
+ iov, i->iov_offset, bytes);
}
kunmap_atomic(kaddr, KM_USER0);
@@ -136,32 +137,32 @@ EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
/*
* This has the same sideeffects and return value as
- * iov_iter_copy_from_user_atomic().
+ * ii_iovec_copy_from_user_atomic().
* The difference is that it attempts to resolve faults.
* Page must not be locked.
*/
-size_t iov_iter_copy_from_user(struct page *page,
+size_t ii_iovec_copy_from_user(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes)
{
+ struct iovec *iov = (struct iovec *)i->data;
char *kaddr;
size_t copied;
kaddr = kmap(page);
if (likely(i->nr_segs == 1)) {
int left;
- char __user *buf = i->iov->iov_base + i->iov_offset;
+ char __user *buf = iov->iov_base + i->iov_offset;
left = __copy_from_user(kaddr + offset, buf, bytes);
copied = bytes - left;
} else {
copied = __iovec_copy_from_user_inatomic(kaddr + offset,
- i->iov, i->iov_offset, bytes);
+ iov, i->iov_offset, bytes);
}
kunmap(page);
return copied;
}
-EXPORT_SYMBOL(iov_iter_copy_from_user);
-void iov_iter_advance(struct iov_iter *i, size_t bytes)
+void ii_iovec_advance(struct iov_iter *i, size_t bytes)
{
BUG_ON(i->count < bytes);
@@ -169,7 +170,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
i->iov_offset += bytes;
i->count -= bytes;
} else {
- const struct iovec *iov = i->iov;
+ struct iovec *iov = (struct iovec *)i->data;
size_t base = i->iov_offset;
unsigned long nr_segs = i->nr_segs;
@@ -191,12 +192,11 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
base = 0;
}
}
- i->iov = iov;
+ i->data = (unsigned long)iov;
i->iov_offset = base;
i->nr_segs = nr_segs;
}
}
-EXPORT_SYMBOL(iov_iter_advance);
/*
* Fault in the first iovec of the given iov_iter, to a maximum length
@@ -207,23 +207,33 @@ EXPORT_SYMBOL(iov_iter_advance);
* would be possible (callers must not rely on the fact that _only_ the
* first iovec will be faulted with the current implementation).
*/
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+int ii_iovec_fault_in_readable(struct iov_iter *i, size_t bytes)
{
- char __user *buf = i->iov->iov_base + i->iov_offset;
- bytes = min(bytes, i->iov->iov_len - i->iov_offset);
+ struct iovec *iov = (struct iovec *)i->data;
+ char __user *buf = iov->iov_base + i->iov_offset;
+ bytes = min(bytes, iov->iov_len - i->iov_offset);
return fault_in_pages_readable(buf, bytes);
}
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
/*
* Return the count of just the current iov_iter segment.
*/
-size_t iov_iter_single_seg_count(struct iov_iter *i)
+size_t ii_iovec_single_seg_count(struct iov_iter *i)
{
- const struct iovec *iov = i->iov;
+ struct iovec *iov = (struct iovec *)i->data;
if (i->nr_segs == 1)
return i->count;
else
return min(i->count, iov->iov_len - i->iov_offset);
}
-EXPORT_SYMBOL(iov_iter_single_seg_count);
+
+struct iov_iter_ops ii_iovec_ops = {
+ .ii_copy_to_user_atomic = ii_iovec_copy_to_user_atomic,
+ .ii_copy_to_user = ii_iovec_copy_to_user,
+ .ii_copy_from_user_atomic = ii_iovec_copy_from_user_atomic,
+ .ii_copy_from_user = ii_iovec_copy_from_user,
+ .ii_advance = ii_iovec_advance,
+ .ii_fault_in_readable = ii_iovec_fault_in_readable,
+ .ii_single_seg_count = ii_iovec_single_seg_count,
+};
+EXPORT_SYMBOL(ii_iovec_ops);
--
1.7.9.2