From: Benny Halevy <bhalevy@panasas.com>
To: Trond Myklebust <Trond.Myklebust@netapp.com>,
Boaz Harrosh <bharrosh@panasas.com>
Cc: linux-nfs@vger.kernel.org, Andy Adamson <andros@netapp.com>,
Dean Hildebrand <dhildeb@us.ibm.com>,
Fred Isaman <iisaman@netapp.com>,
Benny Halevy <bhalevy@panasas.com>
Subject: [PATCH v2 02/29] pnfs: direct i/o
Date: Mon, 9 May 2011 20:06:38 +0300
Message-ID: <1304960798-3826-1-git-send-email-bhalevy@panasas.com>
In-Reply-To: <4DC81E8C.6040901@panasas.com>
From: Andy Adamson <andros@netapp.com>
Signed-off-by: Dean Hildebrand <dhildeb@us.ibm.com>
Signed-off-by: Fred Isaman <iisaman@netapp.com>
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
---
fs/nfs/direct.c | 160 +++++++++++++++++++++++++++++++-----------------------
1 files changed, 92 insertions(+), 68 deletions(-)
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 8eea253..55dffb7 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -272,6 +272,38 @@ static const struct rpc_call_ops nfs_read_direct_ops = {
.rpc_release = nfs_direct_read_release,
};
+static long nfs_direct_read_execute(struct nfs_read_data *data,
+ struct rpc_task_setup *task_setup_data,
+ struct rpc_message *msg)
+{
+ struct inode *inode = data->inode;
+ struct rpc_task *task;
+
+ nfs_fattr_init(&data->fattr);
+ msg->rpc_argp = &data->args;
+ msg->rpc_resp = &data->res;
+
+ task_setup_data->task = &data->task;
+ task_setup_data->callback_data = data;
+ NFS_PROTO(inode)->read_setup(data, msg);
+
+ task = rpc_run_task(task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ rpc_put_task(task);
+
+ dprintk("NFS: %5u initiated direct read call "
+ "(req %s/%lld, %u bytes @ offset %llu)\n",
+ data->task.tk_pid,
+ inode->i_sb->s_id,
+ (long long)NFS_FILEID(inode),
+ data->args.count,
+ (unsigned long long)data->args.offset);
+
+ return 0;
+}
+
/*
* For each rsize'd chunk of the user's buffer, dispatch an NFS READ
* operation. If nfs_readdata_alloc() or get_user_pages() fails,
@@ -288,7 +320,6 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
unsigned long user_addr = (unsigned long)iov->iov_base;
size_t count = iov->iov_len;
size_t rsize = NFS_SERVER(inode)->rsize;
- struct rpc_task *task;
struct rpc_message msg = {
.rpc_cred = ctx->cred,
};
@@ -349,26 +380,9 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
data->res.fattr = &data->fattr;
data->res.eof = 0;
data->res.count = bytes;
- nfs_fattr_init(&data->fattr);
- msg.rpc_argp = &data->args;
- msg.rpc_resp = &data->res;
- task_setup_data.task = &data->task;
- task_setup_data.callback_data = data;
- NFS_PROTO(inode)->read_setup(data, &msg);
-
- task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task))
+ if (nfs_direct_read_execute(data, &task_setup_data, &msg))
break;
- rpc_put_task(task);
-
- dprintk("NFS: %5u initiated direct read call "
- "(req %s/%Ld, %zu bytes @ offset %Lu)\n",
- data->task.tk_pid,
- inode->i_sb->s_id,
- (long long)NFS_FILEID(inode),
- bytes,
- (unsigned long long)data->args.offset);
started += bytes;
user_addr += bytes;
@@ -461,12 +475,15 @@ static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+static long nfs_direct_write_execute(struct nfs_write_data *data,
+ struct rpc_task_setup *task_setup_data,
+ struct rpc_message *msg);
+
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
struct inode *inode = dreq->inode;
struct list_head *p;
struct nfs_write_data *data;
- struct rpc_task *task;
struct rpc_message msg = {
.rpc_cred = dreq->ctx->cred,
};
@@ -500,25 +517,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
* Reuse data->task; data->args should not have changed
* since the original request was sent.
*/
- task_setup_data.task = &data->task;
- task_setup_data.callback_data = data;
- msg.rpc_argp = &data->args;
- msg.rpc_resp = &data->res;
- NFS_PROTO(inode)->write_setup(data, &msg);
-
- /*
- * We're called via an RPC callback, so BKL is already held.
- */
- task = rpc_run_task(&task_setup_data);
- if (!IS_ERR(task))
- rpc_put_task(task);
-
- dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
- data->task.tk_pid,
- inode->i_sb->s_id,
- (long long)NFS_FILEID(inode),
- data->args.count,
- (unsigned long long)data->args.offset);
+ nfs_direct_write_execute(data, &task_setup_data, &msg);
}
if (put_dreq(dreq))
@@ -561,10 +560,31 @@ static const struct rpc_call_ops nfs_commit_direct_ops = {
.rpc_release = nfs_direct_commit_release,
};
+static long nfs_direct_commit_execute(struct nfs_direct_req *dreq,
+ struct nfs_write_data *data,
+ struct rpc_task_setup *task_setup_data,
+ struct rpc_message *msg)
+{
+ struct rpc_task *task;
+
+ NFS_PROTO(data->inode)->commit_setup(data, msg);
+
+ /* Note: task.tk_ops->rpc_release will free dreq->commit_data */
+ dreq->commit_data = NULL;
+
+ dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
+
+ task = rpc_run_task(task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ rpc_put_task(task);
+ return 0;
+}
+
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
struct nfs_write_data *data = dreq->commit_data;
- struct rpc_task *task;
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
@@ -593,16 +613,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
data->res.verf = &data->verf;
nfs_fattr_init(&data->fattr);
- NFS_PROTO(data->inode)->commit_setup(data, &msg);
-
- /* Note: task.tk_ops->rpc_release will free dreq->commit_data */
- dreq->commit_data = NULL;
-
- dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
-
- task = rpc_run_task(&task_setup_data);
- if (!IS_ERR(task))
- rpc_put_task(task);
+ nfs_direct_commit_execute(dreq, data, &task_setup_data, &msg);
}
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
@@ -703,6 +714,36 @@ static const struct rpc_call_ops nfs_write_direct_ops = {
.rpc_release = nfs_direct_write_release,
};
+static long nfs_direct_write_execute(struct nfs_write_data *data,
+ struct rpc_task_setup *task_setup_data,
+ struct rpc_message *msg)
+{
+ struct inode *inode = data->inode;
+ struct rpc_task *task;
+
+ task_setup_data->task = &data->task;
+ task_setup_data->callback_data = data;
+ msg->rpc_argp = &data->args;
+ msg->rpc_resp = &data->res;
+ NFS_PROTO(inode)->write_setup(data, msg);
+
+ task = rpc_run_task(task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ rpc_put_task(task);
+
+ dprintk("NFS: %5u initiated direct write call "
+ "(req %s/%lld, %u bytes @ offset %llu)\n",
+ data->task.tk_pid,
+ inode->i_sb->s_id,
+ (long long)NFS_FILEID(inode),
+ data->args.count,
+ (unsigned long long)data->args.offset);
+
+ return 0;
+}
+
/*
* For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
* operation. If nfs_writedata_alloc() or get_user_pages() fails,
@@ -718,7 +759,6 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
struct inode *inode = ctx->path.dentry->d_inode;
unsigned long user_addr = (unsigned long)iov->iov_base;
size_t count = iov->iov_len;
- struct rpc_task *task;
struct rpc_message msg = {
.rpc_cred = ctx->cred,
};
@@ -785,24 +825,8 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
data->res.verf = &data->verf;
nfs_fattr_init(&data->fattr);
- task_setup_data.task = &data->task;
- task_setup_data.callback_data = data;
- msg.rpc_argp = &data->args;
- msg.rpc_resp = &data->res;
- NFS_PROTO(inode)->write_setup(data, &msg);
-
- task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task))
+ if (nfs_direct_write_execute(data, &task_setup_data, &msg))
break;
- rpc_put_task(task);
-
- dprintk("NFS: %5u initiated direct write call "
- "(req %s/%Ld, %zu bytes @ offset %Lu)\n",
- data->task.tk_pid,
- inode->i_sb->s_id,
- (long long)NFS_FILEID(inode),
- bytes,
- (unsigned long long)data->args.offset);
started += bytes;
user_addr += bytes;
--
1.7.3.4
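
The change above is mechanical: each caller's "fill in the rpc_message and rpc_task_setup, rpc_run_task(), rpc_put_task(), dprintk()" sequence moves into an nfs_direct_*_execute() helper that returns 0 on success or the PTR_ERR() from rpc_run_task(), so the caller only tests the return value. The user-space C sketch below illustrates that shape only; struct task, start_async_task() and release_task_ref() are made-up stand-ins for illustration, not kernel or sunrpc APIs.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for the async RPC task. */
struct task {
	int id;
};

/* Stand-in for rpc_run_task(): start the task or report -ENOMEM. */
static struct task *start_async_task(long *err)
{
	struct task *t = malloc(sizeof(*t));

	if (!t) {
		*err = -ENOMEM;
		return NULL;
	}
	t->id = 1;
	*err = 0;
	return t;
}

/* Stand-in for rpc_put_task(): drop the caller's reference. */
static void release_task_ref(struct task *t)
{
	free(t);
}

/*
 * Shape of the nfs_direct_{read,write,commit}_execute() helpers: kick off
 * the task, log, drop the local reference, and report errors by return
 * value so the caller can simply do "if (execute(...)) break;".
 */
static long execute_direct_io(void)
{
	long err;
	struct task *t = start_async_task(&err);

	if (!t)
		return err;
	printf("initiated direct i/o call (task %d)\n", t->id);
	release_task_ref(t);
	return 0;
}

int main(void)
{
	return execute_direct_io() ? EXIT_FAILURE : EXIT_SUCCESS;
}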