From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: qemu-block@nongnu.org, famz@redhat.com, kwolf@redhat.com,
stefanha@redhat.com
Subject: [Qemu-devel] [PATCH 09/20] nfs: use BDRV_POLL_WHILE
Date: Mon, 17 Oct 2016 15:54:19 +0200
Message-ID: <1476712470-11660-10-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1476712470-11660-1-git-send-email-pbonzini@redhat.com>

This will make it possible to use nfs_get_allocated_file_size on
a file whose BlockDriverState is not in the main AioContext.
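
For illustration, the synchronous wait in nfs_get_allocated_file_size now
follows the pattern sketched below. This is a sketch only: start_async_op()
and SyncTask are hypothetical stand-ins for nfs_fstat_async() and NFSRPC,
while BlockDriverState and BDRV_POLL_WHILE are the real names (the macro
was introduced earlier in this series).

    /* Illustration only, not part of the patch.  start_async_op() stands
     * in for an asynchronous libnfs call whose completion callback sets
     * task->complete (and task->ret) from the AioContext of bs. */
    typedef struct {
        int ret;
        int complete;
    } SyncTask;

    static int64_t sync_wait_sketch(BlockDriverState *bs)
    {
        SyncTask task = { .ret = 0, .complete = 0 };

        if (start_async_op(bs, &task) != 0) {
            return -ENOMEM;
        }

        /* Instead of looping on aio_poll(client->aio_context, true) by
         * hand, poll through the macro until the callback has fired; it
         * picks the AioContext to poll based on bs, which is what lets
         * this work when bs is not in the main AioContext. */
        BDRV_POLL_WHILE(bs, !task.complete);

        return task.ret;
    }
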
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
v1->v2: no bdrv_wakeup yet

 block/nfs.c | 46 ++++++++++++++++++++++++++++------------------
 1 file changed, 28 insertions(+), 18 deletions(-)

diff --git a/block/nfs.c b/block/nfs.c
index c8df8d8..7474fbc 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -52,6 +52,7 @@ typedef struct NFSClient {
 } NFSClient;
 
 typedef struct NFSRPC {
+    BlockDriverState *bs;
     int ret;
     int complete;
     QEMUIOVector *iov;
@@ -90,11 +91,12 @@ static void nfs_process_write(void *arg)
     nfs_set_events(client);
 }
 
-static void nfs_co_init_task(NFSClient *client, NFSRPC *task)
+static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
 {
     *task = (NFSRPC) {
         .co = qemu_coroutine_self(),
-        .client = client,
+        .bs = bs,
+        .client = bs->opaque,
     };
 }
 
@@ -111,6 +113,7 @@ nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,
 {
     NFSRPC *task = private_data;
     task->ret = ret;
+    assert(!task->st);
     if (task->ret > 0 && task->iov) {
         if (task->ret <= task->iov->size) {
             qemu_iovec_from_buf(task->iov, 0, data, task->ret);
@@ -118,18 +121,11 @@ nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,
             task->ret = -EIO;
         }
     }
-    if (task->ret == 0 && task->st) {
-        memcpy(task->st, data, sizeof(struct stat));
-    }
     if (task->ret < 0) {
         error_report("NFS Error: %s", nfs_get_error(nfs));
     }
-    if (task->co) {
-        aio_bh_schedule_oneshot(task->client->aio_context,
-                                nfs_co_generic_bh_cb, task);
-    } else {
-        task->complete = 1;
-    }
+    aio_bh_schedule_oneshot(task->client->aio_context,
+                            nfs_co_generic_bh_cb, task);
 }
 
 static int coroutine_fn nfs_co_readv(BlockDriverState *bs,
@@ -139,7 +135,7 @@ static int coroutine_fn nfs_co_readv(BlockDriverState *bs,
     NFSClient *client = bs->opaque;
     NFSRPC task;
 
-    nfs_co_init_task(client, &task);
+    nfs_co_init_task(bs, &task);
     task.iov = iov;
 
     if (nfs_pread_async(client->context, client->fh,
@@ -174,7 +170,7 @@ static int coroutine_fn nfs_co_writev(BlockDriverState *bs,
     NFSRPC task;
     char *buf = NULL;
 
-    nfs_co_init_task(client, &task);
+    nfs_co_init_task(bs, &task);
 
     buf = g_try_malloc(nb_sectors * BDRV_SECTOR_SIZE);
     if (nb_sectors && buf == NULL) {
@@ -210,7 +206,7 @@ static int coroutine_fn nfs_co_flush(BlockDriverState *bs)
     NFSClient *client = bs->opaque;
     NFSRPC task;
 
-    nfs_co_init_task(bs, &task);
+    nfs_co_init_task(bs, &task);
 
     if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
                         &task) != 0) {
@@ -496,6 +492,21 @@ static int nfs_has_zero_init(BlockDriverState *bs)
     return client->has_zero_init;
 }
 
+static void
+nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data,
+                               void *private_data)
+{
+    NFSRPC *task = private_data;
+    task->ret = ret;
+    if (task->ret == 0) {
+        memcpy(task->st, data, sizeof(struct stat));
+    }
+    if (task->ret < 0) {
+        error_report("NFS Error: %s", nfs_get_error(nfs));
+    }
+    task->complete = 1;
+}
+
 static int64_t nfs_get_allocated_file_size(BlockDriverState *bs)
 {
     NFSClient *client = bs->opaque;
@@ -507,16 +518,15 @@ static int64_t nfs_get_allocated_file_size(BlockDriverState *bs)
         return client->st_blocks * 512;
     }
 
+    task.bs = bs;
     task.st = &st;
-    if (nfs_fstat_async(client->context, client->fh, nfs_co_generic_cb,
+    if (nfs_fstat_async(client->context, client->fh, nfs_get_allocated_file_size_cb,
                         &task) != 0) {
         return -ENOMEM;
     }
 
     nfs_set_events(client);
-    while (!task.complete) {
-        aio_poll(client->aio_context, true);
-    }
+    BDRV_POLL_WHILE(bs, !task.complete);
 
     return (task.ret < 0 ? task.ret : st.st_blocks * 512);
 }
--
2.7.4