From: Peter Lieven <pl@kamp.de>
To: Stefan Hajnoczi <stefanha@redhat.com>, qemu-devel@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Christian Borntraeger <borntraeger@de.ibm.com>
Subject: Re: [Qemu-devel] [PATCH v3 11/25] nfs: implement .bdrv_detach/attach_aio_context()
Date: Thu, 08 May 2014 17:46:23 +0200
Message-ID: <536BA6CF.2040204@kamp.de>
In-Reply-To: <1399559698-31900-12-git-send-email-stefanha@redhat.com>

On 08.05.2014 16:34, Stefan Hajnoczi wrote:
> Drop the assumption that we're using the main AioContext. The following
> functions need to be converted:
> * qemu_bh_new() -> aio_bh_new()
> * qemu_aio_set_fd_handler() -> aio_set_fd_handler()
> * qemu_aio_wait() -> aio_poll()
>
> The .bdrv_detach/attach_aio_context() interfaces also need to be
> implemented to move the fd handler from the old to the new AioContext.
>
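My mental model of how the block layer will drive these two hooks is bdrv_set_aio_context()
from patch 04/25; roughly the sketch below (written from memory, so the exact drain and
locking details may differ from the actual patch):

  /* Caller side, sketched for context -- not the exact implementation. */
  void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
  {
      bdrv_drain_all();                  /* quiesce in-flight requests */
      bdrv_detach_aio_context(bs);       /* driver removes fd handlers from the old context */
      aio_context_acquire(new_context);
      bdrv_attach_aio_context(bs, new_context); /* driver re-registers in the new context */
      aio_context_release(new_context);
  }

With that sequence in mind the nfs changes below look complete to me: nfs_detach_aio_context()
drops the fd handler and resets client->events, and nfs_attach_aio_context() stores the new
context before re-arming the handler via nfs_set_events().
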
> Cc: Peter Lieven <pl@kamp.de>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
> v3:
> * Stash away NFSClient pointer for future use instead of AioContext pointer [Peter Lieven]
> * Use NFSClient->aio_context where possible instead of bdrv_get_aio_context() [Peter Lieven]
> ---
> block/nfs.c | 81 +++++++++++++++++++++++++++++++++++++++++++------------------
> 1 file changed, 57 insertions(+), 24 deletions(-)
>
> diff --git a/block/nfs.c b/block/nfs.c
> index 9fa831f..f6a23f0 100644
> --- a/block/nfs.c
> +++ b/block/nfs.c
> @@ -40,6 +40,7 @@ typedef struct NFSClient {
>      struct nfsfh *fh;
>      int events;
>      bool has_zero_init;
> +    AioContext *aio_context;
>  } NFSClient;
> 
>  typedef struct NFSRPC {
> @@ -49,6 +50,7 @@ typedef struct NFSRPC {
>      struct stat *st;
>      Coroutine *co;
>      QEMUBH *bh;
> +    NFSClient *client;
>  } NFSRPC;
> 
>  static void nfs_process_read(void *arg);
> @@ -58,10 +60,11 @@ static void nfs_set_events(NFSClient *client)
>  {
>      int ev = nfs_which_events(client->context);
>      if (ev != client->events) {
> -        qemu_aio_set_fd_handler(nfs_get_fd(client->context),
> -                                (ev & POLLIN) ? nfs_process_read : NULL,
> -                                (ev & POLLOUT) ? nfs_process_write : NULL,
> -                                client);
> +        aio_set_fd_handler(client->aio_context,
> +                           nfs_get_fd(client->context),
> +                           (ev & POLLIN) ? nfs_process_read : NULL,
> +                           (ev & POLLOUT) ? nfs_process_write : NULL,
> +                           client);
> 
>      }
>      client->events = ev;
> @@ -84,7 +87,8 @@ static void nfs_process_write(void *arg)
>  static void nfs_co_init_task(NFSClient *client, NFSRPC *task)
>  {
>      *task = (NFSRPC) {
> -        .co = qemu_coroutine_self(),
> +        .co     = qemu_coroutine_self(),
> +        .client = client,
>      };
>  }
> 
> @@ -116,7 +120,8 @@ nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,
>          error_report("NFS Error: %s", nfs_get_error(nfs));
>      }
>      if (task->co) {
> -        task->bh = qemu_bh_new(nfs_co_generic_bh_cb, task);
> +        task->bh = aio_bh_new(task->client->aio_context,
> +                              nfs_co_generic_bh_cb, task);
>          qemu_bh_schedule(task->bh);
>      }
>  }
> @@ -224,13 +229,34 @@ static QemuOptsList runtime_opts = {
>      },
>  };
> 
> +static void nfs_detach_aio_context(BlockDriverState *bs)
> +{
> +    NFSClient *client = bs->opaque;
> +
> +    aio_set_fd_handler(client->aio_context,
> +                       nfs_get_fd(client->context),
> +                       NULL, NULL, NULL);
> +    client->events = 0;
> +}
> +
> +static void nfs_attach_aio_context(BlockDriverState *bs,
> +                                   AioContext *new_context)
> +{
> +    NFSClient *client = bs->opaque;
> +
> +    client->aio_context = new_context;
> +    nfs_set_events(client);
> +}
> +
>  static void nfs_client_close(NFSClient *client)
>  {
>      if (client->context) {
>          if (client->fh) {
>              nfs_close(client->context, client->fh);
>          }
> -        qemu_aio_set_fd_handler(nfs_get_fd(client->context), NULL, NULL, NULL);
> +        aio_set_fd_handler(client->aio_context,
> +                           nfs_get_fd(client->context),
> +                           NULL, NULL, NULL);
>          nfs_destroy_context(client->context);
>      }
>      memset(client, 0, sizeof(NFSClient));
> @@ -341,6 +367,8 @@ static int nfs_file_open(BlockDriverState *bs, QDict *options, int flags,
>      QemuOpts *opts;
>      Error *local_err = NULL;
> 
> +    client->aio_context = bdrv_get_aio_context(bs);
> +
>      opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
>      qemu_opts_absorb_qdict(opts, options, &local_err);
>      if (local_err) {
> @@ -364,6 +392,8 @@ static int nfs_file_create(const char *url, QEMUOptionParameter *options,
>      int64_t total_size = 0;
>      NFSClient *client = g_malloc0(sizeof(NFSClient));
> 
> +    client->aio_context = qemu_get_aio_context();
> +
>      /* Read out options */
>      while (options && options->name) {
>          if (!strcmp(options->name, "size")) {
> @@ -403,7 +433,7 @@ static int64_t nfs_get_allocated_file_size(BlockDriverState *bs)
> 
>      while (!task.complete) {
>          nfs_set_events(client);
> -        qemu_aio_wait();
> +        aio_poll(client->aio_context, true);
>      }
> 
>      return (task.ret < 0 ? task.ret : st.st_blocks * st.st_blksize);
> @@ -416,22 +446,25 @@ static int nfs_file_truncate(BlockDriverState *bs, int64_t offset)
>  }
> 
>  static BlockDriver bdrv_nfs = {
> -    .format_name = "nfs",
> -    .protocol_name = "nfs",
> -
> -    .instance_size = sizeof(NFSClient),
> -    .bdrv_needs_filename = true,
> -    .bdrv_has_zero_init = nfs_has_zero_init,
> -    .bdrv_get_allocated_file_size = nfs_get_allocated_file_size,
> -    .bdrv_truncate = nfs_file_truncate,
> -
> -    .bdrv_file_open = nfs_file_open,
> -    .bdrv_close = nfs_file_close,
> -    .bdrv_create = nfs_file_create,
> -
> -    .bdrv_co_readv = nfs_co_readv,
> -    .bdrv_co_writev = nfs_co_writev,
> -    .bdrv_co_flush_to_disk = nfs_co_flush,
> +    .format_name                    = "nfs",
> +    .protocol_name                  = "nfs",
> +
> +    .instance_size                  = sizeof(NFSClient),
> +    .bdrv_needs_filename            = true,
> +    .bdrv_has_zero_init             = nfs_has_zero_init,
> +    .bdrv_get_allocated_file_size   = nfs_get_allocated_file_size,
> +    .bdrv_truncate                  = nfs_file_truncate,
> +
> +    .bdrv_file_open                 = nfs_file_open,
> +    .bdrv_close                     = nfs_file_close,
> +    .bdrv_create                    = nfs_file_create,
> +
> +    .bdrv_co_readv                  = nfs_co_readv,
> +    .bdrv_co_writev                 = nfs_co_writev,
> +    .bdrv_co_flush_to_disk          = nfs_co_flush,
> +
> +    .bdrv_detach_aio_context        = nfs_detach_aio_context,
> +    .bdrv_attach_aio_context        = nfs_attach_aio_context,
>  };
> 
>  static void nfs_block_init(void)

Reviewed-by: Peter Lieven <pl@kamp.de>
Thread overview (35+ messages):
2014-05-08 14:34 [Qemu-devel] [PATCH v3 00/25] dataplane: use QEMU block layer Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 01/25] block: use BlockDriverState AioContext Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 02/25] block: acquire AioContext in bdrv_*_all() Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 03/25] block: acquire AioContext in bdrv_drain_all() Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 04/25] block: add bdrv_set_aio_context() Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 05/25] blkdebug: use BlockDriverState's AioContext Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 06/25] blkverify: implement .bdrv_detach/attach_aio_context() Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 07/25] curl: " Stefan Hajnoczi
2014-05-13 2:06 ` Fam Zheng
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 08/25] gluster: use BlockDriverState's AioContext Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 09/25] iscsi: implement .bdrv_detach/attach_aio_context() Stefan Hajnoczi
2014-05-08 15:47 ` Peter Lieven
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 10/25] nbd: " Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 11/25] nfs: " Stefan Hajnoczi
2014-05-08 15:46 ` Peter Lieven [this message]
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 12/25] qed: use BlockDriverState's AioContext Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 13/25] quorum: implement .bdrv_detach/attach_aio_context() Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 14/25] block/raw-posix: " Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 15/25] block/linux-aio: fix memory and fd leak Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 16/25] block/raw-win32: create one QEMUWin32AIOState per BDRVRawState Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 17/25] block/raw-win32: implement .bdrv_detach/attach_aio_context() Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 18/25] rbd: use BlockDriverState's AioContext Stefan Hajnoczi
2014-05-09 0:04 ` Josh Durgin
2014-05-09 8:16 ` Paolo Bonzini
2014-05-09 8:22 ` Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 19/25] sheepdog: implement .bdrv_detach/attach_aio_context() Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 20/25] ssh: use BlockDriverState's AioContext Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 21/25] vmdk: implement .bdrv_detach/attach_aio_context() Stefan Hajnoczi
2014-05-15 1:34 ` Fam Zheng
2014-05-15 7:33 ` Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 22/25] dataplane: use the QEMU block layer for I/O Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 23/25] dataplane: delete IOQueue since it is no longer used Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 24/25] dataplane: implement async flush Stefan Hajnoczi
2014-05-08 14:34 ` [Qemu-devel] [PATCH v3 25/25] raw-posix: drop raw_get_aio_fd() since it is no longer used Stefan Hajnoczi
2014-06-03 13:33 ` [Qemu-devel] [PATCH v3 00/25] dataplane: use QEMU block layer Stefan Hajnoczi