From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: qemu-block@nongnu.org, jcody@redhat.com
Subject: [Qemu-devel] [PATCH 2/3] nfs: do not use aio_context_acquire/release
Date: Fri, 17 Feb 2017 20:40:27 +0100
Message-ID: <20170217194028.8398-3-pbonzini@redhat.com>
In-Reply-To: <20170217194028.8398-1-pbonzini@redhat.com>

Now that all bottom halves and callbacks take the AioContext lock
themselves, we can migrate some users away from it to a dedicated
QemuMutex or CoMutex.

Protect the libnfs calls with a QemuMutex.  Callbacks are invoked
via bottom halves, so the mutex does not even have to be dropped
around callback invocations.
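
For illustration only (this sketch is not part of the patch; the
ExampleTask/example_* names are invented, while aio_bh_schedule_oneshot()
and aio_co_wake() are the real QEMU APIs used by block/nfs.c), the
callback side of the pattern looks roughly like this: the completion
callback only records the result and schedules a bottom half, and the
coroutine is woken up outside the lock, which is why the mutex never has
to be dropped around callback invocation:

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "block/aio.h"

typedef struct ExampleTask {
    AioContext *ctx;    /* context that runs the waiting coroutine */
    Coroutine *co;      /* coroutine blocked in qemu_coroutine_yield() */
    int ret;
    bool complete;
} ExampleTask;

/* Bottom half: runs later in the AioContext, with no driver lock held. */
static void example_bh_cb(void *opaque)
{
    ExampleTask *task = opaque;

    task->complete = true;
    aio_co_wake(task->co);
}

/* Completion callback, invoked from the library's service function with
 * the driver's QemuMutex held.  It only stores the result and defers the
 * wakeup to a bottom half, so the caller keeps the mutex held throughout. */
static void example_cb(int ret, void *private_data)
{
    ExampleTask *task = private_data;

    task->ret = ret;
    aio_bh_schedule_oneshot(task->ctx, example_bh_cb, task);
}

In the patch below, nfs_co_generic_cb and nfs_co_generic_bh_cb play
these two roles.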
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 block/nfs.c | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)

diff --git a/block/nfs.c b/block/nfs.c
index 08b43dd..4eddcee 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -54,6 +54,7 @@ typedef struct NFSClient {
     int events;
     bool has_zero_init;
     AioContext *aio_context;
+    QemuMutex mutex;
    blkcnt_t st_blocks;
     bool cache_used;
     NFSServer *server;
@@ -191,6 +192,7 @@ static void nfs_parse_filename(const char *filename, QDict *options,
 static void nfs_process_read(void *arg);
 static void nfs_process_write(void *arg);

+/* Called with QemuMutex held. */
 static void nfs_set_events(NFSClient *client)
 {
     int ev = nfs_which_events(client->context);
@@ -209,20 +211,20 @@ static void nfs_process_read(void *arg)
 {
     NFSClient *client = arg;

-    aio_context_acquire(client->aio_context);
+    qemu_mutex_lock(&client->mutex);
     nfs_service(client->context, POLLIN);
     nfs_set_events(client);
-    aio_context_release(client->aio_context);
+    qemu_mutex_unlock(&client->mutex);
 }

 static void nfs_process_write(void *arg)
 {
     NFSClient *client = arg;

-    aio_context_acquire(client->aio_context);
+    qemu_mutex_lock(&client->mutex);
     nfs_service(client->context, POLLOUT);
     nfs_set_events(client);
-    aio_context_release(client->aio_context);
+    qemu_mutex_unlock(&client->mutex);
 }

 static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
@@ -242,6 +244,7 @@ static void nfs_co_generic_bh_cb(void *opaque)
     aio_co_wake(task->co);
 }

+/* Called (via nfs_service) with QemuMutex held. */
 static void
 nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,
                   void *private_data)
@@ -273,6 +276,7 @@ static int coroutine_fn nfs_co_readv(BlockDriverState *bs,
     nfs_co_init_task(bs, &task);
     task.iov = iov;

+    qemu_mutex_lock(&client->mutex);
     if (nfs_pread_async(client->context, client->fh,
                         sector_num * BDRV_SECTOR_SIZE,
                         nb_sectors * BDRV_SECTOR_SIZE,
@@ -281,6 +285,7 @@ static int coroutine_fn nfs_co_readv(BlockDriverState *bs,
     }

     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -314,6 +319,7 @@ static int coroutine_fn nfs_co_writev(BlockDriverState *bs,

     qemu_iovec_to_buf(iov, 0, buf, nb_sectors * BDRV_SECTOR_SIZE);

+    qemu_mutex_lock(&client->mutex);
     if (nfs_pwrite_async(client->context, client->fh,
                          sector_num * BDRV_SECTOR_SIZE,
                          nb_sectors * BDRV_SECTOR_SIZE,
@@ -323,6 +329,7 @@ static int coroutine_fn nfs_co_writev(BlockDriverState *bs,
     }

     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -343,12 +350,14 @@ static int coroutine_fn nfs_co_flush(BlockDriverState *bs)

     nfs_co_init_task(bs, &task);

+    qemu_mutex_lock(&client->mutex);
     if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
                         &task) != 0) {
         return -ENOMEM;
     }

     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -434,6 +443,7 @@ static void nfs_file_close(BlockDriverState *bs)
 {
     NFSClient *client = bs->opaque;
     nfs_client_close(client);
+    qemu_mutex_destroy(&client->mutex);
 }

 static NFSServer *nfs_config(QDict *options, Error **errp)
@@ -641,6 +651,7 @@ static int nfs_file_open(BlockDriverState *bs, QDict *options, int flags,
     if (ret < 0) {
         return ret;
     }
+    qemu_mutex_init(&client->mutex);
     bs->total_sectors = ret;
     ret = 0;
     return ret;
@@ -696,6 +707,7 @@ static int nfs_has_zero_init(BlockDriverState *bs)
     return client->has_zero_init;
 }

+/* Called (via nfs_service) with QemuMutex held. */
 static void
 nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data,
                                void *private_data)
--
2.9.3