From: Paolo Bonzini <pbonzini@redhat.com>
To: Emanuele Giuseppe Esposito <eesposit@redhat.com>, qemu-block@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>, Hanna Reitz <hreitz@redhat.com>,
Stefan Weil <sw@weilnetz.de>,
Aarushi Mehta <mehta.aaru20@gmail.com>,
Julia Suvorova <jusual@redhat.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
Stefano Garzarella <sgarzare@redhat.com>,
Fam Zheng <fam@euphon.net>,
qemu-devel@nongnu.org
Subject: Re: [PATCH v2 3/3] thread-pool: use ThreadPool from the running thread
Date: Fri, 28 Oct 2022 13:52:07 +0200 [thread overview]
Message-ID: <51318024-109f-fb02-a945-1c11b260dab8@redhat.com> (raw)
In-Reply-To: <20221028071635.3037348-4-eesposit@redhat.com>
On 10/28/22 09:16, Emanuele Giuseppe Esposito wrote:
> Use qemu_get_current_aio_context() where possible, since we always
> submit work to the current thread anyway.
>
> We want to also be sure that the thread submitting the work is
> the same as the one processing the pool, to avoid adding
> synchronization to the pool list.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
> ---
> block/file-posix.c | 21 ++++++++++-----------
> block/file-win32.c | 2 +-
> block/qcow2-threads.c | 2 +-
> util/thread-pool.c | 5 ++++-
> 4 files changed, 16 insertions(+), 14 deletions(-)
>
> diff --git a/block/file-posix.c b/block/file-posix.c
> index 3800dbd222..28f12b08c8 100644
> --- a/block/file-posix.c
> +++ b/block/file-posix.c
> @@ -2044,11 +2044,10 @@ out:
> return result;
> }
>
> -static int coroutine_fn raw_thread_pool_submit(BlockDriverState *bs,
> - ThreadPoolFunc func, void *arg)
> +static int coroutine_fn raw_thread_pool_submit(ThreadPoolFunc func, void *arg)
> {
> /* @bs can be NULL, bdrv_get_aio_context() returns the main context then */
> - ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
> + ThreadPool *pool = aio_get_thread_pool(qemu_get_current_aio_context());
> return thread_pool_submit_co(pool, func, arg);
> }
>
> @@ -2116,7 +2115,7 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
> };
>
> assert(qiov->size == bytes);
> - return raw_thread_pool_submit(bs, handle_aiocb_rw, &acb);
> + return raw_thread_pool_submit(handle_aiocb_rw, &acb);
> }
>
> static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
> @@ -2186,7 +2185,7 @@ static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs)
> return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH);
> }
> #endif
> - return raw_thread_pool_submit(bs, handle_aiocb_flush, &acb);
> + return raw_thread_pool_submit(handle_aiocb_flush, &acb);
> }
>
> static void raw_aio_attach_aio_context(BlockDriverState *bs,
> @@ -2248,7 +2247,7 @@ raw_regular_truncate(BlockDriverState *bs, int fd, int64_t offset,
> },
> };
>
> - return raw_thread_pool_submit(bs, handle_aiocb_truncate, &acb);
> + return raw_thread_pool_submit(handle_aiocb_truncate, &acb);
> }
>
> static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset,
> @@ -2998,7 +2997,7 @@ raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes,
> acb.aio_type |= QEMU_AIO_BLKDEV;
> }
>
> - ret = raw_thread_pool_submit(bs, handle_aiocb_discard, &acb);
> + ret = raw_thread_pool_submit(handle_aiocb_discard, &acb);
> raw_account_discard(s, bytes, ret);
> return ret;
> }
> @@ -3073,7 +3072,7 @@ raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
> handler = handle_aiocb_write_zeroes;
> }
>
> - return raw_thread_pool_submit(bs, handler, &acb);
> + return raw_thread_pool_submit(handler, &acb);
> }
>
> static int coroutine_fn raw_co_pwrite_zeroes(
> @@ -3284,7 +3283,7 @@ static int coroutine_fn raw_co_copy_range_to(BlockDriverState *bs,
> },
> };
>
> - return raw_thread_pool_submit(bs, handle_aiocb_copy_range, &acb);
> + return raw_thread_pool_submit(handle_aiocb_copy_range, &acb);
> }
>
> BlockDriver bdrv_file = {
> @@ -3614,7 +3613,7 @@ hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
> struct sg_io_hdr *io_hdr = buf;
> if (io_hdr->cmdp[0] == PERSISTENT_RESERVE_OUT ||
> io_hdr->cmdp[0] == PERSISTENT_RESERVE_IN) {
> - return pr_manager_execute(s->pr_mgr, bdrv_get_aio_context(bs),
> + return pr_manager_execute(s->pr_mgr, qemu_get_current_aio_context(),
> s->fd, io_hdr);
> }
> }
> @@ -3630,7 +3629,7 @@ hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
> },
> };
>
> - return raw_thread_pool_submit(bs, handle_aiocb_ioctl, &acb);
> + return raw_thread_pool_submit(handle_aiocb_ioctl, &acb);
> }
> #endif /* linux */
>
> diff --git a/block/file-win32.c b/block/file-win32.c
> index ec9d64d0e4..3d7f59a592 100644
> --- a/block/file-win32.c
> +++ b/block/file-win32.c
> @@ -167,7 +167,7 @@ static BlockAIOCB *paio_submit(BlockDriverState *bs, HANDLE hfile,
> acb->aio_offset = offset;
>
> trace_file_paio_submit(acb, opaque, offset, count, type);
> - pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
> + pool = aio_get_thread_pool(qemu_get_current_aio_context());
> return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque);
> }
>
> diff --git a/block/qcow2-threads.c b/block/qcow2-threads.c
> index 1914baf456..9e370acbb3 100644
> --- a/block/qcow2-threads.c
> +++ b/block/qcow2-threads.c
> @@ -42,7 +42,7 @@ qcow2_co_process(BlockDriverState *bs, ThreadPoolFunc *func, void *arg)
> {
> int ret;
> BDRVQcow2State *s = bs->opaque;
> - ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
> + ThreadPool *pool = aio_get_thread_pool(qemu_get_current_aio_context());
>
> qemu_co_mutex_lock(&s->lock);
> while (s->nb_threads >= QCOW2_MAX_THREADS) {
> diff --git a/util/thread-pool.c b/util/thread-pool.c
> index 31113b5860..0e26687e97 100644
> --- a/util/thread-pool.c
> +++ b/util/thread-pool.c
> @@ -48,7 +48,7 @@ struct ThreadPoolElement {
> /* Access to this list is protected by lock. */
> QTAILQ_ENTRY(ThreadPoolElement) reqs;
>
> - /* Access to this list is protected by the global mutex. */
> + /* This list is only written by the thread pool's mother thread. */
> QLIST_ENTRY(ThreadPoolElement) all;
> };
>
> @@ -251,6 +251,9 @@ BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
> {
> ThreadPoolElement *req;
>
> + /* Assert that the thread submitting work is the same running the pool */
> + assert(pool->ctx == qemu_get_current_aio_context());
> +
> req = qemu_aio_get(&thread_pool_aiocb_info, NULL, cb, opaque);
> req->func = func;
> req->arg = arg;
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Paolo
prev parent reply other threads:[~2022-10-28 11:52 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-10-28 7:16 [PATCH v2 0/3] AioContext removal: LinuxAioState and ThreadPool Emanuele Giuseppe Esposito
2022-10-28 7:16 ` [PATCH v2 1/3] linux-aio: use LinuxAioState from the running thread Emanuele Giuseppe Esposito
2022-10-28 11:51 ` Paolo Bonzini
2022-10-28 12:16 ` Emanuele Giuseppe Esposito
2022-10-28 7:16 ` [PATCH v2 2/3] io_uring: use LuringState " Emanuele Giuseppe Esposito
2022-10-28 7:16 ` [PATCH v2 3/3] thread-pool: use ThreadPool " Emanuele Giuseppe Esposito
2022-10-28 11:52 ` Paolo Bonzini [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=51318024-109f-fb02-a945-1c11b260dab8@redhat.com \
--to=pbonzini@redhat.com \
--cc=eesposit@redhat.com \
--cc=fam@euphon.net \
--cc=hreitz@redhat.com \
--cc=jusual@redhat.com \
--cc=kwolf@redhat.com \
--cc=mehta.aaru20@gmail.com \
--cc=qemu-block@nongnu.org \
--cc=qemu-devel@nongnu.org \
--cc=sgarzare@redhat.com \
--cc=stefanha@redhat.com \
--cc=sw@weilnetz.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).