From: John Snow <jsnow@redhat.com>
To: Paolo Bonzini <pbonzini@redhat.com>, qemu-devel@nongnu.org
Subject: Re: [Qemu-devel] [PATCH 05/11] blockjob: separate monitor and blockjob APIs
Date: Mon, 24 Apr 2017 19:24:28 -0400 [thread overview]
Message-ID: <165bf811-fe3e-ad05-09f6-ef0de1b43e98@redhat.com> (raw)
In-Reply-To: <20170419144219.20371-6-pbonzini@redhat.com>
On 04/19/2017 10:42 AM, Paolo Bonzini wrote:
> We have two different headers for block job operations, blockjob.h
> and blockjob_int.h. The former contains APIs called by the monitor,
> the latter contains APIs called by the block job drivers and the
> block layer itself.
>
> Keep the two APIs separate in the blockjob.c file too. This will
> be useful when transitioning away from the AioContext lock, because
> there will be locking policies for the two categories, too---the
> monitor will have to call new block_job_lock/unlock APIs, while blockjob
> APIs will take care of this for the users.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
> ---
> v1->v2: move blockjob_create in the blockjob_int.h category,
> rewrite commit message [John]
grazie ~
>
> blockjob.c | 390 ++++++++++++++++++++++++++++++++-----------------------------
> 1 file changed, 205 insertions(+), 185 deletions(-)
>
> diff --git a/blockjob.c b/blockjob.c
> index 85ad610..140e176 100644
> --- a/blockjob.c
> +++ b/blockjob.c
> @@ -55,6 +55,21 @@ struct BlockJobTxn {
>
> static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);
>
> +/*
> + * The block job API is composed of two categories of functions.
> + *
> + * The first includes functions used by the monitor. The monitor is
> + * peculiar in that it accesses the block job list with block_job_get, and
> + * therefore needs consistency across block_job_get and the actual operation
> + * (e.g. block_job_set_speed). The consistency is achieved with
> + * aio_context_acquire/release. These functions are declared in blockjob.h.
> + *
> + * The second includes functions used by the block job drivers and sometimes
> + * by the core block layer. These do not care about locking, because the
> + * whole coroutine runs under the AioContext lock, and are declared in
> + * blockjob_int.h.
> + */
> +
> BlockJob *block_job_next(BlockJob *job)
> {
> if (!job) {
> @@ -216,90 +231,6 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
> return 0;
> }
>
> -void *block_job_create(const char *job_id, const BlockJobDriver *driver,
> - BlockDriverState *bs, uint64_t perm,
> - uint64_t shared_perm, int64_t speed, int flags,
> - BlockCompletionFunc *cb, void *opaque, Error **errp)
> -{
> - BlockBackend *blk;
> - BlockJob *job;
> - int ret;
> -
> - if (bs->job) {
> - error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
> - return NULL;
> - }
> -
> - if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
> - job_id = bdrv_get_device_name(bs);
> - if (!*job_id) {
> - error_setg(errp, "An explicit job ID is required for this node");
> - return NULL;
> - }
> - }
> -
> - if (job_id) {
> - if (flags & BLOCK_JOB_INTERNAL) {
> - error_setg(errp, "Cannot specify job ID for internal block job");
> - return NULL;
> - }
> -
> - if (!id_wellformed(job_id)) {
> - error_setg(errp, "Invalid job ID '%s'", job_id);
> - return NULL;
> - }
> -
> - if (block_job_get(job_id)) {
> - error_setg(errp, "Job ID '%s' already in use", job_id);
> - return NULL;
> - }
> - }
> -
> - blk = blk_new(perm, shared_perm);
> - ret = blk_insert_bs(blk, bs, errp);
> - if (ret < 0) {
> - blk_unref(blk);
> - return NULL;
> - }
> -
> - job = g_malloc0(driver->instance_size);
> - job->driver = driver;
> - job->id = g_strdup(job_id);
> - job->blk = blk;
> - job->cb = cb;
> - job->opaque = opaque;
> - job->busy = false;
> - job->paused = true;
> - job->pause_count = 1;
> - job->refcnt = 1;
> -
> - error_setg(&job->blocker, "block device is in use by block job: %s",
> - BlockJobType_lookup[driver->job_type]);
> - block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
> - bs->job = job;
> -
> - blk_set_dev_ops(blk, &block_job_dev_ops, job);
> - bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);
> -
> - QLIST_INSERT_HEAD(&block_jobs, job, job_list);
> -
> - blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
> - block_job_detach_aio_context, job);
> -
> - /* Only set speed when necessary to avoid NotSupported error */
> - if (speed != 0) {
> - Error *local_err = NULL;
> -
> - block_job_set_speed(job, speed, &local_err);
> - if (local_err) {
> - block_job_unref(job);
> - error_propagate(errp, local_err);
> - return NULL;
> - }
> - }
> - return job;
> -}
> -
> bool block_job_is_internal(BlockJob *job)
> {
> return (job->id == NULL);
> @@ -334,11 +265,6 @@ void block_job_start(BlockJob *job)
> bdrv_coroutine_enter(blk_bs(job->blk), job->co);
> }
>
> -void block_job_early_fail(BlockJob *job)
> -{
> - block_job_unref(job);
> -}
> -
> static void block_job_completed_single(BlockJob *job)
> {
> if (!job->ret) {
> @@ -440,21 +366,6 @@ static void block_job_completed_txn_success(BlockJob *job)
> }
> }
>
> -void block_job_completed(BlockJob *job, int ret)
> -{
> - assert(blk_bs(job->blk)->job == job);
> - assert(!job->completed);
> - job->completed = true;
> - job->ret = ret;
> - if (!job->txn) {
> - block_job_completed_single(job);
> - } else if (ret < 0 || block_job_is_cancelled(job)) {
> - block_job_completed_txn_abort(job);
> - } else {
> - block_job_completed_txn_success(job);
> - }
> -}
> -
> void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
> {
> Error *local_err = NULL;
> @@ -492,44 +403,11 @@ void block_job_user_pause(BlockJob *job)
> block_job_pause(job);
> }
>
> -static bool block_job_should_pause(BlockJob *job)
> -{
> - return job->pause_count > 0;
> -}
> -
> bool block_job_user_paused(BlockJob *job)
> {
> return job->user_paused;
> }
>
> -void coroutine_fn block_job_pause_point(BlockJob *job)
> -{
> - assert(job && block_job_started(job));
> -
> - if (!block_job_should_pause(job)) {
> - return;
> - }
> - if (block_job_is_cancelled(job)) {
> - return;
> - }
> -
> - if (job->driver->pause) {
> - job->driver->pause(job);
> - }
> -
> - if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
> - job->paused = true;
> - job->busy = false;
> - qemu_coroutine_yield(); /* wait for block_job_resume() */
> - job->busy = true;
> - job->paused = false;
> - }
> -
> - if (job->driver->resume) {
> - job->driver->resume(job);
> - }
> -}
> -
> void block_job_user_resume(BlockJob *job)
> {
> if (job && job->user_paused && job->pause_count > 0) {
> @@ -538,13 +416,6 @@ void block_job_user_resume(BlockJob *job)
> }
> }
>
> -void block_job_enter(BlockJob *job)
> -{
> - if (job->co && !job->busy) {
> - bdrv_coroutine_enter(blk_bs(job->blk), job->co);
> - }
> -}
> -
> void block_job_cancel(BlockJob *job)
> {
> if (block_job_started(job)) {
> @@ -556,11 +427,6 @@ void block_job_cancel(BlockJob *job)
> }
> }
>
> -bool block_job_is_cancelled(BlockJob *job)
> -{
> - return job->cancelled;
> -}
> -
> void block_job_iostatus_reset(BlockJob *job)
> {
> job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
> @@ -628,42 +494,6 @@ int block_job_complete_sync(BlockJob *job, Error **errp)
> return block_job_finish_sync(job, &block_job_complete, errp);
> }
>
> -void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
> -{
> - assert(job->busy);
> -
> - /* Check cancellation *before* setting busy = false, too! */
> - if (block_job_is_cancelled(job)) {
> - return;
> - }
> -
> - job->busy = false;
> - if (!block_job_should_pause(job)) {
> - co_aio_sleep_ns(blk_get_aio_context(job->blk), type, ns);
> - }
> - job->busy = true;
> -
> - block_job_pause_point(job);
> -}
> -
> -void block_job_yield(BlockJob *job)
> -{
> - assert(job->busy);
> -
> - /* Check cancellation *before* setting busy = false, too! */
> - if (block_job_is_cancelled(job)) {
> - return;
> - }
> -
> - job->busy = false;
> - if (!block_job_should_pause(job)) {
> - qemu_coroutine_yield();
> - }
> - job->busy = true;
> -
> - block_job_pause_point(job);
> -}
> -
> BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
> {
> BlockJobInfo *info;
> @@ -723,6 +553,95 @@ static void block_job_event_completed(BlockJob *job, const char *msg)
> &error_abort);
> }
>
> +/*
> + * API for block job drivers and the block layer. These functions are
> + * declared in blockjob_int.h.
> + */
> +
> +void *block_job_create(const char *job_id, const BlockJobDriver *driver,
> + BlockDriverState *bs, uint64_t perm,
> + uint64_t shared_perm, int64_t speed, int flags,
> + BlockCompletionFunc *cb, void *opaque, Error **errp)
> +{
> + BlockBackend *blk;
> + BlockJob *job;
> + int ret;
> +
> + if (bs->job) {
> + error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
> + return NULL;
> + }
> +
> + if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
> + job_id = bdrv_get_device_name(bs);
> + if (!*job_id) {
> + error_setg(errp, "An explicit job ID is required for this node");
> + return NULL;
> + }
> + }
> +
> + if (job_id) {
> + if (flags & BLOCK_JOB_INTERNAL) {
> + error_setg(errp, "Cannot specify job ID for internal block job");
> + return NULL;
> + }
> +
> + if (!id_wellformed(job_id)) {
> + error_setg(errp, "Invalid job ID '%s'", job_id);
> + return NULL;
> + }
> +
> + if (block_job_get(job_id)) {
> + error_setg(errp, "Job ID '%s' already in use", job_id);
> + return NULL;
> + }
> + }
> +
> + blk = blk_new(perm, shared_perm);
> + ret = blk_insert_bs(blk, bs, errp);
> + if (ret < 0) {
> + blk_unref(blk);
> + return NULL;
> + }
> +
> + job = g_malloc0(driver->instance_size);
> + job->driver = driver;
> + job->id = g_strdup(job_id);
> + job->blk = blk;
> + job->cb = cb;
> + job->opaque = opaque;
> + job->busy = false;
> + job->paused = true;
> + job->pause_count = 1;
> + job->refcnt = 1;
> +
> + error_setg(&job->blocker, "block device is in use by block job: %s",
> + BlockJobType_lookup[driver->job_type]);
> + block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
> + bs->job = job;
> +
> + blk_set_dev_ops(blk, &block_job_dev_ops, job);
> + bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);
> +
> + QLIST_INSERT_HEAD(&block_jobs, job, job_list);
> +
> + blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
> + block_job_detach_aio_context, job);
> +
> + /* Only set speed when necessary to avoid NotSupported error */
> + if (speed != 0) {
> + Error *local_err = NULL;
> +
> + block_job_set_speed(job, speed, &local_err);
> + if (local_err) {
> + block_job_unref(job);
> + error_propagate(errp, local_err);
> + return NULL;
> + }
> + }
> + return job;
> +}
> +
> void block_job_pause_all(void)
> {
> BlockJob *job = NULL;
> @@ -735,6 +654,59 @@ void block_job_pause_all(void)
> }
> }
>
> +void block_job_early_fail(BlockJob *job)
> +{
> + block_job_unref(job);
> +}
> +
> +void block_job_completed(BlockJob *job, int ret)
> +{
> + assert(blk_bs(job->blk)->job == job);
> + assert(!job->completed);
> + job->completed = true;
> + job->ret = ret;
> + if (!job->txn) {
> + block_job_completed_single(job);
> + } else if (ret < 0 || block_job_is_cancelled(job)) {
> + block_job_completed_txn_abort(job);
> + } else {
> + block_job_completed_txn_success(job);
> + }
> +}
> +
> +static bool block_job_should_pause(BlockJob *job)
> +{
> + return job->pause_count > 0;
> +}
> +
> +void coroutine_fn block_job_pause_point(BlockJob *job)
> +{
> + assert(job && block_job_started(job));
> +
> + if (!block_job_should_pause(job)) {
> + return;
> + }
> + if (block_job_is_cancelled(job)) {
> + return;
> + }
> +
> + if (job->driver->pause) {
> + job->driver->pause(job);
> + }
> +
> + if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
> + job->paused = true;
> + job->busy = false;
> + qemu_coroutine_yield(); /* wait for block_job_resume() */
> + job->busy = true;
> + job->paused = false;
> + }
> +
> + if (job->driver->resume) {
> + job->driver->resume(job);
> + }
> +}
> +
> void block_job_resume_all(void)
> {
> BlockJob *job = NULL;
> @@ -747,6 +719,54 @@ void block_job_resume_all(void)
> }
> }
>
> +void block_job_enter(BlockJob *job)
> +{
> + if (job->co && !job->busy) {
> + bdrv_coroutine_enter(blk_bs(job->blk), job->co);
> + }
> +}
> +
> +bool block_job_is_cancelled(BlockJob *job)
> +{
> + return job->cancelled;
> +}
> +
> +void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
> +{
> + assert(job->busy);
> +
> + /* Check cancellation *before* setting busy = false, too! */
> + if (block_job_is_cancelled(job)) {
> + return;
> + }
> +
> + job->busy = false;
> + if (!block_job_should_pause(job)) {
> + co_aio_sleep_ns(blk_get_aio_context(job->blk), type, ns);
> + }
> + job->busy = true;
> +
> + block_job_pause_point(job);
> +}
> +
> +void block_job_yield(BlockJob *job)
> +{
> + assert(job->busy);
> +
> + /* Check cancellation *before* setting busy = false, too! */
> + if (block_job_is_cancelled(job)) {
> + return;
> + }
> +
> + job->busy = false;
> + if (!block_job_should_pause(job)) {
> + qemu_coroutine_yield();
> + }
> + job->busy = true;
> +
> + block_job_pause_point(job);
> +}
> +
> void block_job_event_ready(BlockJob *job)
> {
> job->ready = true;
>
--
—js
next prev parent reply other threads:[~2017-04-24 23:24 UTC|newest]
Thread overview: 30+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-04-19 14:42 [Qemu-devel] [PATCH for 2.10 00/11] Preparation for block job mutex Paolo Bonzini
2017-04-19 14:42 ` [Qemu-devel] [PATCH 01/11] blockjob: remove unnecessary check Paolo Bonzini
2017-04-19 15:48 ` Eric Blake
2017-04-19 16:12 ` Paolo Bonzini
2017-04-19 16:16 ` Eric Blake
2017-04-24 23:19 ` John Snow
2017-04-26 8:36 ` Paolo Bonzini
2017-04-19 14:42 ` [Qemu-devel] [PATCH 02/11] blockjob: remove iostatus_reset callback Paolo Bonzini
2017-04-19 14:42 ` [Qemu-devel] [PATCH 03/11] blockjob: introduce block_job_early_fail Paolo Bonzini
2017-04-19 14:42 ` [Qemu-devel] [PATCH 04/11] blockjob: introduce block_job_pause/resume_all Paolo Bonzini
2017-04-19 14:42 ` [Qemu-devel] [PATCH 05/11] blockjob: separate monitor and blockjob APIs Paolo Bonzini
2017-04-24 23:24 ` John Snow [this message]
2017-05-04 10:50 ` Stefan Hajnoczi
2017-04-19 14:42 ` [Qemu-devel] [PATCH 06/11] blockjob: move iostatus reset inside block_job_user_resume Paolo Bonzini
2017-04-19 14:42 ` [Qemu-devel] [PATCH 07/11] blockjob: introduce block_job_cancel_async, check iostatus invariants Paolo Bonzini
2017-04-26 20:25 ` John Snow
2017-04-27 8:20 ` Paolo Bonzini
2017-05-04 10:53 ` Stefan Hajnoczi
2017-04-19 14:42 ` [Qemu-devel] [PATCH 08/11] blockjob: group BlockJob transaction functions together Paolo Bonzini
2017-04-24 23:46 ` John Snow
2017-05-04 10:53 ` Stefan Hajnoczi
2017-04-19 14:42 ` [Qemu-devel] [PATCH 09/11] blockjob: strengthen a bit test-blockjob-txn Paolo Bonzini
2017-04-19 14:42 ` [Qemu-devel] [PATCH 10/11] blockjob: reorganize block_job_completed_txn_abort Paolo Bonzini
2017-05-04 10:59 ` Stefan Hajnoczi
2017-04-19 14:42 ` [Qemu-devel] [PATCH 11/11] blockjob: use deferred_to_main_loop to indicate the coroutine has ended Paolo Bonzini
2017-05-04 11:07 ` Stefan Hajnoczi
2017-05-04 11:07 ` Stefan Hajnoczi
-- strict thread matches above, loose matches on Subject: below --
2017-05-08 14:12 [Qemu-devel] [PATCH v2 00/11] Preparation for block job mutex Paolo Bonzini
2017-05-08 14:13 ` [Qemu-devel] [PATCH 05/11] blockjob: separate monitor and blockjob APIs Paolo Bonzini
2017-05-09 17:06 ` Jeff Cody
2017-05-09 17:09 ` Paolo Bonzini
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=165bf811-fe3e-ad05-09f6-ef0de1b43e98@redhat.com \
--to=jsnow@redhat.com \
--cc=pbonzini@redhat.com \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).