From: Kevin Wolf <kwolf@redhat.com>
To: qemu-block@nongnu.org
Cc: kwolf@redhat.com, stefanha@redhat.com, qemu-devel@nongnu.org
Subject: [PULL 39/50] jobs: group together API calls under the same job lock
Date: Fri, 7 Oct 2022 12:47:41 +0200 [thread overview]
Message-ID: <20221007104752.141361-40-kwolf@redhat.com> (raw)
In-Reply-To: <20221007104752.141361-1-kwolf@redhat.com>
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Now that the API also offers _locked() functions, take advantage of
them and give the caller control over taking the lock and calling the
_locked() functions directly.
This is especially useful for loops, where it makes no sense to have:

    for (job = job_next(); ...)

with each job_next() call taking the lock internally. Instead we want:

    JOB_LOCK_GUARD();
    for (job = job_next_locked(); ...)
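
As a fuller sketch of the same pattern, mirroring the qmp_query_jobs()
hunk below (loop body elided):

    JOB_LOCK_GUARD();

    for (job = job_next_locked(NULL); job; job = job_next_locked(job)) {
        /* job_mutex is held across the whole iteration */
        ...
    }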
In addition, also protect direct field accesses by either creating a
new critical section or widening the existing ones.
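
For example, following the child_job_drained_poll() hunk in blockjob.c
below, a read of job->busy now sits inside an explicit critical section:

    WITH_JOB_LOCK_GUARD() {
        if (!job->busy || job_is_completed_locked(job)) {
            return false;
        }
    }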
Note: at this stage, job_{lock/unlock} and the job lock guard macros
are still a *nop*.
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Message-Id: <20220926093214.506243-12-eesposit@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
block.c | 17 ++++++++++-------
blockdev.c | 14 ++++++++++----
blockjob.c | 35 ++++++++++++++++++++++-------------
job-qmp.c | 9 ++++++---
monitor/qmp-cmds.c | 7 +++++--
qemu-img.c | 15 ++++++++++-----
6 files changed, 63 insertions(+), 34 deletions(-)
diff --git a/block.c b/block.c
index 9b0fae5c1e..1fbf6b9e69 100644
--- a/block.c
+++ b/block.c
@@ -4981,8 +4981,8 @@ static void bdrv_close(BlockDriverState *bs)
void bdrv_close_all(void)
{
- assert(job_next(NULL) == NULL);
GLOBAL_STATE_CODE();
+ assert(job_next(NULL) == NULL);
/* Drop references from requests still in flight, such as canceled block
* jobs whose AIO context has not been polled yet */
@@ -6168,13 +6168,16 @@ XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp)
}
}
- for (job = block_job_next(NULL); job; job = block_job_next(job)) {
- GSList *el;
+ WITH_JOB_LOCK_GUARD() {
+ for (job = block_job_next_locked(NULL); job;
+ job = block_job_next_locked(job)) {
+ GSList *el;
- xdbg_graph_add_node(gr, job, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_JOB,
- job->job.id);
- for (el = job->nodes; el; el = el->next) {
- xdbg_graph_add_edge(gr, job, (BdrvChild *)el->data);
+ xdbg_graph_add_node(gr, job, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_JOB,
+ job->job.id);
+ for (el = job->nodes; el; el = el->next) {
+ xdbg_graph_add_edge(gr, job, (BdrvChild *)el->data);
+ }
}
}
diff --git a/blockdev.c b/blockdev.c
index 2e941e2979..46090bb0aa 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -150,12 +150,15 @@ void blockdev_mark_auto_del(BlockBackend *blk)
return;
}
- for (job = block_job_next(NULL); job; job = block_job_next(job)) {
+ JOB_LOCK_GUARD();
+
+ for (job = block_job_next_locked(NULL); job;
+ job = block_job_next_locked(job)) {
if (block_job_has_bdrv(job, blk_bs(blk))) {
AioContext *aio_context = job->job.aio_context;
aio_context_acquire(aio_context);
- job_cancel(&job->job, false);
+ job_cancel_locked(&job->job, false);
aio_context_release(aio_context);
}
@@ -3756,7 +3759,10 @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp)
BlockJobInfoList *head = NULL, **tail = &head;
BlockJob *job;
- for (job = block_job_next(NULL); job; job = block_job_next(job)) {
+ JOB_LOCK_GUARD();
+
+ for (job = block_job_next_locked(NULL); job;
+ job = block_job_next_locked(job)) {
BlockJobInfo *value;
AioContext *aio_context;
@@ -3765,7 +3771,7 @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp)
}
aio_context = block_job_get_aio_context(job);
aio_context_acquire(aio_context);
- value = block_job_query(job, errp);
+ value = block_job_query_locked(job, errp);
aio_context_release(aio_context);
if (!value) {
qapi_free_BlockJobInfoList(head);
diff --git a/blockjob.c b/blockjob.c
index 0d59aba439..96fb9d9f73 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -111,8 +111,10 @@ static bool child_job_drained_poll(BdrvChild *c)
/* An inactive or completed job doesn't have any pending requests. Jobs
* with !job->busy are either already paused or have a pause point after
* being reentered, so no job driver code will run before they pause. */
- if (!job->busy || job_is_completed(job)) {
- return false;
+ WITH_JOB_LOCK_GUARD() {
+ if (!job->busy || job_is_completed_locked(job)) {
+ return false;
+ }
}
/* Otherwise, assume that it isn't fully stopped yet, but allow the job to
@@ -475,13 +477,15 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
job->ready_notifier.notify = block_job_event_ready;
job->idle_notifier.notify = block_job_on_idle;
- notifier_list_add(&job->job.on_finalize_cancelled,
- &job->finalize_cancelled_notifier);
- notifier_list_add(&job->job.on_finalize_completed,
- &job->finalize_completed_notifier);
- notifier_list_add(&job->job.on_pending, &job->pending_notifier);
- notifier_list_add(&job->job.on_ready, &job->ready_notifier);
- notifier_list_add(&job->job.on_idle, &job->idle_notifier);
+ WITH_JOB_LOCK_GUARD() {
+ notifier_list_add(&job->job.on_finalize_cancelled,
+ &job->finalize_cancelled_notifier);
+ notifier_list_add(&job->job.on_finalize_completed,
+ &job->finalize_completed_notifier);
+ notifier_list_add(&job->job.on_pending, &job->pending_notifier);
+ notifier_list_add(&job->job.on_ready, &job->ready_notifier);
+ notifier_list_add(&job->job.on_idle, &job->idle_notifier);
+ }
error_setg(&job->blocker, "block device is in use by block job: %s",
job_type_str(&job->job));
@@ -558,10 +562,15 @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
action);
}
if (action == BLOCK_ERROR_ACTION_STOP) {
- if (!job->job.user_paused) {
- job_pause(&job->job);
- /* make the pause user visible, which will be resumed from QMP. */
- job->job.user_paused = true;
+ WITH_JOB_LOCK_GUARD() {
+ if (!job->job.user_paused) {
+ job_pause_locked(&job->job);
+ /*
+ * make the pause user visible, which will be
+ * resumed from QMP.
+ */
+ job->job.user_paused = true;
+ }
}
block_job_iostatus_set_err(job, error);
}
diff --git a/job-qmp.c b/job-qmp.c
index b1c456482a..393d3a5b81 100644
--- a/job-qmp.c
+++ b/job-qmp.c
@@ -164,7 +164,8 @@ void qmp_job_dismiss(const char *id, Error **errp)
aio_context_release(aio_context);
}
-static JobInfo *job_query_single(Job *job, Error **errp)
+/* Called with job_mutex held. */
+static JobInfo *job_query_single_locked(Job *job, Error **errp)
{
JobInfo *info;
uint64_t progress_current;
@@ -194,7 +195,9 @@ JobInfoList *qmp_query_jobs(Error **errp)
JobInfoList *head = NULL, **tail = &head;
Job *job;
- for (job = job_next(NULL); job; job = job_next(job)) {
+ JOB_LOCK_GUARD();
+
+ for (job = job_next_locked(NULL); job; job = job_next_locked(job)) {
JobInfo *value;
AioContext *aio_context;
@@ -203,7 +206,7 @@ JobInfoList *qmp_query_jobs(Error **errp)
}
aio_context = job->aio_context;
aio_context_acquire(aio_context);
- value = job_query_single(job, errp);
+ value = job_query_single_locked(job, errp);
aio_context_release(aio_context);
if (!value) {
qapi_free_JobInfoList(head);
diff --git a/monitor/qmp-cmds.c b/monitor/qmp-cmds.c
index 7314cd813d..81c8fdadf8 100644
--- a/monitor/qmp-cmds.c
+++ b/monitor/qmp-cmds.c
@@ -135,8 +135,11 @@ void qmp_cont(Error **errp)
blk_iostatus_reset(blk);
}
- for (job = block_job_next(NULL); job; job = block_job_next(job)) {
- block_job_iostatus_reset(job);
+ WITH_JOB_LOCK_GUARD() {
+ for (job = block_job_next_locked(NULL); job;
+ job = block_job_next_locked(job)) {
+ block_job_iostatus_reset_locked(job);
+ }
}
/* Continuing after completed migration. Images have been inactivated to
diff --git a/qemu-img.c b/qemu-img.c
index cab9776f42..e0a30b1f4c 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -912,9 +912,11 @@ static void run_block_job(BlockJob *job, Error **errp)
int ret = 0;
aio_context_acquire(aio_context);
- job_ref(&job->job);
+ job_lock();
+ job_ref_locked(&job->job);
do {
float progress = 0.0f;
+ job_unlock();
aio_poll(aio_context, true);
progress_get_snapshot(&job->job.progress, &progress_current,
@@ -923,14 +925,17 @@ static void run_block_job(BlockJob *job, Error **errp)
progress = (float)progress_current / progress_total * 100.f;
}
qemu_progress_print(progress, 0);
- } while (!job_is_ready(&job->job) && !job_is_completed(&job->job));
+ job_lock();
+ } while (!job_is_ready_locked(&job->job) &&
+ !job_is_completed_locked(&job->job));
- if (!job_is_completed(&job->job)) {
- ret = job_complete_sync(&job->job, errp);
+ if (!job_is_completed_locked(&job->job)) {
+ ret = job_complete_sync_locked(&job->job, errp);
} else {
ret = job->job.ret;
}
- job_unref(&job->job);
+ job_unref_locked(&job->job);
+ job_unlock();
aio_context_release(aio_context);
/* publish completion progress only when success */
--
2.37.3