qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Kevin Wolf <kwolf@redhat.com>
To: qemu-block@nongnu.org
Cc: kwolf@redhat.com, stefanha@redhat.com, qemu-devel@nongnu.org
Subject: [PULL 36/50] jobs: add job lock in find_* functions
Date: Fri,  7 Oct 2022 12:47:38 +0200	[thread overview]
Message-ID: <20221007104752.141361-37-kwolf@redhat.com> (raw)
In-Reply-To: <20221007104752.141361-1-kwolf@redhat.com>

From: Emanuele Giuseppe Esposito <eesposit@redhat.com>

Both blockdev.c and job-qmp.c have TOCTOU (time-of-check to time-of-use)
race conditions, because they first search for the job and then perform
an action on it.
Therefore, we need to do the search + action under the same
job mutex critical section.

Note: at this stage, job_{lock/unlock} and the job lock guard macros
are no-ops.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20220926093214.506243-9-eesposit@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 blockdev.c | 67 +++++++++++++++++++++++++++++++++++++-----------------
 job-qmp.c  | 57 ++++++++++++++++++++++++++++++++--------------
 2 files changed, 86 insertions(+), 38 deletions(-)

diff --git a/blockdev.c b/blockdev.c
index 392d9476e6..2e941e2979 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -3313,9 +3313,13 @@ out:
     aio_context_release(aio_context);
 }
 
-/* Get a block job using its ID and acquire its AioContext */
-static BlockJob *find_block_job(const char *id, AioContext **aio_context,
-                                Error **errp)
+/*
+ * Get a block job using its ID and acquire its AioContext.
+ * Called with job_mutex held.
+ */
+static BlockJob *find_block_job_locked(const char *id,
+                                       AioContext **aio_context,
+                                       Error **errp)
 {
     BlockJob *job;
 
@@ -3323,7 +3327,7 @@ static BlockJob *find_block_job(const char *id, AioContext **aio_context,
 
     *aio_context = NULL;
 
-    job = block_job_get(id);
+    job = block_job_get_locked(id);
 
     if (!job) {
         error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE,
@@ -3340,13 +3344,16 @@ static BlockJob *find_block_job(const char *id, AioContext **aio_context,
 void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp)
 {
     AioContext *aio_context;
-    BlockJob *job = find_block_job(device, &aio_context, errp);
+    BlockJob *job;
+
+    JOB_LOCK_GUARD();
+    job = find_block_job_locked(device, &aio_context, errp);
 
     if (!job) {
         return;
     }
 
-    block_job_set_speed(job, speed, errp);
+    block_job_set_speed_locked(job, speed, errp);
     aio_context_release(aio_context);
 }
 
@@ -3354,7 +3361,10 @@ void qmp_block_job_cancel(const char *device,
                           bool has_force, bool force, Error **errp)
 {
     AioContext *aio_context;
-    BlockJob *job = find_block_job(device, &aio_context, errp);
+    BlockJob *job;
+
+    JOB_LOCK_GUARD();
+    job = find_block_job_locked(device, &aio_context, errp);
 
     if (!job) {
         return;
@@ -3364,14 +3374,14 @@ void qmp_block_job_cancel(const char *device,
         force = false;
     }
 
-    if (job_user_paused(&job->job) && !force) {
+    if (job_user_paused_locked(&job->job) && !force) {
         error_setg(errp, "The block job for device '%s' is currently paused",
                    device);
         goto out;
     }
 
     trace_qmp_block_job_cancel(job);
-    job_user_cancel(&job->job, force, errp);
+    job_user_cancel_locked(&job->job, force, errp);
 out:
     aio_context_release(aio_context);
 }
@@ -3379,57 +3389,69 @@ out:
 void qmp_block_job_pause(const char *device, Error **errp)
 {
     AioContext *aio_context;
-    BlockJob *job = find_block_job(device, &aio_context, errp);
+    BlockJob *job;
+
+    JOB_LOCK_GUARD();
+    job = find_block_job_locked(device, &aio_context, errp);
 
     if (!job) {
         return;
     }
 
     trace_qmp_block_job_pause(job);
-    job_user_pause(&job->job, errp);
+    job_user_pause_locked(&job->job, errp);
     aio_context_release(aio_context);
 }
 
 void qmp_block_job_resume(const char *device, Error **errp)
 {
     AioContext *aio_context;
-    BlockJob *job = find_block_job(device, &aio_context, errp);
+    BlockJob *job;
+
+    JOB_LOCK_GUARD();
+    job = find_block_job_locked(device, &aio_context, errp);
 
     if (!job) {
         return;
     }
 
     trace_qmp_block_job_resume(job);
-    job_user_resume(&job->job, errp);
+    job_user_resume_locked(&job->job, errp);
     aio_context_release(aio_context);
 }
 
 void qmp_block_job_complete(const char *device, Error **errp)
 {
     AioContext *aio_context;
-    BlockJob *job = find_block_job(device, &aio_context, errp);
+    BlockJob *job;
+
+    JOB_LOCK_GUARD();
+    job = find_block_job_locked(device, &aio_context, errp);
 
     if (!job) {
         return;
     }
 
     trace_qmp_block_job_complete(job);
-    job_complete(&job->job, errp);
+    job_complete_locked(&job->job, errp);
     aio_context_release(aio_context);
 }
 
 void qmp_block_job_finalize(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    BlockJob *job = find_block_job(id, &aio_context, errp);
+    BlockJob *job;
+
+    JOB_LOCK_GUARD();
+    job = find_block_job_locked(id, &aio_context, errp);
 
     if (!job) {
         return;
     }
 
     trace_qmp_block_job_finalize(job);
-    job_ref(&job->job);
-    job_finalize(&job->job, errp);
+    job_ref_locked(&job->job);
+    job_finalize_locked(&job->job, errp);
 
     /*
      * Job's context might have changed via job_finalize (and job_txn_apply
@@ -3437,23 +3459,26 @@ void qmp_block_job_finalize(const char *id, Error **errp)
      * one.
      */
     aio_context = block_job_get_aio_context(job);
-    job_unref(&job->job);
+    job_unref_locked(&job->job);
     aio_context_release(aio_context);
 }
 
 void qmp_block_job_dismiss(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    BlockJob *bjob = find_block_job(id, &aio_context, errp);
+    BlockJob *bjob;
     Job *job;
 
+    JOB_LOCK_GUARD();
+    bjob = find_block_job_locked(id, &aio_context, errp);
+
     if (!bjob) {
         return;
     }
 
     trace_qmp_block_job_dismiss(bjob);
     job = &bjob->job;
-    job_dismiss(&job, errp);
+    job_dismiss_locked(&job, errp);
     aio_context_release(aio_context);
 }
 
diff --git a/job-qmp.c b/job-qmp.c
index 829a28aa70..b1c456482a 100644
--- a/job-qmp.c
+++ b/job-qmp.c
@@ -29,14 +29,19 @@
 #include "qapi/error.h"
 #include "trace/trace-root.h"
 
-/* Get a job using its ID and acquire its AioContext */
-static Job *find_job(const char *id, AioContext **aio_context, Error **errp)
+/*
+ * Get a job using its ID and acquire its AioContext.
+ * Called with job_mutex held.
+ */
+static Job *find_job_locked(const char *id,
+                            AioContext **aio_context,
+                            Error **errp)
 {
     Job *job;
 
     *aio_context = NULL;
 
-    job = job_get(id);
+    job = job_get_locked(id);
     if (!job) {
         error_setg(errp, "Job not found");
         return NULL;
@@ -51,71 +56,86 @@ static Job *find_job(const char *id, AioContext **aio_context, Error **errp)
 void qmp_job_cancel(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    Job *job = find_job(id, &aio_context, errp);
+    Job *job;
+
+    JOB_LOCK_GUARD();
+    job = find_job_locked(id, &aio_context, errp);
 
     if (!job) {
         return;
     }
 
     trace_qmp_job_cancel(job);
-    job_user_cancel(job, true, errp);
+    job_user_cancel_locked(job, true, errp);
     aio_context_release(aio_context);
 }
 
 void qmp_job_pause(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    Job *job = find_job(id, &aio_context, errp);
+    Job *job;
+
+    JOB_LOCK_GUARD();
+    job = find_job_locked(id, &aio_context, errp);
 
     if (!job) {
         return;
     }
 
     trace_qmp_job_pause(job);
-    job_user_pause(job, errp);
+    job_user_pause_locked(job, errp);
     aio_context_release(aio_context);
 }
 
 void qmp_job_resume(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    Job *job = find_job(id, &aio_context, errp);
+    Job *job;
+
+    JOB_LOCK_GUARD();
+    job = find_job_locked(id, &aio_context, errp);
 
     if (!job) {
         return;
     }
 
     trace_qmp_job_resume(job);
-    job_user_resume(job, errp);
+    job_user_resume_locked(job, errp);
     aio_context_release(aio_context);
 }
 
 void qmp_job_complete(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    Job *job = find_job(id, &aio_context, errp);
+    Job *job;
+
+    JOB_LOCK_GUARD();
+    job = find_job_locked(id, &aio_context, errp);
 
     if (!job) {
         return;
     }
 
     trace_qmp_job_complete(job);
-    job_complete(job, errp);
+    job_complete_locked(job, errp);
     aio_context_release(aio_context);
 }
 
 void qmp_job_finalize(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    Job *job = find_job(id, &aio_context, errp);
+    Job *job;
+
+    JOB_LOCK_GUARD();
+    job = find_job_locked(id, &aio_context, errp);
 
     if (!job) {
         return;
     }
 
     trace_qmp_job_finalize(job);
-    job_ref(job);
-    job_finalize(job, errp);
+    job_ref_locked(job);
+    job_finalize_locked(job, errp);
 
     /*
      * Job's context might have changed via job_finalize (and job_txn_apply
@@ -123,21 +143,24 @@ void qmp_job_finalize(const char *id, Error **errp)
      * one.
      */
     aio_context = job->aio_context;
-    job_unref(job);
+    job_unref_locked(job);
     aio_context_release(aio_context);
 }
 
 void qmp_job_dismiss(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    Job *job = find_job(id, &aio_context, errp);
+    Job *job;
+
+    JOB_LOCK_GUARD();
+    job = find_job_locked(id, &aio_context, errp);
 
     if (!job) {
         return;
     }
 
     trace_qmp_job_dismiss(job);
-    job_dismiss(&job, errp);
+    job_dismiss_locked(&job, errp);
     aio_context_release(aio_context);
 }
 
-- 
2.37.3



  parent reply	other threads:[~2022-10-07 11:33 UTC|newest]

Thread overview: 52+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-10-07 10:47 [PULL 00/50] Block layer patches Kevin Wolf
2022-10-07 10:47 ` [PULL 01/50] Revert "qapi: fix examples of blockdev-add with qcow2" Kevin Wolf
2022-10-07 10:47 ` [PULL 02/50] coroutine: Drop coroutine_fn annotation from qemu_coroutine_self() Kevin Wolf
2022-10-07 10:47 ` [PULL 03/50] block/nvme: separate nvme_get_free_req cases for coroutine/non-coroutine context Kevin Wolf
2022-10-07 10:47 ` [PULL 04/50] block: add missing coroutine_fn annotations Kevin Wolf
2022-10-07 10:47 ` [PULL 05/50] qcow2: remove incorrect " Kevin Wolf
2022-10-07 10:47 ` [PULL 06/50] nbd: " Kevin Wolf
2022-10-07 10:47 ` [PULL 07/50] coroutine: " Kevin Wolf
2022-10-07 10:47 ` [PULL 08/50] blkverify: add missing " Kevin Wolf
2022-10-07 10:47 ` [PULL 09/50] file-posix: " Kevin Wolf
2022-10-07 10:47 ` [PULL 10/50] iscsi: " Kevin Wolf
2022-10-07 10:47 ` [PULL 11/50] nbd: " Kevin Wolf
2022-10-07 10:47 ` [PULL 12/50] nfs: " Kevin Wolf
2022-10-07 10:47 ` [PULL 13/50] nvme: " Kevin Wolf
2022-10-07 10:47 ` [PULL 14/50] parallels: " Kevin Wolf
2022-10-07 10:47 ` [PULL 15/50] qcow2: " Kevin Wolf
2022-10-07 10:47 ` [PULL 16/50] copy-before-write: " Kevin Wolf
2022-10-07 10:47 ` [PULL 17/50] curl: " Kevin Wolf
2022-10-07 10:47 ` [PULL 18/50] qed: " Kevin Wolf
2022-10-07 10:47 ` [PULL 19/50] quorum: " Kevin Wolf
2022-10-07 10:47 ` [PULL 20/50] throttle: " Kevin Wolf
2022-10-07 10:47 ` [PULL 21/50] vmdk: " Kevin Wolf
2022-10-07 10:47 ` [PULL 22/50] job: " Kevin Wolf
2022-10-07 10:47 ` [PULL 23/50] coroutine-lock: " Kevin Wolf
2022-10-07 10:47 ` [PULL 24/50] raw-format: " Kevin Wolf
2022-10-07 10:47 ` [PULL 25/50] 9p: " Kevin Wolf
2022-10-07 10:47 ` [PULL 26/50] migration: " Kevin Wolf
2022-10-07 10:47 ` [PULL 27/50] test-coroutine: " Kevin Wolf
2022-10-07 10:47 ` [PULL 28/50] quorum: Remove unnecessary forward declaration Kevin Wolf
2022-10-07 10:47 ` [PULL 29/50] job.c: make job_mutex and job_lock/unlock() public Kevin Wolf
2022-10-07 10:47 ` [PULL 30/50] job.h: categorize fields in struct Job Kevin Wolf
2022-10-07 10:47 ` [PULL 31/50] job.c: API functions not used outside should be static Kevin Wolf
2022-10-07 10:47 ` [PULL 32/50] aio-wait.h: introduce AIO_WAIT_WHILE_UNLOCKED Kevin Wolf
2022-10-07 10:47 ` [PULL 33/50] job.c: add job_lock/unlock while keeping job.h intact Kevin Wolf
2022-10-07 10:47 ` [PULL 34/50] job: move and update comments from blockjob.c Kevin Wolf
2022-10-07 10:47 ` [PULL 35/50] blockjob: introduce block_job _locked() APIs Kevin Wolf
2022-10-07 10:47 ` Kevin Wolf [this message]
2022-10-07 10:47 ` [PULL 37/50] jobs: use job locks also in the unit tests Kevin Wolf
2022-10-07 10:47 ` [PULL 38/50] block/mirror.c: use of job helpers in drivers Kevin Wolf
2022-10-07 10:47 ` [PULL 39/50] jobs: group together API calls under the same job lock Kevin Wolf
2022-10-07 10:47 ` [PULL 40/50] job: detect change of aiocontext within job coroutine Kevin Wolf
2022-10-07 10:47 ` [PULL 41/50] jobs: protect job.aio_context with BQL and job_mutex Kevin Wolf
2022-10-07 10:47 ` [PULL 42/50] blockjob.h: categorize fields in struct BlockJob Kevin Wolf
2022-10-07 10:47 ` [PULL 43/50] blockjob: rename notifier callbacks as _locked Kevin Wolf
2022-10-07 10:47 ` [PULL 44/50] blockjob: protect iostatus field in BlockJob struct Kevin Wolf
2022-10-07 10:47 ` [PULL 45/50] job.h: categorize JobDriver callbacks that need the AioContext lock Kevin Wolf
2022-10-07 10:47 ` [PULL 46/50] job.c: enable job lock/unlock and remove Aiocontext locks Kevin Wolf
2022-10-07 10:47 ` [PULL 47/50] block_job_query: remove atomic read Kevin Wolf
2022-10-07 10:47 ` [PULL 48/50] blockjob: remove unused functions Kevin Wolf
2022-10-07 10:47 ` [PULL 49/50] job: " Kevin Wolf
2022-10-07 10:47 ` [PULL 50/50] file-posix: Remove unused s->discard_zeroes Kevin Wolf
2022-10-12 21:25 ` [PULL 00/50] Block layer patches Stefan Hajnoczi

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20221007104752.141361-37-kwolf@redhat.com \
    --to=kwolf@redhat.com \
    --cc=qemu-block@nongnu.org \
    --cc=qemu-devel@nongnu.org \
    --cc=stefanha@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).