From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
To: qemu-block@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>, Hanna Reitz <hreitz@redhat.com>,
	John Snow <jsnow@redhat.com>, Paolo Bonzini <pbonzini@redhat.com>,
	Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>,
	Stefan Hajnoczi <stefanha@redhat.com>, Fam Zheng <fam@euphon.net>,
	Eric Blake <eblake@redhat.com>, Cleber Rosa <crosa@redhat.com>,
	qemu-devel@nongnu.org,
	Emanuele Giuseppe Esposito <eesposit@redhat.com>
Subject: [PATCH 02/20] graph-lock: introduce BdrvGraphRWlock structure
Date: Wed, 16 Nov 2022 08:48:32 -0500
Message-ID: <20221116134850.3051419-3-eesposit@redhat.com>
In-Reply-To: <20221116134850.3051419-1-eesposit@redhat.com>

Introduce BdrvGraphRWlock as a wrapper around the graph lock's
per-AioContext state (the reader counter and the list entry), so that
struct AioContext only needs to carry an opaque pointer to it instead
of the fields themselves.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
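Note for reviewers, not meant to be applied: the reader-side usage is
unchanged by this refactoring. A coroutine still brackets any graph
traversal with bdrv_graph_co_rdlock() / bdrv_graph_co_rdunlock(); those
calls now update the BdrvGraphRWlock hanging off the current AioContext
rather than counters embedded in AioContext itself. A minimal sketch,
where example_graph_reader() and example_traverse_graph() are
hypothetical placeholders:

/* Hypothetical reader-side coroutine; only the two lock calls are real API. */
static void coroutine_fn example_graph_reader(void *opaque)
{
    /*
     * Increment this AioContext's bdrv_graph->reader_count
     * (the main thread is excluded and returns immediately).
     */
    bdrv_graph_co_rdlock();

    example_traverse_graph(opaque);   /* placeholder for a graph traversal */

    /*
     * Decrement the counter again; a waiting writer recomputes the
     * total over all AioContexts with reader_count().
     */
    bdrv_graph_co_rdunlock();
}

Keeping one counter per AioContext (rather than a single shared atomic)
means each reader touches only its own cacheline; the writer sums all
counters under aio_context_list_lock, as reader_count() already does.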
 block/graph-lock.c         | 59 ++++++++++++++++++++++++++------------
 include/block/aio.h        | 12 ++++----
 include/block/graph-lock.h |  1 +
 3 files changed, 48 insertions(+), 24 deletions(-)

diff --git a/block/graph-lock.c b/block/graph-lock.c
index b608a89d7c..c3c6eeedad 100644
--- a/block/graph-lock.c
+++ b/block/graph-lock.c
@@ -44,12 +44,23 @@ static uint32_t orphaned_reader_count;
 /* Queue of readers waiting for the writer to finish */
 static CoQueue reader_queue;
 
+struct BdrvGraphRWlock {
+    /* How many readers are currently reading the graph. */
+    uint32_t reader_count;
+
+    /*
+     * List of BdrvGraphRWlock kept in graph-lock.c
+     * Protected by aio_context_list_lock
+     */
+    QTAILQ_ENTRY(BdrvGraphRWlock) next_aio;
+};
+
 /*
- * List of AioContext. This list ensures that each AioContext
+ * List of BdrvGraphRWlock. This list ensures that each BdrvGraphRWlock
  * modifies only its own counter and never reads or writes the others,
  * improving performance by avoiding cacheline bounces.
  */
-static QTAILQ_HEAD(, AioContext) aio_context_list =
+static QTAILQ_HEAD(, BdrvGraphRWlock) aio_context_list =
     QTAILQ_HEAD_INITIALIZER(aio_context_list);
 
 static void __attribute__((__constructor__)) bdrv_init_graph_lock(void)
@@ -60,29 +71,31 @@ static void __attribute__((__constructor__)) bdrv_init_graph_lock(void)
 
 void register_aiocontext(AioContext *ctx)
 {
+    ctx->bdrv_graph = g_new0(BdrvGraphRWlock, 1);
     QEMU_LOCK_GUARD(&aio_context_list_lock);
-    assert(ctx->reader_count == 0);
-    QTAILQ_INSERT_TAIL(&aio_context_list, ctx, next_aio);
+    assert(ctx->bdrv_graph->reader_count == 0);
+    QTAILQ_INSERT_TAIL(&aio_context_list, ctx->bdrv_graph, next_aio);
 }
 
 void unregister_aiocontext(AioContext *ctx)
 {
     QEMU_LOCK_GUARD(&aio_context_list_lock);
-    orphaned_reader_count += ctx->reader_count;
-    QTAILQ_REMOVE(&aio_context_list, ctx, next_aio);
+    orphaned_reader_count += ctx->bdrv_graph->reader_count;
+    QTAILQ_REMOVE(&aio_context_list, ctx->bdrv_graph, next_aio);
+    g_free(ctx->bdrv_graph);
 }
 
 static uint32_t reader_count(void)
 {
-    AioContext *ctx;
+    BdrvGraphRWlock *bdrv_graph;
     uint32_t rd;
 
     QEMU_LOCK_GUARD(&aio_context_list_lock);
 
     /* rd can temporarily be negative, but the total will *always* be >= 0 */
     rd = orphaned_reader_count;
-    QTAILQ_FOREACH(ctx, &aio_context_list, next_aio) {
-        rd += qatomic_read(&ctx->reader_count);
+    QTAILQ_FOREACH(bdrv_graph, &aio_context_list, next_aio) {
+        rd += qatomic_read(&bdrv_graph->reader_count);
     }
 
     /* shouldn't overflow unless there are 2^31 readers */
@@ -138,12 +151,17 @@ void bdrv_graph_wrunlock(void)
 
 void coroutine_fn bdrv_graph_co_rdlock(void)
 {
-    AioContext *aiocontext;
-    aiocontext = qemu_get_current_aio_context();
+    BdrvGraphRWlock *bdrv_graph;
+    bdrv_graph = qemu_get_current_aio_context()->bdrv_graph;
+
+    /* Do not lock if in main thread */
+    if (qemu_in_main_thread()) {
+        return;
+    }
 
     for (;;) {
-        qatomic_set(&aiocontext->reader_count,
-                    aiocontext->reader_count + 1);
+        qatomic_set(&bdrv_graph->reader_count,
+                    bdrv_graph->reader_count + 1);
         /* make sure writer sees reader_count before we check has_writer */
         smp_mb();
 
@@ -192,7 +210,7 @@ void coroutine_fn bdrv_graph_co_rdlock(void)
             }
 
             /* slow path where reader sleeps */
-            aiocontext->reader_count--;
+            bdrv_graph->reader_count--;
             aio_wait_kick();
             qemu_co_queue_wait(&reader_queue, &aio_context_list_lock);
         }
@@ -201,11 +219,16 @@ void coroutine_fn bdrv_graph_co_rdlock(void)
 
 void coroutine_fn bdrv_graph_co_rdunlock(void)
 {
-    AioContext *aiocontext;
-    aiocontext = qemu_get_current_aio_context();
+    BdrvGraphRWlock *bdrv_graph;
+    bdrv_graph = qemu_get_current_aio_context()->bdrv_graph;
+
+    /* Do not lock if in main thread */
+    if (qemu_in_main_thread()) {
+        return;
+    }
 
-    qatomic_store_release(&aiocontext->reader_count,
-                          aiocontext->reader_count - 1);
+    qatomic_store_release(&bdrv_graph->reader_count,
+                          bdrv_graph->reader_count - 1);
     /* make sure writer sees reader_count before we check has_writer */
     smp_mb();
 
diff --git a/include/block/aio.h b/include/block/aio.h
index 8e64f81d01..0f65a3cc9e 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -22,6 +22,7 @@
 #include "qemu/event_notifier.h"
 #include "qemu/thread.h"
 #include "qemu/timer.h"
+#include "block/graph-lock.h"
 
 typedef struct BlockAIOCB BlockAIOCB;
 typedef void BlockCompletionFunc(void *opaque, int ret);
@@ -127,14 +128,13 @@ struct AioContext {
     /* Used by AioContext users to protect from multi-threaded access.  */
     QemuRecMutex lock;
 
-    /* How many readers in this AioContext are currently reading the graph. */
-    uint32_t reader_count;
-
     /*
-     * List of AioContext kept in graph-lock.c
-     * Protected by aio_context_list_lock
+     * Keep track of readers and writers of the block layer graph.
+     * This is essential to avoid adding or removing nodes and
+     * edges from the block graph while some other thread is
+     * traversing it.
      */
-    QTAILQ_ENTRY(AioContext) next_aio;
+    BdrvGraphRWlock *bdrv_graph;
 
     /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
     AioHandlerList aio_handlers;
diff --git a/include/block/graph-lock.h b/include/block/graph-lock.h
index f975312bb6..fc806aefa3 100644
--- a/include/block/graph-lock.h
+++ b/include/block/graph-lock.h
@@ -53,6 +53,7 @@
  * reader count. In that case we transfer the count to a global shared counter
  * so that the writer is always aware of all readers.
  */
+typedef struct BdrvGraphRWlock BdrvGraphRWlock;
 
 /*
  * register_aiocontext:
-- 
2.31.1


