From: "Richard W.M. Jones" <rjones@redhat.com>
To: qemu-devel@nongnu.org
Cc: qemu-block@nongnu.org, vsementsov@yandex-team.ru,
eblake@redhat.com, hreitz@redhat.com, kwolf@redhat.com
Subject: [PATCH nbd 4/4] nbd: Enable multi-conn using round-robin
Date: Thu, 9 Mar 2023 11:39:46 +0000
Message-ID: <20230309113946.1528247-5-rjones@redhat.com>
In-Reply-To: <20230309113946.1528247-1-rjones@redhat.com>

Enable NBD multi-conn by spreading operations across multiple
connections.

(XXX) This uses a naive round-robin approach that could be improved.
For example, we could track how many requests are in flight on each
connection and assign new operations to the connection with the
fewest, or we could estimate the load on each connection from the
size of its outstanding requests (see the sketch below). This
implementation does neither.
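
One possible least-loaded variant, sketched here for discussion only.
It assumes the in_flight counter moves into NBDConnState along with
the rest of the per-connection state split out in patch 2/4; the
unlocked reads are racy, but that is acceptable for a placement
heuristic:

    /* Hypothetical alternative to choose_connection(): pick the
     * connection with the fewest requests in flight.  The racy,
     * lock-free reads can only misplace a request, never corrupt
     * state. */
    static NBDConnState *choose_least_loaded(BDRVNBDState *s)
    {
        size_t i, best = 0;

        for (i = 1; i < s->multi_conn; i++) {
            if (qatomic_read(&s->conns[i]->in_flight) <
                qatomic_read(&s->conns[best]->in_flight)) {
                best = i;
            }
        }
        return s->conns[best];
    }
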
Signed-off-by: Richard W.M. Jones <rjones@redhat.com>
---
 block/nbd.c | 67 +++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 49 insertions(+), 18 deletions(-)
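
(Aside, not part of the patch: the round-robin counter in
choose_connection() below relies on QEMU's qatomic_fetch_inc().  For
readers outside the QEMU tree, the same wrap-around-safe pattern in
standard C11 atomics, sketched for illustration only:)

    #include <stdatomic.h>
    #include <stddef.h>

    /* Pick the next index in [0, n) on each call.  The counter wraps
     * at SIZE_MAX; the brief modulo bias at wrap-around is harmless
     * for load spreading. */
    static size_t next_index(size_t n)
    {
        static atomic_size_t next;
        return atomic_fetch_add_explicit(&next, 1,
                                         memory_order_relaxed) % n;
    }
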
diff --git a/block/nbd.c b/block/nbd.c
index 4c99c3f865..df32ba67ed 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -1232,6 +1232,26 @@ static int coroutine_fn nbd_co_request(NBDConnState *cs, NBDRequest *request,
     return ret ? ret : request_ret;
 }
 
+/*
+ * If multi-conn, choose a connection for this operation.
+ */
+static NBDConnState *choose_connection(BDRVNBDState *s)
+{
+    static size_t next;
+    size_t i;
+
+    if (s->multi_conn <= 1) {
+        return s->conns[0];
+    }
+
+    /* XXX Stupid simple round robin. */
+    i = qatomic_fetch_inc(&next);
+    i %= s->multi_conn;
+
+    assert(s->conns[i] != NULL);
+    return s->conns[i];
+}
+
 static int coroutine_fn nbd_client_co_preadv(BlockDriverState *bs, int64_t offset,
                                              int64_t bytes, QEMUIOVector *qiov,
                                              BdrvRequestFlags flags)
@@ -1244,7 +1264,7 @@ static int coroutine_fn nbd_client_co_preadv(BlockDriverState *bs, int64_t offse
         .from = offset,
         .len = bytes,
     };
-    NBDConnState * const cs = s->conns[0];
+    NBDConnState * const cs = choose_connection(s);
 
     assert(bytes <= NBD_MAX_BUFFER_SIZE);
 
@@ -1301,7 +1321,7 @@ static int coroutine_fn nbd_client_co_pwritev(BlockDriverState *bs, int64_t offs
         .from = offset,
         .len = bytes,
     };
-    NBDConnState * const cs = s->conns[0];
+    NBDConnState * const cs = choose_connection(s);
 
     assert(!(cs->info.flags & NBD_FLAG_READ_ONLY));
     if (flags & BDRV_REQ_FUA) {
@@ -1326,7 +1346,7 @@ static int coroutine_fn nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_
         .from = offset,
         .len = bytes, /* .len is uint32_t actually */
     };
-    NBDConnState * const cs = s->conns[0];
+    NBDConnState * const cs = choose_connection(s);
 
     assert(bytes <= UINT32_MAX); /* rely on max_pwrite_zeroes */
 
@@ -1357,7 +1377,13 @@ static int coroutine_fn nbd_client_co_flush(BlockDriverState *bs)
 {
     BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
     NBDRequest request = { .type = NBD_CMD_FLUSH };
-    NBDConnState * const cs = s->conns[0];
+
+    /*
+     * Multi-conn (if used) guarantees that flushing on any connection
+     * flushes caches on all connections, so we can perform this
+     * operation on any.
+     */
+    NBDConnState * const cs = choose_connection(s);
 
     if (!(cs->info.flags & NBD_FLAG_SEND_FLUSH)) {
         return 0;
@@ -1378,7 +1404,7 @@ static int coroutine_fn nbd_client_co_pdiscard(BlockDriverState *bs, int64_t off
         .from = offset,
         .len = bytes, /* len is uint32_t */
     };
-    NBDConnState * const cs = s->conns[0];
+    NBDConnState * const cs = choose_connection(s);
 
     assert(bytes <= UINT32_MAX); /* rely on max_pdiscard */
 
@@ -1398,7 +1424,7 @@ static int coroutine_fn nbd_client_co_block_status(
     NBDExtent extent = { 0 };
     BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
     Error *local_err = NULL;
-    NBDConnState * const cs = s->conns[0];
+    NBDConnState * const cs = choose_connection(s);
 
     NBDRequest request = {
         .type = NBD_CMD_BLOCK_STATUS,
@@ -2027,7 +2053,7 @@ static int coroutine_fn nbd_co_flush(BlockDriverState *bs)
 static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
 {
     BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
-    NBDConnState * const cs = s->conns[0];
+    NBDConnState * const cs = choose_connection(s);
 
     uint32_t min = cs->info.min_block;
     uint32_t max = MIN_NON_ZERO(NBD_MAX_BUFFER_SIZE, cs->info.max_block);
@@ -2085,7 +2111,7 @@ static int coroutine_fn nbd_co_truncate(BlockDriverState *bs, int64_t offset,
                                         BdrvRequestFlags flags, Error **errp)
 {
     BDRVNBDState *s = bs->opaque;
-    NBDConnState * const cs = s->conns[0];
+    NBDConnState * const cs = choose_connection(s);
 
     if (offset != cs->info.size && exact) {
         error_setg(errp, "Cannot resize NBD nodes");
@@ -2168,24 +2194,29 @@ static const char *const nbd_strong_runtime_opts[] = {
 static void nbd_cancel_in_flight(BlockDriverState *bs)
 {
     BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
-    NBDConnState * const cs = s->conns[0];
+    size_t i;
+    NBDConnState *cs;
 
-    reconnect_delay_timer_del(cs);
+    for (i = 0; i < s->multi_conn; ++i) {
+        cs = s->conns[i];
 
-    qemu_mutex_lock(&cs->requests_lock);
-    if (cs->state == NBD_CLIENT_CONNECTING_WAIT) {
-        cs->state = NBD_CLIENT_CONNECTING_NOWAIT;
+        reconnect_delay_timer_del(cs);
+
+        qemu_mutex_lock(&cs->requests_lock);
+        if (cs->state == NBD_CLIENT_CONNECTING_WAIT) {
+            cs->state = NBD_CLIENT_CONNECTING_NOWAIT;
+        }
+        qemu_mutex_unlock(&cs->requests_lock);
+
+        nbd_co_establish_connection_cancel(cs->conn);
     }
-    qemu_mutex_unlock(&cs->requests_lock);
-
-    nbd_co_establish_connection_cancel(cs->conn);
 }
 
 static void nbd_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
 {
     BDRVNBDState *s = bs->opaque;
-    NBDConnState * const cs = s->conns[0];
+    NBDConnState * const cs = choose_connection(s);
 
     /* The open_timer is used only during nbd_open() */
     assert(!cs->open_timer);
@@ -2209,7 +2240,7 @@ static void nbd_attach_aio_context(BlockDriverState *bs,
 static void nbd_detach_aio_context(BlockDriverState *bs)
 {
     BDRVNBDState *s = bs->opaque;
-    NBDConnState * const cs = s->conns[0];
+    NBDConnState * const cs = choose_connection(s);
 
     assert(!cs->open_timer);
     assert(!cs->reconnect_delay_timer);
--
2.39.2