From: Yuan Liu <yuan1.liu@intel.com>
To: peterx@redhat.com, farosas@suse.de
Cc: qemu-devel@nongnu.org, hao.xiang@bytedance.com,
bryan.zhang@bytedance.com, yuan1.liu@intel.com,
nanhai.zou@intel.com
Subject: [PATCH v4 2/8] migration/multifd: add get_iov_count in the multifd method
Date: Mon, 4 Mar 2024 22:00:22 +0800
Message-ID: <20240304140028.1590649-3-yuan1.liu@intel.com>
In-Reply-To: <20240304140028.1590649-1-yuan1.liu@intel.com>
Add a new function, get_iov_count, to query the number of IOVs
required by a specified multifd method.

Different multifd methods may require different numbers of IOVs. With
zlib and zstd streaming compression, all pages are compressed into a
single data block, so only one IOV is needed to send that block. With
no compression, each IOV carries one page, so the number of IOVs
required equals the number of pages.
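
For illustration only (not part of this patch): a minimal standalone
sketch of how the counts work out. The 512 KiB packet size and 4 KiB
page size are assumptions matching the current defaults, and the helper
names merely mirror the hooks added in the diff below.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed defaults: 512 KiB multifd packet, 4 KiB target pages. */
    #define MULTIFD_PACKET_SIZE (512 * 1024)
    #define TARGET_PAGE_SIZE    4096

    /* Mirrors nocomp_get_iov_count(): one IOV per page. */
    static uint32_t nocomp_iovs(uint32_t page_count)
    {
        return page_count;
    }

    /* Mirrors zlib/zstd get_iov_count(): one IOV for the whole block. */
    static uint32_t stream_iovs(uint32_t page_count)
    {
        (void)page_count;
        return 1;
    }

    int main(void)
    {
        uint32_t page_count = MULTIFD_PACKET_SIZE / TARGET_PAGE_SIZE;

        /* One extra IOV in both cases for the packet header. */
        printf("nocomp:    %u IOVs\n", 1 + nocomp_iovs(page_count)); /* 129 */
        printf("zlib/zstd: %u IOVs\n", 1 + stream_iovs(page_count)); /* 2 */
        return 0;
    }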
Signed-off-by: Yuan Liu <yuan1.liu@intel.com>
Reviewed-by: Nanhai Zou <nanhai.zou@intel.com>
---
migration/multifd-zlib.c | 18 +++++++++++++++++-
migration/multifd-zstd.c | 18 +++++++++++++++++-
migration/multifd.c | 24 +++++++++++++++++++++---
migration/multifd.h | 2 ++
4 files changed, 57 insertions(+), 5 deletions(-)
diff --git a/migration/multifd-zlib.c b/migration/multifd-zlib.c
index 012e3bdea1..35187f2aff 100644
--- a/migration/multifd-zlib.c
+++ b/migration/multifd-zlib.c
@@ -313,13 +313,29 @@ static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp)
return 0;
}
+/**
+ * zlib_get_iov_count: get the count of IOVs
+ *
+ * For zlib streaming compression, all pages are compressed into a single
+ * data block, so one IOV is required to send this block.
+ *
+ * Returns the count of IOVs
+ *
+ * @page_count: the maximum count of pages processed by multifd
+ */
+static uint32_t zlib_get_iov_count(uint32_t page_count)
+{
+ return 1;
+}
+
static MultiFDMethods multifd_zlib_ops = {
.send_setup = zlib_send_setup,
.send_cleanup = zlib_send_cleanup,
.send_prepare = zlib_send_prepare,
.recv_setup = zlib_recv_setup,
.recv_cleanup = zlib_recv_cleanup,
- .recv_pages = zlib_recv_pages
+ .recv_pages = zlib_recv_pages,
+ .get_iov_count = zlib_get_iov_count
};
static void multifd_zlib_register(void)
diff --git a/migration/multifd-zstd.c b/migration/multifd-zstd.c
index dc8fe43e94..25ed1add2a 100644
--- a/migration/multifd-zstd.c
+++ b/migration/multifd-zstd.c
@@ -304,13 +304,29 @@ static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp)
return 0;
}
+/**
+ * zstd_get_iov_count: get the count of IOVs
+ *
+ * For zstd streaming compression, all pages are compressed into a single
+ * data block, so one IOV is required to send this block.
+ *
+ * Returns the count of IOVs
+ *
+ * @page_count: the maximum count of pages processed by multifd
+ */
+static uint32_t zstd_get_iov_count(uint32_t page_count)
+{
+ return 1;
+}
+
static MultiFDMethods multifd_zstd_ops = {
.send_setup = zstd_send_setup,
.send_cleanup = zstd_send_cleanup,
.send_prepare = zstd_send_prepare,
.recv_setup = zstd_recv_setup,
.recv_cleanup = zstd_recv_cleanup,
- .recv_pages = zstd_recv_pages
+ .recv_pages = zstd_recv_pages,
+ .get_iov_count = zstd_get_iov_count
};
static void multifd_zstd_register(void)
diff --git a/migration/multifd.c b/migration/multifd.c
index adfe8c9a0a..787402247e 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -209,13 +209,29 @@ static int nocomp_recv_pages(MultiFDRecvParams *p, Error **errp)
return qio_channel_readv_all(p->c, p->iov, p->normal_num, errp);
}
+/**
+ * nocomp_get_iov_count: get the count of IOVs
+ *
+ * For no compression, the count of IOVs required is the same as the count
+ * of pages.
+ *
+ * Returns the count of IOVs
+ *
+ * @page_count: the maximum count of pages processed by multifd
+ */
+static uint32_t nocomp_get_iov_count(uint32_t page_count)
+{
+ return page_count;
+}
+
static MultiFDMethods multifd_nocomp_ops = {
.send_setup = nocomp_send_setup,
.send_cleanup = nocomp_send_cleanup,
.send_prepare = nocomp_send_prepare,
.recv_setup = nocomp_recv_setup,
.recv_cleanup = nocomp_recv_cleanup,
- .recv_pages = nocomp_recv_pages
+ .recv_pages = nocomp_recv_pages,
+ .get_iov_count = nocomp_get_iov_count
};
static MultiFDMethods *multifd_ops[MULTIFD_COMPRESSION__MAX] = {
@@ -998,6 +1014,8 @@ bool multifd_send_setup(void)
Error *local_err = NULL;
int thread_count, ret = 0;
uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
+ /* We need one extra place for the packet header */
+ uint32_t iov_count = 1;
uint8_t i;
if (!migrate_multifd()) {
@@ -1012,6 +1030,7 @@ bool multifd_send_setup(void)
qemu_sem_init(&multifd_send_state->channels_ready, 0);
qatomic_set(&multifd_send_state->exiting, 0);
multifd_send_state->ops = multifd_ops[migrate_multifd_compression()];
+ iov_count += multifd_send_state->ops->get_iov_count(page_count);
for (i = 0; i < thread_count; i++) {
MultiFDSendParams *p = &multifd_send_state->params[i];
@@ -1026,8 +1045,7 @@ bool multifd_send_setup(void)
p->packet->magic = cpu_to_be32(MULTIFD_MAGIC);
p->packet->version = cpu_to_be32(MULTIFD_VERSION);
p->name = g_strdup_printf("multifdsend_%d", i);
- /* We need one extra place for the packet header */
- p->iov = g_new0(struct iovec, page_count + 1);
+ p->iov = g_new0(struct iovec, iov_count);
p->page_size = qemu_target_page_size();
p->page_count = page_count;
p->write_flags = 0;
diff --git a/migration/multifd.h b/migration/multifd.h
index 8a1cad0996..d82495c508 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -201,6 +201,8 @@ typedef struct {
void (*recv_cleanup)(MultiFDRecvParams *p);
/* Read all pages */
int (*recv_pages)(MultiFDRecvParams *p, Error **errp);
+ /* Get the count of required IOVs */
+ uint32_t (*get_iov_count)(uint32_t page_count);
} MultiFDMethods;
void multifd_register_ops(int method, MultiFDMethods *ops);
--
2.39.3