From: Hao Xiang <hao.xiang@linux.dev>
To: marcandre.lureau@redhat.com, peterx@redhat.com, farosas@suse.de,
armbru@redhat.com, lvivier@redhat.com, qemu-devel@nongnu.org
Cc: Hao Xiang <hao.xiang@linux.dev>
Subject: [PATCH v4 12/14] migration/multifd: Enable set packet size migration option.
Date: Thu, 25 Apr 2024 02:21:15 +0000 [thread overview]
Message-ID: <20240425022117.4035031-13-hao.xiang@linux.dev> (raw)
In-Reply-To: <20240425022117.4035031-1-hao.xiang@linux.dev>
During live migration, if the latency between the sender and receiver
is high and the bandwidth is also high (a long and fat pipe), using a
bigger packet size can help reduce the total migration time. In addition,
Intel DSA offloading performs better with large batch tasks. Providing an
option to set the packet size is useful for performance tuning.
Set the option:
migrate_set_parameter multifd-packet-size 4190208
Signed-off-by: Hao Xiang <hao.xiang@linux.dev>
---
migration/migration-hmp-cmds.c | 7 +++++++
migration/multifd-zlib.c | 6 ++++--
migration/multifd-zstd.c | 6 ++++--
migration/multifd.c | 6 ++++--
migration/multifd.h | 3 ---
5 files changed, 19 insertions(+), 9 deletions(-)
diff --git a/migration/migration-hmp-cmds.c b/migration/migration-hmp-cmds.c
index 7e9bb278c9..053ad0283a 100644
--- a/migration/migration-hmp-cmds.c
+++ b/migration/migration-hmp-cmds.c
@@ -338,6 +338,9 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict)
monitor_printf(mon, "%s: %s\n",
MigrationParameter_str(MIGRATION_PARAMETER_BLOCK_INCREMENTAL),
params->block_incremental ? "on" : "off");
+ monitor_printf(mon, "%s: %" PRIu64 "\n",
+ MigrationParameter_str(MIGRATION_PARAMETER_MULTIFD_PACKET_SIZE),
+ params->multifd_packet_size);
monitor_printf(mon, "%s: %u\n",
MigrationParameter_str(MIGRATION_PARAMETER_MULTIFD_CHANNELS),
params->multifd_channels);
@@ -630,6 +633,10 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict)
p->multifd_dsa_accel->type = QTYPE_QSTRING;
visit_type_str(v, param, &p->multifd_dsa_accel->u.s, &err);
break;
+ case MIGRATION_PARAMETER_MULTIFD_PACKET_SIZE:
+ p->has_multifd_packet_size = true;
+ visit_type_size(v, param, &p->multifd_packet_size, &err);
+ break;
case MIGRATION_PARAMETER_MULTIFD_CHANNELS:
p->has_multifd_channels = true;
visit_type_uint8(v, param, &p->multifd_channels, &err);
diff --git a/migration/multifd-zlib.c b/migration/multifd-zlib.c
index 737a9645d2..2880d35841 100644
--- a/migration/multifd-zlib.c
+++ b/migration/multifd-zlib.c
@@ -49,6 +49,7 @@ static int zlib_send_setup(MultiFDSendParams *p, Error **errp)
struct zlib_data *z = g_new0(struct zlib_data, 1);
z_stream *zs = &z->zs;
const char *err_msg;
+ uint64_t multifd_packet_size = migrate_multifd_packet_size();
zs->zalloc = Z_NULL;
zs->zfree = Z_NULL;
@@ -58,7 +59,7 @@ static int zlib_send_setup(MultiFDSendParams *p, Error **errp)
goto err_free_z;
}
/* This is the maximum size of the compressed buffer */
- z->zbuff_len = compressBound(MULTIFD_PACKET_SIZE);
+ z->zbuff_len = compressBound(multifd_packet_size);
z->zbuff = g_try_malloc(z->zbuff_len);
if (!z->zbuff) {
err_msg = "out of memory for zbuff";
@@ -193,6 +194,7 @@ out:
*/
static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp)
{
+ uint64_t multifd_packet_size = migrate_multifd_packet_size();
struct zlib_data *z = g_new0(struct zlib_data, 1);
z_stream *zs = &z->zs;
@@ -207,7 +209,7 @@ static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp)
return -1;
}
/* To be safe, we reserve twice the size of the packet */
- z->zbuff_len = MULTIFD_PACKET_SIZE * 2;
+ z->zbuff_len = multifd_packet_size * 2;
z->zbuff = g_try_malloc(z->zbuff_len);
if (!z->zbuff) {
inflateEnd(zs);
diff --git a/migration/multifd-zstd.c b/migration/multifd-zstd.c
index 256858df0a..edc738afbb 100644
--- a/migration/multifd-zstd.c
+++ b/migration/multifd-zstd.c
@@ -49,6 +49,7 @@ struct zstd_data {
*/
static int zstd_send_setup(MultiFDSendParams *p, Error **errp)
{
+ uint64_t multifd_packet_size = migrate_multifd_packet_size();
struct zstd_data *z = g_new0(struct zstd_data, 1);
int res;
@@ -69,7 +70,7 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp)
return -1;
}
/* This is the maximum size of the compressed buffer */
- z->zbuff_len = ZSTD_compressBound(MULTIFD_PACKET_SIZE);
+ z->zbuff_len = ZSTD_compressBound(multifd_packet_size);
z->zbuff = g_try_malloc(z->zbuff_len);
if (!z->zbuff) {
ZSTD_freeCStream(z->zcs);
@@ -182,6 +183,7 @@ out:
*/
static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
{
+ uint64_t multifd_packet_size = migrate_multifd_packet_size();
struct zstd_data *z = g_new0(struct zstd_data, 1);
int ret;
@@ -203,7 +205,7 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
}
/* To be safe, we reserve twice the size of the packet */
- z->zbuff_len = MULTIFD_PACKET_SIZE * 2;
+ z->zbuff_len = multifd_packet_size * 2;
z->zbuff = g_try_malloc(z->zbuff_len);
if (!z->zbuff) {
ZSTD_freeDStream(z->zds);
diff --git a/migration/multifd.c b/migration/multifd.c
index 7316643d0a..2796646087 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -1154,7 +1154,8 @@ bool multifd_send_setup(void)
MigrationState *s = migrate_get_current();
Error *local_err = NULL;
int thread_count, ret = 0;
- uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
+ uint32_t page_count =
+ migrate_multifd_packet_size() / qemu_target_page_size();
bool use_packets = multifd_use_packets();
uint8_t i;
const char *dsa_parameter = migrate_multifd_dsa_accel();
@@ -1577,7 +1578,8 @@ static void *multifd_recv_thread(void *opaque)
int multifd_recv_setup(Error **errp)
{
int thread_count;
- uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
+ uint32_t page_count =
+ migrate_multifd_packet_size() / qemu_target_page_size();
bool use_packets = multifd_use_packets();
uint8_t i;
const char *dsa_parameter = migrate_multifd_dsa_accel();
diff --git a/migration/multifd.h b/migration/multifd.h
index b3717fae24..97d4095b6a 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -42,9 +42,6 @@ MultiFDRecvData *multifd_get_recv_data(void);
#define MULTIFD_FLAG_ZLIB (1 << 1)
#define MULTIFD_FLAG_ZSTD (2 << 1)
-/* This value needs to be a multiple of qemu_target_page_size() */
-#define MULTIFD_PACKET_SIZE (512 * 1024)
-
typedef struct {
uint32_t magic;
uint32_t version;
--
2.30.2
next prev parent reply other threads:[~2024-04-25 2:24 UTC|newest]
Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-04-25 2:21 [PATCH v4 00/14] Use Intel DSA accelerator to offload zero page checking in multifd live migration Hao Xiang
2024-04-25 2:21 ` [PATCH v4 01/14] meson: Introduce new instruction set enqcmd to the build system Hao Xiang
2024-04-25 18:50 ` Fabiano Rosas
2024-04-25 2:21 ` [PATCH v4 02/14] util/dsa: Add dependency idxd Hao Xiang
2024-04-25 20:33 ` Fabiano Rosas
2024-04-25 2:21 ` [PATCH v4 03/14] util/dsa: Implement DSA device start and stop logic Hao Xiang
2024-04-25 14:21 ` Daniel P. Berrangé
2024-04-25 14:25 ` Daniel P. Berrangé
2024-04-25 14:32 ` Daniel P. Berrangé
2024-04-25 21:22 ` Fabiano Rosas
2024-04-25 2:21 ` [PATCH v4 04/14] util/dsa: Implement DSA task enqueue and dequeue Hao Xiang
2024-04-25 20:55 ` Fabiano Rosas
2024-04-25 21:48 ` Fabiano Rosas
2024-04-25 2:21 ` [PATCH v4 05/14] util/dsa: Implement DSA task asynchronous completion thread model Hao Xiang
2024-04-25 2:21 ` [PATCH v4 06/14] util/dsa: Implement zero page checking in DSA task Hao Xiang
2024-04-25 2:21 ` [PATCH v4 07/14] util/dsa: Implement DSA task asynchronous submission and wait for completion Hao Xiang
2024-05-01 18:59 ` Peter Xu
2024-04-25 2:21 ` [PATCH v4 08/14] migration/multifd: Add new migration option for multifd DSA offloading Hao Xiang
2024-04-25 14:17 ` Daniel P. Berrangé
2024-04-26 9:16 ` Markus Armbruster
2024-04-25 2:21 ` [PATCH v4 09/14] migration/multifd: Prepare to introduce DSA acceleration on the multifd path Hao Xiang
2024-05-01 19:18 ` Peter Xu
2024-04-25 2:21 ` [PATCH v4 10/14] migration/multifd: Enable DSA offloading in multifd sender path Hao Xiang
2024-04-25 14:29 ` Daniel P. Berrangé
2024-04-25 15:39 ` Fabiano Rosas
2024-05-01 19:25 ` Peter Xu
2024-04-25 2:21 ` [PATCH v4 11/14] migration/multifd: Add migration option set packet size Hao Xiang
2024-05-01 19:36 ` Peter Xu
2024-04-25 2:21 ` Hao Xiang [this message]
2024-04-25 2:21 ` [PATCH v4 13/14] util/dsa: Add unit test coverage for Intel DSA task submission and completion Hao Xiang
2024-04-25 2:21 ` [PATCH v4 14/14] migration/multifd: Add integration tests for multifd with Intel DSA offloading Hao Xiang
2024-05-01 19:54 ` [PATCH v4 00/14] Use Intel DSA accelerator to offload zero page checking in multifd live migration Peter Xu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240425022117.4035031-13-hao.xiang@linux.dev \
--to=hao.xiang@linux.dev \
--cc=armbru@redhat.com \
--cc=farosas@suse.de \
--cc=lvivier@redhat.com \
--cc=marcandre.lureau@redhat.com \
--cc=peterx@redhat.com \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).