* [PULL 1/3] migration: Revert mapped-ram multifd support to fd: URI
From: peterx @ 2024-03-22 16:14 UTC
To: qemu-devel; +Cc: Fabiano Rosas, Daniel P. Berrangé, peterx

From: Fabiano Rosas <farosas@suse.de>
This reverts commit decdc76772c453ff1444612e910caa0d45cd8eac in full
and also the relevant migration-tests from
7a09f092834641b7a793d50a3a261073bbb404a6.
After the addition of the new QAPI-based migration address API in 8.2, we've been converting an "fd:" URI into a SocketAddress, missing the fact that the "fd:" syntax can also refer to a plain file instead of a socket. This is a problem because the SocketAddress is part of the API, so we're effectively asking users to create a "socket" channel in order to pass in a plain file.
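
To illustrate, migrating to a plain file through a file descriptor currently
has to be requested as a "socket" channel, roughly like this (a sketch only;
"fd-mig" is a placeholder name for an fd previously handed to QEMU via the
QMP getfd command):

  { "execute": "migrate",
    "arguments": {
      "channels": [
        { "channel-type": "main",
          "addr": { "transport": "socket",
                    "type": "fd",
                    "str": "fd-mig" } } ] } }
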
The easiest way to fix this situation is to deprecate the usage of both SocketAddress and "fd:" when used with a plain file for migration. As this usage has been possible since 8.2, we can wait until 9.1 to deprecate it.
For 9.0, however, we should avoid adding further support for migrating to a plain file via the old "fd:" syntax or the new SocketAddress API, and instead require either the old-style "file:" URI or the FileMigrationArgs::filename field of the new API with the "/dev/fdset/NN" syntax, both of which are already supported.
Signed-off-by: Fabiano Rosas <farosas@suse.de>
Link: https://lore.kernel.org/r/20240319210941.1907-1-farosas@suse.de
Signed-off-by: Peter Xu <peterx@redhat.com>
---
migration/fd.h | 2 --
migration/fd.c | 56 ++++--------------------------------
migration/file.c | 19 ++----------
migration/migration.c | 13 ---------
migration/multifd.c | 2 --
tests/qtest/migration-test.c | 43 ---------------------------
6 files changed, 8 insertions(+), 127 deletions(-)
diff --git a/migration/fd.h b/migration/fd.h
index 0c0a18d9e7..b901bc014e 100644
--- a/migration/fd.h
+++ b/migration/fd.h
@@ -20,6 +20,4 @@ void fd_start_incoming_migration(const char *fdname, Error **errp);
void fd_start_outgoing_migration(MigrationState *s, const char *fdname,
Error **errp);
-void fd_cleanup_outgoing_migration(void);
-int fd_args_get_fd(void);
#endif
diff --git a/migration/fd.c b/migration/fd.c
index fe0d096abd..449adaa2de 100644
--- a/migration/fd.c
+++ b/migration/fd.c
@@ -15,42 +15,19 @@
*/
#include "qemu/osdep.h"
-#include "qapi/error.h"
#include "channel.h"
#include "fd.h"
#include "file.h"
#include "migration.h"
#include "monitor/monitor.h"
-#include "io/channel-file.h"
-#include "io/channel-socket.h"
#include "io/channel-util.h"
-#include "options.h"
#include "trace.h"
-static struct FdOutgoingArgs {
- int fd;
-} outgoing_args;
-
-int fd_args_get_fd(void)
-{
- return outgoing_args.fd;
-}
-
-void fd_cleanup_outgoing_migration(void)
-{
- if (outgoing_args.fd > 0) {
- close(outgoing_args.fd);
- outgoing_args.fd = -1;
- }
-}
-
void fd_start_outgoing_migration(MigrationState *s, const char *fdname, Error **errp)
{
QIOChannel *ioc;
int fd = monitor_get_fd(monitor_cur(), fdname, errp);
- int newfd;
-
if (fd == -1) {
return;
}
@@ -62,18 +39,6 @@ void fd_start_outgoing_migration(MigrationState *s, const char *fdname, Error **
return;
}
- /*
- * This is dup()ed just to avoid referencing an fd that might
- * be already closed by the iochannel.
- */
- newfd = dup(fd);
- if (newfd == -1) {
- error_setg_errno(errp, errno, "Could not dup FD %d", fd);
- object_unref(ioc);
- return;
- }
- outgoing_args.fd = newfd;
-
qio_channel_set_name(ioc, "migration-fd-outgoing");
migration_channel_connect(s, ioc, NULL, NULL);
object_unref(OBJECT(ioc));
@@ -104,20 +69,9 @@ void fd_start_incoming_migration(const char *fdname, Error **errp)
return;
}
- if (migrate_multifd()) {
- if (fd_is_socket(fd)) {
- error_setg(errp,
- "Multifd migration to a socket FD is not supported");
- object_unref(ioc);
- return;
- }
-
- file_create_incoming_channels(ioc, errp);
- } else {
- qio_channel_set_name(ioc, "migration-fd-incoming");
- qio_channel_add_watch_full(ioc, G_IO_IN,
- fd_accept_incoming_migration,
- NULL, NULL,
- g_main_context_get_thread_default());
- }
+ qio_channel_set_name(ioc, "migration-fd-incoming");
+ qio_channel_add_watch_full(ioc, G_IO_IN,
+ fd_accept_incoming_migration,
+ NULL, NULL,
+ g_main_context_get_thread_default());
}
diff --git a/migration/file.c b/migration/file.c
index b6e8ba13f2..ab18ba505a 100644
--- a/migration/file.c
+++ b/migration/file.c
@@ -11,7 +11,6 @@
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "channel.h"
-#include "fd.h"
#include "file.h"
#include "migration.h"
#include "io/channel-file.h"
@@ -55,27 +54,15 @@ bool file_send_channel_create(gpointer opaque, Error **errp)
{
QIOChannelFile *ioc;
int flags = O_WRONLY;
- bool ret = false;
- int fd = fd_args_get_fd();
-
- if (fd && fd != -1) {
- if (fd_is_socket(fd)) {
- error_setg(errp,
- "Multifd migration to a socket FD is not supported");
- goto out;
- }
-
- ioc = qio_channel_file_new_dupfd(fd, errp);
- } else {
- ioc = qio_channel_file_new_path(outgoing_args.fname, flags, 0, errp);
- }
+ bool ret = true;
+ ioc = qio_channel_file_new_path(outgoing_args.fname, flags, 0, errp);
if (!ioc) {
+ ret = false;
goto out;
}
multifd_channel_connect(opaque, QIO_CHANNEL(ioc));
- ret = true;
out:
/*
diff --git a/migration/migration.c b/migration/migration.c
index f60bd371e3..047b6b49cf 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -140,10 +140,6 @@ static bool transport_supports_multi_channels(MigrationAddress *addr)
if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) {
SocketAddress *saddr = &addr->u.socket;
- if (saddr->type == SOCKET_ADDRESS_TYPE_FD) {
- return migrate_mapped_ram();
- }
-
return (saddr->type == SOCKET_ADDRESS_TYPE_INET ||
saddr->type == SOCKET_ADDRESS_TYPE_UNIX ||
saddr->type == SOCKET_ADDRESS_TYPE_VSOCK);
@@ -165,15 +161,6 @@ static bool transport_supports_seeking(MigrationAddress *addr)
return true;
}
- /*
- * At this point QEMU has not yet fetched the fd passed in by the
- * user, so we cannot know for sure whether it refers to a plain
- * file or a socket. Let it through anyway and check at fd.c.
- */
- if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) {
- return addr->u.socket.type == SOCKET_ADDRESS_TYPE_FD;
- }
-
return false;
}
diff --git a/migration/multifd.c b/migration/multifd.c
index 0179422f6d..d2f0238f70 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -18,7 +18,6 @@
#include "exec/ramblock.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
-#include "fd.h"
#include "file.h"
#include "migration.h"
#include "migration-stats.h"
@@ -794,7 +793,6 @@ static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp)
static void multifd_send_cleanup_state(void)
{
file_cleanup_outgoing_migration();
- fd_cleanup_outgoing_migration();
socket_cleanup_outgoing_migration();
qemu_sem_destroy(&multifd_send_state->channels_created);
qemu_sem_destroy(&multifd_send_state->channels_ready);
diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c
index 71895abb7f..1d2cee87ea 100644
--- a/tests/qtest/migration-test.c
+++ b/tests/qtest/migration-test.c
@@ -2536,13 +2536,6 @@ static void *migrate_precopy_fd_file_start(QTestState *from, QTestState *to)
return NULL;
}
-static void *migrate_fd_file_mapped_ram_start(QTestState *from, QTestState *to)
-{
- migrate_mapped_ram_start(from, to);
-
- return migrate_precopy_fd_file_start(from, to);
-}
-
static void test_migrate_precopy_fd_file(void)
{
MigrateCommon args = {
@@ -2553,36 +2546,6 @@ static void test_migrate_precopy_fd_file(void)
};
test_file_common(&args, true);
}
-
-static void test_migrate_precopy_fd_file_mapped_ram(void)
-{
- MigrateCommon args = {
- .listen_uri = "defer",
- .connect_uri = "fd:fd-mig",
- .start_hook = migrate_fd_file_mapped_ram_start,
- .finish_hook = test_migrate_fd_finish_hook
- };
- test_file_common(&args, true);
-}
-
-static void *migrate_multifd_fd_mapped_ram_start(QTestState *from,
- QTestState *to)
-{
- migrate_multifd_mapped_ram_start(from, to);
- return migrate_precopy_fd_file_start(from, to);
-}
-
-static void test_multifd_fd_mapped_ram(void)
-{
- MigrateCommon args = {
- .connect_uri = "fd:fd-mig",
- .listen_uri = "defer",
- .start_hook = migrate_multifd_fd_mapped_ram_start,
- .finish_hook = test_migrate_fd_finish_hook
- };
-
- test_file_common(&args, true);
-}
#endif /* _WIN32 */
static void do_test_validate_uuid(MigrateStart *args, bool should_fail)
@@ -3687,10 +3650,6 @@ int main(int argc, char **argv)
test_multifd_file_mapped_ram);
migration_test_add("/migration/multifd/file/mapped-ram/live",
test_multifd_file_mapped_ram_live);
-#ifndef _WIN32
- migration_test_add("/migration/multifd/fd/mapped-ram",
- test_multifd_fd_mapped_ram);
-#endif
#ifdef CONFIG_GNUTLS
migration_test_add("/migration/precopy/unix/tls/psk",
@@ -3753,8 +3712,6 @@ int main(int argc, char **argv)
test_migrate_precopy_fd_socket);
migration_test_add("/migration/precopy/fd/file",
test_migrate_precopy_fd_file);
- migration_test_add("/migration/precopy/fd/file/mapped-ram",
- test_migrate_precopy_fd_file_mapped_ram);
#endif
migration_test_add("/migration/validate_uuid", test_validate_uuid);
migration_test_add("/migration/validate_uuid_error",
--
2.44.0

* [PULL 2/3] migration/postcopy: Fix high frequency sync
From: peterx @ 2024-03-22 16:14 UTC
To: qemu-devel
Cc: Fabiano Rosas, Daniel P. Berrangé, peterx, Nina Schoetterl-Glausch

From: Peter Xu <peterx@redhat.com>
With the current code base I can observe an extremely high sync count during precopy, as long as one enables postcopy-ram=on before the switchover to postcopy.

To provide some context on when QEMU decides to do a full sync: it checks must_precopy (which means "data that must be sent during the precopy phase"), and as long as that is lower than the threshold size we calculated (from the bandwidth and the expected downtime), QEMU kicks off the slow/exact sync.

However, when postcopy is enabled (even while still in the precopy phase), RAM reports all pages as can_postcopy and reports must_precopy==0. Then "must_precopy <= threshold_size" almost always holds and enforces a slow sync on every call to migration_iteration_run() whenever postcopy is enabled, even if it is never used: with gigabytes of RAM still dirty but a threshold of only a few hundred megabytes, must_precopy is still 0, so the check passes and the expensive exact sync runs on every iteration. That is insane.

It turns out this was a regression introduced by the earlier refactoring in 8.0, as reported by Nina [1]:

(a) c8df4a7aef ("migration: Split save_live_pending() into state_pending_*")

A workaround patch was then applied at the end of that release cycle (8.0-rc4) to fix it:

(b) 28ef5339c3 ("migration: fix ram_state_pending_exact()")

However, that "workaround" was overlooked during the cleanup in this 9.0 release, in this commit:

(c) b0504edd40 ("migration: Drop unnecessary check in ram's pending_exact()")

Then the issue was re-exposed, as reported by Nina [1].

The problem with (b) is that it only fixed the case for RAM, not the rest of the iterators. A slow sync should only be required if all dirty data (precopy+postcopy) is less than the threshold_size that QEMU calculated. It is even debatable whether a sync is needed at all once we have switched to postcopy: currently ram_state_pending_exact() is mostly a noop after the switch, and that logic seems to apply to the rest of the iterators too, as syncing the dirty bitmap during postcopy doesn't make much sense. However, let's leave such a change for later, as we're in the rc phase.

So rather than reusing the approach of commit (b), this patch provides the complete fix for all the iterators. While at it, clean up the surrounding lines a little.

[1] https://gitlab.com/qemu-project/qemu/-/issues/1565
Reported-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Fixes: b0504edd40 ("migration: Drop unnecessary check in ram's pending_exact()")
Reviewed-by: Fabiano Rosas <farosas@suse.de>
Link: https://lore.kernel.org/r/20240320214453.584374-1-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
---
migration/migration.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/migration/migration.c b/migration/migration.c
index 047b6b49cf..9fe8fd2afd 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -3199,17 +3199,16 @@ typedef enum {
*/
static MigIterateState migration_iteration_run(MigrationState *s)
{
- uint64_t must_precopy, can_postcopy;
+ uint64_t must_precopy, can_postcopy, pending_size;
Error *local_err = NULL;
bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;
bool can_switchover = migration_can_switchover(s);
qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy);
- uint64_t pending_size = must_precopy + can_postcopy;
-
+ pending_size = must_precopy + can_postcopy;
trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy);
- if (must_precopy <= s->threshold_size) {
+ if (pending_size < s->threshold_size) {
qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy);
pending_size = must_precopy + can_postcopy;
trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy);
--
2.44.0