From: Juan Quintela <quintela@redhat.com>
To: qemu-devel@nongnu.org
Cc: Juan Quintela <quintela@redhat.com>, Peter Xu <peterx@redhat.com>,
Leonardo Bras <leobras@redhat.com>
Subject: [PATCH 07/19] migration/rdma: Unfold ram_control_before_iterate()
Date: Thu, 27 Apr 2023 18:34:37 +0200
Message-ID: <20230427163449.27473-8-quintela@redhat.com>
In-Reply-To: <20230427163449.27473-1-quintela@redhat.com>
Unfold ram_control_before_iterate() into its callers. Once there:
- Remove the unused data parameter.
- Change all callers to call qemu_rdma_registration_start() directly.
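
Each caller ends up open-coding the old hook, i.e. it follows this
pattern (sketch; the flag is RAM_CONTROL_SETUP, RAM_CONTROL_ROUND or
RAM_CONTROL_FINISH depending on the caller):

    ret = qemu_rdma_registration_start(f, RAM_CONTROL_SETUP);
    if (ret < 0) {
        qemu_file_set_error(f, ret);
    }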
Signed-off-by: Juan Quintela <quintela@redhat.com>
---
migration/qemu-file.c | 13 +------------
migration/qemu-file.h | 2 --
migration/ram.c | 16 +++++++++++++---
migration/rdma.c | 4 +---
migration/rdma.h | 6 ++++++
5 files changed, 21 insertions(+), 20 deletions(-)
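Note: when QEMU is built without RDMA support, ram.c still calls
qemu_rdma_registration_start(), so rdma.h provides a no-op fallback for
!CONFIG_RDMA builds. A minimal sketch of that fallback (it has to be
static inline so the header can be included from several files without
multiple-definition errors):

    #ifndef CONFIG_RDMA
    static inline int qemu_rdma_registration_start(QEMUFile *f, uint64_t flags)
    {
        /* No RDMA support compiled in: nothing to register, just succeed. */
        return 0;
    }
    #endif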
diff --git a/migration/qemu-file.c b/migration/qemu-file.c
index ee04240a21..b6dca23706 100644
--- a/migration/qemu-file.c
+++ b/migration/qemu-file.c
@@ -30,6 +30,7 @@
#include "qemu-file.h"
#include "trace.h"
#include "qapi/error.h"
+#include "rdma.h"
#define IO_BUF_SIZE 32768
#define MAX_IOV_SIZE MIN_CONST(IOV_MAX, 64)
@@ -314,18 +315,6 @@ void qemu_fflush(QEMUFile *f)
f->iovcnt = 0;
}
-void ram_control_before_iterate(QEMUFile *f, uint64_t flags)
-{
- int ret = 0;
-
- if (f->hooks && f->hooks->before_ram_iterate) {
- ret = f->hooks->before_ram_iterate(f, flags, NULL);
- if (ret < 0) {
- qemu_file_set_error(f, ret);
- }
- }
-}
-
void ram_control_after_iterate(QEMUFile *f, uint64_t flags)
{
int ret = 0;
diff --git a/migration/qemu-file.h b/migration/qemu-file.h
index d16cd50448..c898c5c537 100644
--- a/migration/qemu-file.h
+++ b/migration/qemu-file.h
@@ -56,7 +56,6 @@ typedef size_t (QEMURamSaveFunc)(QEMUFile *f,
uint64_t *bytes_sent);
typedef struct QEMUFileHooks {
- QEMURamHookFunc *before_ram_iterate;
QEMURamHookFunc *after_ram_iterate;
QEMURamHookFunc *hook_ram_load;
QEMURamSaveFunc *save_page;
@@ -150,7 +149,6 @@ void qemu_fflush(QEMUFile *f);
void qemu_file_set_blocking(QEMUFile *f, bool block);
int qemu_file_get_to_fd(QEMUFile *f, int fd, size_t size);
-void ram_control_before_iterate(QEMUFile *f, uint64_t flags);
void ram_control_after_iterate(QEMUFile *f, uint64_t flags);
void ram_control_load_hook(QEMUFile *f, uint64_t flags, void *data);
diff --git a/migration/ram.c b/migration/ram.c
index 7d81c4a39e..ce5dfc3c86 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -58,6 +58,7 @@
#include "qemu/iov.h"
#include "multifd.h"
#include "sysemu/runstate.h"
+#include "rdma.h"
#include "options.h"
#include "hw/boards.h" /* for machine_dump_guest_core() */
@@ -3277,7 +3278,10 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
}
}
- ram_control_before_iterate(f, RAM_CONTROL_SETUP);
+ ret = qemu_rdma_registration_start(f, RAM_CONTROL_SETUP);
+ if (ret < 0) {
+ qemu_file_set_error(f, ret);
+ }
ram_control_after_iterate(f, RAM_CONTROL_SETUP);
migration_ops = g_malloc0(sizeof(MigrationOps));
@@ -3337,7 +3341,10 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
/* Read version before ram_list.blocks */
smp_rmb();
- ram_control_before_iterate(f, RAM_CONTROL_ROUND);
+ ret = qemu_rdma_registration_start(f, RAM_CONTROL_ROUND);
+ if (ret < 0) {
+ qemu_file_set_error(f, ret);
+ }
t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
i = 0;
@@ -3442,7 +3449,10 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
migration_bitmap_sync_precopy(rs);
}
- ram_control_before_iterate(f, RAM_CONTROL_FINISH);
+ ret = qemu_rdma_registration_start(f, RAM_CONTROL_FINISH);
+ if (ret < 0) {
+ qemu_file_set_error(f, ret);
+ }
/* try transferring iterative blocks of memory */
diff --git a/migration/rdma.c b/migration/rdma.c
index 7e747b2595..56b7f6901e 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -3860,8 +3860,7 @@ static int rdma_load_hook(QEMUFile *f, uint64_t flags, void *data)
}
}
-static int qemu_rdma_registration_start(QEMUFile *f,
- uint64_t flags, void *data)
+int qemu_rdma_registration_start(QEMUFile *f, uint64_t flags)
{
QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f));
RDMAContext *rdma;
@@ -4004,7 +4003,6 @@ static const QEMUFileHooks rdma_read_hooks = {
};
static const QEMUFileHooks rdma_write_hooks = {
- .before_ram_iterate = qemu_rdma_registration_start,
.after_ram_iterate = qemu_rdma_registration_stop,
.save_page = qemu_rdma_save_page,
};
diff --git a/migration/rdma.h b/migration/rdma.h
index de2ba09dc5..901c829c8b 100644
--- a/migration/rdma.h
+++ b/migration/rdma.h
@@ -22,4 +22,10 @@ void rdma_start_outgoing_migration(void *opaque, const char *host_port,
void rdma_start_incoming_migration(const char *host_port, Error **errp);
+
+#ifdef CONFIG_RDMA
+int qemu_rdma_registration_start(QEMUFile *f, uint64_t flags);
+#else
+static inline int qemu_rdma_registration_start(QEMUFile *f, uint64_t flags) { return 0; }
+#endif
#endif
--
2.40.0