From: Juan Quintela <quintela@redhat.com>
To: qemu-devel@nongnu.org
Cc: qemu-block@nongnu.org, Paolo Bonzini <pbonzini@redhat.com>,
	Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>,
	Fam Zheng <fam@euphon.net>, Cleber Rosa <crosa@redhat.com>,
	Eric Blake <eblake@redhat.com>,
	Li Zhijian <lizhijian@fujitsu.com>, Peter Xu <peterx@redhat.com>,
	Markus Armbruster <armbru@redhat.com>,
	John Snow <jsnow@redhat.com>,
	Stefan Hajnoczi <stefanha@redhat.com>,
	Juan Quintela <quintela@redhat.com>,
	Leonardo Bras <leobras@redhat.com>,
	Laurent Vivier <lvivier@redhat.com>,
	Fabiano Rosas <farosas@suse.de>, Thomas Huth <thuth@redhat.com>
Subject: [PULL 16/38] migration/rdma: Unfold ram_control_after_iterate()
Date: Mon, 16 Oct 2023 12:06:44 +0200
Message-ID: <20231016100706.2551-17-quintela@redhat.com>
In-Reply-To: <20231016100706.2551-1-quintela@redhat.com>

Once there:
- Remove the unused data parameter
- Unfold it into its callers
- Change all callers to call qemu_rdma_registration_stop() directly
- Call QIO_CHANNEL_RDMA() only after the migrate_rdma() check, so the
  cast is never attempted on a non-RDMA channel
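
For illustration, the unfolded pattern at each former
ram_control_after_iterate() call site looks like this (a minimal sketch
distilled from the ram.c hunks below; the RAM_CONTROL_* flag varies per
caller):

    ret = qemu_rdma_registration_stop(f, RAM_CONTROL_SETUP);
    if (ret < 0) {
        qemu_file_set_error(f, ret);
    }

When CONFIG_RDMA is not set, the static inline stub added to rdma.h
returns 0, so non-RDMA builds compile the same caller code unchanged.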

Reviewed-by: Li Zhijian <lizhijian@fujitsu.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Message-ID: <20231011203527.9061-4-quintela@redhat.com>
---
 migration/qemu-file.h |  2 --
 migration/rdma.h      |  3 +++
 migration/qemu-file.c | 12 ------------
 migration/ram.c       | 17 ++++++++++++++---
 migration/rdma.c      |  9 ++++-----
 5 files changed, 21 insertions(+), 22 deletions(-)

diff --git a/migration/qemu-file.h b/migration/qemu-file.h
index d6a370c569..35e671a01e 100644
--- a/migration/qemu-file.h
+++ b/migration/qemu-file.h
@@ -55,7 +55,6 @@ typedef int (QEMURamSaveFunc)(QEMUFile *f,
                               size_t size);
 
 typedef struct QEMUFileHooks {
-    QEMURamHookFunc *after_ram_iterate;
     QEMURamHookFunc *hook_ram_load;
     QEMURamSaveFunc *save_page;
 } QEMUFileHooks;
@@ -126,7 +125,6 @@ void qemu_fflush(QEMUFile *f);
 void qemu_file_set_blocking(QEMUFile *f, bool block);
 int qemu_file_get_to_fd(QEMUFile *f, int fd, size_t size);
 
-void ram_control_after_iterate(QEMUFile *f, uint64_t flags);
 void ram_control_load_hook(QEMUFile *f, uint64_t flags, void *data);
 
 /* Whenever this is found in the data stream, the flags
diff --git a/migration/rdma.h b/migration/rdma.h
index 670c67a8cb..c13b94c782 100644
--- a/migration/rdma.h
+++ b/migration/rdma.h
@@ -25,8 +25,11 @@ void rdma_start_incoming_migration(const char *host_port, Error **errp);
 
 #ifdef CONFIG_RDMA
 int qemu_rdma_registration_start(QEMUFile *f, uint64_t flags);
+int qemu_rdma_registration_stop(QEMUFile *f, uint64_t flags);
 #else
 static inline
 int qemu_rdma_registration_start(QEMUFile *f, uint64_t flags) { return 0; }
+static inline
+int qemu_rdma_registration_stop(QEMUFile *f, uint64_t flags) { return 0; }
 #endif
 #endif
diff --git a/migration/qemu-file.c b/migration/qemu-file.c
index 5e2d73fd68..e7dba2a849 100644
--- a/migration/qemu-file.c
+++ b/migration/qemu-file.c
@@ -298,18 +298,6 @@ void qemu_fflush(QEMUFile *f)
     f->iovcnt = 0;
 }
 
-void ram_control_after_iterate(QEMUFile *f, uint64_t flags)
-{
-    int ret = 0;
-
-    if (f->hooks && f->hooks->after_ram_iterate) {
-        ret = f->hooks->after_ram_iterate(f, flags, NULL);
-        if (ret < 0) {
-            qemu_file_set_error(f, ret);
-        }
-    }
-}
-
 void ram_control_load_hook(QEMUFile *f, uint64_t flags, void *data)
 {
     if (f->hooks && f->hooks->hook_ram_load) {
diff --git a/migration/ram.c b/migration/ram.c
index 6592431a4e..f1ddc1f9fa 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -3065,7 +3065,11 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     if (ret < 0) {
         qemu_file_set_error(f, ret);
     }
-    ram_control_after_iterate(f, RAM_CONTROL_SETUP);
+
+    ret = qemu_rdma_registration_stop(f, RAM_CONTROL_SETUP);
+    if (ret < 0) {
+        qemu_file_set_error(f, ret);
+    }
 
     migration_ops = g_malloc0(sizeof(MigrationOps));
     migration_ops->ram_save_target_page = ram_save_target_page_legacy;
@@ -3187,7 +3191,10 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
      * Must occur before EOS (or any QEMUFile operation)
      * because of RDMA protocol.
      */
-    ram_control_after_iterate(f, RAM_CONTROL_ROUND);
+    ret = qemu_rdma_registration_stop(f, RAM_CONTROL_ROUND);
+    if (ret < 0) {
+        qemu_file_set_error(f, ret);
+    }
 
 out:
     if (ret >= 0
@@ -3260,7 +3267,11 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
         qemu_mutex_unlock(&rs->bitmap_mutex);
 
         ram_flush_compressed_data(rs);
-        ram_control_after_iterate(f, RAM_CONTROL_FINISH);
+
+        int ret = qemu_rdma_registration_stop(f, RAM_CONTROL_FINISH);
+        if (ret < 0) {
+            qemu_file_set_error(f, ret);
+        }
     }
 
     if (ret < 0) {
diff --git a/migration/rdma.c b/migration/rdma.c
index 3d74ad6db0..4b32d375ec 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -3878,20 +3878,20 @@ int qemu_rdma_registration_start(QEMUFile *f, uint64_t flags)
  * Inform dest that dynamic registrations are done for now.
  * First, flush writes, if any.
  */
-static int qemu_rdma_registration_stop(QEMUFile *f,
-                                       uint64_t flags, void *data)
+int qemu_rdma_registration_stop(QEMUFile *f, uint64_t flags)
 {
-    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f));
+    QIOChannelRDMA *rioc;
     Error *err = NULL;
     RDMAContext *rdma;
     RDMAControlHeader head = { .len = 0, .repeat = 1 };
     int ret;
 
-    if (migration_in_postcopy()) {
+    if (!migrate_rdma() || migration_in_postcopy()) {
         return 0;
     }
 
     RCU_READ_LOCK_GUARD();
+    rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f));
     rdma = qatomic_rcu_read(&rioc->rdmaout);
     if (!rdma) {
         return -1;
@@ -3999,7 +3999,6 @@ static const QEMUFileHooks rdma_read_hooks = {
 };
 
 static const QEMUFileHooks rdma_write_hooks = {
-    .after_ram_iterate  = qemu_rdma_registration_stop,
     .save_page          = qemu_rdma_save_page,
 };
 
-- 
2.41.0


