From: Fabiano Rosas <farosas@suse.de>
To: peterx@redhat.com, qemu-devel@nongnu.org
Cc: Hao Xiang <hao.xiang@bytedance.com>,
	Bryan Zhang <bryan.zhang@bytedance.com>,
	peterx@redhat.com, Avihai Horon <avihaih@nvidia.com>,
	Yuan Liu <yuan1.liu@intel.com>,
	Prasad Pandit <ppandit@redhat.com>
Subject: Re: [PATCH v2 19/23] migration/multifd: Cleanup multifd_save_cleanup()
Date: Fri, 02 Feb 2024 17:54:23 -0300	[thread overview]
Message-ID: <87a5oih9j4.fsf@suse.de> (raw)
In-Reply-To: <20240202102857.110210-20-peterx@redhat.com>

peterx@redhat.com writes:

> From: Peter Xu <peterx@redhat.com>
>
> Shrink the function by moving the relevant work into helpers: move the
> thread join()s into multifd_send_terminate_threads(), then create two more
> helpers to cover the channel and state cleanups.
>
> Add a TODO entry for the thread termination process because p->running is
> still buggy.  We need to fix it at some point, but that is not covered yet.
>
> Suggested-by: Fabiano Rosas <farosas@suse.de>
> Signed-off-by: Peter Xu <peterx@redhat.com>

Reviewed-by: Fabiano Rosas <farosas@suse.de>

Minor comment below.

> ---
>  migration/multifd.c | 91 +++++++++++++++++++++++++++++----------------
>  1 file changed, 59 insertions(+), 32 deletions(-)
>
> diff --git a/migration/multifd.c b/migration/multifd.c
> index 4ab8e6eff2..4cb0d2cc17 100644
> --- a/migration/multifd.c
> +++ b/migration/multifd.c
> @@ -593,6 +593,11 @@ static void multifd_send_terminate_threads(void)
>       * always set it.
>       */
>      qatomic_set(&multifd_send_state->exiting, 1);
> +
> +    /*
> +     * First, kick all threads out, no matter whether they are just idle
> +     * or blocked in an IO system call.
> +     */
>      for (i = 0; i < migrate_multifd_channels(); i++) {
>          MultiFDSendParams *p = &multifd_send_state->params[i];
>  
> @@ -601,6 +606,21 @@ static void multifd_send_terminate_threads(void)
>              qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
>          }
>      }
> +
> +    /*
> +     * Finally recycle all the threads.
> +     *
> +     * TODO: p->running is still buggy, e.g. we can reach here without the
> +     * corresponding multifd_new_send_channel_async() having been invoked,
> +     * so a new thread can even be created after this function returns.
> +     */

Series on the list:

https://lore.kernel.org/r/20240202191128.1901-1-farosas@suse.de

> +    for (i = 0; i < migrate_multifd_channels(); i++) {
> +        MultiFDSendParams *p = &multifd_send_state->params[i];
> +
> +        if (p->running) {
> +            qemu_thread_join(&p->thread);
> +        }
> +    }
>  }
>  
>  static int multifd_send_channel_destroy(QIOChannel *send)
> @@ -608,6 +628,41 @@ static int multifd_send_channel_destroy(QIOChannel *send)
>      return socket_send_channel_destroy(send);
>  }
>  
> +static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp)
> +{
> +    if (p->registered_yank) {
> +        migration_ioc_unregister_yank(p->c);
> +    }
> +    multifd_send_channel_destroy(p->c);
> +    p->c = NULL;
> +    qemu_mutex_destroy(&p->mutex);
> +    qemu_sem_destroy(&p->sem);
> +    qemu_sem_destroy(&p->sem_sync);
> +    g_free(p->name);
> +    p->name = NULL;
> +    multifd_pages_clear(p->pages);
> +    p->pages = NULL;
> +    p->packet_len = 0;
> +    g_free(p->packet);
> +    p->packet = NULL;
> +    g_free(p->iov);
> +    p->iov = NULL;
> +    multifd_send_state->ops->send_cleanup(p, errp);
> +
> +    return *errp == NULL;

I think technically this would require the ERRP_GUARD() macro, since *errp
is dereferenced directly?

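For context, the usual pattern (a minimal sketch of the ERRP_GUARD() usage
from qapi/error.h, not part of this patch; Foo and cleanup_that_may_fail()
are made-up placeholders) looks like:

    static bool do_cleanup(Foo *f, Error **errp)
    {
        ERRP_GUARD();   /* re-points errp at a local Error * when the caller
                         * passed in NULL or &error_fatal */
        cleanup_that_may_fail(f, errp);
        /* dereferencing *errp is safe only because of ERRP_GUARD() above */
        return *errp == NULL;
    }

With the guard in place, returning *errp == NULL works regardless of what
the caller passed in.
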
> +}
> +
> +static void multifd_send_cleanup_state(void)
> +{
> +    qemu_sem_destroy(&multifd_send_state->channels_ready);
> +    g_free(multifd_send_state->params);
> +    multifd_send_state->params = NULL;
> +    multifd_pages_clear(multifd_send_state->pages);
> +    multifd_send_state->pages = NULL;
> +    g_free(multifd_send_state);
> +    multifd_send_state = NULL;
> +}
> +
>  void multifd_save_cleanup(void)
>  {
>      int i;
> @@ -615,48 +670,20 @@ void multifd_save_cleanup(void)
>      if (!migrate_multifd()) {
>          return;
>      }
> +
>      multifd_send_terminate_threads();
> -    for (i = 0; i < migrate_multifd_channels(); i++) {
> -        MultiFDSendParams *p = &multifd_send_state->params[i];
>  
> -        if (p->running) {
> -            qemu_thread_join(&p->thread);
> -        }
> -    }
>      for (i = 0; i < migrate_multifd_channels(); i++) {
>          MultiFDSendParams *p = &multifd_send_state->params[i];
>          Error *local_err = NULL;
>  
> -        if (p->registered_yank) {
> -            migration_ioc_unregister_yank(p->c);
> -        }
> -        multifd_send_channel_destroy(p->c);
> -        p->c = NULL;
> -        qemu_mutex_destroy(&p->mutex);
> -        qemu_sem_destroy(&p->sem);
> -        qemu_sem_destroy(&p->sem_sync);
> -        g_free(p->name);
> -        p->name = NULL;
> -        multifd_pages_clear(p->pages);
> -        p->pages = NULL;
> -        p->packet_len = 0;
> -        g_free(p->packet);
> -        p->packet = NULL;
> -        g_free(p->iov);
> -        p->iov = NULL;
> -        multifd_send_state->ops->send_cleanup(p, &local_err);
> -        if (local_err) {
> +        if (!multifd_send_cleanup_channel(p, &local_err)) {
>              migrate_set_error(migrate_get_current(), local_err);
>              error_free(local_err);
>          }
>      }
> -    qemu_sem_destroy(&multifd_send_state->channels_ready);
> -    g_free(multifd_send_state->params);
> -    multifd_send_state->params = NULL;
> -    multifd_pages_clear(multifd_send_state->pages);
> -    multifd_send_state->pages = NULL;
> -    g_free(multifd_send_state);
> -    multifd_send_state = NULL;
> +
> +    multifd_send_cleanup_state();
>  }
>  
>  static int multifd_zero_copy_flush(QIOChannel *c)


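Just to restate the resulting shape of the cleanup path as I read it (a
rough sketch of the flow, not the literal code from the patch):

    void multifd_save_cleanup(void)
    {
        /* kick all sender threads out of idle/IO wait, then join them */
        multifd_send_terminate_threads();

        /* per-channel teardown, reporting any send_cleanup() error */
        for (i = 0; i < migrate_multifd_channels(); i++) {
            if (!multifd_send_cleanup_channel(&params[i], &local_err)) {
                migrate_set_error(migrate_get_current(), local_err);
                error_free(local_err);
            }
        }

        /* finally free the global send state */
        multifd_send_cleanup_state();
    }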

Thread overview: 49+ messages
2024-02-02 10:28 [PATCH v2 00/23] migration/multifd: Refactor ->send_prepare() and cleanups peterx
2024-02-02 10:28 ` [PATCH v2 01/23] migration/multifd: Drop stale comment for multifd zero copy peterx
2024-02-02 10:28 ` [PATCH v2 02/23] migration/multifd: multifd_send_kick_main() peterx
2024-02-02 10:28 ` [PATCH v2 03/23] migration/multifd: Drop MultiFDSendParams.quit, cleanup error paths peterx
2024-02-02 19:15   ` Fabiano Rosas
2024-02-02 10:28 ` [PATCH v2 04/23] migration/multifd: Postpone reset of MultiFDPages_t peterx
2024-02-02 10:28 ` [PATCH v2 05/23] migration/multifd: Drop MultiFDSendParams.normal[] array peterx
2024-02-09  0:06   ` [External] " Hao Xiang
2024-02-09 12:20     ` Fabiano Rosas
2024-02-14  2:16       ` Hao Xiang
2024-02-14 17:17         ` Fabiano Rosas
2024-02-02 10:28 ` [PATCH v2 06/23] migration/multifd: Separate SYNC request with normal jobs peterx
2024-02-02 19:21   ` Fabiano Rosas
2024-02-02 10:28 ` [PATCH v2 07/23] migration/multifd: Simplify locking in sender thread peterx
2024-02-02 19:23   ` Fabiano Rosas
2024-02-02 10:28 ` [PATCH v2 08/23] migration/multifd: Drop pages->num check " peterx
2024-02-02 10:28 ` [PATCH v2 09/23] migration/multifd: Rename p->num_packets and clean it up peterx
2024-02-02 10:28 ` [PATCH v2 10/23] migration/multifd: Move total_normal_pages accounting peterx
2024-02-02 10:28 ` [PATCH v2 11/23] migration/multifd: Move trace_multifd_send|recv() peterx
2024-02-02 10:28 ` [PATCH v2 12/23] migration/multifd: multifd_send_prepare_header() peterx
2024-02-02 10:28 ` [PATCH v2 13/23] migration/multifd: Move header prepare/fill into send_prepare() peterx
2024-02-02 19:26   ` Fabiano Rosas
2024-02-02 10:28 ` [PATCH v2 14/23] migration/multifd: Forbid spurious wakeups peterx
2024-02-02 10:28 ` [PATCH v2 15/23] migration/multifd: Split multifd_send_terminate_threads() peterx
2024-02-02 19:28   ` Fabiano Rosas
2024-02-02 10:28 ` [PATCH v2 16/23] migration/multifd: Change retval of multifd_queue_page() peterx
2024-02-02 19:29   ` Fabiano Rosas
2024-02-02 10:28 ` [PATCH v2 17/23] migration/multifd: Change retval of multifd_send_pages() peterx
2024-02-02 19:30   ` Fabiano Rosas
2024-02-02 10:28 ` [PATCH v2 18/23] migration/multifd: Rewrite multifd_queue_page() peterx
2024-02-02 20:47   ` Fabiano Rosas
2024-02-05  4:03     ` Peter Xu
2024-02-02 10:28 ` [PATCH v2 19/23] migration/multifd: Cleanup multifd_save_cleanup() peterx
2024-02-02 20:54   ` Fabiano Rosas [this message]
2024-02-05  4:25     ` Peter Xu
2024-02-02 10:28 ` [PATCH v2 20/23] migration/multifd: Cleanup multifd_load_cleanup() peterx
2024-02-02 20:55   ` Fabiano Rosas
2024-02-02 10:28 ` [PATCH v2 21/23] migration/multifd: Stick with send/recv on function names peterx
2024-02-02 21:03   ` Fabiano Rosas
2024-02-02 10:28 ` [PATCH v2 22/23] migration/multifd: Fix MultiFDSendParams.packet_num race peterx
2024-02-02 21:08   ` Fabiano Rosas
2024-02-05  4:05     ` Peter Xu
2024-02-02 10:28 ` [PATCH v2 23/23] migration/multifd: Optimize sender side to be lockless peterx
2024-02-02 21:34   ` Fabiano Rosas
2024-02-05  4:35     ` Peter Xu
2024-02-05 14:10       ` Fabiano Rosas
2024-02-05 14:24         ` Peter Xu
2024-02-05 17:59           ` Fabiano Rosas
2024-02-06  3:05 ` [PATCH v2 00/23] migration/multifd: Refactor ->send_prepare() and cleanups Peter Xu
