From: Peter Xu <peterx@redhat.com>
To: Fabiano Rosas <farosas@suse.de>
Cc: qemu-devel@nongnu.org,
	"Maciej S . Szmigiero" <mail@maciej.szmigiero.name>
Subject: Re: [RFC PATCH v2 9/9] migration/multifd: Replace multifd_send_state->pages with client data
Date: Mon, 22 Jul 2024 15:55:14 -0400
Message-ID: <Zp65Iqiz_rRleE_Q@x1n>
In-Reply-To: <20240722175914.24022-10-farosas@suse.de>

On Mon, Jul 22, 2024 at 02:59:14PM -0300, Fabiano Rosas wrote:
> Multifd currently has a simple scheduling mechanism that distributes
> work to the various channels by keeping storage space within each
> channel and an extra space that is given to the client. Each time the
> client fills the space with data and calls into multifd, that space is
> given to the next idle channel and a free storage space is taken from
> the channel and given to the client for the next iteration.
> 
> This means we always need (#multifd_channels + 1) memory slots to
> operate multifd.
> 
> This is fine, except that the presence of this one extra memory slot
> doesn't allow different types of payloads to be processed at the same
> time in different channels, i.e. the data type of
> multifd_send_state->pages needs to be the same as p->pages.
> 
> For each new data type different from MultiFDPages_t that is to be
> handled, this logic would need to be duplicated by adding new fields
> to multifd_send_state, to the channels and to multifd_send_pages().
> 
> Fix this situation by moving the extra slot into the client and using
> only the generic type MultiFDSendData in the multifd core.
> 
> Signed-off-by: Fabiano Rosas <farosas@suse.de>
> ---
>  migration/multifd.c | 58 ++++++++++++++++++++++++++++-----------------
>  migration/multifd.h |  2 ++
>  migration/ram.c     |  1 +
>  3 files changed, 39 insertions(+), 22 deletions(-)
> 
> diff --git a/migration/multifd.c b/migration/multifd.c
> index 4394ca6ade..0a85951d58 100644
> --- a/migration/multifd.c
> +++ b/migration/multifd.c
> @@ -49,7 +49,6 @@ typedef struct {
>  
>  struct {
>      MultiFDSendParams *params;
> -    MultiFDSendData *data;
>      /*
>       * Global number of generated multifd packets.
>       *
> @@ -97,6 +96,8 @@ struct {
>      MultiFDMethods *ops;
>  } *multifd_recv_state;
>  
> +/* TODO: move these to multifd-ram.c */
> +static MultiFDSendData *multifd_ram_send;
>  static size_t multifd_ram_payload_size(void)
>  {
>      uint32_t n = MULTIFD_PACKET_SIZE / qemu_target_page_size();
> @@ -118,6 +119,14 @@ static MultiFDSendData *multifd_send_data_alloc(void)
>      return g_malloc0(sizeof(MultiFDPayloadType) + max_payload_size);
>  }
>  
> +void multifd_ram_save_setup(void)
> +{
> +    uint32_t n = MULTIFD_PACKET_SIZE / qemu_target_page_size();
> +
> +    multifd_ram_send = multifd_send_data_alloc();
> +    multifd_ram_send->u.ram.allocated = n;

IIUC this line won't help, as the type is still NONE.  We may need to reset
this in multifd_pages_reset() even if it's a constant for the RAM code.
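Something along these lines is what I have in mind (a rough sketch only; the
exact body is my guess, and the fields are assumed from MultiFDPages_t as it
looks in this series):

    static void multifd_pages_reset(MultiFDPages_t *pages)
    {
        /*
         * 'allocated' is a constant for the RAM code, but since the
         * client and channel slots are swapped on every send, the slot
         * handed back to the client may never have had it set.
         * Re-initializing it on every reset keeps any slot usable no
         * matter where it came from.
         */
        pages->allocated = MULTIFD_PACKET_SIZE / qemu_target_page_size();
        pages->num = 0;
        pages->normal_num = 0;
        pages->block = NULL;
    }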

Side note: looks like multifd_ram_send is leaked across the patch.
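If so, a minimal counterpart to multifd_ram_save_setup() could plug it (the
name multifd_ram_save_cleanup() and its call site are my assumptions; nothing
in this series defines them):

    void multifd_ram_save_cleanup(void)
    {
        /* Free the client-side slot allocated in multifd_ram_save_setup(). */
        g_free(multifd_ram_send);
        multifd_ram_send = NULL;
    }

called from ram_save_cleanup(), or wherever the RAM side tears down, when
multifd is in use.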

> +}
> +
>  static bool multifd_use_packets(void)
>  {
>      return !migrate_mapped_ram();
> @@ -620,7 +629,7 @@ static void multifd_send_kick_main(MultiFDSendParams *p)
>   *
>   * Returns true if succeed, false otherwise.
>   */
> -static bool multifd_send_pages(void)
> +static bool multifd_send(MultiFDSendData **send_data)
>  {
>      int i;
>      static int next_channel;
> @@ -661,11 +670,16 @@ static bool multifd_send_pages(void)
>       */
>      smp_mb_acquire();
>  
> -    assert(!p->data->u.ram.num);
> +    assert(multifd_payload_empty(p->data));
>  
> -    tmp = multifd_send_state->data;
> -    multifd_send_state->data = p->data;
> +    /*
> +     * Swap the pointers. The channel gets the client data for
> +     * transferring and the client gets back an unused data slot.
> +     */
> +    tmp = *send_data;
> +    *send_data = p->data;
>      p->data = tmp;
> +
>      /*
>       * Making sure p->data is setup before marking pending_job=true. Pairs
>       * with the qatomic_load_acquire() in multifd_send_thread().
> @@ -697,7 +711,12 @@ bool multifd_queue_page(RAMBlock *block, ram_addr_t offset)
>      MultiFDPages_t *pages;
>  
>  retry:
> -    pages = &multifd_send_state->data->u.ram;
> +    pages = &multifd_ram_send->u.ram;
> +
> +    if (multifd_payload_empty(multifd_ram_send)) {
> +        multifd_pages_reset(pages);
> +        multifd_set_payload_type(multifd_ram_send, MULTIFD_PAYLOAD_RAM);
> +    }
>  
>      /* If the queue is empty, we can already enqueue now */
>      if (multifd_queue_empty(pages)) {
> @@ -715,7 +734,7 @@ retry:
>       * After flush, always retry.
>       */
>      if (pages->block != block || multifd_queue_full(pages)) {
> -        if (!multifd_send_pages()) {
> +        if (!multifd_send(&multifd_ram_send)) {
>              return false;
>          }
>          goto retry;
> @@ -833,6 +852,8 @@ static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp)
>      g_free(p->packet);
>      p->packet = NULL;
>      multifd_send_state->ops->send_cleanup(p, errp);
> +    g_free(p->data);
> +    p->data = NULL;

These two lines look superfluous.

>  
>      return *errp == NULL;
>  }
> @@ -845,8 +866,6 @@ static void multifd_send_cleanup_state(void)
>      qemu_sem_destroy(&multifd_send_state->channels_ready);
>      g_free(multifd_send_state->params);
>      multifd_send_state->params = NULL;
> -    g_free(multifd_send_state->data);
> -    multifd_send_state->data = NULL;
>      g_free(multifd_send_state);
>      multifd_send_state = NULL;
>  }
> @@ -895,15 +914,14 @@ int multifd_send_sync_main(void)
>  {
>      int i;
>      bool flush_zero_copy;
> -    MultiFDPages_t *pages;
>  
>      if (!migrate_multifd()) {
>          return 0;
>      }
> -    pages = &multifd_send_state->data->u.ram;
> -    if (pages->num) {
> -        if (!multifd_send_pages()) {
> -            error_report("%s: multifd_send_pages fail", __func__);
> +
> +    if (!multifd_payload_empty(multifd_ram_send)) {
> +        if (!multifd_send(&multifd_ram_send)) {
> +            error_report("%s: multifd_send fail", __func__);
>              return -1;
>          }
>      }
> @@ -977,13 +995,11 @@ static void *multifd_send_thread(void *opaque)
>  
>          /*
>           * Read pending_job flag before p->data.  Pairs with the
> -         * qatomic_store_release() in multifd_send_pages().
> +         * qatomic_store_release() in multifd_send().
>           */
>          if (qatomic_load_acquire(&p->pending_job)) {
> -            MultiFDPages_t *pages = &p->data->u.ram;
> -
>              p->iovs_num = 0;
> -            assert(pages->num);
> +            assert(!multifd_payload_empty(p->data));
>  
>              ret = multifd_send_state->ops->send_prepare(p, &local_err);
>              if (ret != 0) {
> @@ -1006,13 +1022,13 @@ static void *multifd_send_thread(void *opaque)
>              stat64_add(&mig_stats.multifd_bytes,
>                         p->next_packet_size + p->packet_len);
>  
> -            multifd_pages_reset(pages);
>              p->next_packet_size = 0;
> +            multifd_set_payload_type(p->data, MULTIFD_PAYLOAD_NONE);
>  
>              /*
>               * Making sure p->data is published before saying "we're
>               * free".  Pairs with the smp_mb_acquire() in
> -             * multifd_send_pages().
> +             * multifd_send().
>               */
>              qatomic_store_release(&p->pending_job, false);
>          } else {
> @@ -1206,8 +1222,6 @@ bool multifd_send_setup(void)
>      thread_count = migrate_multifd_channels();
>      multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
>      multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
> -    multifd_send_state->data = multifd_send_data_alloc();
> -    multifd_send_state->data->u.ram.allocated = page_count;
>      qemu_sem_init(&multifd_send_state->channels_created, 0);
>      qemu_sem_init(&multifd_send_state->channels_ready, 0);
>      qatomic_set(&multifd_send_state->exiting, 0);
> diff --git a/migration/multifd.h b/migration/multifd.h
> index c9c01579a0..04c000f435 100644
> --- a/migration/multifd.h
> +++ b/migration/multifd.h
> @@ -113,6 +113,8 @@ static inline void multifd_set_payload_type(MultiFDSendData *data,
>      data->type = type;
>  }
>  
> +void multifd_ram_save_setup(void);
> +
>  typedef struct {
>      /* Fields are only written at creating/deletion time */
>      /* No lock required for them, they are read only */
> diff --git a/migration/ram.c b/migration/ram.c
> index edec1a2d07..2b90396b3c 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -3058,6 +3058,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque, Error **errp)
>      migration_ops = g_malloc0(sizeof(MigrationOps));
>  
>      if (migrate_multifd()) {
> +        multifd_ram_save_setup();
>          migration_ops->ram_save_target_page = ram_save_target_page_multifd;
>      } else {
>          migration_ops->ram_save_target_page = ram_save_target_page_legacy;
> -- 
> 2.35.3
> 
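For reference, the ownership model this lands on can be sketched like so
(illustrative only, not the actual QEMU code; multifd_send_sketch() is a
made-up name):

    static bool multifd_send_sketch(MultiFDSendData **client_slot,
                                    MultiFDSendParams *p)
    {
        MultiFDSendData *tmp;

        /* The channel must be idle: its slot holds no pending payload. */
        assert(multifd_payload_empty(p->data));

        /* Hand the filled client slot to the channel... */
        tmp = *client_slot;
        *client_slot = p->data;
        /* ...and give the channel's free slot back to the client. */
        p->data = tmp;

        /*
         * One slot per channel plus the one the client owns: exactly
         * (#multifd_channels + 1) slots exist at any time.
         */
        return true;
    }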

-- 
Peter Xu



