From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
To: Juan Quintela <quintela@redhat.com>
Cc: Leonardo Bras <leobras@redhat.com>,
	qemu-devel@nongnu.org, Peter Xu <peterx@redhat.com>
Subject: Re: [PATCH v3 21/23] multifd: Support for zero pages transmission
Date: Thu, 2 Dec 2021 11:36:48 +0000	[thread overview]
Message-ID: <Yaiv0PazlhLdsf0O@work-vm> (raw)
In-Reply-To: <20211124100617.19786-22-quintela@redhat.com>

* Juan Quintela (quintela@redhat.com) wrote:
> This patch adds counters and similar.  Logic will be added on the
> following patch.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  migration/multifd.h    | 13 ++++++++++++-
>  migration/multifd.c    | 22 +++++++++++++++++++---
>  migration/trace-events |  2 +-
>  3 files changed, 32 insertions(+), 5 deletions(-)
> 
> diff --git a/migration/multifd.h b/migration/multifd.h
> index 39e55d7f05..973315b545 100644
> --- a/migration/multifd.h
> +++ b/migration/multifd.h
> @@ -49,7 +49,10 @@ typedef struct {
>      /* size of the next packet that contains pages */
>      uint32_t next_packet_size;
>      uint64_t packet_num;
> -    uint64_t unused[4];    /* Reserved for future use */
> +    /* zero pages */
> +    uint32_t zero_pages;

Had you considered just adding a flag, MULTIFD_FLAG_ZERO, to the packet?
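(Purely an untested sketch of one possible reading of that idea; the flag
name and bit value below are made up for illustration and don't exist in
the current code:

    /* hypothetical: a bit in the packet flags instead of a count field */
    #define MULTIFD_FLAG_ZERO (1 << 3)   /* bit value picked arbitrarily */

    /* send side: mark a packet that carries only zero pages */
    if (p->normal_num == 0) {
        p->flags |= MULTIFD_FLAG_ZERO;
    }

with the receiver testing be32_to_cpu(packet->flags) & MULTIFD_FLAG_ZERO
instead of reading a zero_pages count.)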

> +    uint32_t unused32[1];    /* Reserved for future use */
> +    uint64_t unused64[3];    /* Reserved for future use */
>      char ramblock[256];
>      uint64_t offset[];
>  } __attribute__((packed)) MultiFDPacket_t;
> @@ -117,6 +120,10 @@ typedef struct {
>      ram_addr_t *normal;
>      /* num of non zero pages */
>      uint32_t normal_num;
> +    /* Pages that are  zero */
> +    ram_addr_t *zero;
> +    /* num of zero pages */
> +    uint32_t zero_num;
>      /* used for compression methods */
>      void *data;
>  }  MultiFDSendParams;
> @@ -162,6 +169,10 @@ typedef struct {
>      ram_addr_t *normal;
>      /* num of non zero pages */
>      uint32_t normal_num;
> +    /* Pages that are  zero */
> +    ram_addr_t *zero;
> +    /* num of zero pages */
> +    uint32_t zero_num;
>      /* used for de-compression methods */
>      void *data;
>  } MultiFDRecvParams;
> diff --git a/migration/multifd.c b/migration/multifd.c
> index d1ab823f98..2e4dffd6c6 100644
> --- a/migration/multifd.c
> +++ b/migration/multifd.c
> @@ -265,6 +265,7 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)
>      packet->normal_pages = cpu_to_be32(p->normal_num);
>      packet->next_packet_size = cpu_to_be32(p->next_packet_size);
>      packet->packet_num = cpu_to_be64(p->packet_num);
> +    packet->zero_pages = cpu_to_be32(p->zero_num);
>  
>      if (p->pages->block) {
>          strncpy(packet->ramblock, p->pages->block->idstr, 256);
> @@ -327,7 +328,15 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
>      p->next_packet_size = be32_to_cpu(packet->next_packet_size);
>      p->packet_num = be64_to_cpu(packet->packet_num);
>  
> -    if (p->normal_num == 0) {
> +    p->zero_num = be32_to_cpu(packet->zero_pages);
> +    if (p->zero_num > packet->pages_alloc - p->normal_num) {
> +        error_setg(errp, "multifd: received packet "
> +                   "with %d zero pages and expected maximum pages are %d",
> +                   p->normal_num, packet->pages_alloc - p->zero_num) ;

Should that be p->zero_num, packet->pages_alloc - p->normal_num?
(and shouldn't the format be %u, since these are unsigned?)
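i.e. something like (untested, just to make the suggested swap concrete):

    error_setg(errp, "multifd: received packet "
               "with %u zero pages and expected maximum pages are %u",
               p->zero_num, packet->pages_alloc - p->normal_num);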

Dave

> +        return -1;
> +    }
> +
> +    if (p->normal_num == 0 && p->zero_num == 0) {
>          return 0;
>      }
>  
> @@ -550,6 +559,8 @@ void multifd_save_cleanup(void)
>          p->iov = NULL;
>          g_free(p->normal);
>          p->normal = NULL;
> +        g_free(p->zero);
> +        p->zero = NULL;
>          multifd_send_state->ops->send_cleanup(p, &local_err);
>          if (local_err) {
>              migrate_set_error(migrate_get_current(), local_err);
> @@ -638,6 +649,7 @@ static void *multifd_send_thread(void *opaque)
>              uint32_t flags = p->flags;
>              p->iovs_num = 1;
>              p->normal_num = 0;
> +            p->zero_num = 0;
>  
>              for (int i = 0; i < p->pages->num; i++) {
>                  p->normal[p->normal_num] = p->pages->offset[i];
> @@ -659,8 +671,8 @@ static void *multifd_send_thread(void *opaque)
>              p->pages->block = NULL;
>              qemu_mutex_unlock(&p->mutex);
>  
> -            trace_multifd_send(p->id, packet_num, p->normal_num, flags,
> -                               p->next_packet_size);
> +            trace_multifd_send(p->id, packet_num, p->normal_num, p->zero_num,
> +                               flags, p->next_packet_size);
>  
>              p->iov[0].iov_len = p->packet_len;
>              p->iov[0].iov_base = p->packet;
> @@ -910,6 +922,7 @@ int multifd_save_setup(Error **errp)
>          /* We need one extra place for the packet header */
>          p->iov = g_new0(struct iovec, page_count + 1);
>          p->normal = g_new0(ram_addr_t, page_count);
> +        p->zero = g_new0(ram_addr_t, page_count);
>          socket_send_channel_create(multifd_new_send_channel_async, p);
>      }
>  
> @@ -1011,6 +1024,8 @@ int multifd_load_cleanup(Error **errp)
>          p->iov = NULL;
>          g_free(p->normal);
>          p->normal = NULL;
> +        g_free(p->zero);
> +        p->zero = NULL;
>          multifd_recv_state->ops->recv_cleanup(p);
>      }
>      qemu_sem_destroy(&multifd_recv_state->sem_sync);
> @@ -1150,6 +1165,7 @@ int multifd_load_setup(Error **errp)
>          p->name = g_strdup_printf("multifdrecv_%d", i);
>          p->iov = g_new0(struct iovec, page_count);
>          p->normal = g_new0(ram_addr_t, page_count);
> +        p->zero = g_new0(ram_addr_t, page_count);
>      }
>  
>      for (i = 0; i < thread_count; i++) {
> diff --git a/migration/trace-events b/migration/trace-events
> index af8dee9af0..608decbdcc 100644
> --- a/migration/trace-events
> +++ b/migration/trace-events
> @@ -124,7 +124,7 @@ multifd_recv_sync_main_wait(uint8_t id) "channel %d"
>  multifd_recv_terminate_threads(bool error) "error %d"
>  multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t pages) "channel %d packets %" PRIu64 " pages %" PRIu64
>  multifd_recv_thread_start(uint8_t id) "%d"
> -multifd_send(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t flags, uint32_t next_packet_size) "channel %d packet_num %" PRIu64 " normal pages %d flags 0x%x next packet size %d"
> +multifd_send(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t zero, uint32_t flags, uint32_t next_packet_size) "channel %d packet_num %" PRIu64 " normal pages %d zero pages %d flags 0x%x next packet size %d"
>  multifd_send_error(uint8_t id) "channel %d"
>  multifd_send_sync_main(long packet_num) "packet num %ld"
>  multifd_send_sync_main_signal(uint8_t id) "channel %d"
> -- 
> 2.33.1
> 
-- 
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK



Thread overview: 72+ messages
2021-11-24 10:05 [PATCH v3 00/23] Migration: Transmit and detect zero pages in the multifd threads Juan Quintela
2021-11-24 10:05 ` [PATCH v3 01/23] multifd: Delete useless operation Juan Quintela
2021-11-24 18:48   ` Dr. David Alan Gilbert
2021-11-25  7:24     ` Juan Quintela
2021-11-25 19:46       ` Dr. David Alan Gilbert
2021-11-26  9:39         ` Juan Quintela
2021-11-24 10:05 ` [PATCH v3 02/23] migration: Never call twice qemu_target_page_size() Juan Quintela
2021-11-24 18:52   ` Dr. David Alan Gilbert
2021-11-25  7:26     ` Juan Quintela
2021-11-24 10:05 ` [PATCH v3 03/23] multifd: Rename used field to num Juan Quintela
2021-11-24 19:37   ` Dr. David Alan Gilbert
2021-11-25  7:28     ` Juan Quintela
2021-11-25 18:30       ` Dr. David Alan Gilbert
2021-12-13  9:34   ` Zheng Chuan via
2021-12-13 15:17     ` Dr. David Alan Gilbert
2021-11-24 10:05 ` [PATCH v3 04/23] multifd: Add missing documention Juan Quintela
2021-11-25 18:38   ` Dr. David Alan Gilbert
2021-11-26  9:34     ` Juan Quintela
2021-11-24 10:05 ` [PATCH v3 05/23] multifd: The variable is only used inside the loop Juan Quintela
2021-11-25 18:40   ` Dr. David Alan Gilbert
2021-11-24 10:06 ` [PATCH v3 06/23] multifd: remove used parameter from send_prepare() method Juan Quintela
2021-11-25 18:51   ` Dr. David Alan Gilbert
2021-11-24 10:06 ` [PATCH v3 07/23] multifd: remove used parameter from send_recv_pages() method Juan Quintela
2021-11-25 18:53   ` Dr. David Alan Gilbert
2021-11-24 10:06 ` [PATCH v3 08/23] multifd: Fill offset and block for reception Juan Quintela
2021-11-25 19:41   ` Dr. David Alan Gilbert
2021-11-24 10:06 ` [PATCH v3 09/23] multifd: Make zstd compression method not use iovs Juan Quintela
2021-11-29 17:16   ` Dr. David Alan Gilbert
2021-11-24 10:06 ` [PATCH v3 10/23] multifd: Make zlib " Juan Quintela
2021-11-29 17:30   ` Dr. David Alan Gilbert
2021-11-24 10:06 ` [PATCH v3 11/23] multifd: Move iov from pages to params Juan Quintela
2021-11-29 17:52   ` Dr. David Alan Gilbert
2021-11-24 10:06 ` [PATCH v3 12/23] multifd: Make zlib use iov's Juan Quintela
2021-11-29 18:01   ` Dr. David Alan Gilbert
2021-11-29 18:21     ` Juan Quintela
2021-11-24 10:06 ` [PATCH v3 13/23] multifd: Make zstd " Juan Quintela
2021-11-29 18:03   ` Dr. David Alan Gilbert
2021-11-24 10:06 ` [PATCH v3 14/23] multifd: Remove send_write() method Juan Quintela
2021-11-29 18:19   ` Dr. David Alan Gilbert
2021-11-24 10:06 ` [PATCH v3 15/23] multifd: Use a single writev on the send side Juan Quintela
2021-11-29 18:35   ` Dr. David Alan Gilbert
2021-11-24 10:06 ` [PATCH v3 16/23] multifd: Unfold "used" variable by its value Juan Quintela
2021-11-30 10:45   ` Dr. David Alan Gilbert
2021-11-24 10:06 ` [PATCH v3 17/23] multifd: Use normal pages array on the send side Juan Quintela
2021-11-30 10:50   ` Dr. David Alan Gilbert
2021-11-30 12:01     ` Juan Quintela
2021-12-01 10:59       ` Dr. David Alan Gilbert
2021-11-24 10:06 ` [PATCH v3 18/23] multifd: Use normal pages array on the recv side Juan Quintela
2021-12-07  7:11   ` Peter Xu
2021-12-10 10:41     ` Juan Quintela
2021-11-24 10:06 ` [PATCH v3 19/23] multifd: recv side only needs the RAMBlock host address Juan Quintela
2021-12-01 18:56   ` Dr. David Alan Gilbert
2021-11-24 10:06 ` [PATCH v3 20/23] multifd: Rename pages_used to normal_pages Juan Quintela
2021-12-01 19:00   ` Dr. David Alan Gilbert
2021-11-24 10:06 ` [PATCH v3 21/23] multifd: Support for zero pages transmission Juan Quintela
2021-12-02 11:36   ` Dr. David Alan Gilbert [this message]
2021-12-02 12:08     ` Juan Quintela
2021-12-02 16:16       ` Dr. David Alan Gilbert
2021-12-02 16:19         ` Juan Quintela
2021-12-02 16:46           ` Dr. David Alan Gilbert
2021-12-02 16:52             ` Juan Quintela
2021-11-24 10:06 ` [PATCH v3 22/23] multifd: Zero " Juan Quintela
2021-12-02 16:42   ` Dr. David Alan Gilbert
2021-12-02 16:49     ` Juan Quintela
2021-11-24 10:06 ` [PATCH v3 23/23] migration: Use multifd before we check for the zero page Juan Quintela
2021-12-02 17:11   ` Dr. David Alan Gilbert
2021-12-02 17:38     ` Juan Quintela
2021-12-02 17:49       ` Dr. David Alan Gilbert
2021-12-07  7:30       ` Peter Xu
2021-12-13  9:03         ` Juan Quintela
2021-12-15  1:39           ` Peter Xu
2021-11-24 10:24 ` [PATCH v3 00/23] Migration: Transmit and detect zero pages in the multifd threads Peter Xu
