From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
To: Juan Quintela <quintela@redhat.com>
Cc: qemu-devel@nongnu.org,
"Marcel Apfelbaum" <marcel.apfelbaum@gmail.com>,
"Philippe Mathieu-Daudé" <f4bug@amsat.org>,
"Yanan Wang" <wangyanan55@huawei.com>,
"Eduardo Habkost" <eduardo@habkost.net>,
"Peter Xu" <peterx@redhat.com>,
"Leonardo Bras" <leobras@redhat.com>
Subject: Re: [PATCH v7 11/13] multifd: Support for zero pages transmission
Date: Mon, 18 Jul 2022 18:03:20 +0100 [thread overview]
Message-ID: <YtWSWDKRP1C9baHx@work-vm> (raw)
In-Reply-To: <20220531104318.7494-12-quintela@redhat.com>
* Juan Quintela (quintela@redhat.com) wrote:
> This patch adds counters and similar. Logic will be added on the
> following patch.
Yeh, I think so; the duplicate being the counter for zero pages still
throws me.
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
>
> ---
>
> Added counters for duplicated/non duplicated pages.
> Removed reviewed by from David.
> Add total_zero_pages
> ---
> migration/multifd.h | 17 ++++++++++++++++-
> migration/multifd.c | 36 +++++++++++++++++++++++++++++-------
> migration/ram.c | 2 --
> migration/trace-events | 8 ++++----
> 4 files changed, 49 insertions(+), 14 deletions(-)
>
> diff --git a/migration/multifd.h b/migration/multifd.h
> index d48597a1ea..c36d7ff876 100644
> --- a/migration/multifd.h
> +++ b/migration/multifd.h
> @@ -47,7 +47,10 @@ typedef struct {
> /* size of the next packet that contains pages */
> uint32_t next_packet_size;
> uint64_t packet_num;
> - uint64_t unused[4]; /* Reserved for future use */
> + /* zero pages */
> + uint32_t zero_pages;
> + uint32_t unused32[1]; /* Reserved for future use */
> + uint64_t unused64[3]; /* Reserved for future use */
> char ramblock[256];
> uint64_t offset[];
> } __attribute__((packed)) MultiFDPacket_t;
> @@ -127,6 +130,8 @@ typedef struct {
> uint64_t num_packets;
> /* non zero pages sent through this channel */
> uint64_t total_normal_pages;
> + /* zero pages sent through this channel */
> + uint64_t total_zero_pages;
> /* buffers to send */
> struct iovec *iov;
> /* number of iovs used */
> @@ -135,6 +140,10 @@ typedef struct {
> ram_addr_t *normal;
> /* num of non zero pages */
> uint32_t normal_num;
> + /* Pages that are zero */
> + ram_addr_t *zero;
> + /* num of zero pages */
> + uint32_t zero_num;
> /* used for compression methods */
> void *data;
> } MultiFDSendParams;
> @@ -184,12 +193,18 @@ typedef struct {
> uint8_t *host;
> /* non zero pages recv through this channel */
> uint64_t total_normal_pages;
> + /* zero pages recv through this channel */
> + uint64_t total_zero_pages;
> /* buffers to recv */
> struct iovec *iov;
> /* Pages that are not zero */
> ram_addr_t *normal;
> /* num of non zero pages */
> uint32_t normal_num;
> + /* Pages that are zero */
> + ram_addr_t *zero;
> + /* num of zero pages */
> + uint32_t zero_num;
> /* used for de-compression methods */
> void *data;
> } MultiFDRecvParams;
> diff --git a/migration/multifd.c b/migration/multifd.c
> index 056599cbaf..0f7c27c08e 100644
> --- a/migration/multifd.c
> +++ b/migration/multifd.c
> @@ -263,6 +263,7 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)
> packet->normal_pages = cpu_to_be32(p->normal_num);
> packet->next_packet_size = cpu_to_be32(p->next_packet_size);
> packet->packet_num = cpu_to_be64(p->packet_num);
> + packet->zero_pages = cpu_to_be32(p->zero_num);
>
> if (p->pages->block) {
> strncpy(packet->ramblock, p->pages->block->idstr, 256);
> @@ -323,7 +324,15 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
> p->next_packet_size = be32_to_cpu(packet->next_packet_size);
> p->packet_num = be64_to_cpu(packet->packet_num);
>
> - if (p->normal_num == 0) {
> + p->zero_num = be32_to_cpu(packet->zero_pages);
> + if (p->zero_num > packet->pages_alloc - p->normal_num) {
> + error_setg(errp, "multifd: received packet "
> + "with %u zero pages and expected maximum pages are %u",
> + p->zero_num, packet->pages_alloc - p->normal_num) ;
> + return -1;
> + }
> +
> + if (p->normal_num == 0 && p->zero_num == 0) {
> return 0;
> }
>
> @@ -432,6 +441,8 @@ static int multifd_send_pages(QEMUFile *f)
> ram_counters.multifd_bytes += p->sent_bytes;
> qemu_file_update_transfer(f, p->sent_bytes);
> p->sent_bytes = 0;
> + ram_counters.normal += p->normal_num;
> + ram_counters.duplicate += p->zero_num;
> qemu_mutex_unlock(&p->mutex);
> qemu_sem_post(&p->sem);
>
> @@ -545,6 +556,8 @@ void multifd_save_cleanup(void)
> p->iov = NULL;
> g_free(p->normal);
> p->normal = NULL;
> + g_free(p->zero);
> + p->zero = NULL;
> multifd_send_state->ops->send_cleanup(p, &local_err);
> if (local_err) {
> migrate_set_error(migrate_get_current(), local_err);
> @@ -664,6 +677,7 @@ static void *multifd_send_thread(void *opaque)
> qemu_mutex_unlock(&p->mutex);
>
> p->normal_num = 0;
> + p->zero_num = 0;
>
> if (use_zero_copy_send) {
> p->iovs_num = 0;
> @@ -685,8 +699,8 @@ static void *multifd_send_thread(void *opaque)
> }
> multifd_send_fill_packet(p);
>
> - trace_multifd_send(p->id, packet_num, p->normal_num, p->flags,
> - p->next_packet_size);
> + trace_multifd_send(p->id, packet_num, p->normal_num, p->zero_num,
> + p->flags, p->next_packet_size);
>
> if (use_zero_copy_send) {
> /* Send header first, without zerocopy */
> @@ -710,6 +724,7 @@ static void *multifd_send_thread(void *opaque)
> qemu_mutex_lock(&p->mutex);
> p->num_packets++;
> p->total_normal_pages += p->normal_num;
> + p->total_zero_pages += p->zero_num;
> p->pages->num = 0;
> p->pages->block = NULL;
> p->sent_bytes += p->packet_len;;
> @@ -751,7 +766,8 @@ out:
> qemu_mutex_unlock(&p->mutex);
>
> rcu_unregister_thread();
> - trace_multifd_send_thread_end(p->id, p->num_packets, p->total_normal_pages);
> + trace_multifd_send_thread_end(p->id, p->num_packets, p->total_normal_pages,
> + p->total_zero_pages);
>
> return NULL;
> }
> @@ -938,6 +954,7 @@ int multifd_save_setup(Error **errp)
> p->normal = g_new0(ram_addr_t, page_count);
> p->page_size = qemu_target_page_size();
> p->page_count = page_count;
> + p->zero = g_new0(ram_addr_t, page_count);
>
> if (migrate_use_zero_copy_send()) {
> p->write_flags = QIO_CHANNEL_WRITE_FLAG_ZERO_COPY;
> @@ -1046,6 +1063,8 @@ int multifd_load_cleanup(Error **errp)
> p->iov = NULL;
> g_free(p->normal);
> p->normal = NULL;
> + g_free(p->zero);
> + p->zero = NULL;
> multifd_recv_state->ops->recv_cleanup(p);
> }
> qemu_sem_destroy(&multifd_recv_state->sem_sync);
> @@ -1116,13 +1135,14 @@ static void *multifd_recv_thread(void *opaque)
> break;
> }
>
> - trace_multifd_recv(p->id, p->packet_num, p->normal_num, p->flags,
> - p->next_packet_size);
> + trace_multifd_recv(p->id, p->packet_num, p->normal_num, p->zero_num,
> + p->flags, p->next_packet_size);
> sync_needed = p->flags & MULTIFD_FLAG_SYNC;
> /* recv methods don't know how to handle the SYNC flag */
> p->flags &= ~MULTIFD_FLAG_SYNC;
> p->num_packets++;
> p->total_normal_pages += p->normal_num;
> + p->total_normal_pages += p->zero_num;
> qemu_mutex_unlock(&p->mutex);
>
> if (p->normal_num) {
> @@ -1147,7 +1167,8 @@ static void *multifd_recv_thread(void *opaque)
> qemu_mutex_unlock(&p->mutex);
>
> rcu_unregister_thread();
> - trace_multifd_recv_thread_end(p->id, p->num_packets, p->total_normal_pages);
> + trace_multifd_recv_thread_end(p->id, p->num_packets, p->total_normal_pages,
> + p->total_zero_pages);
>
> return NULL;
> }
> @@ -1187,6 +1208,7 @@ int multifd_load_setup(Error **errp)
> p->normal = g_new0(ram_addr_t, page_count);
> p->page_count = page_count;
> p->page_size = qemu_target_page_size();
> + p->zero = g_new0(ram_addr_t, page_count);
> }
>
> for (i = 0; i < thread_count; i++) {
> diff --git a/migration/ram.c b/migration/ram.c
> index 3b2af07341..7ceef7976b 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -1356,8 +1356,6 @@ static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
> if (multifd_queue_page(rs->f, block, offset) < 0) {
> return -1;
> }
> - ram_counters.normal++;
> -
> return 1;
> }
>
> diff --git a/migration/trace-events b/migration/trace-events
> index 1aec580e92..d70e89dbb9 100644
> --- a/migration/trace-events
> +++ b/migration/trace-events
> @@ -114,21 +114,21 @@ unqueue_page(char *block, uint64_t offset, bool dirty) "ramblock '%s' offset 0x%
>
> # multifd.c
> multifd_new_send_channel_async(uint8_t id) "channel %u"
> -multifd_recv(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " pages %u flags 0x%x next packet size %u"
> +multifd_recv(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t zero, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " normal pages %u zero pages %u flags 0x%x next packet size %u"
> multifd_recv_new_channel(uint8_t id) "channel %u"
> multifd_recv_sync_main(long packet_num) "packet num %ld"
> multifd_recv_sync_main_signal(uint8_t id) "channel %u"
> multifd_recv_sync_main_wait(uint8_t id) "channel %u"
> multifd_recv_terminate_threads(bool error) "error %d"
> -multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t pages) "channel %u packets %" PRIu64 " pages %" PRIu64
> +multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages, uint64_t zero_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64 " zero pages %" PRIu64
> multifd_recv_thread_start(uint8_t id) "%u"
> -multifd_send(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " normal pages %u flags 0x%x next packet size %u"
> +multifd_send(uint8_t id, uint64_t packet_num, uint32_t normalpages, uint32_t zero_pages, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " normal pages %u zero pages %u flags 0x%x next packet size %u"
> multifd_send_error(uint8_t id) "channel %u"
> multifd_send_sync_main(long packet_num) "packet num %ld"
> multifd_send_sync_main_signal(uint8_t id) "channel %u"
> multifd_send_sync_main_wait(uint8_t id) "channel %u"
> multifd_send_terminate_threads(bool error) "error %d"
> -multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64
> +multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages, uint64_t zero_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64 " zero pages %" PRIu64
> multifd_send_thread_start(uint8_t id) "%u"
> multifd_tls_outgoing_handshake_start(void *ioc, void *tioc, const char *hostname) "ioc=%p tioc=%p hostname=%s"
> multifd_tls_outgoing_handshake_error(void *ioc, const char *err) "ioc=%p err=%s"
> --
> 2.35.3
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
next prev parent reply other threads:[~2022-07-18 17:05 UTC|newest]
Thread overview: 30+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-05-31 10:43 [PATCH v7 00/13] Migration: Transmit and detect zero pages in the multifd threads Juan Quintela
2022-05-31 10:43 ` [PATCH v7 01/13] multifd: Document the locking of MultiFD{Send/Recv}Params Juan Quintela
2022-06-08 8:49 ` Zhang, Chen
2022-07-19 14:31 ` Dr. David Alan Gilbert
2022-07-14 9:41 ` Dr. David Alan Gilbert
2022-05-31 10:43 ` [PATCH v7 02/13] multifd: Create page_size fields into both MultiFD{Recv, Send}Params Juan Quintela
2022-07-14 9:58 ` [PATCH v7 02/13] multifd: Create page_size fields into both MultiFD{Recv,Send}Params Dr. David Alan Gilbert
2022-07-19 14:34 ` [PATCH v7 02/13] multifd: Create page_size fields into both MultiFD{Recv, Send}Params Dr. David Alan Gilbert
2022-05-31 10:43 ` [PATCH v7 03/13] multifd: Create page_count " Juan Quintela
2022-07-14 10:19 ` [PATCH v7 03/13] multifd: Create page_count fields into both MultiFD{Recv,Send}Params Dr. David Alan Gilbert
2022-05-31 10:43 ` [PATCH v7 04/13] migration: Export ram_transferred_ram() Juan Quintela
2022-06-15 16:20 ` Dr. David Alan Gilbert
2022-07-14 12:43 ` Claudio Fontana
2022-05-31 10:43 ` [PATCH v7 05/13] multifd: Count the number of bytes sent correctly Juan Quintela
2022-06-08 8:50 ` Zhang, Chen
2022-07-14 12:33 ` Dr. David Alan Gilbert
2022-05-31 10:43 ` [PATCH v7 06/13] migration: Make ram_save_target_page() a pointer Juan Quintela
2022-05-31 10:43 ` [PATCH v7 07/13] multifd: Make flags field thread local Juan Quintela
2022-07-18 11:59 ` Dr. David Alan Gilbert
2022-05-31 10:43 ` [PATCH v7 08/13] multifd: Prepare to send a packet without the mutex held Juan Quintela
2022-07-18 12:30 ` Dr. David Alan Gilbert
2022-05-31 10:43 ` [PATCH v7 09/13] multifd: Add property to enable/disable zero_page Juan Quintela
2022-05-31 10:43 ` [PATCH v7 10/13] migration: Export ram_release_page() Juan Quintela
2022-07-13 17:57 ` Dr. David Alan Gilbert
2022-05-31 10:43 ` [PATCH v7 11/13] multifd: Support for zero pages transmission Juan Quintela
2022-07-18 17:03 ` Dr. David Alan Gilbert [this message]
2022-05-31 10:43 ` [PATCH v7 12/13] multifd: Zero " Juan Quintela
2022-07-18 13:19 ` Dr. David Alan Gilbert
2022-05-31 10:43 ` [PATCH v7 13/13] migration: Use multifd before we check for the zero page Juan Quintela
2022-07-18 13:34 ` Dr. David Alan Gilbert
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=YtWSWDKRP1C9baHx@work-vm \
--to=dgilbert@redhat.com \
--cc=eduardo@habkost.net \
--cc=f4bug@amsat.org \
--cc=leobras@redhat.com \
--cc=marcel.apfelbaum@gmail.com \
--cc=peterx@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=quintela@redhat.com \
--cc=wangyanan55@huawei.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).