From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
To: Juan Quintela <quintela@redhat.com>
Cc: "Eduardo Habkost" <eduardo@habkost.net>,
qemu-devel@nongnu.org, "Peter Xu" <peterx@redhat.com>,
"Philippe Mathieu-Daudé" <f4bug@amsat.org>,
"Yanan Wang" <wangyanan55@huawei.com>,
"Leonardo Bras" <leobras@redhat.com>
Subject: Re: [PATCH v4 08/23] multifd: Move iov from pages to params
Date: Tue, 18 Jan 2022 17:56:57 +0000 [thread overview]
Message-ID: <Yeb/aaVGA+gphb/q@work-vm> (raw)
In-Reply-To: <20220111130024.5392-9-quintela@redhat.com>
* Juan Quintela (quintela@redhat.com) wrote:
> This will allow us to reduce the number of system calls on the next patch.
>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
> migration/multifd.h | 8 ++++++--
> migration/multifd.c | 34 ++++++++++++++++++++++++----------
> 2 files changed, 30 insertions(+), 12 deletions(-)
>
> diff --git a/migration/multifd.h b/migration/multifd.h
> index e57adc783b..c3f18af364 100644
> --- a/migration/multifd.h
> +++ b/migration/multifd.h
> @@ -62,8 +62,6 @@ typedef struct {
> uint64_t packet_num;
> /* offset of each page */
> ram_addr_t *offset;
> - /* pointer to each page */
> - struct iovec *iov;
> RAMBlock *block;
> } MultiFDPages_t;
>
> @@ -110,6 +108,10 @@ typedef struct {
> uint64_t num_pages;
> /* syncs main thread and channels */
> QemuSemaphore sem_sync;
> + /* buffers to send */
> + struct iovec *iov;
> + /* number of iovs used */
> + uint32_t iovs_num;
> /* used for compression methods */
> void *data;
> } MultiFDSendParams;
> @@ -149,6 +151,8 @@ typedef struct {
> uint64_t num_pages;
> /* syncs main thread and channels */
> QemuSemaphore sem_sync;
> + /* buffers to recv */
> + struct iovec *iov;
Why is there an asymmetry between the send and recv sides, where the send
side has an iovs_num field and the recv side doesn't?
Dave
> /* used for de-compression methods */
> void *data;
> } MultiFDRecvParams;
> diff --git a/migration/multifd.c b/migration/multifd.c
> index 4d62850258..f75bd3c188 100644
> --- a/migration/multifd.c
> +++ b/migration/multifd.c
> @@ -86,7 +86,16 @@ static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp)
> */
> static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
> {
> - p->next_packet_size = p->pages->num * qemu_target_page_size();
> + MultiFDPages_t *pages = p->pages;
> + size_t page_size = qemu_target_page_size();
> +
> + for (int i = 0; i < p->pages->num; i++) {
> + p->iov[p->iovs_num].iov_base = pages->block->host + pages->offset[i];
> + p->iov[p->iovs_num].iov_len = page_size;
> + p->iovs_num++;
> + }
> +
> + p->next_packet_size = p->pages->num * page_size;
> p->flags |= MULTIFD_FLAG_NOCOMP;
> return 0;
> }
> @@ -104,7 +113,7 @@ static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
> */
> static int nocomp_send_write(MultiFDSendParams *p, uint32_t used, Error **errp)
> {
> - return qio_channel_writev_all(p->c, p->pages->iov, used, errp);
> + return qio_channel_writev_all(p->c, p->iov, p->iovs_num, errp);
> }
>
> /**
> @@ -146,13 +155,18 @@ static void nocomp_recv_cleanup(MultiFDRecvParams *p)
> static int nocomp_recv_pages(MultiFDRecvParams *p, Error **errp)
> {
> uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
> + size_t page_size = qemu_target_page_size();
>
> if (flags != MULTIFD_FLAG_NOCOMP) {
> error_setg(errp, "multifd %u: flags received %x flags expected %x",
> p->id, flags, MULTIFD_FLAG_NOCOMP);
> return -1;
> }
> - return qio_channel_readv_all(p->c, p->pages->iov, p->pages->num, errp);
> + for (int i = 0; i < p->pages->num; i++) {
> + p->iov[i].iov_base = p->pages->block->host + p->pages->offset[i];
> + p->iov[i].iov_len = page_size;
> + }
> + return qio_channel_readv_all(p->c, p->iov, p->pages->num, errp);
> }
>
> static MultiFDMethods multifd_nocomp_ops = {
> @@ -242,7 +256,6 @@ static MultiFDPages_t *multifd_pages_init(size_t size)
> MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);
>
> pages->allocated = size;
> - pages->iov = g_new0(struct iovec, size);
> pages->offset = g_new0(ram_addr_t, size);
>
> return pages;
> @@ -254,8 +267,6 @@ static void multifd_pages_clear(MultiFDPages_t *pages)
> pages->allocated = 0;
> pages->packet_num = 0;
> pages->block = NULL;
> - g_free(pages->iov);
> - pages->iov = NULL;
> g_free(pages->offset);
> pages->offset = NULL;
> g_free(pages);
> @@ -365,8 +376,6 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
> return -1;
> }
> p->pages->offset[i] = offset;
> - p->pages->iov[i].iov_base = block->host + offset;
> - p->pages->iov[i].iov_len = page_size;
> }
>
> return 0;
> @@ -470,8 +479,6 @@ int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
>
> if (pages->block == block) {
> pages->offset[pages->num] = offset;
> - pages->iov[pages->num].iov_base = block->host + offset;
> - pages->iov[pages->num].iov_len = qemu_target_page_size();
> pages->num++;
>
> if (pages->num < pages->allocated) {
> @@ -567,6 +574,8 @@ void multifd_save_cleanup(void)
> p->packet_len = 0;
> g_free(p->packet);
> p->packet = NULL;
> + g_free(p->iov);
> + p->iov = NULL;
> multifd_send_state->ops->send_cleanup(p, &local_err);
> if (local_err) {
> migrate_set_error(migrate_get_current(), local_err);
> @@ -654,6 +663,7 @@ static void *multifd_send_thread(void *opaque)
> uint32_t used = p->pages->num;
> uint64_t packet_num = p->packet_num;
> uint32_t flags = p->flags;
> + p->iovs_num = 0;
>
> if (used) {
> ret = multifd_send_state->ops->send_prepare(p, &local_err);
> @@ -922,6 +932,7 @@ int multifd_save_setup(Error **errp)
> p->packet->version = cpu_to_be32(MULTIFD_VERSION);
> p->name = g_strdup_printf("multifdsend_%d", i);
> p->tls_hostname = g_strdup(s->hostname);
> + p->iov = g_new0(struct iovec, page_count);
> socket_send_channel_create(multifd_new_send_channel_async, p);
> }
>
> @@ -1021,6 +1032,8 @@ int multifd_load_cleanup(Error **errp)
> p->packet_len = 0;
> g_free(p->packet);
> p->packet = NULL;
> + g_free(p->iov);
> + p->iov = NULL;
> multifd_recv_state->ops->recv_cleanup(p);
> }
> qemu_sem_destroy(&multifd_recv_state->sem_sync);
> @@ -1161,6 +1174,7 @@ int multifd_load_setup(Error **errp)
> + sizeof(uint64_t) * page_count;
> p->packet = g_malloc0(p->packet_len);
> p->name = g_strdup_printf("multifdrecv_%d", i);
> + p->iov = g_new0(struct iovec, page_count);
> }
>
> for (i = 0; i < thread_count; i++) {
> --
> 2.34.1
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
next prev parent reply other threads:[~2022-01-18 18:07 UTC|newest]
Thread overview: 44+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-01-11 13:00 [PATCH v4 00/23] Migration: Transmit and detect zero pages in the multifd threads Juan Quintela
2022-01-11 13:00 ` [PATCH v4 01/23] migration: All this fields are unsigned Juan Quintela
2022-01-11 13:00 ` [PATCH v4 02/23] migration: We only need last_stage in two places Juan Quintela
2022-01-11 13:00 ` [PATCH v4 03/23] migration: ram_release_pages() always receive 1 page as argument Juan Quintela
2022-01-11 13:00 ` [PATCH v4 04/23] migration: Remove masking for compression Juan Quintela
2022-01-11 19:56 ` Dr. David Alan Gilbert
2022-01-11 13:00 ` [PATCH v4 05/23] migration: simplify do_compress_ram_page Juan Quintela
2022-01-11 20:00 ` Dr. David Alan Gilbert
2022-01-11 13:00 ` [PATCH v4 06/23] migration: Move ram_release_pages() call to save_zero_page_to_file() Juan Quintela
2022-01-11 13:00 ` [PATCH v4 07/23] multifd: Use proper maximum compression values Juan Quintela
2022-01-13 13:27 ` Dr. David Alan Gilbert
2022-01-11 13:00 ` [PATCH v4 08/23] multifd: Move iov from pages to params Juan Quintela
2022-01-18 17:56 ` Dr. David Alan Gilbert [this message]
2022-01-25 9:31 ` Juan Quintela
2022-01-27 15:03 ` Dr. David Alan Gilbert
2022-01-11 13:00 ` [PATCH v4 09/23] multifd: Make zlib use iov's Juan Quintela
2022-01-11 13:00 ` [PATCH v4 10/23] multifd: Make zstd " Juan Quintela
2022-01-11 13:00 ` [PATCH v4 11/23] multifd: Remove send_write() method Juan Quintela
2022-01-18 18:22 ` Dr. David Alan Gilbert
2022-01-11 13:00 ` [PATCH v4 12/23] multifd: Use a single writev on the send side Juan Quintela
2022-01-11 13:00 ` [PATCH v4 13/23] multifd: Unfold "used" variable by its value Juan Quintela
2022-01-11 13:00 ` [PATCH v4 14/23] multifd: Use normal pages array on the send side Juan Quintela
2022-01-18 18:41 ` Dr. David Alan Gilbert
2022-01-11 13:00 ` [PATCH v4 15/23] multifd: Use normal pages array on the recv side Juan Quintela
2022-01-18 19:29 ` Dr. David Alan Gilbert
2022-01-11 13:00 ` [PATCH v4 16/23] multifd: recv side only needs the RAMBlock host address Juan Quintela
2022-01-11 13:00 ` [PATCH v4 17/23] multifd: Rename pages_used to normal_pages Juan Quintela
2022-01-11 13:00 ` [PATCH v4 18/23] migration: Make ram_save_target_page() a pointer Juan Quintela
2022-01-18 19:43 ` Dr. David Alan Gilbert
2022-01-11 13:00 ` [PATCH v4 19/23] multifd: Add property to enable/disable zero_page Juan Quintela
2022-01-18 19:38 ` Dr. David Alan Gilbert
2022-01-11 13:00 ` [PATCH v4 20/23] multifd: Support for zero pages transmission Juan Quintela
2022-01-18 19:49 ` Dr. David Alan Gilbert
2022-01-11 13:00 ` [PATCH v4 21/23] multifd: Zero " Juan Quintela
2022-01-18 19:55 ` Dr. David Alan Gilbert
2022-01-25 9:42 ` Juan Quintela
2022-01-27 15:13 ` Dr. David Alan Gilbert
2022-01-27 15:26 ` Juan Quintela
2022-01-11 13:00 ` [PATCH v4 22/23] migration: Use multifd before we check for the zero page Juan Quintela
2022-01-18 20:01 ` Dr. David Alan Gilbert
2022-01-25 9:45 ` Juan Quintela
2022-01-11 13:00 ` [PATCH v4 23/23] migration: Export ram_release_page() Juan Quintela
2022-01-18 20:02 ` Dr. David Alan Gilbert
2022-01-25 10:02 ` Juan Quintela
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=Yeb/aaVGA+gphb/q@work-vm \
--to=dgilbert@redhat.com \
--cc=eduardo@habkost.net \
--cc=f4bug@amsat.org \
--cc=leobras@redhat.com \
--cc=peterx@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=quintela@redhat.com \
--cc=wangyanan55@huawei.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).