qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
To: Juan Quintela <quintela@redhat.com>
Cc: qemu-devel@nongnu.org, lvivier@redhat.com, peterx@redhat.com
Subject: Re: [Qemu-devel] [PATCH v9 10/12] migration: Transfer pages over new channels
Date: Tue, 17 Oct 2017 15:18:55 +0100	[thread overview]
Message-ID: <20171017141854.GC2326@work-vm> (raw)
In-Reply-To: <20171004104636.7963-11-quintela@redhat.com>

* Juan Quintela (quintela@redhat.com) wrote:
> We switch from sending the page number to sending the real pages.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>

I think this is OK if squashed with the 'test' patch to remove
the test stuff.

Some minor comments below.

> --
> 
> Remove the HACK bit, now we have the function that calculates the size
> of a page exported.
> Rename multifd_pages{_now}, to sent pages
> Remove multifd pages field, it is the same than normal pages
> ---
>  migration/migration.c |  7 ++++++-
>  migration/ram.c       | 39 +++++++++++----------------------------
>  2 files changed, 17 insertions(+), 29 deletions(-)
> 
> diff --git a/migration/migration.c b/migration/migration.c
> index 54ef095d82..1bd87a4e44 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -2085,6 +2085,7 @@ static void *migration_thread(void *opaque)
>       */
>      int64_t threshold_size = 0;
>      int64_t qemu_file_bytes = 0;
> +    int64_t sent_pages = 0;
>      int64_t start_time = initial_time;
>      int64_t end_time;
>      bool old_vm_running = false;
> @@ -2173,8 +2174,11 @@ static void *migration_thread(void *opaque)
>          current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
>          if (current_time >= initial_time + BUFFER_DELAY) {
>              uint64_t qemu_file_bytes_now = qemu_ftell(s->to_dst_file);
> +            uint64_t sent_pages_now = ram_counters.normal;
>              uint64_t transferred_bytes =
> -                qemu_file_bytes_now - qemu_file_bytes;
> +                (qemu_file_bytes_now - qemu_file_bytes) +
> +                (sent_pages_now - sent_pages) *
> +                qemu_target_page_size();

This could do with commenting to explain the difference between the
two sets of counts.

>              uint64_t time_spent = current_time - initial_time;
>              double bandwidth = (double)transferred_bytes / time_spent;
>              threshold_size = bandwidth * s->parameters.downtime_limit;
> @@ -2194,6 +2198,7 @@ static void *migration_thread(void *opaque)
>              qemu_file_reset_rate_limit(s->to_dst_file);
>              initial_time = current_time;
>              qemu_file_bytes = qemu_file_bytes_now;
> +            sent_pages = sent_pages_now;
>          }
>          if (qemu_file_rate_limit(s->to_dst_file)) {
>              /* usleep expects microseconds */
> diff --git a/migration/ram.c b/migration/ram.c
> index 4c16d0775b..981f345294 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -494,21 +494,15 @@ static void *multifd_send_thread(void *opaque)
>          if (p->pages.num) {
>              Error *local_err = NULL;
>              size_t ret;
> -            int i;
>              int num;
>  
>              num = p->pages.num;
>              p->pages.num = 0;
>              qemu_mutex_unlock(&p->mutex);
> -
> -            for (i = 0; i < num; i++) {
> -                ret = qio_channel_write_all(p->c,
> -                         (const char *)&p->pages.iov[i].iov_base,
> -                         sizeof(uint8_t *), &local_err);
> -                if (ret != 0) {
> -                    terminate_multifd_send_threads(local_err);
> -                    return NULL;
> -                }
> +            ret = qio_channel_writev_all(p->c, p->pages.iov, num, &local_err);
> +            if (ret != 0) {
> +                terminate_multifd_send_threads(local_err);
> +                return NULL;
>              }
>              qemu_mutex_lock(&multifd_send_state->mutex);
>              p->done = true;
> @@ -691,7 +685,6 @@ int multifd_load_cleanup(Error **errp)
>  static void *multifd_recv_thread(void *opaque)
>  {
>      MultiFDRecvParams *p = opaque;
> -    uint8_t *recv_address;
>  
>      qemu_sem_post(&p->ready);
>      while (true) {
> @@ -703,27 +696,16 @@ static void *multifd_recv_thread(void *opaque)
>          if (p->pages.num) {
>              Error *local_err = NULL;
>              size_t ret;
> -            int i;
>              int num;
>  
>              num = p->pages.num;
>              p->pages.num = 0;
>  
> -            for (i = 0; i < num; i++) {
> -                ret = qio_channel_read_all(p->c, (char *)&recv_address,
> -                                           sizeof(uint8_t *), &local_err);
> -                if (ret != 0) {
> -                    terminate_multifd_recv_threads(local_err);
> -                    return NULL;
> -                }
> -                if (recv_address != p->pages.iov[i].iov_base) {
> -                    error_setg(&local_err, "received %p and expecting %p (%d)",
> -                               recv_address, p->pages.iov[i].iov_base, i);
> -                    terminate_multifd_recv_threads(local_err);
> -                    return NULL;
> -                }
> +            ret = qio_channel_readv_all(p->c, p->pages.iov, num, &local_err);
> +            if (ret != 0) {
> +                terminate_multifd_recv_threads(local_err);
> +                return NULL;
>              }

A trace or two in each of these threads would probably help understand
what's going on.

> -
>              p->done = true;
>              qemu_mutex_unlock(&p->mutex);
>              qemu_sem_post(&p->ready);
> @@ -1288,8 +1270,10 @@ static int ram_multifd_page(RAMState *rs, PageSearchStatus *pss,
>                               offset | RAM_SAVE_FLAG_MULTIFD_PAGE);
>          fd_num = multifd_send_page(p, rs->migration_dirty_pages == 1);
>          qemu_put_be16(rs->f, fd_num);
> +        if (fd_num != MULTIFD_CONTINUE) {
> +            qemu_fflush(rs->f);
> +        }

Could do with a comment.

Dave

>          ram_counters.transferred += 2; /* size of fd_num */
> -        qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE);
>          ram_counters.transferred += TARGET_PAGE_SIZE;
>          pages = 1;
>          ram_counters.normal++;
> @@ -3155,7 +3139,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
>          case RAM_SAVE_FLAG_MULTIFD_PAGE:
>              fd_num = qemu_get_be16(f);
>              multifd_recv_page(host, fd_num);
> -            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
>              break;
>  
>          case RAM_SAVE_FLAG_EOS:
> -- 
> 2.13.5
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK

  reply	other threads:[~2017-10-17 14:19 UTC|newest]

Thread overview: 35+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-10-04 10:46 [Qemu-devel] [PATCH v9 00/12] Multifd Juan Quintela
2017-10-04 10:46 ` [Qemu-devel] [PATCH v9 01/12] qapi: Fix grammar in x-multifd-page-count descriptions Juan Quintela
2017-10-16 16:53   ` Dr. David Alan Gilbert
2017-10-04 10:46 ` [Qemu-devel] [PATCH v9 02/12] migration: Improve migration thread error handling Juan Quintela
2017-10-09  9:28   ` Peter Xu
2017-10-16 17:34   ` Dr. David Alan Gilbert
2017-10-16 17:48     ` Dr. David Alan Gilbert
2017-10-04 10:46 ` [Qemu-devel] [PATCH v9 03/12] migration: Make migrate_fd_error() the owner of the Error Juan Quintela
2017-10-09  9:34   ` Peter Xu
2017-10-04 10:46 ` [Qemu-devel] [PATCH v9 04/12] migration: Start of multiple fd work Juan Quintela
2017-10-09 10:05   ` Peter Xu
2017-10-09 10:15   ` Daniel P. Berrange
2017-10-09 12:32     ` Juan Quintela
2017-10-09 12:32     ` Juan Quintela
2017-10-16 19:11   ` Dr. David Alan Gilbert
2017-12-09 16:46     ` Juan Quintela
2017-10-04 10:46 ` [Qemu-devel] [PATCH v9 05/12] migration: Create ram_multifd_page Juan Quintela
2017-10-09 13:08   ` Paolo Bonzini
2017-10-16 19:43   ` Dr. David Alan Gilbert
2017-10-04 10:46 ` [Qemu-devel] [PATCH v9 06/12] migration: Send the fd number which we are going to use for this page Juan Quintela
2017-10-04 10:46 ` [Qemu-devel] [PATCH v9 07/12] migration: Create thread infrastructure for multifd recv side Juan Quintela
2017-10-17 11:07   ` Dr. David Alan Gilbert
2018-01-08  9:24     ` Juan Quintela
2017-10-04 10:46 ` [Qemu-devel] [PATCH v9 08/12] migration: Test new fd infrastructure Juan Quintela
2017-10-17 11:11   ` Dr. David Alan Gilbert
2017-10-04 10:46 ` [Qemu-devel] [PATCH v9 09/12] migration: Rename initial_bytes Juan Quintela
2017-10-04 10:46 ` [Qemu-devel] [PATCH v9 10/12] migration: Transfer pages over new channels Juan Quintela
2017-10-17 14:18   ` Dr. David Alan Gilbert [this message]
2018-01-08  9:40     ` Juan Quintela
2017-10-04 10:46 ` [Qemu-devel] [PATCH v9 11/12] migration: Flush receive queue Juan Quintela
2017-10-17 14:51   ` Dr. David Alan Gilbert
2017-12-11  9:40     ` Juan Quintela
2017-10-04 10:46 ` [Qemu-devel] [PATCH v9 12/12] migration: Add multifd test Juan Quintela
2017-10-17 15:27   ` Dr. David Alan Gilbert
2017-12-11  9:40     ` Juan Quintela

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20171017141854.GC2326@work-vm \
    --to=dgilbert@redhat.com \
    --cc=lvivier@redhat.com \
    --cc=peterx@redhat.com \
    --cc=qemu-devel@nongnu.org \
    --cc=quintela@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).